refactor: 实现 ConversionEngine 协议转换引擎,替代旧 protocol 包
引入 Canonical Model 和 ProtocolAdapter 架构,支持 OpenAI/Anthropic 协议间无缝转换,统一 ProxyHandler 替代分散的 OpenAI/Anthropic Handler,简化 ProviderClient 为协议无关的 HTTP 发送器,Provider 新增 protocol 字段。
This commit is contained in:
449
backend/internal/conversion/anthropic/encoder.go
Normal file
449
backend/internal/conversion/anthropic/encoder.go
Normal file
@@ -0,0 +1,449 @@
|
||||
package anthropic
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"nex/backend/internal/conversion"
|
||||
"nex/backend/internal/conversion/canonical"
|
||||
)
|
||||
|
||||
// encodeRequest encodes a canonical request into an Anthropic Messages API
// request body. The target model name is taken from the provider; optional
// sampling parameters, tools, and output configuration are emitted only when
// present on the canonical request. Returns the marshaled JSON body, or a
// ConversionError when marshaling fails.
func encodeRequest(req *canonical.CanonicalRequest, provider *conversion.TargetProvider) ([]byte, error) {
	result := map[string]any{
		"model":  provider.ModelName,
		"stream": req.Stream,
	}

	// max_tokens is mandatory in the Anthropic API; fall back to 4096 when unset.
	if req.Parameters.MaxTokens != nil {
		result["max_tokens"] = *req.Parameters.MaxTokens
	} else {
		result["max_tokens"] = 4096
	}

	// System prompt (plain string or block list — see encodeSystem).
	if req.System != nil {
		result["system"] = encodeSystem(req.System)
	}

	// Conversation messages.
	result["messages"] = encodeMessages(req.Messages)

	// Sampling parameters — only emitted when explicitly set.
	if req.Parameters.Temperature != nil {
		result["temperature"] = *req.Parameters.Temperature
	}
	if req.Parameters.TopP != nil {
		result["top_p"] = *req.Parameters.TopP
	}
	if req.Parameters.TopK != nil {
		result["top_k"] = *req.Parameters.TopK
	}
	if len(req.Parameters.StopSequences) > 0 {
		result["stop_sequences"] = req.Parameters.StopSequences
	}

	// Tool definitions.
	if len(req.Tools) > 0 {
		tools := make([]map[string]any, len(req.Tools))
		for i, t := range req.Tools {
			tool := map[string]any{
				"name":         t.Name,
				"input_schema": t.InputSchema,
			}
			if t.Description != "" {
				tool["description"] = t.Description
			}
			tools[i] = tool
		}
		result["tools"] = tools
	}
	if req.ToolChoice != nil {
		result["tool_choice"] = encodeToolChoice(req.ToolChoice)
	}

	// Common fields.
	if req.UserID != "" {
		result["metadata"] = map[string]any{"user_id": req.UserID}
	}
	// Anthropic expresses "no parallel tool use" as a disable flag, so the
	// flag is emitted only when ParallelToolUse is explicitly false.
	if req.ParallelToolUse != nil && !*req.ParallelToolUse {
		result["disable_parallel_tool_use"] = true
	}
	if req.Thinking != nil {
		result["thinking"] = encodeThinkingConfig(req.Thinking)
	}

	// output_config: included only when a format and/or effort is present.
	outputConfig := map[string]any{}
	hasOutputConfig := false
	if req.OutputFormat != nil {
		of := encodeOutputFormat(req.OutputFormat)
		if of != nil {
			outputConfig["format"] = of
			hasOutputConfig = true
		}
	}
	if req.Thinking != nil && req.Thinking.Effort != "" {
		outputConfig["effort"] = req.Thinking.Effort
		hasOutputConfig = true
	}
	if hasOutputConfig {
		result["output_config"] = outputConfig
	}

	body, err := json.Marshal(result)
	if err != nil {
		return nil, conversion.NewConversionError(conversion.ErrorCodeEncodingFailure, "编码 Anthropic 请求失败").WithCause(err)
	}
	return body, nil
}
|
||||
|
||||
// encodeSystem 编码系统消息
|
||||
func encodeSystem(system any) any {
|
||||
switch v := system.(type) {
|
||||
case string:
|
||||
return v
|
||||
case []canonical.SystemBlock:
|
||||
blocks := make([]map[string]any, len(v))
|
||||
for i, b := range v {
|
||||
blocks[i] = map[string]any{"text": b.Text}
|
||||
}
|
||||
return blocks
|
||||
default:
|
||||
return fmt.Sprintf("%v", v)
|
||||
}
|
||||
}
|
||||
|
||||
// encodeMessages encodes the canonical message list into Anthropic messages,
// applying Anthropic's role constraints: tool results are folded into user
// messages, the first message must have the user role, and consecutive
// same-role messages are merged.
func encodeMessages(msgs []canonical.CanonicalMessage) []map[string]any {
	var result []map[string]any

	for _, msg := range msgs {
		switch msg.Role {
		case canonical.RoleUser:
			result = append(result, map[string]any{
				"role":    "user",
				"content": encodeContentBlocks(msg.Content),
			})
		case canonical.RoleAssistant:
			result = append(result, map[string]any{
				"role":    "assistant",
				"content": encodeContentBlocks(msg.Content),
			})
		case canonical.RoleTool:
			// Anthropic has no dedicated tool role: tool results ride inside a
			// user message, merged into the preceding one when possible.
			toolResults := filterToolResults(msg.Content)
			if len(result) > 0 && result[len(result)-1]["role"] == "user" {
				// Append to the last user message's content block list.
				lastContent, ok := result[len(result)-1]["content"].([]map[string]any)
				if ok {
					result[len(result)-1]["content"] = append(lastContent, toolResults...)
				} else {
					// Content was not a block list; replace it outright.
					result[len(result)-1]["content"] = toolResults
				}
			} else {
				result = append(result, map[string]any{
					"role":    "user",
					"content": toolResults,
				})
			}
		}
		// NOTE(review): messages with any other role are silently dropped here
		// — confirm upstream guarantees only user/assistant/tool roles.
	}

	// Anthropic requires the first message to be from the user; prepend an
	// empty user message when the conversation starts with another role.
	if len(result) > 0 && result[0]["role"] != "user" {
		result = append([]map[string]any{{"role": "user", "content": []map[string]any{}}}, result...)
	}

	// Collapse runs of messages that share a role into single messages.
	result = mergeConsecutiveRoles(result)

	return result
}
|
||||
|
||||
// encodeContentBlocks 编码内容块列表
|
||||
func encodeContentBlocks(blocks []canonical.ContentBlock) []map[string]any {
|
||||
result := make([]map[string]any, 0, len(blocks))
|
||||
for _, b := range blocks {
|
||||
switch b.Type {
|
||||
case "text":
|
||||
result = append(result, map[string]any{"type": "text", "text": b.Text})
|
||||
case "tool_use":
|
||||
m := map[string]any{
|
||||
"type": "tool_use",
|
||||
"id": b.ID,
|
||||
"name": b.Name,
|
||||
"input": b.Input,
|
||||
}
|
||||
if b.Input == nil {
|
||||
m["input"] = map[string]any{}
|
||||
}
|
||||
result = append(result, m)
|
||||
case "tool_result":
|
||||
m := map[string]any{
|
||||
"type": "tool_result",
|
||||
"tool_use_id": b.ToolUseID,
|
||||
}
|
||||
if b.Content != nil {
|
||||
var contentStr string
|
||||
if json.Unmarshal(b.Content, &contentStr) == nil {
|
||||
m["content"] = contentStr
|
||||
} else {
|
||||
m["content"] = string(b.Content)
|
||||
}
|
||||
} else {
|
||||
m["content"] = ""
|
||||
}
|
||||
if b.IsError != nil {
|
||||
m["is_error"] = *b.IsError
|
||||
}
|
||||
result = append(result, m)
|
||||
case "thinking":
|
||||
result = append(result, map[string]any{"type": "thinking", "thinking": b.Thinking})
|
||||
}
|
||||
}
|
||||
if len(result) == 0 {
|
||||
return []map[string]any{{"type": "text", "text": ""}}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// filterToolResults 过滤工具结果
|
||||
func filterToolResults(blocks []canonical.ContentBlock) []map[string]any {
|
||||
var result []map[string]any
|
||||
for _, b := range blocks {
|
||||
if b.Type == "tool_result" {
|
||||
m := map[string]any{
|
||||
"type": "tool_result",
|
||||
"tool_use_id": b.ToolUseID,
|
||||
}
|
||||
if b.Content != nil {
|
||||
var contentStr string
|
||||
if json.Unmarshal(b.Content, &contentStr) == nil {
|
||||
m["content"] = contentStr
|
||||
} else {
|
||||
m["content"] = string(b.Content)
|
||||
}
|
||||
} else {
|
||||
m["content"] = ""
|
||||
}
|
||||
if b.IsError != nil {
|
||||
m["is_error"] = *b.IsError
|
||||
}
|
||||
result = append(result, m)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// encodeToolChoice 编码工具选择
|
||||
func encodeToolChoice(choice *canonical.ToolChoice) any {
|
||||
switch choice.Type {
|
||||
case "auto":
|
||||
return map[string]any{"type": "auto"}
|
||||
case "none":
|
||||
return map[string]any{"type": "none"}
|
||||
case "any":
|
||||
return map[string]any{"type": "any"}
|
||||
case "tool":
|
||||
return map[string]any{"type": "tool", "name": choice.Name}
|
||||
}
|
||||
return map[string]any{"type": "auto"}
|
||||
}
|
||||
|
||||
// encodeThinkingConfig 编码思考配置
|
||||
func encodeThinkingConfig(cfg *canonical.ThinkingConfig) map[string]any {
|
||||
switch cfg.Type {
|
||||
case "enabled":
|
||||
m := map[string]any{"type": "enabled"}
|
||||
if cfg.BudgetTokens != nil {
|
||||
m["budget_tokens"] = *cfg.BudgetTokens
|
||||
}
|
||||
return m
|
||||
case "disabled":
|
||||
return map[string]any{"type": "disabled"}
|
||||
case "adaptive":
|
||||
return map[string]any{"type": "adaptive"}
|
||||
}
|
||||
return map[string]any{"type": "disabled"}
|
||||
}
|
||||
|
||||
// encodeOutputFormat 编码输出格式
|
||||
func encodeOutputFormat(format *canonical.OutputFormat) map[string]any {
|
||||
if format == nil {
|
||||
return nil
|
||||
}
|
||||
switch format.Type {
|
||||
case "json_schema":
|
||||
schema := format.Schema
|
||||
if schema == nil {
|
||||
schema = json.RawMessage(`{"type":"object"}`)
|
||||
}
|
||||
return map[string]any{
|
||||
"type": "json_schema",
|
||||
"schema": schema,
|
||||
}
|
||||
case "json_object":
|
||||
return map[string]any{
|
||||
"type": "json_schema",
|
||||
"schema": map[string]any{"type": "object"},
|
||||
}
|
||||
case "text":
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// encodeResponse encodes a canonical (non-streaming) response into the
// Anthropic Messages API response shape. Returns the marshaled JSON body, or
// a ConversionError when marshaling fails.
func encodeResponse(resp *canonical.CanonicalResponse) ([]byte, error) {
	blocks := make([]map[string]any, 0, len(resp.Content))
	for _, b := range resp.Content {
		switch b.Type {
		case "text":
			blocks = append(blocks, map[string]any{"type": "text", "text": b.Text})
		case "tool_use":
			m := map[string]any{
				"type":  "tool_use",
				"id":    b.ID,
				"name":  b.Name,
				"input": b.Input,
			}
			// Anthropic expects an object for input, never null.
			if b.Input == nil {
				m["input"] = map[string]any{}
			}
			blocks = append(blocks, m)
		case "thinking":
			blocks = append(blocks, map[string]any{"type": "thinking", "thinking": b.Thinking})
		}
		// Unrecognized block types are dropped.
	}

	// Default to "end_turn" when the canonical response carries no stop reason.
	sr := "end_turn"
	if resp.StopReason != nil {
		sr = mapCanonicalStopReason(*resp.StopReason)
	}

	usage := map[string]any{
		"input_tokens":  resp.Usage.InputTokens,
		"output_tokens": resp.Usage.OutputTokens,
	}
	// Cache token counts are optional and only emitted when present.
	if resp.Usage.CacheReadTokens != nil {
		usage["cache_read_input_tokens"] = *resp.Usage.CacheReadTokens
	}
	if resp.Usage.CacheCreationTokens != nil {
		usage["cache_creation_input_tokens"] = *resp.Usage.CacheCreationTokens
	}

	result := map[string]any{
		"id":            resp.ID,
		"type":          "message",
		"role":          "assistant",
		"model":         resp.Model,
		"content":       blocks,
		"stop_reason":   sr,
		"stop_sequence": nil,
		"usage":         usage,
	}

	body, err := json.Marshal(result)
	if err != nil {
		return nil, conversion.NewConversionError(conversion.ErrorCodeEncodingFailure, "编码 Anthropic 响应失败").WithCause(err)
	}
	return body, nil
}
|
||||
|
||||
// mapCanonicalStopReason 映射 Canonical 停止原因到 Anthropic
|
||||
func mapCanonicalStopReason(reason canonical.StopReason) string {
|
||||
switch reason {
|
||||
case canonical.StopReasonEndTurn, canonical.StopReasonContentFilter:
|
||||
return "end_turn"
|
||||
case canonical.StopReasonMaxTokens:
|
||||
return "max_tokens"
|
||||
case canonical.StopReasonToolUse:
|
||||
return "tool_use"
|
||||
case canonical.StopReasonStopSequence:
|
||||
return "stop_sequence"
|
||||
case canonical.StopReasonRefusal:
|
||||
return "refusal"
|
||||
default:
|
||||
return "end_turn"
|
||||
}
|
||||
}
|
||||
|
||||
// encodeModelsResponse 编码模型列表响应
|
||||
func encodeModelsResponse(list *canonical.CanonicalModelList) ([]byte, error) {
|
||||
data := make([]map[string]any, len(list.Models))
|
||||
for i, m := range list.Models {
|
||||
name := m.Name
|
||||
if name == "" {
|
||||
name = m.ID
|
||||
}
|
||||
data[i] = map[string]any{
|
||||
"id": m.ID,
|
||||
"type": "model",
|
||||
"display_name": name,
|
||||
"created_at": formatTimestamp(m.Created),
|
||||
}
|
||||
}
|
||||
|
||||
var firstID, lastID *string
|
||||
if len(list.Models) > 0 {
|
||||
fid := list.Models[0].ID
|
||||
firstID = &fid
|
||||
lid := list.Models[len(list.Models)-1].ID
|
||||
lastID = &lid
|
||||
}
|
||||
|
||||
return json.Marshal(map[string]any{
|
||||
"data": data,
|
||||
"has_more": false,
|
||||
"first_id": firstID,
|
||||
"last_id": lastID,
|
||||
})
|
||||
}
|
||||
|
||||
// encodeModelInfoResponse 编码模型详情响应
|
||||
func encodeModelInfoResponse(info *canonical.CanonicalModelInfo) ([]byte, error) {
|
||||
name := info.Name
|
||||
if name == "" {
|
||||
name = info.ID
|
||||
}
|
||||
return json.Marshal(map[string]any{
|
||||
"id": info.ID,
|
||||
"type": "model",
|
||||
"display_name": name,
|
||||
"created_at": formatTimestamp(info.Created),
|
||||
})
|
||||
}
|
||||
|
||||
// mergeConsecutiveRoles collapses runs of consecutive messages that share the
// same role into a single message, concatenating their content. Block-list
// content ([]map[string]any) is concatenated element-wise and string content
// is concatenated textually; when a run's content types are mixed, the later
// message's content is dropped (matching the previous behavior).
//
// Fix: the old implementation merged with append(lastContent, curr...), which
// can write into the previous message's backing array when it has spare
// capacity, silently mutating caller-owned slices. Merging now always builds
// a fresh slice.
func mergeConsecutiveRoles(messages []map[string]any) []map[string]any {
	if len(messages) <= 1 {
		return messages
	}
	var result []map[string]any
	for _, msg := range messages {
		if len(result) == 0 || result[len(result)-1]["role"] != msg["role"] {
			result = append(result, msg)
			continue
		}
		// Same role as the previous message: fold content into it.
		last := result[len(result)-1]
		switch prev := last["content"].(type) {
		case []map[string]any:
			if next, ok := msg["content"].([]map[string]any); ok {
				// Copy into a fresh slice so we never mutate a backing array
				// shared with the caller's input.
				combined := make([]map[string]any, 0, len(prev)+len(next))
				combined = append(combined, prev...)
				combined = append(combined, next...)
				last["content"] = combined
			}
		case string:
			if next, ok := msg["content"].(string); ok {
				last["content"] = prev + next
			}
		}
	}
	return result
}
|
||||
Reference in New Issue
Block a user