实现支持 OpenAI 和 Anthropic 双协议的统一大模型 API 网关 MVP 版本,包含: - OpenAI 和 Anthropic 协议代理 - 供应商和模型管理 - 用量统计 - 前端配置界面
235 lines
5.6 KiB
Go
235 lines
5.6 KiB
Go
package anthropic
|
||
|
||
import (
|
||
"encoding/json"
|
||
"fmt"
|
||
|
||
"nex/backend/internal/protocol/openai"
|
||
)
|
||
|
||
// ConvertRequest 将 Anthropic 请求转换为 OpenAI 请求
|
||
func ConvertRequest(anthropicReq *MessagesRequest) (*openai.ChatCompletionRequest, error) {
|
||
openaiReq := &openai.ChatCompletionRequest{
|
||
Model: anthropicReq.Model,
|
||
Temperature: anthropicReq.Temperature,
|
||
TopP: anthropicReq.TopP,
|
||
Stream: anthropicReq.Stream,
|
||
}
|
||
|
||
// 处理 max_tokens(Anthropic 要求必须有,默认 4096)
|
||
if anthropicReq.MaxTokens > 0 {
|
||
openaiReq.MaxTokens = &anthropicReq.MaxTokens
|
||
} else {
|
||
defaultMax := 4096
|
||
openaiReq.MaxTokens = &defaultMax
|
||
}
|
||
|
||
// 处理 stop_sequences
|
||
if len(anthropicReq.StopSequences) > 0 {
|
||
openaiReq.Stop = anthropicReq.StopSequences
|
||
}
|
||
|
||
// 转换 system 消息
|
||
messages := make([]openai.Message, 0)
|
||
if anthropicReq.System != "" {
|
||
messages = append(messages, openai.Message{
|
||
Role: "system",
|
||
Content: anthropicReq.System,
|
||
})
|
||
}
|
||
|
||
// 转换 messages
|
||
for _, msg := range anthropicReq.Messages {
|
||
openaiMsg, err := convertMessage(msg)
|
||
if err != nil {
|
||
return nil, err
|
||
}
|
||
messages = append(messages, openaiMsg...)
|
||
}
|
||
openaiReq.Messages = messages
|
||
|
||
// 转换 tools
|
||
if len(anthropicReq.Tools) > 0 {
|
||
openaiReq.Tools = make([]openai.Tool, len(anthropicReq.Tools))
|
||
for i, tool := range anthropicReq.Tools {
|
||
openaiReq.Tools[i] = openai.Tool{
|
||
Type: "function",
|
||
Function: openai.FunctionDefinition{
|
||
Name: tool.Name,
|
||
Description: tool.Description,
|
||
Parameters: tool.InputSchema,
|
||
},
|
||
}
|
||
}
|
||
}
|
||
|
||
// 转换 tool_choice
|
||
if anthropicReq.ToolChoice != nil {
|
||
toolChoice, err := convertToolChoice(anthropicReq.ToolChoice)
|
||
if err != nil {
|
||
return nil, err
|
||
}
|
||
openaiReq.ToolChoice = toolChoice
|
||
}
|
||
|
||
return openaiReq, nil
|
||
}
|
||
|
||
// ConvertResponse 将 OpenAI 响应转换为 Anthropic 响应
|
||
func ConvertResponse(openaiResp *openai.ChatCompletionResponse) (*MessagesResponse, error) {
|
||
anthropicResp := &MessagesResponse{
|
||
ID: openaiResp.ID,
|
||
Type: "message",
|
||
Role: "assistant",
|
||
Model: openaiResp.Model,
|
||
Usage: Usage{
|
||
InputTokens: openaiResp.Usage.PromptTokens,
|
||
OutputTokens: openaiResp.Usage.CompletionTokens,
|
||
},
|
||
}
|
||
|
||
// 转换 content
|
||
if len(openaiResp.Choices) > 0 {
|
||
choice := openaiResp.Choices[0]
|
||
content := make([]ContentBlock, 0)
|
||
|
||
if choice.Message != nil {
|
||
// 文本内容
|
||
if choice.Message.Content != "" {
|
||
if str, ok := choice.Message.Content.(string); ok && str != "" {
|
||
content = append(content, ContentBlock{
|
||
Type: "text",
|
||
Text: str,
|
||
})
|
||
}
|
||
}
|
||
|
||
// Tool calls
|
||
if len(choice.Message.ToolCalls) > 0 {
|
||
for _, tc := range choice.Message.ToolCalls {
|
||
// 解析 arguments JSON
|
||
var input interface{}
|
||
if err := json.Unmarshal([]byte(tc.Function.Arguments), &input); err != nil {
|
||
return nil, fmt.Errorf("解析 tool_call arguments 失败: %w", err)
|
||
}
|
||
|
||
content = append(content, ContentBlock{
|
||
Type: "tool_use",
|
||
ID: tc.ID,
|
||
Name: tc.Function.Name,
|
||
Input: input,
|
||
})
|
||
}
|
||
}
|
||
}
|
||
|
||
anthropicResp.Content = content
|
||
|
||
// 转换 finish_reason
|
||
switch choice.FinishReason {
|
||
case "stop":
|
||
anthropicResp.StopReason = "end_turn"
|
||
case "tool_calls":
|
||
anthropicResp.StopReason = "tool_use"
|
||
case "length":
|
||
anthropicResp.StopReason = "max_tokens"
|
||
}
|
||
}
|
||
|
||
return anthropicResp, nil
|
||
}
|
||
|
||
// convertMessage 转换单条消息
|
||
func convertMessage(msg AnthropicMessage) ([]openai.Message, error) {
|
||
var messages []openai.Message
|
||
|
||
// 处理 content
|
||
for _, block := range msg.Content {
|
||
switch block.Type {
|
||
case "text":
|
||
// 文本内容
|
||
messages = append(messages, openai.Message{
|
||
Role: msg.Role,
|
||
Content: block.Text,
|
||
})
|
||
|
||
case "tool_result":
|
||
// 工具结果
|
||
content := ""
|
||
if str, ok := block.Content.(string); ok {
|
||
content = str
|
||
} else {
|
||
// 如果是数组或其他类型,序列化为 JSON
|
||
bytes, err := json.Marshal(block.Content)
|
||
if err != nil {
|
||
return nil, fmt.Errorf("序列化 tool_result 内容失败: %w", err)
|
||
}
|
||
content = string(bytes)
|
||
}
|
||
|
||
messages = append(messages, openai.Message{
|
||
Role: "tool",
|
||
Content: content,
|
||
ToolCallID: block.ToolUseID,
|
||
})
|
||
|
||
case "image":
|
||
// MVP 不支持多模态
|
||
return nil, fmt.Errorf("MVP 不支持多模态内容(图片)")
|
||
|
||
default:
|
||
return nil, fmt.Errorf("未知的内容块类型: %s", block.Type)
|
||
}
|
||
}
|
||
|
||
// 如果没有 content,创建空消息(不应该发生)
|
||
if len(messages) == 0 {
|
||
messages = append(messages, openai.Message{
|
||
Role: msg.Role,
|
||
Content: "",
|
||
})
|
||
}
|
||
|
||
return messages, nil
|
||
}
|
||
|
||
// convertToolChoice converts an Anthropic tool_choice (string or object form)
// into its OpenAI equivalent.
//
// Mapping:
//   - "auto"                  -> "auto"     (model decides whether to call a tool)
//   - "any"                   -> "required" (model must call some tool; OpenAI's
//     equivalent of Anthropic's "any" — mapping it to "auto" would drop the
//     forced-tool-use semantics)
//   - {type:"tool", name:...} -> {"type":"function","function":{"name":...}}
//
// Any other shape or value yields an error.
func convertToolChoice(choice interface{}) (interface{}, error) {
	switch v := choice.(type) {
	case string:
		switch v {
		case "auto":
			return "auto", nil
		case "any":
			return "required", nil
		}
		return nil, fmt.Errorf("无效的 tool_choice 字符串: %s", v)

	case map[string]interface{}:
		choiceType, ok := v["type"].(string)
		if !ok {
			return nil, fmt.Errorf("tool_choice 对象缺少 type 字段")
		}

		switch choiceType {
		case "auto":
			return "auto", nil
		case "any":
			return "required", nil
		case "tool":
			// A named tool pins OpenAI to one specific function.
			name, ok := v["name"].(string)
			if !ok {
				return nil, fmt.Errorf("tool_choice type=tool 缺少 name 字段")
			}
			return map[string]interface{}{
				"type": "function",
				"function": map[string]string{
					"name": name,
				},
			}, nil
		}
		return nil, fmt.Errorf("无效的 tool_choice type: %s", choiceType)
	}

	return nil, fmt.Errorf("tool_choice 格式无效")
}
|