feat: 实现统一模型 ID 机制
实现统一模型 ID 格式 (provider_id/model_name),支持跨协议模型标识和 Smart Passthrough。 核心变更: - 新增 pkg/modelid 包:解析、格式化、校验统一模型 ID - 数据库迁移:models 表使用 UUID 主键 + UNIQUE(provider_id, model_name) 约束 - Repository 层:FindByProviderAndModelName、ListEnabled 方法 - Service 层:联合唯一校验、provider ID 字符集校验 - Conversion 层:ExtractModelName、RewriteRequestModelName/RewriteResponseModelName 方法 - Handler 层:统一模型 ID 路由、Smart Passthrough、Models API 本地聚合 - 新增 error-responses、unified-model-id 规范 测试覆盖: - 单元测试:modelid、conversion、handler、service、repository - 集成测试:统一模型 ID 路由、Smart Passthrough 保真性、跨协议转换 - 迁移测试:UUID 主键、UNIQUE 约束、级联删除 OpenSpec: - 归档 unified-model-id 变更到 archive/2026-04-21-unified-model-id - 同步 11 个 delta specs 到 main specs - 新增 error-responses、unified-model-id 规范文件
This commit is contained in:
@@ -14,10 +14,8 @@ import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gorm.io/driver/sqlite"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"nex/backend/internal/config"
|
||||
"nex/backend/internal/conversion"
|
||||
"nex/backend/internal/conversion/anthropic"
|
||||
openaiConv "nex/backend/internal/conversion/openai"
|
||||
@@ -43,11 +41,7 @@ func setupConversionTest(t *testing.T) (*gin.Engine, *gorm.DB, *httptest.Server)
|
||||
w.Write([]byte(`{"error":"not mocked"}`))
|
||||
}))
|
||||
|
||||
dir := t.TempDir()
|
||||
db, err := gorm.Open(sqlite.Open(dir+"/test.db"), &gorm.Config{})
|
||||
require.NoError(t, err)
|
||||
err = db.AutoMigrate(&config.Provider{}, &config.Model{}, &config.UsageStats{})
|
||||
require.NoError(t, err)
|
||||
db := setupTestDB(t)
|
||||
t.Cleanup(func() {
|
||||
sqlDB, _ := db.DB()
|
||||
if sqlDB != nil {
|
||||
@@ -60,7 +54,7 @@ func setupConversionTest(t *testing.T) (*gin.Engine, *gorm.DB, *httptest.Server)
|
||||
modelRepo := repository.NewModelRepository(db)
|
||||
statsRepo := repository.NewStatsRepository(db)
|
||||
|
||||
providerService := service.NewProviderService(providerRepo)
|
||||
providerService := service.NewProviderService(providerRepo, modelRepo)
|
||||
modelService := service.NewModelService(modelRepo, providerRepo)
|
||||
routingService := service.NewRoutingService(modelRepo, providerRepo)
|
||||
statsService := service.NewStatsService(statsRepo)
|
||||
@@ -125,7 +119,7 @@ func createProviderAndModel(t *testing.T, r *gin.Engine, providerID, protocol, m
|
||||
require.Equal(t, 201, w.Code)
|
||||
|
||||
modelBody, _ := json.Marshal(map[string]string{
|
||||
"id": modelName,
|
||||
|
||||
"provider_id": providerID,
|
||||
"model_name": modelName,
|
||||
})
|
||||
@@ -156,7 +150,7 @@ func TestConversion_OpenAIToAnthropic_NonStream(t *testing.T) {
|
||||
"id": "msg_test",
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"model": "claude-3-opus",
|
||||
"model": "anthropic_p/claude-3-opus",
|
||||
"content": []map[string]any{
|
||||
{"type": "text", "text": "Hello from Anthropic!"},
|
||||
},
|
||||
@@ -170,11 +164,11 @@ func TestConversion_OpenAIToAnthropic_NonStream(t *testing.T) {
|
||||
json.NewEncoder(w).Encode(resp)
|
||||
})
|
||||
|
||||
createProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-3-opus", upstream.URL)
|
||||
createProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-3-opus", upstream.URL)
|
||||
|
||||
// 使用 OpenAI 格式发送请求
|
||||
openaiReq := map[string]any{
|
||||
"model": "claude-3-opus",
|
||||
"model": "anthropic_p/claude-3-opus",
|
||||
"messages": []map[string]any{
|
||||
{"role": "user", "content": "Hello"},
|
||||
},
|
||||
@@ -233,10 +227,10 @@ func TestConversion_AnthropicToOpenAI_NonStream(t *testing.T) {
|
||||
json.NewEncoder(w).Encode(resp)
|
||||
})
|
||||
|
||||
createProviderAndModel(t, r, "openai-p", "openai", "gpt-4", upstream.URL)
|
||||
createProviderAndModel(t, r, "openai_p", "openai", "gpt-4", upstream.URL)
|
||||
|
||||
anthropicReq := map[string]any{
|
||||
"model": "gpt-4",
|
||||
"model": "openai_p/gpt-4",
|
||||
"max_tokens": 1024,
|
||||
"messages": []map[string]any{
|
||||
{"role": "user", "content": "Hello"},
|
||||
@@ -273,16 +267,18 @@ func TestConversion_OpenAIToOpenAI_Passthrough(t *testing.T) {
|
||||
body, _ := io.ReadAll(r.Body)
|
||||
var req map[string]any
|
||||
json.Unmarshal(body, &req)
|
||||
// Smart Passthrough: 请求体中的统一 ID 应被改写为上游模型名
|
||||
assert.Equal(t, "gpt-4", req["model"])
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
// 上游返回上游模型名
|
||||
w.Write([]byte(`{"id":"chatcmpl-pass","object":"chat.completion","model":"gpt-4","choices":[{"index":0,"message":{"role":"assistant","content":"passthrough"},"finish_reason":"stop"}],"usage":{"prompt_tokens":5,"completion_tokens":1,"total_tokens":6}}`))
|
||||
})
|
||||
|
||||
createProviderAndModel(t, r, "openai-p", "openai", "gpt-4", upstream.URL)
|
||||
createProviderAndModel(t, r, "openai_p", "openai", "gpt-4", upstream.URL)
|
||||
|
||||
reqBody := map[string]any{
|
||||
"model": "gpt-4",
|
||||
"model": "openai_p/gpt-4", // 客户端发送统一 ID
|
||||
"messages": []map[string]any{{"role": "user", "content": "test"}},
|
||||
}
|
||||
body, _ := json.Marshal(reqBody)
|
||||
@@ -293,7 +289,8 @@ func TestConversion_OpenAIToOpenAI_Passthrough(t *testing.T) {
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
assert.Equal(t, 200, w.Code)
|
||||
assert.Contains(t, w.Body.String(), "passthrough")
|
||||
// Smart Passthrough: 响应体中的上游模型名应被改写为统一 ID
|
||||
assert.Contains(t, w.Body.String(), `"model":"openai_p/gpt-4"`)
|
||||
}
|
||||
|
||||
func TestConversion_AnthropicToAnthropic_Passthrough(t *testing.T) {
|
||||
@@ -302,14 +299,21 @@ func TestConversion_AnthropicToAnthropic_Passthrough(t *testing.T) {
|
||||
upstream.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
assert.Equal(t, "/v1/messages", r.URL.Path)
|
||||
|
||||
body, _ := io.ReadAll(r.Body)
|
||||
var req map[string]any
|
||||
json.Unmarshal(body, &req)
|
||||
// Smart Passthrough: 请求体中的统一 ID 应被改写为上游模型名
|
||||
assert.Equal(t, "claude-3-opus", req["model"])
|
||||
|
||||
// 上游返回上游模型名
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write([]byte(`{"id":"msg-pass","type":"message","role":"assistant","model":"claude-3-opus","content":[{"type":"text","text":"passthrough"}],"stop_reason":"end_turn","usage":{"input_tokens":5,"output_tokens":1}}`))
|
||||
})
|
||||
|
||||
createProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-3-opus", upstream.URL)
|
||||
createProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-3-opus", upstream.URL)
|
||||
|
||||
reqBody := map[string]any{
|
||||
"model": "claude-3-opus",
|
||||
"model": "anthropic_p/claude-3-opus", // 客户端发送统一 ID
|
||||
"max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "test"}},
|
||||
}
|
||||
@@ -321,7 +325,8 @@ func TestConversion_AnthropicToAnthropic_Passthrough(t *testing.T) {
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
assert.Equal(t, 200, w.Code)
|
||||
assert.Contains(t, w.Body.String(), "passthrough")
|
||||
// Smart Passthrough: 响应体中的上游模型名应被改写为统一 ID
|
||||
assert.Contains(t, w.Body.String(), `"model":"anthropic_p/claude-3-opus"`)
|
||||
}
|
||||
|
||||
// ============ 流式转换测试 ============
|
||||
@@ -349,10 +354,10 @@ func TestConversion_OpenAIToAnthropic_Stream(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
createProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-3-opus", upstream.URL)
|
||||
createProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-3-opus", upstream.URL)
|
||||
|
||||
openaiReq := map[string]any{
|
||||
"model": "claude-3-opus",
|
||||
"model": "anthropic_p/claude-3-opus",
|
||||
"messages": []map[string]any{{"role": "user", "content": "Hello"}},
|
||||
"stream": true,
|
||||
}
|
||||
@@ -390,10 +395,10 @@ func TestConversion_AnthropicToOpenAI_Stream(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
createProviderAndModel(t, r, "openai-p", "openai", "gpt-4", upstream.URL)
|
||||
createProviderAndModel(t, r, "openai_p", "openai", "gpt-4", upstream.URL)
|
||||
|
||||
anthropicReq := map[string]any{
|
||||
"model": "gpt-4",
|
||||
"model": "openai_p/gpt-4",
|
||||
"max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "Hello"}},
|
||||
"stream": true,
|
||||
@@ -512,7 +517,7 @@ func TestConversion_ProviderWithProtocol(t *testing.T) {
|
||||
|
||||
// 创建带 protocol 字段的 provider
|
||||
providerBody := map[string]any{
|
||||
"id": "test-protocol",
|
||||
"id": "test_protocol",
|
||||
"name": "Test Protocol",
|
||||
"api_key": "sk-test",
|
||||
"base_url": "https://test.com",
|
||||
@@ -533,7 +538,7 @@ func TestConversion_ProviderWithProtocol(t *testing.T) {
|
||||
|
||||
// 获取时应包含 protocol
|
||||
w = httptest.NewRecorder()
|
||||
req = httptest.NewRequest("GET", "/api/providers/test-protocol", nil)
|
||||
req = httptest.NewRequest("GET", "/api/providers/test_protocol", nil)
|
||||
r.ServeHTTP(w, req)
|
||||
assert.Equal(t, 200, w.Code)
|
||||
|
||||
@@ -547,7 +552,7 @@ func TestConversion_ProviderDefaultProtocol(t *testing.T) {
|
||||
|
||||
// 不指定 protocol,默认应为 openai
|
||||
providerBody := map[string]any{
|
||||
"id": "default-proto",
|
||||
"id": "default_proto",
|
||||
"name": "Default",
|
||||
"api_key": "sk-test",
|
||||
"base_url": "https://test.com",
|
||||
|
||||
@@ -8,8 +8,6 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -17,10 +15,7 @@ import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gorm.io/driver/sqlite"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"nex/backend/internal/config"
|
||||
"nex/backend/internal/conversion"
|
||||
"nex/backend/internal/conversion/anthropic"
|
||||
openaiConv "nex/backend/internal/conversion/openai"
|
||||
@@ -40,25 +35,20 @@ func setupE2ETest(t *testing.T) (*gin.Engine, *httptest.Server) {
|
||||
w.Write([]byte(`{"error":"not mocked"}`))
|
||||
}))
|
||||
|
||||
dir, _ := os.MkdirTemp("", "e2e-test-*")
|
||||
db, err := gorm.Open(sqlite.Open(filepath.Join(dir, "test.db")), &gorm.Config{})
|
||||
require.NoError(t, err)
|
||||
err = db.AutoMigrate(&config.Provider{}, &config.Model{}, &config.UsageStats{})
|
||||
require.NoError(t, err)
|
||||
db := setupTestDB(t)
|
||||
t.Cleanup(func() {
|
||||
sqlDB, _ := db.DB()
|
||||
if sqlDB != nil {
|
||||
sqlDB.Close()
|
||||
}
|
||||
upstream.Close()
|
||||
os.RemoveAll(dir)
|
||||
})
|
||||
|
||||
providerRepo := repository.NewProviderRepository(db)
|
||||
modelRepo := repository.NewModelRepository(db)
|
||||
statsRepo := repository.NewStatsRepository(db)
|
||||
|
||||
providerService := service.NewProviderService(providerRepo)
|
||||
providerService := service.NewProviderService(providerRepo, modelRepo)
|
||||
modelService := service.NewModelService(modelRepo, providerRepo)
|
||||
routingService := service.NewRoutingService(modelRepo, providerRepo)
|
||||
statsService := service.NewStatsService(statsRepo)
|
||||
@@ -105,7 +95,7 @@ func e2eCreateProviderAndModel(t *testing.T, r *gin.Engine, providerID, protocol
|
||||
require.Equal(t, 201, w.Code)
|
||||
|
||||
modelBody, _ := json.Marshal(map[string]string{
|
||||
"id": modelName, "provider_id": providerID, "model_name": modelName,
|
||||
"provider_id": providerID, "model_name": modelName,
|
||||
})
|
||||
w = httptest.NewRecorder()
|
||||
req = httptest.NewRequest("POST", "/api/models", bytes.NewReader(modelBody))
|
||||
@@ -178,10 +168,10 @@ func TestE2E_OpenAI_NonStream_BasicText(t *testing.T) {
|
||||
},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4o", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4o", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4o",
|
||||
"model": "openai_p/gpt-4o",
|
||||
"messages": []map[string]any{
|
||||
{"role": "user", "content": "你好"},
|
||||
},
|
||||
@@ -195,7 +185,7 @@ func TestE2E_OpenAI_NonStream_BasicText(t *testing.T) {
|
||||
var resp map[string]any
|
||||
require.NoError(t, json.Unmarshal(w.Body.Bytes(), &resp))
|
||||
assert.Equal(t, "chat.completion", resp["object"])
|
||||
assert.Equal(t, "gpt-4o", resp["model"])
|
||||
assert.Equal(t, "openai_p/gpt-4o", resp["model"])
|
||||
|
||||
choices := resp["choices"].([]any)
|
||||
require.Len(t, choices, 1)
|
||||
@@ -231,10 +221,10 @@ func TestE2E_OpenAI_NonStream_MultiTurn(t *testing.T) {
|
||||
"usage": map[string]any{"prompt_tokens": 100, "completion_tokens": 20, "total_tokens": 120},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4o", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4o", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4o",
|
||||
"model": "openai_p/gpt-4o",
|
||||
"messages": []map[string]any{
|
||||
{"role": "system", "content": "你是编程助手"},
|
||||
{"role": "user", "content": "什么是interface?"},
|
||||
@@ -279,10 +269,10 @@ func TestE2E_OpenAI_NonStream_ToolCalls(t *testing.T) {
|
||||
"usage": map[string]any{"prompt_tokens": 80, "completion_tokens": 18, "total_tokens": 98},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4o", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4o", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4o",
|
||||
"model": "openai_p/gpt-4o",
|
||||
"messages": []map[string]any{
|
||||
{"role": "user", "content": "北京天气"},
|
||||
},
|
||||
@@ -335,10 +325,10 @@ func TestE2E_OpenAI_NonStream_MaxTokens_Length(t *testing.T) {
|
||||
"usage": map[string]any{"prompt_tokens": 20, "completion_tokens": 30, "total_tokens": 50},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4o", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4o", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4o",
|
||||
"model": "openai_p/gpt-4o",
|
||||
"messages": []map[string]any{{"role": "user", "content": "介绍AI历史"}},
|
||||
"max_tokens": 30,
|
||||
})
|
||||
@@ -372,10 +362,10 @@ func TestE2E_OpenAI_NonStream_UsageWithReasoning(t *testing.T) {
|
||||
},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "o3", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "o3", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "o3",
|
||||
"model": "openai_p/o3",
|
||||
"messages": []map[string]any{{"role": "user", "content": "15+23*2=?"}},
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
@@ -413,10 +403,10 @@ func TestE2E_OpenAI_NonStream_Refusal(t *testing.T) {
|
||||
"usage": map[string]any{"prompt_tokens": 12, "completion_tokens": 35, "total_tokens": 47},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4o", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4o", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4o",
|
||||
"model": "openai_p/gpt-4o",
|
||||
"messages": []map[string]any{{"role": "user", "content": "做坏事"}},
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
@@ -455,10 +445,10 @@ func TestE2E_OpenAI_Stream_Text(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4o", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4o", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4o",
|
||||
"model": "openai_p/gpt-4o",
|
||||
"messages": []map[string]any{{"role": "user", "content": "你好"}},
|
||||
"stream": true,
|
||||
})
|
||||
@@ -499,10 +489,10 @@ func TestE2E_OpenAI_Stream_ToolCalls(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4o", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4o", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4o",
|
||||
"model": "openai_p/gpt-4o",
|
||||
"messages": []map[string]any{{"role": "user", "content": "北京天气"}},
|
||||
"tools": []map[string]any{{
|
||||
"type": "function",
|
||||
@@ -548,10 +538,10 @@ func TestE2E_OpenAI_Stream_WithUsage(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4o", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4o", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4o",
|
||||
"model": "openai_p/gpt-4o",
|
||||
"messages": []map[string]any{{"role": "user", "content": "hi"}},
|
||||
"stream": true,
|
||||
})
|
||||
@@ -583,10 +573,10 @@ func TestE2E_Anthropic_NonStream_BasicText(t *testing.T) {
|
||||
"usage": map[string]any{"input_tokens": 15, "output_tokens": 25},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 1024,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "你好"}},
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
@@ -599,7 +589,7 @@ func TestE2E_Anthropic_NonStream_BasicText(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(w.Body.Bytes(), &resp))
|
||||
assert.Equal(t, "message", resp["type"])
|
||||
assert.Equal(t, "assistant", resp["role"])
|
||||
assert.Equal(t, "claude-opus-4-7", resp["model"])
|
||||
assert.Equal(t, "anthropic_p/claude-opus-4-7", resp["model"])
|
||||
assert.Equal(t, "end_turn", resp["stop_reason"])
|
||||
|
||||
content := resp["content"].([]any)
|
||||
@@ -629,10 +619,10 @@ func TestE2E_Anthropic_NonStream_WithSystem(t *testing.T) {
|
||||
"usage": map[string]any{"input_tokens": 30, "output_tokens": 15},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 1024,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 1024,
|
||||
"system": "你是编程助手",
|
||||
"messages": []map[string]any{{"role": "user", "content": "什么是递归?"}},
|
||||
})
|
||||
@@ -658,10 +648,10 @@ func TestE2E_Anthropic_NonStream_ToolUse(t *testing.T) {
|
||||
"usage": map[string]any{"input_tokens": 180, "output_tokens": 42},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 1024,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "北京天气"}},
|
||||
"tools": []map[string]any{{
|
||||
"name": "get_weather", "description": "获取天气",
|
||||
@@ -704,10 +694,10 @@ func TestE2E_Anthropic_NonStream_Thinking(t *testing.T) {
|
||||
"usage": map[string]any{"input_tokens": 95, "output_tokens": 280},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 4096,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 4096,
|
||||
"messages": []map[string]any{{"role": "user", "content": "15+23*2=?"}},
|
||||
"thinking": map[string]any{"type": "enabled", "budget_tokens": 2048},
|
||||
})
|
||||
@@ -736,10 +726,10 @@ func TestE2E_Anthropic_NonStream_MaxTokens(t *testing.T) {
|
||||
"usage": map[string]any{"input_tokens": 22, "output_tokens": 20},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 20,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 20,
|
||||
"messages": []map[string]any{{"role": "user", "content": "介绍AI历史"}},
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
@@ -764,10 +754,10 @@ func TestE2E_Anthropic_NonStream_StopSequence(t *testing.T) {
|
||||
"usage": map[string]any{"input_tokens": 22, "output_tokens": 10},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 1024,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "从1数到10"}},
|
||||
"stop_sequences": []string{"5"},
|
||||
})
|
||||
@@ -800,10 +790,10 @@ func TestE2E_Anthropic_NonStream_MetadataUserID(t *testing.T) {
|
||||
"usage": map[string]any{"input_tokens": 12, "output_tokens": 5},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 1024,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "你好"}},
|
||||
"metadata": map[string]any{"user_id": "user_12345"},
|
||||
})
|
||||
@@ -829,10 +819,10 @@ func TestE2E_Anthropic_NonStream_UsageWithCache(t *testing.T) {
|
||||
},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 1024,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 1024,
|
||||
"system": []map[string]any{{"type": "text", "text": "你是编程助手。"}},
|
||||
"messages": []map[string]any{{"role": "user", "content": "你好"}},
|
||||
})
|
||||
@@ -874,10 +864,10 @@ func TestE2E_Anthropic_Stream_Text(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 1024,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "你好"}},
|
||||
"stream": true,
|
||||
})
|
||||
@@ -921,10 +911,10 @@ func TestE2E_Anthropic_Stream_Thinking(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 4096,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 4096,
|
||||
"messages": []map[string]any{{"role": "user", "content": "1+1=?"}},
|
||||
"thinking": map[string]any{"type": "enabled", "budget_tokens": 1024},
|
||||
"stream": true,
|
||||
@@ -970,10 +960,10 @@ func TestE2E_CrossProtocol_OpenAIToAnthropic_RequestFormat(t *testing.T) {
|
||||
"usage": map[string]any{"input_tokens": 10, "output_tokens": 5},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-model", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-model", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-model",
|
||||
"model": "anthropic_p/claude-model",
|
||||
"messages": []map[string]any{{"role": "user", "content": "Hello"}},
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
@@ -1011,10 +1001,10 @@ func TestE2E_CrossProtocol_AnthropicToOpenAI_RequestFormat(t *testing.T) {
|
||||
"usage": map[string]any{"prompt_tokens": 10, "completion_tokens": 8, "total_tokens": 18},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4", "max_tokens": 1024,
|
||||
"model": "openai_p/gpt-4", "max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "Hello"}},
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
@@ -1052,10 +1042,10 @@ func TestE2E_CrossProtocol_OpenAIToAnthropic_Stream(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-model", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-model", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-model",
|
||||
"model": "anthropic_p/claude-model",
|
||||
"messages": []map[string]any{{"role": "user", "content": "Hello"}},
|
||||
"stream": true,
|
||||
})
|
||||
@@ -1092,10 +1082,10 @@ func TestE2E_CrossProtocol_AnthropicToOpenAI_Stream(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4", "max_tokens": 1024,
|
||||
"model": "openai_p/gpt-4", "max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "Hello"}},
|
||||
"stream": true,
|
||||
})
|
||||
@@ -1130,10 +1120,10 @@ func TestE2E_OpenAI_ErrorResponse(t *testing.T) {
|
||||
},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "nonexistent", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "nonexistent", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "nonexistent",
|
||||
"model": "openai_p/nonexistent",
|
||||
"messages": []map[string]any{{"role": "user", "content": "test"}},
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
@@ -1157,10 +1147,10 @@ func TestE2E_Anthropic_ErrorResponse(t *testing.T) {
|
||||
},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 1024,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "test"}},
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
@@ -1203,10 +1193,10 @@ func TestE2E_OpenAI_NonStream_ParallelToolCalls(t *testing.T) {
|
||||
"usage": map[string]any{"prompt_tokens": 100, "completion_tokens": 36, "total_tokens": 136},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4o", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4o", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4o",
|
||||
"model": "openai_p/gpt-4o",
|
||||
"messages": []map[string]any{{"role": "user", "content": "北京和上海的天气"}},
|
||||
"tools": []map[string]any{{
|
||||
"type": "function",
|
||||
@@ -1255,10 +1245,10 @@ func TestE2E_OpenAI_NonStream_StopSequence(t *testing.T) {
|
||||
"usage": map[string]any{"prompt_tokens": 10, "completion_tokens": 8, "total_tokens": 18},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4o", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4o", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4o",
|
||||
"model": "openai_p/gpt-4o",
|
||||
"messages": []map[string]any{{"role": "user", "content": "从1数到10"}},
|
||||
"stop": []string{"5"},
|
||||
})
|
||||
@@ -1293,10 +1283,10 @@ func TestE2E_OpenAI_NonStream_ContentFilter(t *testing.T) {
|
||||
"usage": map[string]any{"prompt_tokens": 8, "completion_tokens": 0, "total_tokens": 8},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4o", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4o", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4o",
|
||||
"model": "openai_p/gpt-4o",
|
||||
"messages": []map[string]any{{"role": "user", "content": "危险内容"}},
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
@@ -1325,10 +1315,10 @@ func TestE2E_Anthropic_NonStream_MultiToolUse(t *testing.T) {
|
||||
"usage": map[string]any{"input_tokens": 200, "output_tokens": 84},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 1024,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "北京和上海的天气"}},
|
||||
"tools": []map[string]any{{
|
||||
"name": "get_weather", "description": "获取天气",
|
||||
@@ -1374,10 +1364,10 @@ func TestE2E_Anthropic_NonStream_ToolChoiceAny(t *testing.T) {
|
||||
"usage": map[string]any{"input_tokens": 100, "output_tokens": 30},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 1024,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "现在几点了?"}},
|
||||
"tools": []map[string]any{{
|
||||
"name": "get_time", "description": "获取当前时间",
|
||||
@@ -1417,10 +1407,10 @@ func TestE2E_Anthropic_NonStream_ArraySystemPrompt(t *testing.T) {
|
||||
"usage": map[string]any{"input_tokens": 50, "output_tokens": 10},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 1024,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 1024,
|
||||
"system": []map[string]any{
|
||||
{"type": "text", "text": "你是编程助手。"},
|
||||
{"type": "text", "text": "请用中文回答。"},
|
||||
@@ -1454,10 +1444,10 @@ func TestE2E_Anthropic_NonStream_ToolResultMessage(t *testing.T) {
|
||||
"usage": map[string]any{"input_tokens": 150, "output_tokens": 20},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 1024,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 1024,
|
||||
"messages": []map[string]any{
|
||||
{"role": "user", "content": "北京天气"},
|
||||
{"role": "assistant", "content": []map[string]any{
|
||||
@@ -1507,10 +1497,10 @@ func TestE2E_Anthropic_Stream_ToolCalls(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 1024,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "北京天气"}},
|
||||
"tools": []map[string]any{{
|
||||
"name": "get_weather", "description": "获取天气",
|
||||
@@ -1561,10 +1551,10 @@ func TestE2E_CrossProtocol_OpenAIToAnthropic_NonStream_ToolCalls(t *testing.T) {
|
||||
"usage": map[string]any{"input_tokens": 100, "output_tokens": 30},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-model", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-model", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-model",
|
||||
"model": "anthropic_p/claude-model",
|
||||
"messages": []map[string]any{{"role": "user", "content": "北京天气"}},
|
||||
"tools": []map[string]any{{
|
||||
"type": "function",
|
||||
@@ -1613,10 +1603,10 @@ func TestE2E_CrossProtocol_AnthropicToOpenAI_NonStream_Thinking(t *testing.T) {
|
||||
"usage": map[string]any{"prompt_tokens": 50, "completion_tokens": 100, "total_tokens": 150},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4", "max_tokens": 4096,
|
||||
"model": "openai_p/gpt-4", "max_tokens": 4096,
|
||||
"messages": []map[string]any{{"role": "user", "content": "宇宙的答案"}},
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
@@ -1643,10 +1633,10 @@ func TestE2E_CrossProtocol_StopReasonMapping(t *testing.T) {
|
||||
"usage": map[string]any{"input_tokens": 10, "output_tokens": 20},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-model", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-model", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-model",
|
||||
"model": "anthropic_p/claude-model",
|
||||
"messages": []map[string]any{{"role": "user", "content": "长文"}},
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
@@ -1685,10 +1675,10 @@ func TestE2E_OpenAI_NonStream_AssistantWithToolResult(t *testing.T) {
|
||||
"usage": map[string]any{"prompt_tokens": 100, "completion_tokens": 20, "total_tokens": 120},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4o", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4o", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4o",
|
||||
"model": "openai_p/gpt-4o",
|
||||
"messages": []map[string]any{
|
||||
{"role": "user", "content": "北京天气"},
|
||||
{"role": "assistant", "content": nil, "tool_calls": []map[string]any{{
|
||||
@@ -1732,10 +1722,10 @@ func TestE2E_CrossProtocol_AnthropicToOpenAI_Stream_ToolCalls(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-model", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-model", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-model",
|
||||
"model": "anthropic_p/claude-model",
|
||||
"messages": []map[string]any{{"role": "user", "content": "北京天气"}},
|
||||
"tools": []map[string]any{{
|
||||
"type": "function",
|
||||
@@ -1781,10 +1771,10 @@ func TestE2E_CrossProtocol_OpenAIToAnthropic_Stream_ToolCalls(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4", "max_tokens": 1024,
|
||||
"model": "openai_p/gpt-4", "max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "北京天气"}},
|
||||
"tools": []map[string]any{{
|
||||
"name": "get_weather", "description": "获取天气",
|
||||
@@ -1819,10 +1809,10 @@ func TestE2E_OpenAI_Upstream5xx_ErrorPassthrough(t *testing.T) {
|
||||
},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "openai-p", "openai", "gpt-4o", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "openai_p", "openai", "gpt-4o", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "gpt-4o",
|
||||
"model": "openai_p/gpt-4o",
|
||||
"messages": []map[string]any{{"role": "user", "content": "test"}},
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
@@ -1851,10 +1841,10 @@ func TestE2E_Anthropic_Upstream5xx_ErrorPassthrough(t *testing.T) {
|
||||
},
|
||||
})
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 1024,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "test"}},
|
||||
})
|
||||
w := httptest.NewRecorder()
|
||||
@@ -1889,10 +1879,10 @@ func TestE2E_Anthropic_Stream_TruncatedSSE(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
})
|
||||
e2eCreateProviderAndModel(t, r, "anthropic-p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
e2eCreateProviderAndModel(t, r, "anthropic_p", "anthropic", "claude-opus-4-7", upstream.URL)
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"model": "claude-opus-4-7", "max_tokens": 1024,
|
||||
"model": "anthropic_p/claude-opus-4-7", "max_tokens": 1024,
|
||||
"messages": []map[string]any{{"role": "user", "content": "test"}},
|
||||
"stream": true,
|
||||
})
|
||||
|
||||
@@ -9,11 +9,8 @@ import (
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gorm.io/driver/sqlite"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"nex/backend/internal/config"
|
||||
"nex/backend/internal/domain"
|
||||
"nex/backend/internal/handler"
|
||||
"nex/backend/internal/handler/middleware"
|
||||
@@ -27,23 +24,13 @@ func init() {
|
||||
|
||||
func setupIntegrationTest(t *testing.T) (*gin.Engine, *gorm.DB) {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
db, err := gorm.Open(sqlite.Open(dir+"/test.db"), &gorm.Config{})
|
||||
require.NoError(t, err)
|
||||
err = db.AutoMigrate(&config.Provider{}, &config.Model{}, &config.UsageStats{})
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
sqlDB, _ := db.DB()
|
||||
if sqlDB != nil {
|
||||
sqlDB.Close()
|
||||
}
|
||||
})
|
||||
db := setupTestDB(t)
|
||||
|
||||
providerRepo := repository.NewProviderRepository(db)
|
||||
modelRepo := repository.NewModelRepository(db)
|
||||
statsRepo := repository.NewStatsRepository(db)
|
||||
|
||||
providerService := service.NewProviderService(providerRepo)
|
||||
providerService := service.NewProviderService(providerRepo, modelRepo)
|
||||
modelService := service.NewModelService(modelRepo, providerRepo)
|
||||
_ = service.NewRoutingService(modelRepo, providerRepo)
|
||||
statsService := service.NewStatsService(statsRepo)
|
||||
@@ -97,13 +84,16 @@ func TestOpenAI_CompleteFlow(t *testing.T) {
|
||||
|
||||
// 2. 创建 Model
|
||||
modelBody, _ := json.Marshal(map[string]string{
|
||||
"id": "gpt4", "provider_id": "openai", "model_name": "gpt-4",
|
||||
"provider_id": "openai", "model_name": "gpt-4",
|
||||
})
|
||||
w = httptest.NewRecorder()
|
||||
req = httptest.NewRequest("POST", "/api/models", bytes.NewReader(modelBody))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
r.ServeHTTP(w, req)
|
||||
assert.Equal(t, 201, w.Code)
|
||||
var createdModel domain.Model
|
||||
json.Unmarshal(w.Body.Bytes(), &createdModel)
|
||||
assert.NotEmpty(t, createdModel.ID)
|
||||
|
||||
// 3. 列出 Provider
|
||||
w = httptest.NewRecorder()
|
||||
@@ -135,7 +125,7 @@ func TestOpenAI_CompleteFlow(t *testing.T) {
|
||||
|
||||
// 6. 删除 Model
|
||||
w = httptest.NewRecorder()
|
||||
req = httptest.NewRequest("DELETE", "/api/models/gpt4", nil)
|
||||
req = httptest.NewRequest("DELETE", "/api/models/"+createdModel.ID, nil)
|
||||
r.ServeHTTP(w, req)
|
||||
assert.Equal(t, 204, w.Code)
|
||||
|
||||
@@ -160,17 +150,19 @@ func TestAnthropic_ModelCreation(t *testing.T) {
|
||||
assert.Equal(t, 201, w.Code)
|
||||
|
||||
modelBody, _ := json.Marshal(map[string]string{
|
||||
"id": "claude3", "provider_id": "anthropic", "model_name": "claude-3-opus-20240229",
|
||||
"provider_id": "anthropic", "model_name": "claude-3-opus-20240229",
|
||||
})
|
||||
w = httptest.NewRecorder()
|
||||
req = httptest.NewRequest("POST", "/api/models", bytes.NewReader(modelBody))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
r.ServeHTTP(w, req)
|
||||
assert.Equal(t, 201, w.Code)
|
||||
var createdModel domain.Model
|
||||
json.Unmarshal(w.Body.Bytes(), &createdModel)
|
||||
|
||||
// 验证创建成功
|
||||
w = httptest.NewRecorder()
|
||||
req = httptest.NewRequest("GET", "/api/models/claude3", nil)
|
||||
req = httptest.NewRequest("GET", "/api/models/"+createdModel.ID, nil)
|
||||
r.ServeHTTP(w, req)
|
||||
assert.Equal(t, 200, w.Code)
|
||||
}
|
||||
@@ -188,7 +180,7 @@ func TestStats_RecordingAndQuery(t *testing.T) {
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
modelBody, _ := json.Marshal(map[string]string{
|
||||
"id": "m1", "provider_id": "p1", "model_name": "gpt-4",
|
||||
"provider_id": "p1", "model_name": "gpt-4",
|
||||
})
|
||||
w = httptest.NewRecorder()
|
||||
req = httptest.NewRequest("POST", "/api/models", bytes.NewReader(modelBody))
|
||||
|
||||
37
backend/tests/integration/testhelper.go
Normal file
37
backend/tests/integration/testhelper.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package integration

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"

	"nex/backend/internal/config"
)

// setupTestDB opens an in-memory SQLite database and runs AutoMigrate for the
// integration-test schema. MaxOpenConns(1) pins the pool to a single
// connection so the :memory: database is not discarded between pooled
// connections (each new connection would otherwise see a fresh, empty DB).
func setupTestDB(t *testing.T) *gorm.DB {
	t.Helper()

	gormDB, openErr := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	require.NoError(t, openErr)

	rawDB, dbErr := gormDB.DB()
	require.NoError(t, dbErr)
	rawDB.SetMaxOpenConns(1)
	rawDB.SetConnMaxLifetime(0)

	require.NoError(t, gormDB.AutoMigrate(&config.Provider{}, &config.Model{}, &config.UsageStats{}))

	t.Cleanup(func() {
		// Give async goroutines (e.g. statsService.Record) a moment to
		// finish before the single underlying connection is closed.
		time.Sleep(50 * time.Millisecond)
		rawDB.Close()
	})

	return gormDB
}
|
||||
Reference in New Issue
Block a user