1
0

refactor: 实现 ConversionEngine 协议转换引擎,替代旧 protocol 包

引入 Canonical Model 和 ProtocolAdapter 架构,支持 OpenAI/Anthropic 协议间
无缝转换,统一 ProxyHandler 替代分散的 OpenAI/Anthropic Handler,简化
ProviderClient 为协议无关的 HTTP 发送器,Provider 新增 protocol 字段。
This commit is contained in:
2026-04-20 00:36:27 +08:00
parent 26810d9410
commit 1dac347d3b
65 changed files with 9690 additions and 2139 deletions

View File

@@ -11,15 +11,15 @@ import (
"go.uber.org/zap"
"nex/backend/internal/protocol/openai"
"nex/backend/internal/conversion"
)
// StreamConfig 流式处理配置
type StreamConfig struct {
InitialBufferSize int // 初始缓冲区大小(字节),默认 4096
MaxBufferSize int // 最大缓冲区大小(字节),默认 65536
Timeout time.Duration // 流超时时间,默认 5 分钟
ChannelBufferSize int // 事件通道缓冲区大小,默认 100
InitialBufferSize int
MaxBufferSize int
Timeout time.Duration
ChannelBufferSize int
}
// DefaultStreamConfig 返回默认流式处理配置
@@ -32,14 +32,6 @@ func DefaultStreamConfig() StreamConfig {
}
}
// Client OpenAI 兼容供应商客户端
type Client struct {
httpClient *http.Client
adapter *openai.Adapter
logger *zap.Logger
streamCfg StreamConfig
}
// StreamEvent 流事件
type StreamEvent struct {
Data []byte
@@ -47,10 +39,17 @@ type StreamEvent struct {
Done bool
}
// Client 协议无关的供应商客户端
type Client struct {
httpClient *http.Client
logger *zap.Logger
streamCfg StreamConfig
}
// ProviderClient 供应商客户端接口
type ProviderClient interface {
SendRequest(ctx context.Context, req *openai.ChatCompletionRequest, apiKey, baseURL string) (*openai.ChatCompletionResponse, error)
SendStreamRequest(ctx context.Context, req *openai.ChatCompletionRequest, apiKey, baseURL string) (<-chan StreamEvent, error)
Send(ctx context.Context, spec conversion.HTTPRequestSpec) (*conversion.HTTPResponseSpec, error)
SendStream(ctx context.Context, spec conversion.HTTPRequestSpec) (<-chan StreamEvent, error)
}
// NewClient 创建供应商客户端
@@ -59,97 +58,98 @@ func NewClient() *Client {
httpClient: &http.Client{
Timeout: 30 * time.Second,
},
adapter: openai.NewAdapter(),
logger: zap.L(),
streamCfg: DefaultStreamConfig(),
}
}
// SendRequest 发送非流式请求
func (c *Client) SendRequest(ctx context.Context, req *openai.ChatCompletionRequest, apiKey, baseURL string) (*openai.ChatCompletionResponse, error) {
// 准备请求
httpReq, err := c.adapter.PrepareRequest(req, apiKey, baseURL)
// Send 发送非流式请求
func (c *Client) Send(ctx context.Context, spec conversion.HTTPRequestSpec) (*conversion.HTTPResponseSpec, error) {
var bodyReader io.Reader
if len(spec.Body) > 0 {
bodyReader = bytes.NewReader(spec.Body)
}
httpReq, err := http.NewRequestWithContext(ctx, spec.Method, spec.URL, bodyReader)
if err != nil {
return nil, fmt.Errorf("准备请求失败: %w", err)
return nil, fmt.Errorf("创建请求失败: %w", err)
}
for k, v := range spec.Headers {
httpReq.Header.Set(k, v)
}
c.logger.Debug("发送请求",
zap.String("url", httpReq.URL.String()),
zap.String("method", httpReq.Method),
zap.String("url", spec.URL),
zap.String("method", spec.Method),
)
// 设置上下文
httpReq = httpReq.WithContext(ctx)
// 发送请求
resp, err := c.httpClient.Do(httpReq)
if err != nil {
return nil, fmt.Errorf("发送请求失败: %w", err)
}
defer resp.Body.Close()
// 检查状态码
if resp.StatusCode != http.StatusOK {
// 解析错误响应
errorResp, parseErr := c.adapter.ParseErrorResponse(resp)
if parseErr != nil {
return nil, fmt.Errorf("供应商返回错误: HTTP %d", resp.StatusCode)
}
return nil, fmt.Errorf("供应商错误: %s", errorResp.Error.Message)
}
// 解析响应
result, err := c.adapter.ParseResponse(resp)
respBody, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("解析响应失败: %w", err)
return nil, fmt.Errorf("读取响应失败: %w", err)
}
return result, nil
respHeaders := make(map[string]string)
for k, vs := range resp.Header {
if len(vs) > 0 {
respHeaders[k] = vs[0]
}
}
return &conversion.HTTPResponseSpec{
StatusCode: resp.StatusCode,
Headers: respHeaders,
Body: respBody,
}, nil
}
// SendStreamRequest 发送流式请求
func (c *Client) SendStreamRequest(ctx context.Context, req *openai.ChatCompletionRequest, apiKey, baseURL string) (<-chan StreamEvent, error) {
// 确保请求设置为流式
req.Stream = true
// 准备请求
httpReq, err := c.adapter.PrepareRequest(req, apiKey, baseURL)
if err != nil {
return nil, fmt.Errorf("准备请求失败: %w", err)
// SendStream 发送流式请求
func (c *Client) SendStream(ctx context.Context, spec conversion.HTTPRequestSpec) (<-chan StreamEvent, error) {
var bodyReader io.Reader
if len(spec.Body) > 0 {
bodyReader = bytes.NewReader(spec.Body)
}
// 设置带超时的上下文
streamCtx, cancel := context.WithTimeout(ctx, c.streamCfg.Timeout)
_ = cancel // cancel 在流读取结束后由 ctx 传播处理
httpReq = httpReq.WithContext(streamCtx)
httpReq, err := http.NewRequestWithContext(streamCtx, spec.Method, spec.URL, bodyReader)
if err != nil {
cancel()
return nil, fmt.Errorf("创建请求失败: %w", err)
}
for k, v := range spec.Headers {
httpReq.Header.Set(k, v)
}
// 发送请求
resp, err := c.httpClient.Do(httpReq)
if err != nil {
cancel()
return nil, fmt.Errorf("发送请求失败: %w", err)
}
// 检查状态码
if resp.StatusCode != http.StatusOK {
defer resp.Body.Close()
cancel()
errorResp, parseErr := c.adapter.ParseErrorResponse(resp)
if parseErr != nil {
return nil, fmt.Errorf("供应商返回错误: HTTP %d", resp.StatusCode)
errBody, _ := io.ReadAll(resp.Body)
if len(errBody) > 0 {
return nil, fmt.Errorf("供应商返回错误: HTTP %d: %s", resp.StatusCode, string(errBody))
}
return nil, fmt.Errorf("供应商错误: %s", errorResp.Error.Message)
return nil, fmt.Errorf("供应商返回错误: HTTP %d", resp.StatusCode)
}
// 创建事件通道
eventChan := make(chan StreamEvent, c.streamCfg.ChannelBufferSize)
// 启动 goroutine 读取流
go c.readStream(streamCtx, cancel, resp.Body, eventChan)
return eventChan, nil
}
// readStream 读取 SSE 流(支持动态缓冲区、超时控制和改进的错误处理)
// readStream 读取 SSE 流
func (c *Client) readStream(ctx context.Context, cancel context.CancelFunc, body io.ReadCloser, eventChan chan<- StreamEvent) {
defer close(eventChan)
defer body.Close()
@@ -175,10 +175,8 @@ func (c *Client) readStream(ctx context.Context, cancel context.CancelFunc, body
n, err := body.Read(buf)
if err != nil {
if err == io.EOF {
// 流正常结束
return
}
// 区分网络错误和其他错误
if isNetworkError(err) {
c.logger.Error("流网络错误", zap.String("error", err.Error()))
eventChan <- StreamEvent{Error: fmt.Errorf("网络错误: %w", err)}
@@ -191,7 +189,6 @@ func (c *Client) readStream(ctx context.Context, cancel context.CancelFunc, body
dataBuf = append(dataBuf, buf[:n]...)
// 动态调整缓冲区大小:如果数据量大,增大缓冲区
if len(dataBuf) > bufSize/2 && bufSize < c.streamCfg.MaxBufferSize {
newSize := bufSize * 2
if newSize > c.streamCfg.MaxBufferSize {
@@ -201,34 +198,21 @@ func (c *Client) readStream(ctx context.Context, cancel context.CancelFunc, body
bufSize = newSize
}
// 处理完整的 SSE 事件
for {
// 查找事件边界(双换行)
idx := bytes.Index(dataBuf, []byte("\n\n"))
if idx == -1 {
break
}
// 提取事件
event := dataBuf[:idx]
rawEvent := dataBuf[:idx]
dataBuf = dataBuf[idx+2:]
// 解析 data 行
lines := strings.Split(string(event), "\n")
for _, line := range lines {
if strings.HasPrefix(line, "data: ") {
data := strings.TrimPrefix(line, "data: ")
// 检查是否是结束标记
if data == "[DONE]" {
eventChan <- StreamEvent{Done: true}
return
}
// 发送数据
eventChan <- StreamEvent{Data: []byte(data)}
}
if bytes.Contains(rawEvent, []byte("data: [DONE]")) {
eventChan <- StreamEvent{Done: true}
return
}
eventChan <- StreamEvent{Data: rawEvent}
}
}
}
@@ -245,4 +229,3 @@ func isNetworkError(err error) bool {
strings.Contains(errStr, "timeout") ||
strings.Contains(errStr, "EOF")
}

View File

@@ -2,7 +2,6 @@ package provider
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
@@ -11,14 +10,13 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"nex/backend/internal/protocol/openai"
"nex/backend/internal/conversion"
)
func TestNewClient(t *testing.T) {
client := NewClient()
require.NotNil(t, client)
assert.NotNil(t, client.httpClient)
assert.NotNil(t, client.adapter)
assert.Equal(t, 4096, client.streamCfg.InitialBufferSize)
assert.Equal(t, 65536, client.streamCfg.MaxBufferSize)
assert.Equal(t, 100, client.streamCfg.ChannelBufferSize)
@@ -31,67 +29,66 @@ func TestDefaultStreamConfig(t *testing.T) {
assert.Equal(t, 100, cfg.ChannelBufferSize)
}
func TestClient_SendRequest_Success(t *testing.T) {
func TestClient_Send_Success(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, "POST", r.Method)
assert.Equal(t, "application/json", r.Header.Get("Content-Type"))
assert.Equal(t, "Bearer test-key", r.Header.Get("Authorization"))
resp := openai.ChatCompletionResponse{
ID: "chatcmpl-123",
Choices: []openai.Choice{
{Index: 0, Message: &openai.Message{Role: "assistant", Content: "Hello!"}},
},
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(resp)
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"id":"test","model":"gpt-4"}`))
}))
defer server.Close()
client := NewClient()
req := &openai.ChatCompletionRequest{
Model: "gpt-4",
Messages: []openai.Message{{Role: "user", Content: "Hi"}},
spec := conversion.HTTPRequestSpec{
URL: server.URL + "/v1/chat/completions",
Method: "POST",
Headers: map[string]string{
"Authorization": "Bearer test-key",
"Content-Type": "application/json",
},
Body: []byte(`{"model":"gpt-4","messages":[]}`),
}
result, err := client.SendRequest(context.Background(), req, "test-key", server.URL)
result, err := client.Send(context.Background(), spec)
require.NoError(t, err)
assert.Equal(t, "chatcmpl-123", result.ID)
assert.Equal(t, 200, result.StatusCode)
assert.Contains(t, string(result.Body), "test")
}
func TestClient_SendRequest_ErrorResponse(t *testing.T) {
func TestClient_Send_ErrorResponse(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusUnauthorized)
json.NewEncoder(w).Encode(openai.ErrorResponse{
Error: openai.ErrorDetail{Message: "Invalid API key"},
})
w.Write([]byte(`{"error":{"message":"Invalid API key"}}`))
}))
defer server.Close()
client := NewClient()
req := &openai.ChatCompletionRequest{
Model: "gpt-4",
Messages: []openai.Message{{Role: "user", Content: "Hi"}},
spec := conversion.HTTPRequestSpec{
URL: server.URL + "/v1/chat/completions",
Method: "POST",
Headers: map[string]string{"Authorization": "Bearer bad-key"},
Body: []byte(`{}`),
}
_, err := client.SendRequest(context.Background(), req, "bad-key", server.URL)
assert.Error(t, err)
assert.Contains(t, err.Error(), "Invalid API key")
result, err := client.Send(context.Background(), spec)
require.NoError(t, err)
assert.Equal(t, 401, result.StatusCode)
}
func TestClient_SendRequest_ConnectionError(t *testing.T) {
func TestClient_Send_ConnectionError(t *testing.T) {
client := NewClient()
req := &openai.ChatCompletionRequest{
Model: "gpt-4",
Messages: []openai.Message{{Role: "user", Content: "Hi"}},
spec := conversion.HTTPRequestSpec{
URL: "http://localhost:1/v1/chat/completions",
Method: "POST",
}
_, err := client.SendRequest(context.Background(), req, "key", "http://localhost:1")
_, err := client.Send(context.Background(), spec)
assert.Error(t, err)
}
func TestClient_SendStreamRequest_CreatesChannel(t *testing.T) {
// 使用一个慢服务器确保客户端有时间读取
func TestClient_SendStream_CreatesChannel(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/event-stream")
w.WriteHeader(http.StatusOK)
@@ -99,35 +96,36 @@ func TestClient_SendStreamRequest_CreatesChannel(t *testing.T) {
defer server.Close()
client := NewClient()
req := &openai.ChatCompletionRequest{
Model: "gpt-4",
Messages: []openai.Message{{Role: "user", Content: "Hi"}},
spec := conversion.HTTPRequestSpec{
URL: server.URL + "/v1/chat/completions",
Method: "POST",
Headers: map[string]string{"Authorization": "Bearer test-key"},
Body: []byte(`{}`),
}
eventChan, err := client.SendStreamRequest(context.Background(), req, "test-key", server.URL)
eventChan, err := client.SendStream(context.Background(), spec)
require.NoError(t, err)
require.NotNil(t, eventChan)
// 读取直到 channel 关闭(服务器关闭后应产生 EOF)
for range eventChan {
// 消费所有事件
}
// channel 应已关闭(不阻塞即通过)
}
func TestClient_SendStreamRequest_ErrorResponse(t *testing.T) {
func TestClient_SendStream_ErrorResponse(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
}))
defer server.Close()
client := NewClient()
req := &openai.ChatCompletionRequest{
Model: "gpt-4",
Messages: []openai.Message{{Role: "user", Content: "Hi"}},
spec := conversion.HTTPRequestSpec{
URL: server.URL + "/v1/chat/completions",
Method: "POST",
Headers: map[string]string{"Authorization": "Bearer key"},
Body: []byte(`{}`),
}
_, err := client.SendStreamRequest(context.Background(), req, "key", server.URL)
_, err := client.SendStream(context.Background(), spec)
assert.Error(t, err)
}
@@ -145,7 +143,7 @@ func TestIsNetworkError(t *testing.T) {
{"", false},
}
for _, tt := range tests {
err := fmt.Errorf("%s", tt.input) //nolint:govet
err := fmt.Errorf("%s", tt.input)
assert.Equal(t, tt.want, isNetworkError(err), "isNetworkError(%q)", tt.input)
}
}