diff --git a/.gitignore b/.gitignore index 524c472..3b8b772 100644 --- a/.gitignore +++ b/.gitignore @@ -317,6 +317,87 @@ Network Trash Folder Temporary Items .apdisk +### Python.gitignore ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Environments +.venv/ +venv/ +ENV/ +env/ +.python-version + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +# Pyre +.pyre/ + +# pytype +.pytype/ + +# Cython debug symbols +cython_debug/ + # Custom .claude .opencode diff --git a/scripts/anthropic_detect.py b/scripts/anthropic_detect.py new file mode 100644 index 0000000..219e4ab --- /dev/null +++ b/scripts/anthropic_detect.py @@ -0,0 +1,715 @@ +#!/usr/bin/env python3 +"""Anthropic 兼容性接口测试脚本 + +用法: + python3 scripts/anthropic_detect.py --base_url [options] + +示例: + python3 scripts/anthropic_detect.py --base_url https://api.example.com + python3 scripts/anthropic_detect.py --base_url https://api.example.com --api_key sk-xxx --model claude-sonnet-4-5 + python3 scripts/anthropic_detect.py --base_url https://api.example.com --stream --tools --vision +""" + +import json +import time +import ssl +import argparse +import urllib.request +import urllib.error + +TIMEOUT = 30 +ANTHROPIC_VERSION = "2023-06-01" + + +def create_ssl_context(): + ctx = ssl.create_default_context() + ctx.check_hostname = False + ctx.verify_mode = ssl.CERT_NONE + return ctx + + +def 
http_request(url, method="GET", headers=None, body=None, ssl_ctx=None): + req = urllib.request.Request(url, method=method) + if headers: + for k, v in headers.items(): + req.add_header(k, v) + if body is not None: + if isinstance(body, str): + req.data = body.encode("utf-8") + else: + req.data = json.dumps(body).encode("utf-8") + + start = time.time() + try: + resp = urllib.request.urlopen(req, timeout=TIMEOUT, context=ssl_ctx) + elapsed = time.time() - start + return resp.getcode(), resp.read().decode("utf-8"), elapsed + except urllib.error.HTTPError as e: + elapsed = time.time() - start + return e.code, e.read().decode("utf-8"), elapsed + except Exception as e: + elapsed = time.time() - start + return None, str(e), elapsed + + +def http_stream_request(url, headers=None, body=None, ssl_ctx=None): + req = urllib.request.Request(url, method="POST") + if headers: + for k, v in headers.items(): + req.add_header(k, v) + if body is not None: + req.data = json.dumps(body).encode("utf-8") + + start = time.time() + try: + resp = urllib.request.urlopen(req, timeout=TIMEOUT, context=ssl_ctx) + status = resp.getcode() + lines = [] + for raw_line in resp: + line = raw_line.decode("utf-8").rstrip("\n\r") + if line: + lines.append(line) + elapsed = time.time() - start + return status, "\n".join(lines), elapsed + except urllib.error.HTTPError as e: + elapsed = time.time() - start + return e.code, e.read().decode("utf-8"), elapsed + except Exception as e: + elapsed = time.time() - start + return None, str(e), elapsed + + +def format_json(text): + try: + parsed = json.loads(text) + return json.dumps(parsed, ensure_ascii=False, indent=2) + except (json.JSONDecodeError, TypeError): + return text + + +def build_headers(api_key): + h = { + "Content-Type": "application/json", + "anthropic-version": ANTHROPIC_VERSION, + } + if api_key: + h["x-api-key"] = api_key + return h + + +def run_test(index, total, desc, url, method, headers, body, stream, ssl_ctx): + print(f"\n[{index}/{total}] 
{desc}") + print(f">>> {method} {url}") + if body is not None: + if isinstance(body, str): + print(body) + else: + print(format_json(json.dumps(body, ensure_ascii=False))) + + if stream: + status, data, elapsed = http_stream_request(url, headers, body, ssl_ctx) + else: + status, data, elapsed = http_request(url, method, headers, body, ssl_ctx) + + if status is not None: + print(f"状态码: {status} | 耗时: {elapsed:.2f}s") + else: + print(f"请求失败 | 耗时: {elapsed:.2f}s") + + if stream and status and status < 300: + for line in data.split("\n"): + print(line) + else: + print(format_json(data)) + + return status + + +def main(): + parser = argparse.ArgumentParser( + description="Anthropic 兼容性接口测试", + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument("--base_url", required=True, help="API 基础地址 (如 https://api.example.com)") + parser.add_argument("--api_key", default="", help="API 密钥 (默认空)") + parser.add_argument("--model", default="claude-sonnet-4-5", help="模型名称 (默认 claude-sonnet-4-5)") + parser.add_argument("--vision", action="store_true", help="执行视觉相关测试") + parser.add_argument("--stream", action="store_true", help="执行流式响应测试") + parser.add_argument("--tools", action="store_true", help="执行工具调用测试") + parser.add_argument("--thinking", action="store_true", help="执行扩展思维测试") + parser.add_argument("--all", action="store_true", help="开启所有扩展测试") + args = parser.parse_args() + + if args.all: + args.vision = True + args.stream = True + args.tools = True + args.thinking = True + + base_url = args.base_url.rstrip("/") + api_key = args.api_key + model = args.model + ssl_ctx = create_ssl_context() + headers = build_headers(api_key) + headers_bad_auth = build_headers("invalid-key-xxx") + + messages_url = f"{base_url}/v1/messages" + models_url = f"{base_url}/v1/models" + count_tokens_url = f"{base_url}/v1/messages/count_tokens" + + # --- 收集用例: (描述, 方法, URL, 请求头, 请求体, 是否流式) --- + cases = [] + + # ==== Models API ==== + cases.append(( + "获取模型列表 (GET /v1/models)", + 
"GET", models_url, headers, None, False + )) + cases.append(( + "获取模型列表(分页 limit=3)(GET /v1/models?limit=3)", + "GET", f"{models_url}?limit=3", headers, None, False + )) + cases.append(( + "获取指定模型详情 (GET /v1/models/{model})", + "GET", f"{models_url}/{model}", headers, None, False + )) + cases.append(( + "获取不存在的模型 (GET /v1/models/nonexistent-model-xxx)", + "GET", f"{models_url}/nonexistent-model-xxx", headers, None, False + )) + + # ==== Messages API: 正面用例 ==== + cases.append(( + "基本对话(仅 user)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 5, + "messages": [{"role": "user", "content": "Hi"}] + }, False + )) + cases.append(( + "system prompt + user 对话", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 5, + "system": "You are a helpful assistant.", + "messages": [{"role": "user", "content": "1+1="}] + }, False + )) + cases.append(( + "system prompt 数组格式(带缓存控制)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 5, + "system": [ + {"type": "text", "text": "You are a helpful assistant.", "cache_control": {"type": "ephemeral"}} + ], + "messages": [{"role": "user", "content": "Hi"}] + }, False + )) + cases.append(( + "多轮对话(含 assistant 历史)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 5, + "messages": [ + {"role": "user", "content": "Hi"}, + {"role": "assistant", "content": "Hello!"}, + {"role": "user", "content": "1+1="} + ] + }, False + )) + cases.append(( + "assistant prefill(部分回复填充)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 1, + "messages": [ + {"role": "user", "content": "What is latin for Ant? 
(A) Apoidea (B) Rhopalocera (C) Formicidae"}, + {"role": "assistant", "content": "The answer is ("} + ] + }, False + )) + cases.append(( + "content 数组格式(多个 text block)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 5, + "messages": [{"role": "user", "content": [ + {"type": "text", "text": "Hello"}, + {"type": "text", "text": "1+1=?"} + ]}] + }, False + )) + cases.append(( + "temperature + top_p", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 5, + "temperature": 0.5, + "top_p": 0.9, + "messages": [{"role": "user", "content": "Hi"}] + }, False + )) + cases.append(( + "temperature = 0(类确定性输出)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 5, + "temperature": 0, + "messages": [{"role": "user", "content": "1+1="}] + }, False + )) + cases.append(( + "top_k 参数", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 5, + "top_k": 40, + "messages": [{"role": "user", "content": "Hi"}] + }, False + )) + cases.append(( + "max_tokens 限制", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 10, + "messages": [{"role": "user", "content": "讲一个故事"}] + }, False + )) + cases.append(( + "stop_sequences", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 20, + "stop_sequences": ["5"], + "messages": [{"role": "user", "content": "数数: 1,2,3,"}] + }, False + )) + cases.append(( + "metadata 参数(user_id)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 5, + "metadata": {"user_id": "test-user-001"}, + "messages": [{"role": "user", "content": "Hi"}] + }, False + )) + cases.append(( + "assistant content 数组格式(text + tool_use 块)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 20, + "messages": [ + {"role": "user", "content": "帮我查一下北京的天气"}, + {"role": "assistant", "content": [ + {"type": "text", "text": "好的,让我查一下。"}, + {"type": "tool_use", "id": "toolu_prev_001", "name": "get_weather", "input": {"location": "Beijing"}} + ]}, + 
{"role": "user", "content": [ + {"type": "tool_result", "tool_use_id": "toolu_prev_001", "content": [ + {"type": "text", "text": "{\"temperature\": 22, \"condition\": \"晴\"}"} + ]} + ]} + ] + }, False + )) + + # ==== Count Tokens API ==== + cases.append(( + "计数 Token (POST /v1/messages/count_tokens)", + "POST", count_tokens_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "Hello, how are you?"}] + }, False + )) + cases.append(( + "计数 Token(带 system + tools)", + "POST", count_tokens_url, headers, { + "model": model, + "system": "You are a helpful assistant.", + "messages": [{"role": "user", "content": "Hi"}], + "tools": [{ + "name": "get_weather", + "description": "获取天气", + "input_schema": { + "type": "object", + "properties": {"location": {"type": "string"}}, + "required": ["location"] + } + }] + }, False + )) + cases.append(( + "计数 Token 缺少 model(负面)", + "POST", count_tokens_url, headers, { + "messages": [{"role": "user", "content": "Hi"}] + }, False + )) + + # ==== Messages API: 负面用例 ==== + cases.append(( + "缺少 x-api-key header(无认证)", + "POST", messages_url, { + "Content-Type": "application/json", + "anthropic-version": ANTHROPIC_VERSION, + }, { + "model": model, + "max_tokens": 5, + "messages": [{"role": "user", "content": "Hi"}] + }, False + )) + cases.append(( + "错误的 anthropic-version header", + "POST", messages_url, { + "Content-Type": "application/json", + "anthropic-version": "0000-00-00", + "x-api-key": api_key, + } if api_key else { + "Content-Type": "application/json", + "anthropic-version": "0000-00-00", + }, { + "model": model, + "max_tokens": 5, + "messages": [{"role": "user", "content": "Hi"}] + }, False + )) + cases.append(( + "缺少 model 参数", + "POST", messages_url, headers, { + "max_tokens": 5, + "messages": [{"role": "user", "content": "Hi"}] + }, False + )) + cases.append(( + "缺少 messages 参数", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 5 + }, False + )) + cases.append(( + "缺少 max_tokens 参数", + 
"POST", messages_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "Hi"}] + }, False + )) + cases.append(( + "messages 为空数组", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 5, + "messages": [] + }, False + )) + cases.append(( + "无效 API key", + "POST", messages_url, headers_bad_auth, { + "model": model, + "max_tokens": 5, + "messages": [{"role": "user", "content": "Hi"}] + }, False + )) + cases.append(( + "不存在的模型", + "POST", messages_url, headers, { + "model": "nonexistent-model-xxx", + "max_tokens": 5, + "messages": [{"role": "user", "content": "Hi"}] + }, False + )) + cases.append(( + "畸形 JSON body", + "POST", messages_url, headers, "invalid json{", False + )) + cases.append(( + "无效 role(非法消息角色)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 5, + "messages": [{"role": "system", "content": "You are helpful"}] + }, False + )) + cases.append(( + "max_tokens 为负数", + "POST", messages_url, headers, { + "model": model, + "max_tokens": -1, + "messages": [{"role": "user", "content": "Hi"}] + }, False + )) + cases.append(( + "max_tokens = 0", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 0, + "messages": [{"role": "user", "content": "Hi"}] + }, False + )) + cases.append(( + "temperature 超出范围 (2.0)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 5, + "temperature": 2.0, + "messages": [{"role": "user", "content": "Hi"}] + }, False + )) + + # ==== --vision ==== + if args.vision: + cases.append(( + "图片 URL 输入 (--vision)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 10, + "messages": [{"role": "user", "content": [ + {"type": "text", "text": "用一个词描述这张图"}, + {"type": "image", "source": { + "type": "url", + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/" + "Gfp-wisconsin-madison-the-nature-boardwalk.jpg/" + "2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + }} + ]}] + }, False + )) + + # ==== --stream ==== + if 
args.stream: + cases.append(( + "基本流式 (--stream)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 5, + "stream": True, + "messages": [{"role": "user", "content": "Hi"}] + }, True + )) + cases.append(( + "流式 + system prompt (--stream)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 5, + "stream": True, + "system": "Reply in one word.", + "messages": [{"role": "user", "content": "1+1="}] + }, True + )) + cases.append(( + "流式 + stop_sequences (--stream)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 20, + "stream": True, + "stop_sequences": ["5"], + "messages": [{"role": "user", "content": "数数: 1,2,3,"}] + }, True + )) + + # ==== --tools ==== + if args.tools: + tool_weather = { + "name": "get_weather", + "description": "获取指定城市的天气", + "input_schema": { + "type": "object", + "properties": { + "location": {"type": "string", "description": "城市名称"} + }, + "required": ["location"] + } + } + cases.append(( + "工具调用 tool_choice: auto (--tools)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 50, + "tools": [tool_weather], + "tool_choice": {"type": "auto"}, + "messages": [{"role": "user", "content": "北京天气怎么样?"}] + }, False + )) + cases.append(( + "工具调用 tool_choice: any (--tools)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 50, + "tools": [tool_weather], + "tool_choice": {"type": "any"}, + "messages": [{"role": "user", "content": "北京天气怎么样?"}] + }, False + )) + cases.append(( + "指定工具调用 tool_choice: {name} (--tools)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 50, + "tools": [tool_weather], + "tool_choice": {"type": "tool", "name": "get_weather"}, + "messages": [{"role": "user", "content": "北京天气怎么样?"}] + }, False + )) + cases.append(( + "tool_choice: none (--tools)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 20, + "tools": [tool_weather], + "tool_choice": {"type": "none"}, + "messages": [{"role": "user", 
"content": "北京天气怎么样?"}] + }, False + )) + cases.append(( + "多轮工具调用(tool_result 返回)(--tools)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 20, + "tools": [tool_weather], + "messages": [ + {"role": "user", "content": "北京天气怎么样?"}, + {"role": "assistant", "content": [ + {"type": "text", "text": "让我查一下。"}, + {"type": "tool_use", "id": "toolu_001", "name": "get_weather", "input": {"location": "Beijing"}} + ]}, + {"role": "user", "content": [ + {"type": "tool_result", "tool_use_id": "toolu_001", "content": "{\"temperature\": 22, \"condition\": \"晴\"}"} + ]} + ] + }, False + )) + cases.append(( + "多轮工具调用(tool_result 带 is_error)(--tools)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 20, + "tools": [tool_weather], + "messages": [ + {"role": "user", "content": "北京天气怎么样?"}, + {"role": "assistant", "content": [ + {"type": "tool_use", "id": "toolu_002", "name": "get_weather", "input": {"location": "Beijing"}} + ]}, + {"role": "user", "content": [ + {"type": "tool_result", "tool_use_id": "toolu_002", "is_error": True, "content": "天气服务不可用"} + ]} + ] + }, False + )) + cases.append(( + "tool_choice 指向不存在的工具(负面)(--tools)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 50, + "tools": [tool_weather], + "tool_choice": {"type": "tool", "name": "nonexistent_tool_xxx"}, + "messages": [{"role": "user", "content": "北京天气怎么样?"}] + }, False + )) + cases.append(( + "多工具定义 (--tools)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 50, + "tools": [ + tool_weather, + { + "name": "get_time", + "description": "获取指定城市的当前时间", + "input_schema": { + "type": "object", + "properties": { + "location": {"type": "string", "description": "城市名称"} + }, + "required": ["location"] + } + } + ], + "tool_choice": {"type": "auto"}, + "messages": [{"role": "user", "content": "北京现在几点了?天气怎么样?"}] + }, False + )) + + # ==== --thinking ==== + if args.thinking: + cases.append(( + "扩展思维 enabled (--thinking)", + "POST", messages_url, 
headers, { + "model": model, + "max_tokens": 200, + "thinking": {"type": "enabled", "budget_tokens": 100}, + "messages": [{"role": "user", "content": "1+1=?"}] + }, False + )) + cases.append(( + "扩展思维 adaptive (--thinking)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 200, + "thinking": {"type": "adaptive", "budget_tokens": 100}, + "messages": [{"role": "user", "content": "1+1=?"}] + }, False + )) + + # ==== --stream + --tools 组合 ==== + if args.stream and args.tools: + tool_weather_stream = { + "name": "get_weather", + "description": "获取指定城市的天气", + "input_schema": { + "type": "object", + "properties": { + "location": {"type": "string", "description": "城市名称"} + }, + "required": ["location"] + } + } + cases.append(( + "流式工具调用 (--stream --tools)", + "POST", messages_url, headers, { + "model": model, + "max_tokens": 50, + "stream": True, + "tools": [tool_weather_stream], + "tool_choice": {"type": "auto"}, + "messages": [{"role": "user", "content": "北京天气怎么样?"}] + }, True + )) + + # ==== 执行测试 ==== + total = len(cases) + count_2xx = 0 + count_other = 0 + + print("=" * 60) + print("Anthropic 兼容性测试") + print(f"目标: {base_url}") + print(f"模型: {model}") + print(f"时间: {time.strftime('%Y-%m-%d %H:%M:%S')}") + flags = [] + if args.vision: + flags.append("vision") + if args.stream: + flags.append("stream") + if args.tools: + flags.append("tools") + if args.thinking: + flags.append("thinking") + print(f"用例: {total} 个" + (f" | 扩展: {', '.join(flags)}" if flags else "")) + print("=" * 60) + + for i, (desc, method, url, hdrs, body, stream) in enumerate(cases, 1): + status = run_test(i, total, desc, url, method, hdrs, body, stream, ssl_ctx) + if status is not None and 200 <= status < 300: + count_2xx += 1 + else: + count_other += 1 + + print() + print("=" * 60) + print(f"测试完成 | 总计: {total} | HTTP 2xx: {count_2xx} | 非 2xx: {count_other}") + print("=" * 60) + + +if __name__ == "__main__": + main() diff --git a/scripts/openai_detect.py b/scripts/openai_detect.py 
new file mode 100755 index 0000000..2575329 --- /dev/null +++ b/scripts/openai_detect.py @@ -0,0 +1,552 @@ +#!/usr/bin/env python3 +"""OpenAI 兼容性接口测试脚本 + +用法: + python3 scripts/openai_detect.py --base_url [options] + +示例: + python3 scripts/openai_detect.py --base_url https://api.example.com/v1 + python3 scripts/openai_detect.py --base_url https://api.example.com/v1 --api_key sk-xxx --model gpt-4o + python3 scripts/openai_detect.py --base_url https://api.example.com/v1 --stream --tools +""" + +import json +import time +import ssl +import argparse +import urllib.request +import urllib.error + +TIMEOUT = 30 + + +def create_ssl_context(): + ctx = ssl.create_default_context() + ctx.check_hostname = False + ctx.verify_mode = ssl.CERT_NONE + return ctx + + +def http_request(url, method="GET", headers=None, body=None, ssl_ctx=None): + req = urllib.request.Request(url, method=method) + if headers: + for k, v in headers.items(): + req.add_header(k, v) + if body is not None: + if isinstance(body, str): + req.data = body.encode("utf-8") + else: + req.data = json.dumps(body).encode("utf-8") + + start = time.time() + try: + resp = urllib.request.urlopen(req, timeout=TIMEOUT, context=ssl_ctx) + elapsed = time.time() - start + return resp.getcode(), resp.read().decode("utf-8"), elapsed + except urllib.error.HTTPError as e: + elapsed = time.time() - start + return e.code, e.read().decode("utf-8"), elapsed + except Exception as e: + elapsed = time.time() - start + return None, str(e), elapsed + + +def http_stream_request(url, headers=None, body=None, ssl_ctx=None): + req = urllib.request.Request(url, method="POST") + if headers: + for k, v in headers.items(): + req.add_header(k, v) + if body is not None: + req.data = json.dumps(body).encode("utf-8") + + start = time.time() + try: + resp = urllib.request.urlopen(req, timeout=TIMEOUT, context=ssl_ctx) + status = resp.getcode() + lines = [] + for raw_line in resp: + line = raw_line.decode("utf-8").rstrip("\n\r") + 
if line: + lines.append(line) + elapsed = time.time() - start + return status, "\n".join(lines), elapsed + except urllib.error.HTTPError as e: + elapsed = time.time() - start + return e.code, e.read().decode("utf-8"), elapsed + except Exception as e: + elapsed = time.time() - start + return None, str(e), elapsed + + +def format_json(text): + try: + parsed = json.loads(text) + return json.dumps(parsed, ensure_ascii=False, indent=2) + except (json.JSONDecodeError, TypeError): + return text + + +def build_headers(api_key): + h = {"Content-Type": "application/json"} + if api_key: + h["Authorization"] = f"Bearer {api_key}" + return h + + +def run_test(index, total, desc, url, method, headers, body, stream, ssl_ctx): + print(f"\n[{index}/{total}] {desc}") + print(f">>> {method} {url}") + if body is not None: + if isinstance(body, str): + print(body) + else: + print(format_json(json.dumps(body, ensure_ascii=False))) + + if stream: + status, data, elapsed = http_stream_request(url, headers, body, ssl_ctx) + else: + status, data, elapsed = http_request(url, method, headers, body, ssl_ctx) + + if status is not None: + print(f"状态码: {status} | 耗时: {elapsed:.2f}s") + else: + print(f"请求失败 | 耗时: {elapsed:.2f}s") + + if stream and status and status < 300: + # 流式响应按 SSE 行逐行输出 + for line in data.split("\n"): + print(line) + else: + print(format_json(data)) + + return status + + +def main(): + parser = argparse.ArgumentParser( + description="OpenAI 兼容性接口测试", + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument("--base_url", required=True, help="API 基础地址 (如 https://api.example.com/v1)") + parser.add_argument("--api_key", default="", help="API 密钥 (默认空)") + parser.add_argument("--model", default="gpt-4o", help="模型名称 (默认 gpt-4o)") + parser.add_argument("--vision", action="store_true", help="执行视觉相关测试") + parser.add_argument("--stream", action="store_true", help="执行流式响应测试") + parser.add_argument("--tools", action="store_true", help="执行工具调用测试") + 
parser.add_argument("--logprobs", action="store_true", help="执行 logprobs 测试") + parser.add_argument("--json_schema", action="store_true", help="执行 Structured Output 测试") + parser.add_argument("--all", action="store_true", help="开启所有扩展测试") + args = parser.parse_args() + + if args.all: + args.vision = True + args.stream = True + args.tools = True + args.logprobs = True + args.json_schema = True + + base_url = args.base_url.rstrip("/") + api_key = args.api_key + model = args.model + ssl_ctx = create_ssl_context() + headers = build_headers(api_key) + headers_bad_auth = build_headers("invalid-key-xxx") + + chat_url = f"{base_url}/chat/completions" + + # --- 收集用例: (描述, 方法, URL, 请求头, 请求体, 是否流式) --- + cases = [] + + # ---- Models API ---- + cases.append(( + "获取模型列表 (GET /models)", + "GET", f"{base_url}/models", headers, None, False + )) + cases.append(( + "获取指定模型详情 (GET /models/{model})", + "GET", f"{base_url}/models/{model}", headers, None, False + )) + cases.append(( + "获取不存在的模型 (GET /models/nonexistent-model-xxx)", + "GET", f"{base_url}/models/nonexistent-model-xxx", headers, None, False + )) + + # ---- Chat Completions: 正面用例 ---- + cases.append(( + "基本对话(仅 user)", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "Hi"}], + "max_tokens": 5 + }, False + )) + cases.append(( + "system + user 对话", + "POST", chat_url, headers, { + "model": model, + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "1+1="} + ], + "max_tokens": 5 + }, False + )) + cases.append(( + "developer + user 对话", + "POST", chat_url, headers, { + "model": model, + "messages": [ + {"role": "developer", "content": "You are a helpful assistant."}, + {"role": "user", "content": "1+1="} + ], + "max_tokens": 5 + }, False + )) + cases.append(( + "多轮对话(含 assistant 历史)", + "POST", chat_url, headers, { + "model": model, + "messages": [ + {"role": "user", "content": "Hi"}, + {"role": "assistant", "content": 
"Hello!"}, + {"role": "user", "content": "1+1="} + ], + "max_tokens": 5 + }, False + )) + cases.append(( + "temperature + top_p", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "Hi"}], + "max_tokens": 5, + "temperature": 0.5, + "top_p": 0.9 + }, False + )) + cases.append(( + "max_tokens 限制", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "讲一个故事"}], + "max_tokens": 10 + }, False + )) + cases.append(( + "stop sequences", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "数数: 1,2,3,"}], + "max_tokens": 20, + "stop": ["5"] + }, False + )) + cases.append(( + "n=2 多候选", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "Hi"}], + "max_tokens": 5, + "n": 2 + }, False + )) + cases.append(( + "seed 参数", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "Hi"}], + "max_tokens": 5, + "seed": 42 + }, False + )) + cases.append(( + "frequency_penalty + presence_penalty", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "Hi"}], + "max_tokens": 5, + "frequency_penalty": 0.5, + "presence_penalty": 0.5 + }, False + )) + cases.append(( + "max_completion_tokens 参数", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "讲一个故事"}], + "max_completion_tokens": 10 + }, False + )) + cases.append(( + "JSON mode (response_format: json_object)", + "POST", chat_url, headers, { + "model": model, + "messages": [ + {"role": "system", "content": "以 JSON 格式回复: {\"answer\": \"ok\"}"}, + {"role": "user", "content": "test"} + ], + "max_tokens": 10, + "response_format": {"type": "json_object"} + }, False + )) + + # ---- Chat Completions: 负面用例 ---- + cases.append(( + "缺少 model 参数", + "POST", chat_url, headers, { + "messages": [{"role": "user", "content": "Hi"}], + "max_tokens": 5 + }, False + )) + cases.append(( + 
"缺少 messages 参数", + "POST", chat_url, headers, { + "model": model, + "max_tokens": 5 + }, False + )) + cases.append(( + "messages 为空数组", + "POST", chat_url, headers, { + "model": model, + "messages": [], + "max_tokens": 5 + }, False + )) + cases.append(( + "无效 API key", + "POST", chat_url, headers_bad_auth, { + "model": model, + "messages": [{"role": "user", "content": "Hi"}], + "max_tokens": 5 + }, False + )) + cases.append(( + "不存在的模型 (chat)", + "POST", chat_url, headers, { + "model": "nonexistent-model-xxx", + "messages": [{"role": "user", "content": "Hi"}], + "max_tokens": 5 + }, False + )) + cases.append(( + "畸形 JSON body", + "POST", chat_url, headers, "invalid json{", False + )) + + # ---- --vision ---- + if args.vision: + image_url = ( + "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/" + "Gfp-wisconsin-madison-the-nature-boardwalk.jpg/" + "2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + ) + cases.append(( + "图片 URL 输入 + detail 参数 (--vision)", + "POST", chat_url, headers, { + "model": model, + "messages": [ + {"role": "system", "content": "简短描述图片"}, + {"role": "user", "content": [ + {"type": "text", "text": "用一个词描述这张图"}, + {"type": "image_url", "image_url": { + "url": image_url, "detail": "low" + }} + ]} + ], + "max_tokens": 10 + }, False + )) + + # ---- --stream ---- + if args.stream: + cases.append(( + "基本流式 (--stream)", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "Hi"}], + "max_tokens": 5, + "stream": True + }, True + )) + cases.append(( + "流式 + include_usage (--stream)", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "Hi"}], + "max_tokens": 5, + "stream": True, + "stream_options": {"include_usage": True} + }, True + )) + cases.append(( + "流式 + stop sequences (--stream)", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "数数: 1,2,3,"}], + "max_tokens": 20, + "stream": True, + "stop": ["5"] + }, True + 
)) + + # ---- --tools ---- + if args.tools: + tool_weather = { + "type": "function", + "function": { + "name": "get_weather", + "description": "获取指定城市的天气", + "parameters": { + "type": "object", + "properties": { + "location": {"type": "string", "description": "城市名称"} + }, + "required": ["location"] + } + } + } + cases.append(( + "工具调用 tool_choice: auto (--tools)", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "北京天气怎么样?"}], + "max_tokens": 50, + "tools": [tool_weather], + "tool_choice": "auto" + }, False + )) + cases.append(( + "工具调用 tool_choice: required (--tools)", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "北京天气怎么样?"}], + "max_tokens": 50, + "tools": [tool_weather], + "tool_choice": "required" + }, False + )) + cases.append(( + "指定函数调用 tool_choice: {name} (--tools)", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "北京天气怎么样?"}], + "max_tokens": 50, + "tools": [tool_weather], + "tool_choice": { + "type": "function", + "function": {"name": "get_weather"} + } + }, False + )) + cases.append(( + "多轮工具调用(构造 tool 结果)(--tools)", + "POST", chat_url, headers, { + "model": model, + "messages": [ + {"role": "user", "content": "北京天气怎么样?"}, + {"role": "assistant", "content": None, "tool_calls": [{ + "id": "call_001", "type": "function", + "function": { + "name": "get_weather", + "arguments": "{\"location\": \"Beijing\"}" + } + }]}, + {"role": "tool", "tool_call_id": "call_001", + "content": "{\"temperature\": 22, \"condition\": \"晴\"}"} + ], + "max_tokens": 20, + "tools": [tool_weather] + }, False + )) + cases.append(( + "parallel_tool_calls: false (--tools)", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "北京和上海的天气怎么样?"}], + "max_tokens": 50, + "tools": [tool_weather], + "tool_choice": "auto", + "parallel_tool_calls": False + }, False + )) + + # ---- --logprobs ---- + if args.logprobs: + 
cases.append(( + "logprobs + top_logprobs (--logprobs)", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "Hi"}], + "max_tokens": 5, + "logprobs": True, + "top_logprobs": 2 + }, False + )) + + # ---- --json-schema ---- + if args.json_schema: + cases.append(( + "Structured Output json_schema (--json_schema)", + "POST", chat_url, headers, { + "model": model, + "messages": [{"role": "user", "content": "1+1等于几?"}], + "max_tokens": 20, + "response_format": { + "type": "json_schema", + "json_schema": { + "name": "math_answer", + "strict": True, + "schema": { + "type": "object", + "properties": { + "answer": {"type": "number"}, + "explanation": {"type": "string"} + }, + "required": ["answer", "explanation"], + "additionalProperties": False + } + } + } + }, False + )) + + # ---- 执行测试 ---- + total = len(cases) + count_2xx = 0 + count_other = 0 + + print("=" * 60) + print("OpenAI 兼容性测试") + print(f"目标: {base_url}") + print(f"模型: {model}") + print(f"时间: {time.strftime('%Y-%m-%d %H:%M:%S')}") + flags = [] + if args.vision: + flags.append("vision") + if args.stream: + flags.append("stream") + if args.tools: + flags.append("tools") + if args.logprobs: + flags.append("logprobs") + if args.json_schema: + flags.append("json-schema") + print(f"用例: {total} 个" + (f" | 扩展: {', '.join(flags)}" if flags else "")) + print("=" * 60) + + for i, (desc, method, url, hdrs, body, stream) in enumerate(cases, 1): + status = run_test(i, total, desc, url, method, hdrs, body, stream, ssl_ctx) + if status is not None and 200 <= status < 300: + count_2xx += 1 + else: + count_other += 1 + + print() + print("=" * 60) + print(f"测试完成 | 总计: {total} | HTTP 2xx: {count_2xx} | 非 2xx: {count_other}") + print("=" * 60) + + +if __name__ == "__main__": + main()