1
0
Files
nex/scripts/detect_openai.py
lanyuanxiaoyao 44d6af026a feat: 完善流式测试覆盖并精简用例
- 提取共享定义(tool_weather, image_url, json_schema_math)到功能块前
- 流式用例精简为代表子集:核心 6-8 个 + 扩展各 1-2 个 + 高级参数代表
- OpenAI: 15 个流式用例(核心 8 + vision/tools/logprobs/json_schema + 高级参数)
- Anthropic: 11 个流式用例(核心 6 + vision/tools/thinking + 高级参数)
- 更新 README:新增流式测试覆盖原则、parse_sse_events 函数说明
2026-04-21 17:18:35 +08:00

1200 lines
39 KiB
Python
Executable File
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
#!/usr/bin/env python3
"""OpenAI 兼容性接口测试脚本
用法:
python3 scripts/openai_compat_test.py --base_url <url> [options]
示例:
python3 scripts/openai_compat_test.py --base_url https://api.example.com/v1
python3 scripts/openai_compat_test.py --base_url https://api.example.com/v1 --api_key sk-xxx --model gpt-4o
python3 scripts/openai_compat_test.py --base_url https://api.example.com/v1 --stream --tools
"""
import json
import argparse
from typing import Dict, List, Tuple, Any, Optional
from core import (
create_ssl_context,
TestCase,
run_test_suite,
validate_response_structure,
)
def build_headers(api_key: str) -> Dict[str, str]:
    """Build the HTTP headers for an OpenAI-style API request.

    The Authorization header is only attached when a non-empty key is given,
    so unauthenticated probes can reuse the same helper.
    """
    headers: Dict[str, str] = {"Content-Type": "application/json"}
    if api_key:
        headers["Authorization"] = f"Bearer {api_key}"
    return headers
# ==================== OpenAI 响应验证函数 ====================
def validate_openai_models_list_response(response_text: str) -> Tuple[bool, List[str]]:
    """Validate an OpenAI Models List response.

    Per the API reference the body should contain:
    - object: "list"
    - data: array of Model objects (each with id/object/created/owned_by)

    Returns:
        (passed, list of error messages)
    """
    try:
        payload = json.loads(response_text)
    except json.JSONDecodeError as e:
        return False, [f"响应不是有效的JSON: {e}"]

    problems: List[str] = []

    # Top-level envelope checks.
    if "object" not in payload:
        problems.append("缺少必需字段: object")
    elif payload["object"] != "list":
        problems.append(f"字段 'object' 值错误: 期望 'list', 实际 '{payload['object']}'")

    if "data" not in payload:
        problems.append("缺少必需字段: data")
    elif not isinstance(payload["data"], list):
        problems.append(f"字段 'data' 类型错误: 期望 list, 实际 {type(payload['data']).__name__}")
    else:
        # Per-entry Model object checks.
        for idx, entry in enumerate(payload["data"]):
            if not isinstance(entry, dict):
                problems.append(f"data[{idx}] 不是对象")
                continue
            for field in ("id", "object", "created", "owned_by"):
                if field not in entry:
                    problems.append(f"data[{idx}] 缺少必需字段: {field}")
            if "object" in entry and entry["object"] != "model":
                problems.append(f"data[{idx}].object 值错误: 期望 'model', 实际 '{entry['object']}'")

    return not problems, problems
def validate_openai_model_retrieve_response(response_text: str) -> Tuple[bool, List[str]]:
    """Validate an OpenAI Model Retrieve response.

    Per the API reference the body should contain:
    - id: string
    - object: "model"
    - created: number
    - owned_by: string

    Delegates the structural checks to core.validate_response_structure.
    """
    return validate_response_structure(
        response_text,
        ["id", "object", "created", "owned_by"],
        {"id": str, "object": str, "created": (int, float), "owned_by": str},
        {"object": ["model"]},
    )
def validate_openai_chat_completion_response(response_text: str, expected_n: Optional[int] = None) -> Tuple[bool, List[str]]:
    """Validate a non-streaming OpenAI Chat Completion response.

    Per the API reference the body should contain:
    - id: string
    - object: "chat.completion"
    - created: number
    - model: string
    - choices: array of choice objects
    - usage: object (optional)

    Args:
        response_text: raw response body.
        expected_n: expected number of choices (the request's ``n``); None skips
            the count check.

    Returns:
        (passed, list of error messages)
    """
    try:
        payload = json.loads(response_text)
    except json.JSONDecodeError as e:
        return False, [f"响应不是有效的JSON: {e}"]

    problems: List[str] = []

    for field in ("id", "object", "created", "model", "choices"):
        if field not in payload:
            problems.append(f"缺少必需字段: {field}")

    if "object" in payload and payload["object"] != "chat.completion":
        problems.append(f"字段 'object' 值错误: 期望 'chat.completion', 实际 '{payload['object']}'")

    if "choices" in payload:
        choices = payload["choices"]
        if not isinstance(choices, list):
            problems.append(f"字段 'choices' 类型错误: 期望 list, 实际 {type(choices).__name__}")
        else:
            # The number of choices should mirror the request's n parameter.
            if expected_n is not None and len(choices) != expected_n:
                problems.append(f"choices 数量不匹配: 期望 {expected_n}, 实际 {len(choices)}")
            for idx, choice in enumerate(choices):
                if not isinstance(choice, dict):
                    problems.append(f"choices[{idx}] 不是对象")
                    continue
                for field in ("index", "message", "finish_reason"):
                    if field not in choice:
                        problems.append(f"choices[{idx}] 缺少必需字段: {field}")
                message = choice.get("message")
                if "message" in choice and isinstance(message, dict):
                    if "role" not in message:
                        problems.append(f"choices[{idx}].message 缺少必需字段: role")
                    elif message["role"] != "assistant":
                        problems.append(f"choices[{idx}].message.role 值错误: 期望 'assistant', 实际 '{message['role']}'")

    # usage is optional; validate its shape only when present and non-null.
    if "usage" in payload and payload["usage"] is not None:
        usage = payload["usage"]
        if not isinstance(usage, dict):
            problems.append(f"字段 'usage' 类型错误: 期望 object, 实际 {type(usage).__name__}")
        else:
            for field in ("prompt_tokens", "completion_tokens", "total_tokens"):
                if field not in usage:
                    problems.append(f"usage 缺少必需字段: {field}")

    return not problems, problems
def validate_openai_streaming_response(response_text: str, expected_n: Optional[int] = None) -> Tuple[bool, List[str]]:
    """Validate an OpenAI streaming (SSE) chat completion response.

    Streaming responses use SSE framing: each event line starts with
    "data: " and carries a chat.completion.chunk object; the stream ends
    with "data: [DONE]".

    Checks performed:
    - every event payload parses as JSON
    - object == "chat.completion.chunk"
    - choices is a list and each choice object carries an "index"
    - when expected_n is given, the per-event choices count matches, and the
      overall stream contains that count (events with empty choices, e.g. a
      trailing usage chunk, are excluded from the overall check)

    NOTE(review): the original docstring also promised that the last
    non-[DONE] event's finish_reason is non-null; that check is not
    implemented here — confirm whether it should be added.

    Args:
        response_text: raw SSE-formatted response text.
        expected_n: expected number of choices per chunk, or None to skip.

    Returns:
        (passed, list of error messages)
    """
    from core import parse_sse_events

    events = parse_sse_events(response_text)
    if not events:
        return False, ["未收到任何 SSE 事件"]

    problems: List[str] = []
    seen_counts = set()

    for idx, raw in enumerate(events):
        try:
            chunk = json.loads(raw)
        except json.JSONDecodeError as e:
            problems.append(f"事件[{idx}] 不是有效的JSON: {e}")
            continue

        if "object" not in chunk:
            problems.append(f"事件[{idx}] 缺少必需字段: object")
        elif chunk["object"] != "chat.completion.chunk":
            problems.append(f"事件[{idx}].object 值错误: 期望 'chat.completion.chunk', 实际 '{chunk['object']}'")

        if "choices" not in chunk:
            problems.append(f"事件[{idx}] 缺少必需字段: choices")
        elif not isinstance(chunk["choices"], list):
            problems.append(f"事件[{idx}].choices 类型错误: 期望 list")
        else:
            count = len(chunk["choices"])
            seen_counts.add(count)
            if expected_n is not None and count != expected_n:
                problems.append(f"事件[{idx}].choices 数量不匹配: 期望 {expected_n}, 实际 {count}")
            for j, choice in enumerate(chunk["choices"]):
                if not isinstance(choice, dict):
                    problems.append(f"事件[{idx}].choices[{j}] 不是对象")
                    continue
                if "index" not in choice:
                    problems.append(f"事件[{idx}].choices[{j}] 缺少必需字段: index")

    # Ignore empty-choices events (e.g. the final usage-only chunk) when
    # checking overall consistency with expected_n.
    non_empty_counts = {c for c in seen_counts if c > 0}
    if expected_n is not None and expected_n not in non_empty_counts:
        problems.append(f"流式响应中 choices 数量不一致: 期望 {expected_n}, 实际出现 {non_empty_counts}")

    return not problems, problems
def main():
    """Entry point: parse CLI flags, assemble the test-case list, run the suite.

    The base Models/Chat cases always run; --vision/--stream/--tools/
    --logprobs/--json_schema (or --all) append the corresponding extension
    cases. Repeated parameter-sweep cases (out-of-range values,
    reasoning_effort, service_tier, verbosity) are generated from small
    data tables to keep the list maintainable.
    """
    parser = argparse.ArgumentParser(
        description="OpenAI 兼容性接口测试",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("--base_url", required=True, help="API 基础地址 (如 https://api.example.com/v1)")
    parser.add_argument("--api_key", default="", help="API 密钥 (默认空)")
    parser.add_argument("--model", default="gpt-4o", help="模型名称 (默认 gpt-4o)")
    parser.add_argument("--vision", action="store_true", help="执行视觉相关测试")
    parser.add_argument("--stream", action="store_true", help="执行流式响应测试")
    parser.add_argument("--tools", action="store_true", help="执行工具调用测试")
    parser.add_argument("--logprobs", action="store_true", help="执行 logprobs 测试")
    parser.add_argument("--json_schema", action="store_true", help="执行 Structured Output 测试")
    parser.add_argument("--all", action="store_true", help="开启所有扩展测试")
    args = parser.parse_args()

    # --all is shorthand for enabling every extension flag.
    if args.all:
        args.vision = True
        args.stream = True
        args.tools = True
        args.logprobs = True
        args.json_schema = True

    base_url = args.base_url.rstrip("/")
    api_key = args.api_key
    model = args.model
    ssl_ctx = create_ssl_context()
    headers = build_headers(api_key)
    # Deliberately invalid key for the auth-failure negative case.
    headers_bad_auth = build_headers("invalid-key-xxx")
    chat_url = f"{base_url}/chat/completions"

    # --- Collect test cases ---
    cases: List[TestCase] = []

    # ---- Models API ----
    cases.append(TestCase(
        desc="获取模型列表 (GET /models)",
        method="GET",
        url=f"{base_url}/models",
        headers=headers,
        validator=validate_openai_models_list_response
    ))
    cases.append(TestCase(
        desc="获取指定模型详情 (GET /models/{model})",
        method="GET",
        url=f"{base_url}/models/{model}",
        headers=headers,
        validator=validate_openai_model_retrieve_response
    ))
    cases.append(TestCase(
        desc="获取不存在的模型 (GET /models/nonexistent-model-xxx)",
        method="GET",
        url=f"{base_url}/models/nonexistent-model-xxx",
        headers=headers
    ))

    # ---- Chat Completions: positive cases ----
    cases.append(TestCase(
        desc="基本对话(仅 user",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [{"role": "user", "content": "Hi"}],
            "max_tokens": 5
        },
        validator=validate_openai_chat_completion_response
    ))
    cases.append(TestCase(
        desc="system + user 对话",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "1+1="}
            ],
            "max_tokens": 5
        },
        validator=validate_openai_chat_completion_response
    ))
    cases.append(TestCase(
        desc="developer + user 对话",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [
                {"role": "developer", "content": "You are a helpful assistant."},
                {"role": "user", "content": "1+1="}
            ],
            "max_tokens": 5
        },
        validator=validate_openai_chat_completion_response
    ))
    cases.append(TestCase(
        desc="多轮对话(含 assistant 历史)",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [
                {"role": "user", "content": "Hi"},
                {"role": "assistant", "content": "Hello!"},
                {"role": "user", "content": "1+1="}
            ],
            "max_tokens": 5
        },
        validator=validate_openai_chat_completion_response
    ))
    cases.append(TestCase(
        desc="temperature + top_p",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [{"role": "user", "content": "Hi"}],
            "max_tokens": 5,
            "temperature": 0.5,
            "top_p": 0.9
        },
        validator=validate_openai_chat_completion_response
    ))
    cases.append(TestCase(
        desc="max_tokens 限制",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [{"role": "user", "content": "讲一个故事"}],
            "max_tokens": 10
        },
        validator=validate_openai_chat_completion_response
    ))
    cases.append(TestCase(
        desc="stop sequences",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [{"role": "user", "content": "数数: 1,2,3,"}],
            "max_tokens": 20,
            "stop": ["5"]
        },
        validator=validate_openai_chat_completion_response
    ))
    cases.append(TestCase(
        desc="n=2 多候选",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [{"role": "user", "content": "Hi"}],
            "max_tokens": 5,
            "n": 2
        },
        # Bind expected_n so the validator checks the choices count.
        validator=lambda r: validate_openai_chat_completion_response(r, expected_n=2)
    ))
    cases.append(TestCase(
        desc="seed 参数",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [{"role": "user", "content": "Hi"}],
            "max_tokens": 5,
            "seed": 42
        },
        validator=validate_openai_chat_completion_response
    ))
    cases.append(TestCase(
        desc="frequency_penalty + presence_penalty",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [{"role": "user", "content": "Hi"}],
            "max_tokens": 5,
            "frequency_penalty": 0.5,
            "presence_penalty": 0.5
        },
        validator=validate_openai_chat_completion_response
    ))
    cases.append(TestCase(
        desc="max_completion_tokens 参数",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [{"role": "user", "content": "讲一个故事"}],
            "max_completion_tokens": 10
        },
        validator=validate_openai_chat_completion_response
    ))
    cases.append(TestCase(
        desc="JSON mode (response_format: json_object)",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [
                {"role": "system", "content": "以 JSON 格式回复: {\"answer\": \"ok\"}"},
                {"role": "user", "content": "test"}
            ],
            "max_tokens": 10,
            "response_format": {"type": "json_object"}
        },
        validator=validate_openai_chat_completion_response
    ))

    # ---- Chat Completions: negative cases ----
    cases.append(TestCase(
        desc="缺少 model 参数",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "messages": [{"role": "user", "content": "Hi"}],
            "max_tokens": 5
        }
    ))
    cases.append(TestCase(
        desc="缺少 messages 参数",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "max_tokens": 5
        }
    ))
    cases.append(TestCase(
        desc="messages 为空数组",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [],
            "max_tokens": 5
        }
    ))
    cases.append(TestCase(
        desc="无效 API key",
        method="POST",
        url=chat_url,
        headers=headers_bad_auth,
        body={
            "model": model,
            "messages": [{"role": "user", "content": "Hi"}],
            "max_tokens": 5
        }
    ))
    cases.append(TestCase(
        desc="不存在的模型 (chat)",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": "nonexistent-model-xxx",
            "messages": [{"role": "user", "content": "Hi"}],
            "max_tokens": 5
        }
    ))
    cases.append(TestCase(
        desc="畸形 JSON body",
        method="POST",
        url=chat_url,
        headers=headers,
        body="invalid json{"
    ))
    cases.append(TestCase(
        desc="max_tokens 为负数",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [{"role": "user", "content": "Hi"}],
            "max_tokens": -1
        }
    ))
    cases.append(TestCase(
        desc="max_tokens = 0",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [{"role": "user", "content": "Hi"}],
            "max_tokens": 0
        }
    ))
    # Out-of-range sampling parameters, generated from a (param, value) table.
    for param, value in [
        ("temperature", 2.5),
        ("frequency_penalty", 3.0),
        ("frequency_penalty", -3.0),
        ("presence_penalty", 3.0),
        ("presence_penalty", -3.0),
        ("top_p", 1.5),
        ("top_p", -0.1),
    ]:
        cases.append(TestCase(
            desc=f"{param} 超出范围 ({value})",
            method="POST",
            url=chat_url,
            headers=headers,
            body={
                "model": model,
                "messages": [{"role": "user", "content": "Hi"}],
                "max_tokens": 5,
                param: value
            }
        ))
    cases.append(TestCase(
        desc="n 为负数",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [{"role": "user", "content": "Hi"}],
            "max_tokens": 5,
            "n": -1
        }
    ))
    cases.append(TestCase(
        desc="n 为 0",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [{"role": "user", "content": "Hi"}],
            "max_tokens": 5,
            "n": 0
        }
    ))

    # ---- Shared definitions (used by both streaming and non-streaming cases) ----
    image_url = (
        "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/"
        "Gfp-wisconsin-madison-the-nature-boardwalk.jpg/"
        "2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
    )
    tool_weather = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "获取指定城市的天气",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string", "description": "城市名称"}
                },
                "required": ["location"]
            }
        }
    }
    json_schema_math = {
        "name": "math_answer",
        "strict": True,
        "schema": {
            "type": "object",
            "properties": {
                "answer": {"type": "number"},
                "explanation": {"type": "string"}
            },
            "required": ["answer", "explanation"],
            "additionalProperties": False
        }
    }

    # ---- --vision ----
    if args.vision:
        cases.append(TestCase(
            desc="图片 URL 输入 + detail 参数 (--vision)",
            method="POST",
            url=chat_url,
            headers=headers,
            body={
                "model": model,
                "messages": [
                    {"role": "system", "content": "简短描述图片"},
                    {"role": "user", "content": [
                        {"type": "text", "text": "用一个词描述这张图"},
                        {"type": "image_url", "image_url": {
                            "url": image_url, "detail": "low"
                        }}
                    ]}
                ],
                "max_tokens": 10
            },
            validator=validate_openai_chat_completion_response
        ))

    # ---- --stream ----
    if args.stream:
        # Core cases
        cases.append(TestCase(
            desc="流式基本对话",
            method="POST",
            url=chat_url,
            headers=headers,
            body={"model": model, "messages": [{"role": "user", "content": "Hi"}], "max_tokens": 5, "stream": True},
            stream=True,
            validator=validate_openai_streaming_response
        ))
        cases.append(TestCase(
            desc="流式 + include_usage",
            method="POST",
            url=chat_url,
            headers=headers,
            body={"model": model, "messages": [{"role": "user", "content": "Hi"}], "max_tokens": 5, "stream": True, "stream_options": {"include_usage": True}},
            stream=True,
            validator=validate_openai_streaming_response
        ))
        cases.append(TestCase(
            desc="流式 + system prompt",
            method="POST",
            url=chat_url,
            headers=headers,
            body={"model": model, "messages": [{"role": "system", "content": "有帮助的助手"}, {"role": "user", "content": "Hi"}], "max_tokens": 5, "stream": True},
            stream=True,
            validator=validate_openai_streaming_response
        ))
        cases.append(TestCase(
            desc="流式多轮对话",
            method="POST",
            url=chat_url,
            headers=headers,
            body={"model": model, "messages": [{"role": "user", "content": "1+1?"}, {"role": "assistant", "content": "2"}, {"role": "user", "content": "2+2?"}], "max_tokens": 5, "stream": True},
            stream=True,
            validator=validate_openai_streaming_response
        ))
        cases.append(TestCase(
            desc="流式 temperature + top_p",
            method="POST",
            url=chat_url,
            headers=headers,
            body={"model": model, "messages": [{"role": "user", "content": "Hi"}], "temperature": 0.5, "top_p": 0.9, "max_tokens": 5, "stream": True},
            stream=True,
            validator=validate_openai_streaming_response
        ))
        cases.append(TestCase(
            desc="流式 max_tokens",
            method="POST",
            url=chat_url,
            headers=headers,
            body={"model": model, "messages": [{"role": "user", "content": "Hi"}], "max_tokens": 3, "stream": True},
            stream=True,
            validator=validate_openai_streaming_response
        ))
        cases.append(TestCase(
            desc="流式 stop_sequences",
            method="POST",
            url=chat_url,
            headers=headers,
            body={"model": model, "messages": [{"role": "user", "content": "数数: 1,2,3,"}], "max_tokens": 10, "stop": ["5"], "stream": True},
            stream=True,
            validator=validate_openai_streaming_response
        ))
        cases.append(TestCase(
            desc="流式 JSON mode",
            method="POST",
            url=chat_url,
            headers=headers,
            body={"model": model, "messages": [{"role": "system", "content": "以JSON回复"}, {"role": "user", "content": "颜色"}], "max_tokens": 20, "response_format": {"type": "json_object"}, "stream": True},
            stream=True,
            validator=validate_openai_streaming_response
        ))
        # Streaming + vision
        if args.vision:
            cases.append(TestCase(
                desc="流式图片输入",
                method="POST",
                url=chat_url,
                headers=headers,
                body={"model": model, "messages": [{"role": "user", "content": [{"type": "text", "text": "描述图"}, {"type": "image_url", "image_url": {"url": image_url}}]}], "max_tokens": 10, "stream": True},
                stream=True,
                validator=validate_openai_streaming_response
            ))
        # Streaming + tools
        if args.tools:
            cases.append(TestCase(
                desc="流式工具调用 auto",
                method="POST",
                url=chat_url,
                headers=headers,
                body={"model": model, "messages": [{"role": "user", "content": "北京天气?"}], "max_tokens": 50, "stream": True, "tools": [tool_weather], "tool_choice": "auto"},
                stream=True,
                validator=validate_openai_streaming_response
            ))
            cases.append(TestCase(
                desc="流式多轮工具调用",
                method="POST",
                url=chat_url,
                headers=headers,
                body={"model": model, "messages": [{"role": "user", "content": "北京天气?"}, {"role": "assistant", "content": None, "tool_calls": [{"id": "call_001", "type": "function", "function": {"name": "get_weather", "arguments": '{"location": "Beijing"}'}}]}, {"role": "tool", "tool_call_id": "call_001", "content": '{"temp": 22}'}], "max_tokens": 10, "stream": True, "tools": [tool_weather]},
                stream=True,
                validator=validate_openai_streaming_response
            ))
        # Streaming + logprobs
        if args.logprobs:
            cases.append(TestCase(
                desc="流式 logprobs",
                method="POST",
                url=chat_url,
                headers=headers,
                body={"model": model, "messages": [{"role": "user", "content": "Hi"}], "max_tokens": 5, "logprobs": True, "top_logprobs": 2, "stream": True},
                stream=True,
                validator=validate_openai_streaming_response
            ))
        # Streaming + json_schema
        if args.json_schema:
            cases.append(TestCase(
                desc="流式 json_schema",
                method="POST",
                url=chat_url,
                headers=headers,
                body={"model": model, "messages": [{"role": "user", "content": "1+1=?"}], "max_tokens": 20, "response_format": {"type": "json_schema", "json_schema": json_schema_math}, "stream": True},
                stream=True,
                validator=validate_openai_streaming_response
            ))
        # Streaming advanced parameters (representative picks)
        cases.append(TestCase(
            desc="流式 reasoning_effort: medium",
            method="POST",
            url=chat_url,
            headers=headers,
            body={"model": model, "messages": [{"role": "user", "content": "Hi"}], "max_tokens": 5, "reasoning_effort": "medium", "stream": True},
            stream=True,
            validator=validate_openai_streaming_response
        ))
        cases.append(TestCase(
            desc="流式 service_tier: auto",
            method="POST",
            url=chat_url,
            headers=headers,
            body={"model": model, "messages": [{"role": "user", "content": "Hi"}], "max_tokens": 5, "service_tier": "auto", "stream": True},
            stream=True,
            validator=validate_openai_streaming_response
        ))

    # ---- --tools ----
    if args.tools:
        cases.append(TestCase(
            desc="工具调用 tool_choice: auto (--tools)",
            method="POST",
            url=chat_url,
            headers=headers,
            body={
                "model": model,
                "messages": [{"role": "user", "content": "北京天气怎么样?"}],
                "max_tokens": 50,
                "tools": [tool_weather],
                "tool_choice": "auto"
            },
            validator=validate_openai_chat_completion_response
        ))
        cases.append(TestCase(
            desc="工具调用 tool_choice: required (--tools)",
            method="POST",
            url=chat_url,
            headers=headers,
            body={
                "model": model,
                "messages": [{"role": "user", "content": "北京天气怎么样?"}],
                "max_tokens": 50,
                "tools": [tool_weather],
                "tool_choice": "required"
            },
            validator=validate_openai_chat_completion_response
        ))
        cases.append(TestCase(
            desc="指定函数调用 tool_choice: {name} (--tools)",
            method="POST",
            url=chat_url,
            headers=headers,
            body={
                "model": model,
                "messages": [{"role": "user", "content": "北京天气怎么样?"}],
                "max_tokens": 50,
                "tools": [tool_weather],
                "tool_choice": {
                    "type": "function",
                    "function": {"name": "get_weather"}
                }
            },
            validator=validate_openai_chat_completion_response
        ))
        cases.append(TestCase(
            desc="多轮工具调用(构造 tool 结果)(--tools)",
            method="POST",
            url=chat_url,
            headers=headers,
            body={
                "model": model,
                "messages": [
                    {"role": "user", "content": "北京天气怎么样?"},
                    {"role": "assistant", "content": None, "tool_calls": [{
                        "id": "call_001", "type": "function",
                        "function": {
                            "name": "get_weather",
                            "arguments": "{\"location\": \"Beijing\"}"
                        }
                    }]},
                    {"role": "tool", "tool_call_id": "call_001",
                     "content": "{\"temperature\": 22, \"condition\": \"\"}"}
                ],
                "max_tokens": 20,
                "tools": [tool_weather]
            },
            validator=validate_openai_chat_completion_response
        ))
        cases.append(TestCase(
            desc="parallel_tool_calls: false (--tools)",
            method="POST",
            url=chat_url,
            headers=headers,
            body={
                "model": model,
                "messages": [{"role": "user", "content": "北京和上海的天气怎么样?"}],
                "max_tokens": 50,
                "tools": [tool_weather],
                "tool_choice": "auto",
                "parallel_tool_calls": False
            },
            validator=validate_openai_chat_completion_response
        ))

    # ---- --logprobs ----
    if args.logprobs:
        cases.append(TestCase(
            desc="logprobs + top_logprobs (--logprobs)",
            method="POST",
            url=chat_url,
            headers=headers,
            body={
                "model": model,
                "messages": [{"role": "user", "content": "Hi"}],
                "max_tokens": 5,
                "logprobs": True,
                "top_logprobs": 2
            },
            validator=validate_openai_chat_completion_response
        ))

    # ---- --json-schema ----
    if args.json_schema:
        cases.append(TestCase(
            desc="Structured Output json_schema (--json_schema)",
            method="POST",
            url=chat_url,
            headers=headers,
            body={
                "model": model,
                "messages": [{"role": "user", "content": "1+1等于几"}],
                "max_tokens": 20,
                "response_format": {
                    "type": "json_schema",
                    "json_schema": json_schema_math
                }
            },
            validator=validate_openai_chat_completion_response
        ))

    # ---- Advanced parameter tests ----
    # logit_bias: bias the likelihood of specific tokens
    cases.append(TestCase(
        desc="logit_bias 参数测试",
        method="POST",
        url=chat_url,
        headers=headers,
        body={
            "model": model,
            "messages": [{"role": "user", "content": "Hello"}],
            "max_tokens": 5,
            "logit_bias": {"1234": -100, "5678": 50}
        },
        validator=validate_openai_chat_completion_response
    ))
    # reasoning_effort: reasoning effort level (requires model support)
    for effort in ("none", "minimal", "low", "medium", "high"):
        cases.append(TestCase(
            desc=f"reasoning_effort: {effort}",
            method="POST",
            url=chat_url,
            headers=headers,
            body={
                "model": model,
                "messages": [{"role": "user", "content": "1+1=?"}],
                "max_tokens": 10,
                "reasoning_effort": effort
            },
            validator=validate_openai_chat_completion_response
        ))
    # service_tier: service tier selection
    for tier in ("auto", "default", "flex", "priority"):
        cases.append(TestCase(
            desc=f"service_tier: {tier}",
            method="POST",
            url=chat_url,
            headers=headers,
            body={
                "model": model,
                "messages": [{"role": "user", "content": "Hi"}],
                "max_tokens": 5,
                "service_tier": tier
            },
            validator=validate_openai_chat_completion_response
        ))
    # verbosity: output verbosity level
    for level in ("low", "medium", "high"):
        cases.append(TestCase(
            desc=f"verbosity: {level}",
            method="POST",
            url=chat_url,
            headers=headers,
            body={
                "model": model,
                "messages": [{"role": "user", "content": "介绍一下Python"}],
                "max_tokens": 50,
                "verbosity": level
            },
            validator=validate_openai_chat_completion_response
        ))

    # ---- Run the suite ----
    flags = []
    if args.vision:
        flags.append("vision")
    if args.stream:
        flags.append("stream")
    if args.tools:
        flags.append("tools")
    if args.logprobs:
        flags.append("logprobs")
    if args.json_schema:
        flags.append("json-schema")
    run_test_suite(
        cases=cases,
        ssl_ctx=ssl_ctx,
        title="OpenAI 兼容性测试",
        base_url=base_url,
        model=model,
        flags=flags
    )
# Script entry point: run the suite only when executed directly, not on import.
if __name__ == "__main__":
    main()