feat: 补充 OpenAI 枚举参数和边界越界测试
- service_tier: 补充 flex, priority 测试
- reasoning_effort: 补充 none, minimal 测试
- verbosity: 补充 medium, high 测试
- 边界越界测试: frequency_penalty, presence_penalty, top_p, n
- core.py: http_stream_request 支持 method 参数
- Anthropic: 补充 content_block_start 事件验证
This commit is contained in:
@@ -140,7 +140,8 @@ def http_stream_request(
|
||||
headers: Optional[Dict[str, str]] = None,
|
||||
body: Optional[Any] = None,
|
||||
ssl_ctx: Optional[ssl.SSLContext] = None,
|
||||
-    retries: int = MAX_RETRIES
+    retries: int = MAX_RETRIES,
+    method: str = "POST"
|
||||
) -> TestResult:
|
||||
"""执行流式 HTTP 请求 (SSE,支持重试)
|
||||
|
||||
@@ -150,11 +151,12 @@ def http_stream_request(
|
||||
body: 请求体 (dict)
|
||||
ssl_ctx: SSL 上下文
|
||||
retries: 重试次数
|
||||
method: HTTP 方法 (默认 POST)
|
||||
|
||||
Returns:
|
||||
TestResult 对象
|
||||
"""
|
||||
-    req = urllib.request.Request(url, method="POST")
+    req = urllib.request.Request(url, method=method)
|
||||
if headers:
|
||||
for k, v in headers.items():
|
||||
req.add_header(k, v)
|
||||
|
||||
@@ -262,6 +262,18 @@ def validate_anthropic_streaming_response(response_text: str) -> Tuple[bool, Lis
|
||||
elif event_type == "message_stop":
|
||||
has_message_stop = True
|
||||
|
||||
elif event_type == "content_block_start":
|
||||
if "index" not in event:
|
||||
errors.append(f"content_block_start 事件缺少 index 字段")
|
||||
if "content_block" not in event:
|
||||
errors.append(f"content_block_start 事件缺少 content_block 字段")
|
||||
elif not isinstance(event["content_block"], dict):
|
||||
errors.append(f"content_block_start 事件的 content_block 不是对象")
|
||||
else:
|
||||
cb = event["content_block"]
|
||||
if "type" not in cb:
|
||||
errors.append(f"content_block_start.content_block 缺少 type 字段")
|
||||
|
||||
elif event_type == "content_block_delta":
|
||||
if "delta" not in event:
|
||||
errors.append(f"content_block_delta 事件缺少 delta 字段")
|
||||
|
||||
@@ -564,6 +564,102 @@ def main():
|
||||
"temperature": 2.5
|
||||
}
|
||||
))
|
||||
cases.append(TestCase(
|
||||
desc="frequency_penalty 超出范围 (3.0)",
|
||||
method="POST",
|
||||
url=chat_url,
|
||||
headers=headers,
|
||||
body={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": "Hi"}],
|
||||
"max_tokens": 5,
|
||||
"frequency_penalty": 3.0
|
||||
}
|
||||
))
|
||||
cases.append(TestCase(
|
||||
desc="frequency_penalty 超出范围 (-3.0)",
|
||||
method="POST",
|
||||
url=chat_url,
|
||||
headers=headers,
|
||||
body={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": "Hi"}],
|
||||
"max_tokens": 5,
|
||||
"frequency_penalty": -3.0
|
||||
}
|
||||
))
|
||||
cases.append(TestCase(
|
||||
desc="presence_penalty 超出范围 (3.0)",
|
||||
method="POST",
|
||||
url=chat_url,
|
||||
headers=headers,
|
||||
body={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": "Hi"}],
|
||||
"max_tokens": 5,
|
||||
"presence_penalty": 3.0
|
||||
}
|
||||
))
|
||||
cases.append(TestCase(
|
||||
desc="presence_penalty 超出范围 (-3.0)",
|
||||
method="POST",
|
||||
url=chat_url,
|
||||
headers=headers,
|
||||
body={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": "Hi"}],
|
||||
"max_tokens": 5,
|
||||
"presence_penalty": -3.0
|
||||
}
|
||||
))
|
||||
cases.append(TestCase(
|
||||
desc="top_p 超出范围 (1.5)",
|
||||
method="POST",
|
||||
url=chat_url,
|
||||
headers=headers,
|
||||
body={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": "Hi"}],
|
||||
"max_tokens": 5,
|
||||
"top_p": 1.5
|
||||
}
|
||||
))
|
||||
cases.append(TestCase(
|
||||
desc="top_p 超出范围 (-0.1)",
|
||||
method="POST",
|
||||
url=chat_url,
|
||||
headers=headers,
|
||||
body={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": "Hi"}],
|
||||
"max_tokens": 5,
|
||||
"top_p": -0.1
|
||||
}
|
||||
))
|
||||
cases.append(TestCase(
|
||||
desc="n 为负数",
|
||||
method="POST",
|
||||
url=chat_url,
|
||||
headers=headers,
|
||||
body={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": "Hi"}],
|
||||
"max_tokens": 5,
|
||||
"n": -1
|
||||
}
|
||||
))
|
||||
cases.append(TestCase(
|
||||
desc="n 为 0",
|
||||
method="POST",
|
||||
url=chat_url,
|
||||
headers=headers,
|
||||
body={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": "Hi"}],
|
||||
"max_tokens": 5,
|
||||
"n": 0
|
||||
}
|
||||
))
|
||||
|
||||
# ---- --vision ----
|
||||
if args.vision:
|
||||
@@ -807,7 +903,7 @@ def main():
|
||||
|
||||
# reasoning_effort: 推理努力级别(需要模型支持)
|
||||
cases.append(TestCase(
|
||||
-        desc="reasoning_effort: medium",
+        desc="reasoning_effort: none",
|
||||
method="POST",
|
||||
url=chat_url,
|
||||
headers=headers,
|
||||
@@ -815,7 +911,20 @@ def main():
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": "1+1=?"}],
|
||||
"max_tokens": 10,
|
||||
-            "reasoning_effort": "medium"
+            "reasoning_effort": "none"
|
||||
},
|
||||
validator=validate_openai_chat_completion_response
|
||||
))
|
||||
cases.append(TestCase(
|
||||
desc="reasoning_effort: minimal",
|
||||
method="POST",
|
||||
url=chat_url,
|
||||
headers=headers,
|
||||
body={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": "1+1=?"}],
|
||||
"max_tokens": 10,
|
||||
"reasoning_effort": "minimal"
|
||||
},
|
||||
validator=validate_openai_chat_completion_response
|
||||
))
|
||||
@@ -832,6 +941,19 @@ def main():
|
||||
},
|
||||
validator=validate_openai_chat_completion_response
|
||||
))
|
||||
cases.append(TestCase(
|
||||
desc="reasoning_effort: medium",
|
||||
method="POST",
|
||||
url=chat_url,
|
||||
headers=headers,
|
||||
body={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": "1+1=?"}],
|
||||
"max_tokens": 10,
|
||||
"reasoning_effort": "medium"
|
||||
},
|
||||
validator=validate_openai_chat_completion_response
|
||||
))
|
||||
cases.append(TestCase(
|
||||
desc="reasoning_effort: high",
|
||||
method="POST",
|
||||
@@ -873,6 +995,32 @@ def main():
|
||||
},
|
||||
validator=validate_openai_chat_completion_response
|
||||
))
|
||||
cases.append(TestCase(
|
||||
desc="service_tier: flex",
|
||||
method="POST",
|
||||
url=chat_url,
|
||||
headers=headers,
|
||||
body={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": "Hi"}],
|
||||
"max_tokens": 5,
|
||||
"service_tier": "flex"
|
||||
},
|
||||
validator=validate_openai_chat_completion_response
|
||||
))
|
||||
cases.append(TestCase(
|
||||
desc="service_tier: priority",
|
||||
method="POST",
|
||||
url=chat_url,
|
||||
headers=headers,
|
||||
body={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": "Hi"}],
|
||||
"max_tokens": 5,
|
||||
"service_tier": "priority"
|
||||
},
|
||||
validator=validate_openai_chat_completion_response
|
||||
))
|
||||
|
||||
# verbosity: 冗长程度
|
||||
cases.append(TestCase(
|
||||
@@ -888,6 +1036,32 @@ def main():
|
||||
},
|
||||
validator=validate_openai_chat_completion_response
|
||||
))
|
||||
cases.append(TestCase(
|
||||
desc="verbosity: medium",
|
||||
method="POST",
|
||||
url=chat_url,
|
||||
headers=headers,
|
||||
body={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": "介绍一下Python"}],
|
||||
"max_tokens": 50,
|
||||
"verbosity": "medium"
|
||||
},
|
||||
validator=validate_openai_chat_completion_response
|
||||
))
|
||||
cases.append(TestCase(
|
||||
desc="verbosity: high",
|
||||
method="POST",
|
||||
url=chat_url,
|
||||
headers=headers,
|
||||
body={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": "介绍一下Python"}],
|
||||
"max_tokens": 50,
|
||||
"verbosity": "high"
|
||||
},
|
||||
validator=validate_openai_chat_completion_response
|
||||
))
|
||||
|
||||
# ---- 执行测试 ----
|
||||
flags = []
|
||||
|
||||
Reference in New Issue
Block a user