# Source metadata (from extraction): 195 lines, 6.5 KiB, Python.
import json
import sys

import requests
# HTTP headers captured from a browser session against proai520.com.
#
# SECURITY NOTE(review): this dict embeds live credentials directly in
# source ('authorization' bearer key, 'newauth' JWT, 'token', 'username').
# These should be rotated and loaded from environment variables or a
# secrets store rather than committed to version control.
headers = {
    'accept': 'application/json, text/event-stream',
    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
    'authorization': 'Bearer sk-hOdY292wluz2aFrLCe5f1cdscnjdksnjkNKJNJNJKNKJ1769082037063',
    'cache-control': 'no-cache',
    'content-type': 'application/json',
    'dnt': '1',
    'newauth': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpZCI6Mjk4MSwic2Vzc2lvbiI6IjQ3YmFlYTBlNDY0ZDA0YWU3YjYwYjJiZDQ5YTQ2Y2RlIn0.PWjowAqaBepISCdp1XvwwbjRZi8jrrwNE8BntT6VQHs',
    'origin': 'https://proai520.com',
    'pragma': 'no-cache',
    'priority': 'u=1, i',
    'referer': 'https://proai520.com/',
    'sec-ch-ua': '"Not(A:Brand";v="8", "Chromium";v="144", "Microsoft Edge";v="144"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-site',
    'token': '85bb99fa1ae0b45caa50346f26479285',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/144.0.0.0 Safari/537.36 Edg/144.0.0.0',
    'username': 'ojAPQ6mcTExxYLTrA7HjGnG7803I',
}
# Request payload in the OpenAI chat-completions schema: a system prompt,
# one user turn, and sampling parameters. 'stream': True requests the
# response as an SSE event stream (consumed by the helpers below).
json_data = {
    'messages': [
        {
            'role': 'system',
            'content': '\nYou are ChatGPT, a large language model trained by OpenAI.\nKnowledge cutoff: 2021-09\nCurrent model: gpt-5.2\nCurrent time: Thu Jan 22 2026 19:40:37 GMT+0800 (中国标准时间)\nLatex inline: \\(x^2\\) \nLatex block: $$e=mc^2$$\n\n',
        },
        {
            'role': 'user',
            'content': '你好,你是谁',
        },
    ],
    'stream': True,
    'model': 'gpt-5.2',
    'temperature': 0.5,
    'presence_penalty': 0,
    'frequency_penalty': 0,
    'top_p': 1,
    'max_tokens': 8000,
}
def parse_sse_stream(response):
    """Consume an SSE (Server-Sent Events) stream and assemble a complete
    OpenAI-compatible chat-completion response.

    Args:
        response: a ``requests.Response`` opened with ``stream=True``; only
            its ``iter_lines(decode_unicode=True)`` method is used, so any
            object with that method works (useful for testing).

    Returns:
        dict shaped like a non-streaming ``chat.completion`` response with
        ``id`` / ``object`` / ``created`` / ``model`` / ``choices`` /
        ``usage`` keys. Missing metadata falls back to placeholder values.
    """
    full_content = ""
    response_id = None
    model = None
    created = None
    usage = None
    finish_reason = None

    # Walk the stream line by line; SSE events look like "data: {json}".
    for line in response.iter_lines(decode_unicode=True):
        if not line:
            continue  # skip blank separator / keep-alive lines
        if not line.startswith('data: '):
            continue  # ignore non-data SSE fields (event:, id:, comments)

        data_str = line[6:]  # strip the 'data: ' prefix

        # '[DONE]' is the sentinel that terminates the stream.
        if data_str.strip() == '[DONE]':
            break

        try:
            chunk = json.loads(data_str)
        except json.JSONDecodeError:
            # Skip lines that are not valid JSON (partial or junk frames).
            continue

        # Metadata: first-seen value wins; later chunks usually repeat it.
        if response_id is None and 'id' in chunk:
            response_id = chunk['id']
        if model is None and 'model' in chunk:
            model = chunk['model']
        if created is None and 'created' in chunk:
            created = chunk['created']
        if usage is None and 'usage' in chunk:
            usage = chunk['usage']

        # Accumulate content deltas into the final message text and track
        # the stream-reported finish reason, if any.
        if chunk.get('choices'):
            choice = chunk['choices'][0]
            delta = choice.get('delta', {})
            if 'content' in delta:
                full_content += delta['content']
            if choice.get('finish_reason'):
                finish_reason = choice['finish_reason']

    # Build the OpenAI-compatible aggregate response.
    return {
        "id": response_id or "chatcmpl-unknown",
        "object": "chat.completion",
        "created": created or 0,
        "model": model or json_data.get('model', 'unknown'),
        "choices": [
            {
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": full_content,
                },
                # Prefer the reason the stream reported; default to "stop"
                # for backward compatibility with the previous behavior.
                "finish_reason": finish_reason or "stop",
            }
        ],
        "usage": usage or {
            "prompt_tokens": 0,
            "completion_tokens": 0,
            "total_tokens": 0,
        },
    }
def stream_sse_response(response):
    """Relay an SSE stream to stdout, re-emitting every event as an
    OpenAI-compatible ``chat.completion.chunk`` line.

    Args:
        response: a ``requests.Response`` opened with ``stream=True``.

    Each upstream event is printed as ``data: {json}`` followed by a blank
    line; the terminating ``data: [DONE]`` marker is forwarded as well.
    """
    for raw_line in response.iter_lines(decode_unicode=True):
        # Guard clauses: skip blanks and non-data SSE fields.
        if not raw_line:
            continue
        if not raw_line.startswith('data: '):
            continue

        payload = raw_line[6:]

        if payload.strip() == '[DONE]':
            # Forward the terminator so downstream consumers stop reading.
            print('data: [DONE]\n')
            break

        try:
            source_chunk = json.loads(payload)
        except json.JSONDecodeError:
            continue  # ignore unparseable frames

        # Re-shape the upstream event into the OpenAI streaming schema.
        out_chunk = {
            "id": source_chunk.get('id', 'chatcmpl-unknown'),
            "object": "chat.completion.chunk",
            "created": source_chunk.get('created', 0),
            "model": source_chunk.get('model', json_data.get('model', 'unknown')),
            "choices": [],
        }

        upstream_choices = source_chunk.get('choices')
        if upstream_choices:
            first = upstream_choices[0]
            out_chunk["choices"] = [
                {
                    "index": 0,
                    "delta": first.get('delta', {}),
                    "finish_reason": first.get('finish_reason'),
                }
            ]

        # Emit the converted chunk immediately (flush keeps latency low).
        print(f"data: {json.dumps(out_chunk, ensure_ascii=False)}\n")
        sys.stdout.flush()
# --- Script entry point: send the request and print the assembled reply ---

# Fire the chat-completion request with streaming enabled so the body
# arrives as an SSE event stream.
response = requests.post(
    'https://api.proai520.com/v1/chat/completions',
    headers=headers,
    json=json_data,
    stream=True,  # enable streamed (SSE) response body
    timeout=60,   # fail fast instead of hanging forever on a dead server
)
# Surface HTTP-level failures immediately; otherwise an error body would be
# silently parsed into an empty completion.
response.raise_for_status()

# Mode 1: collapse the whole SSE stream into one non-streaming response.
print("=" * 60)
print("方式1: 完整响应格式(非流式)")
print("=" * 60)
complete_response = parse_sse_stream(response)
print(json.dumps(complete_response, ensure_ascii=False, indent=2))

# Mode 2 (disabled): re-emit the stream chunk-by-chunk in OpenAI format.
# A streamed response can only be consumed once, so this needs a fresh
# request.
# print("\n" + "=" * 60)
# print("方式2: 流式响应格式")
# print("=" * 60)
# response2 = requests.post(
#     'https://api.proai520.com/v1/chat/completions',
#     headers=headers,
#     json=json_data,
#     stream=True,
#     timeout=60,
# )
# stream_sse_response(response2)