Upload 3 files
- Dockerfile +15 -0
- main.py +247 -0
- requirements.txt +7 -0
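Together, the three files define a small FastAPI proxy: main.py exposes OpenAI-compatible /v1/models and /v1/chat/completions endpoints and forwards chat requests to the Abacus.ai SSE backend, the Dockerfile packages the app as a two-stage image, and requirements.txt pins its dependencies.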
Dockerfile
ADDED
@@ -0,0 +1,15 @@
# Stage 1: build stage
FROM python:3.9-slim AS builder

WORKDIR /app
COPY requirements.txt .
RUN pip install --user -r requirements.txt

# Stage 2: runtime image
FROM python:3.9-slim
WORKDIR /app
COPY --from=builder /root/.local /root/.local
COPY . .
ENV PATH=/root/.local/bin:$PATH
EXPOSE 8000
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
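The two-stage build keeps the runtime image small: dependencies are installed into /root/.local via pip install --user in the builder stage, and only that directory is copied into the final image, so build tooling and pip caches never ship. Prepending /root/.local/bin to PATH makes the uvicorn entrypoint resolvable.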
main.py
ADDED
@@ -0,0 +1,247 @@
from fastapi import FastAPI, Request, Response, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
import httpx
import json
import time
import uuid
from typing import Optional, List, Dict, Any
from pydantic import BaseModel
import asyncio

# Create the FastAPI application
app = FastAPI()

# Configure CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Request/response data models
class Message(BaseModel):
    role: str
    content: str

class ChatRequest(BaseModel):
    messages: List[Message]
    model: str
    # NOTE: responses are always streamed; the flag is accepted for API compatibility
    stream: Optional[bool] = True

class ChatResponse(BaseModel):
    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: List[Dict[str, Any]]
    usage: Optional[Dict[str, int]] = None

# Map OpenAI-style model IDs to Abacus llmName values
MODEL_MAPPING = {
    "gpt-4o-mini-abacus": "OPENAI_GPT4O_MINI",
    "claude-3.5-sonnet-abacus": "CLAUDE_V3_5_SONNET",
    "claude-3.7-sonnet-abacus": "CLAUDE_V3_7_SONNET",
    "claude-3.7-sonnet-thinking-abacus": "CLAUDE_V3_7_SONNET_THINKING",
    "o3-mini-abacus": "OPENAI_O3_MINI",
    "o3-mini-high-abacus": "OPENAI_O3_MINI_HIGH",
    "o1-mini-abacus": "OPENAI_O1_MINI",
    "deepseek-r1-abacus": "DEEPSEEK_R1",
    "gemini-2-pro-abacus": "GEMINI_2_PRO",
    "gemini-2-flash-thinking-abacus": "GEMINI_2_FLASH_THINKING",
    "gemini-2-flash-abacus": "GEMINI_2_FLASH",
    "gemini-1.5-pro-abacus": "GEMINI_1_5_PRO",
    "xai-grok-abacus": "XAI_GROK",
    "deepseek-v3-abacus": "DEEPSEEK_V3",
    "llama3-1-405b-abacus": "LLAMA3_1_405B",
    "gpt-4o-abacus": "OPENAI_GPT4O",
    "gpt-4o-2024-08-06-abacus": "OPENAI_GPT4O",
    "gpt-3.5-turbo-abacus": "OPENAI_O3_MINI",
    "gpt-3.5-turbo-16k-abacus": "OPENAI_O3_MINI_HIGH"
}

BASE_URL = "https://pa002.abacus.ai"

TIMEOUT = 30.0   # request timeout (seconds)
MAX_RETRIES = 3  # maximum number of retries
RETRY_DELAY = 1  # delay between retries (seconds)

@app.get("/v1/models")
async def list_models():
    """Return the list of supported models."""
    models = [
        {
            "id": model_id,
            "object": "model",
            "created": 1677610602,
            "owned_by": "system",
        }
        for model_id in MODEL_MAPPING.keys()
    ]
    return {
        "object": "list",
        "data": models
    }

# Helper: build the headers for the upstream request
def get_headers(auth_token: str) -> Dict[str, str]:
    """Build browser-like request headers; the bearer token is forwarded as the Cookie."""
    return {
        "sec-ch-ua-platform": "Windows",
        "sec-ch-ua": '"Not(A:Brand";v="99", "Microsoft Edge";v="133", "Chromium";v="133"',
        "sec-ch-ua-mobile": "?0",
        "X-Abacus-Org-Host": "apps",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36 Edg/133.0.0.0",
        "Sec-Fetch-Site": "same-site",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Dest": "empty",
        "host": "pa002.abacus.ai",
        "Cookie": auth_token,
        "Accept": "text/event-stream",
        "Content-Type": "text/plain;charset=UTF-8"
    }

def process_messages(messages: List[Message]) -> str:
    """Flatten the message list into a single prompt string."""
    system_message = next((msg.content for msg in messages if msg.role == "system"), None)
    context_messages = [msg for msg in messages if msg.role != "system"][:-1]
    current_message = messages[-1].content

    full_message = current_message

    if system_message:
        full_message = f"System: {system_message}\n\n{full_message}"

    if context_messages:
        context_str = "\n".join(f"{msg.role}: {msg.content}" for msg in context_messages)
        full_message = f"Previous conversation:\n{context_str}\nCurrent message: {full_message}"

    return full_message

@app.post("/v1/chat/completions")
async def chat_completions(request: Request, chat_request: ChatRequest):
    """Handle chat completion requests."""
    # Extract the auth token
    auth_header = request.headers.get("Authorization", "")
    if not auth_header.startswith("Bearer "):
        return Response(
            content=json.dumps({"error": "No valid Authorization header provided"}),
            status_code=401
        )

    auth_token = auth_header.replace("Bearer ", "")

    # Create a conversation ID
    conversation_id = str(uuid.uuid4())

    # Flatten the messages into a single prompt
    full_message = process_messages(chat_request.messages)

    # Prepare the upstream request payload
    request_data = {
        "requestId": str(uuid.uuid4()),
        "deploymentConversationId": conversation_id,
        "message": full_message,
        "isDesktop": True,
        "chatConfig": {
            "timezone": "Asia/Shanghai",
            "language": "zh-CN"
        },
        "llmName": MODEL_MAPPING.get(chat_request.model, chat_request.model),
        "externalApplicationId": str(uuid.uuid4())
    }

    # Streaming handler: translate upstream SSE lines into OpenAI-style chunks
    async def generate_stream():
        headers = get_headers(auth_token)

        for retry in range(MAX_RETRIES):
            try:
                async with httpx.AsyncClient() as client:
                    async with client.stream(
                        "POST",
                        f"{BASE_URL}/api/_chatLLMSendMessageSSE",
                        headers=headers,
                        content=json.dumps(request_data),
                        timeout=TIMEOUT
                    ) as response:
                        async for line in response.aiter_lines():
                            if not line.strip():
                                continue

                            try:
                                data = json.loads(line)

                                # Forward text segments, skipping "Thinking..." status updates
                                if data.get("type") == "text" and data.get("title") != "Thinking...":
                                    chunk = {
                                        "id": str(uuid.uuid4()),
                                        "object": "chat.completion.chunk",
                                        "created": int(time.time()),
                                        "model": chat_request.model,
                                        "choices": [{
                                            "delta": {
                                                "role": "assistant",
                                                "content": data.get("segment", "")
                                            },
                                            "index": 0
                                        }]
                                    }
                                    yield f"data: {json.dumps(chunk)}\n\n"

                                if data.get("end"):
                                    # Send the final chunk with a stop reason
                                    chunk = {
                                        "id": str(uuid.uuid4()),
                                        "object": "chat.completion.chunk",
                                        "created": int(time.time()),
                                        "model": chat_request.model,
                                        "choices": [{
                                            "delta": {"content": ""},
                                            "index": 0,
                                            "finish_reason": "stop"
                                        }]
                                    }
                                    yield f"data: {json.dumps(chunk)}\n\n"
                                    yield "data: [DONE]\n\n"
                                    # Completed successfully; return here, since a bare
                                    # break would only exit the line loop and trigger a retry
                                    return

                            except json.JSONDecodeError:
                                continue
            except (httpx.TimeoutException, httpx.RequestError) as e:
                if retry == MAX_RETRIES - 1:  # last attempt
                    yield f"data: {json.dumps({'error': str(e)})}\n\n"
                    yield "data: [DONE]\n\n"
                    return
                await asyncio.sleep(RETRY_DELAY)

    return StreamingResponse(
        generate_stream(),
        media_type="text/event-stream"
    )

@app.get("/")
async def health_check():
    """Health check."""
    return {"status": "ok", "version": "1.0.0"}

@app.exception_handler(Exception)
async def global_exception_handler(request: Request, exc: Exception):
    """Global exception handler: return errors as JSON with HTTP 500."""
    error_message = str(exc)
    return Response(
        content=json.dumps({
            "error": {
                "message": error_message,
                "type": exc.__class__.__name__,
                "code": 500
            }
        }),
        status_code=500,
        media_type="application/json"
    )

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
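Since the routes mirror the OpenAI wire format, any OpenAI-compatible client should be able to talk to this proxy. A minimal sketch using the openai package already pinned in requirements.txt; the base URL, port, and cookie value are placeholders, and the bearer token is whatever Abacus session cookie the proxy should forward via get_headers():

from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:8000/v1",      # assumes the container is published on port 8000
    api_key="<your-abacus-session-cookie>",   # placeholder; forwarded as the upstream Cookie
)

stream = client.chat.completions.create(
    model="claude-3.5-sonnet-abacus",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello."},
    ],
    stream=True,
)

for chunk in stream:
    # Each chunk mirrors an OpenAI chat.completion.chunk object
    print(chunk.choices[0].delta.content or "", end="")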
requirements.txt
ADDED
@@ -0,0 +1,7 @@
fastapi>=0.68.0,<0.69.0
uvicorn>=0.15.0,<0.16.0
httpx>=0.24.0
pydantic>=1.8.0,<2.0.0
python-multipart>=0.0.5
typing-extensions>=4.0.0
openai>=1.0.0,<2.0.0
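For a quick smoke test once the container is running, the unauthenticated /v1/models route can be polled. A minimal check with httpx (already a dependency; the local URL assumes the default port mapping):

import httpx

resp = httpx.get("http://localhost:8000/v1/models")
resp.raise_for_status()
model_ids = [m["id"] for m in resp.json()["data"]]
print(f"{len(model_ids)} models advertised, e.g. {model_ids[:3]}")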