playingapi committed on
Commit 167f028 · verified · 1 Parent(s): 0dd44f1

Upload 3 files

Files changed (3)
  1. Dockerfile +11 -0
  2. openai_ondemand_adapter.py +311 -0
  3. requirements.txt +2 -0
Dockerfile ADDED
@@ -0,0 +1,11 @@
+ FROM python:3.10-slim
+ # Install pip dependencies
+ WORKDIR /workspace
+ COPY requirements.txt .
+ RUN pip install --no-cache-dir -r requirements.txt
+ # Copy your source code
+ COPY . .
+ # The Space must listen on 0.0.0.0:7860 or 3000; 7860 is recommended!
+ ENV PORT=7860
+ EXPOSE 7860
+ CMD ["python", "openai_ondemand_adapter.py"]
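The container listens on 0.0.0.0:7860, and the adapter below rejects any request that does not carry the PRIVATE_KEY secret in an X-API-KEY header. A minimal smoke-test sketch in Python (the Space URL and key value here are placeholders, not part of this commit):

import os
import requests

# Hypothetical values: point these at your deployed Space and its PRIVATE_KEY secret.
BASE_URL = os.environ.get("ADAPTER_BASE_URL", "https://your-space.hf.space")
API_KEY = os.environ.get("PRIVATE_KEY", "114514")

# List the models the adapter exposes; a 401 here means the X-API-KEY header is wrong.
resp = requests.get(f"{BASE_URL}/v1/models", headers={"X-API-KEY": API_KEY}, timeout=30)
resp.raise_for_status()
print([m["id"] for m in resp.json()["data"]])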
openai_ondemand_adapter.py ADDED
@@ -0,0 +1,311 @@
+ from flask import Flask, request, Response, jsonify
+ import requests
+ import uuid
+ import time
+ import json
+ import threading
+ import logging
+ import os
+
+ # ====== Read the private key configured as a Hugging Face Secret =======
+ PRIVATE_KEY = os.environ.get("PRIVATE_KEY", "114514")
+ SAFE_HEADER = "X-API-KEY"
+
+ # Global access check applied to every endpoint
+ def check_private_key():
+     if request.path in ["/", "/favicon.ico"]:
+         return
+     key = request.headers.get(SAFE_HEADER)
+     if not key or key != PRIVATE_KEY:
+         return jsonify({"error": "Unauthorized, must provide correct X-API-KEY"}), 401
+
+ # Apply authentication to all API routes
+ app = Flask(__name__)
+ app.before_request(check_private_key)
+
+ # ========== KEY pool (one key per line) ==========
+ ONDEMAND_APIKEYS = [
+     "7oGmV4VoDgkRFUoJzlgEULWLEB0OyF7H",
+ ]
+ BAD_KEY_RETRY_INTERVAL = 600  # seconds
+ SESSION_TIMEOUT = 600  # session timeout (10 minutes)
+
+ # ========== OnDemand model mapping ==========
+ MODEL_MAP = {
+     "gpto3-mini": "predefined-openai-gpto3-mini",
+     "gpt-4o": "predefined-openai-gpt4o",
+     "gpt-4.1": "predefined-openai-gpt4.1",
+     "gpt-4.1-mini": "predefined-openai-gpt4.1-mini",
+     "gpt-4.1-nano": "predefined-openai-gpt4.1-nano",
+     "gpt-4o-mini": "predefined-openai-gpt4o-mini",
+     "deepseek-v3": "predefined-deepseek-v3",
+     "deepseek-r1": "predefined-deepseek-r1",
+     "claude-3.7-sonnet": "predefined-claude-3.7-sonnet",
+     "gemini-2.0-flash": "predefined-gemini-2.0-flash",
+ }
+ DEFAULT_ONDEMAND_MODEL = "predefined-openai-gpt4o"
+ # ==========================================
+
+ class KeyManager:
+     def __init__(self, key_list):
+         self.key_list = list(key_list)
+         self.lock = threading.Lock()
+         self.key_status = {k: {"bad": False, "bad_ts": None} for k in self.key_list}
+         self.idx = 0
+         # New: the key and session currently in use
+         self.current_key = None
+         self.current_session = None
+         self.last_used_time = None
+
+     def display_key(self, key):
+         return f"{key[:6]}...{key[-4:]}"
+
+     def get(self):
+         with self.lock:
+             now = time.time()
+             # Check whether the current conversation has timed out
+             if self.current_key and self.last_used_time and (now - self.last_used_time > SESSION_TIMEOUT):
+                 print(f"[Session timeout] Last used at: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.last_used_time))}")
+                 print(f"[Session timeout] Current time: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(now))}")
+                 print(f"[Session timeout] Idle for more than {SESSION_TIMEOUT//60} minutes, switching to a new session")
+                 self.current_key = None
+                 self.current_session = None
+
+             # If a key is already in use, keep using it
+             if self.current_key:
+                 if not self.key_status[self.current_key]["bad"]:
+                     print(f"[Chat request] [Reusing API KEY: {self.display_key(self.current_key)}] [Status: OK]")
+                     self.last_used_time = now
+                     return self.current_key
+                 else:
+                     # The current key has been marked bad; switch to another one
+                     self.current_key = None
+                     self.current_session = None
+
+             # No current key (or it is invalid): pick a new one
+             total = len(self.key_list)
+             for _ in range(total):
+                 key = self.key_list[self.idx]
+                 self.idx = (self.idx + 1) % total
+                 s = self.key_status[key]
+                 if not s["bad"]:
+                     print(f"[Chat request] [Using new API KEY: {self.display_key(key)}] [Status: OK]")
+                     self.current_key = key
+                     self.current_session = None  # force a new session
+                     self.last_used_time = now
+                     return key
+                 if s["bad"] and s["bad_ts"]:
+                     ago = now - s["bad_ts"]
+                     if ago >= BAD_KEY_RETRY_INTERVAL:
+                         print(f"[KEY auto-recovery] API KEY: {self.display_key(key)} has passed the retry interval, marking it good again")
+                         self.key_status[key]["bad"] = False
+                         self.key_status[key]["bad_ts"] = None
+                         self.current_key = key
+                         self.current_session = None  # force a new session
+                         self.last_used_time = now
+                         return key
+
+             print("[Warning] All KEYS are disabled; forcing the first KEY and retrying:", self.display_key(self.key_list[0]))
+             for k in self.key_list:
+                 self.key_status[k]["bad"] = False
+                 self.key_status[k]["bad_ts"] = None
+             self.idx = 0
+             self.current_key = self.key_list[0]
+             self.current_session = None  # force a new session
+             self.last_used_time = now
+             print(f"[Chat request] [Using API KEY: {self.display_key(self.current_key)}] [Status: forced retry (all keys bad)]")
+             return self.current_key
+
+     def mark_bad(self, key):
+         with self.lock:
+             if key in self.key_status and not self.key_status[key]["bad"]:
+                 print(f"[Disabling KEY] API KEY: {self.display_key(key)}, the API returned an error (will retry automatically in {BAD_KEY_RETRY_INTERVAL//60} minutes)")
+                 self.key_status[key]["bad"] = True
+                 self.key_status[key]["bad_ts"] = time.time()
+                 if self.current_key == key:
+                     self.current_key = None
+                     self.current_session = None
+
+     def get_session(self, apikey):
+         with self.lock:
+             if not self.current_session:
+                 try:
+                     self.current_session = create_session(apikey)
+                     print(f"[New session created] SESSION ID: {self.current_session}")
+                 except Exception as e:
+                     print(f"[Session creation failed] Error: {str(e)}")
+                     raise
+             self.last_used_time = time.time()
+             return self.current_session
+
+ keymgr = KeyManager(ONDEMAND_APIKEYS)
+
+ ONDEMAND_API_BASE = "https://api.on-demand.io/chat/v1"
+
+ def get_endpoint_id(openai_model):
+     m = str(openai_model or "").lower().replace(" ", "")
+     return MODEL_MAP.get(m, DEFAULT_ONDEMAND_MODEL)
+
+ def create_session(apikey, external_user_id=None, plugin_ids=None):
+     url = f"{ONDEMAND_API_BASE}/sessions"
+     payload = {"externalUserId": external_user_id or str(uuid.uuid4())}
+     if plugin_ids is not None:
+         payload["pluginIds"] = plugin_ids
+     headers = {"apikey": apikey, "Content-Type": "application/json"}
+     resp = requests.post(url, json=payload, headers=headers, timeout=20)
+     resp.raise_for_status()
+     return resp.json()["data"]["id"]
+
+ def format_openai_sse_delta(chunk_str):
+     return f"data: {json.dumps(chunk_str, ensure_ascii=False)}\n\n"
+
+ @app.route("/v1/chat/completions", methods=["POST"])
+ def chat_completions():
+     data = request.json
+     if not data or "messages" not in data:
+         return jsonify({"error": "Request is missing the messages field"}), 400
+
+     messages = data["messages"]
+     openai_model = data.get("model", "gpt-4o")
+     endpoint_id = get_endpoint_id(openai_model)
+     is_stream = bool(data.get("stream", False))
+
+     user_msg = None
+     for msg in reversed(messages):
+         if msg.get("role") == "user":
+             user_msg = msg.get("content")
+             break
+     if user_msg is None:
+         return jsonify({"error": "No user message found"}), 400
+
+     def with_valid_key(func):
+         bad_cnt = 0
+         max_retry = len(keymgr.key_list) * 2
+         while bad_cnt < max_retry:
+             key = keymgr.get()
+             try:
+                 return func(key)
+             except Exception as e:
+                 if hasattr(e, 'response') and e.response is not None:
+                     r = e.response
+                     if r.status_code in (401, 403, 429, 500):
+                         keymgr.mark_bad(key)
+                         bad_cnt += 1
+                         continue
+                 raise
+         return jsonify({"error": "No usable API KEY; please add new keys or contact support"}), 500
+
+     if is_stream:
+         def generate():
+             def do_once(apikey):
+                 # Get or create the session through the KeyManager
+                 sid = keymgr.get_session(apikey)
+                 url = f"{ONDEMAND_API_BASE}/sessions/{sid}/query"
+                 payload = {
+                     "query": user_msg,
+                     "endpointId": endpoint_id,
+                     "pluginIds": [],
+                     "responseMode": "stream"
+                 }
+                 headers = {"apikey": apikey, "Content-Type": "application/json", "Accept": "text/event-stream"}
+                 with requests.post(url, json=payload, headers=headers, stream=True, timeout=120) as resp:
+                     if resp.status_code != 200:
+                         raise requests.HTTPError(response=resp)
+                     answer_acc = ""
+                     first_chunk = True
+                     for line in resp.iter_lines():
+                         if not line:
+                             continue
+                         line = line.decode("utf-8")
+                         if line.startswith("data:"):
+                             datapart = line[5:].strip()
+                             if datapart == "[DONE]":
+                                 yield "data: [DONE]\n\n"
+                                 break
+                             elif datapart.startswith("[ERROR]:"):
+                                 err_json = datapart[len("[ERROR]:"):].strip()
+                                 yield format_openai_sse_delta({"error": err_json})
+                                 break
+                             else:
+                                 try:
+                                     js = json.loads(datapart)
+                                 except Exception:
+                                     continue
+                                 if js.get("eventType") == "fulfillment":
+                                     delta = js.get("answer", "")
+                                     answer_acc += delta
+                                     chunk = {
+                                         "id": "chatcmpl-" + str(uuid.uuid4())[:8],
+                                         "object": "chat.completion.chunk",
+                                         "created": int(time.time()),
+                                         "model": openai_model,
+                                         "choices": [{
+                                             "delta": {
+                                                 "role": "assistant",
+                                                 "content": delta
+                                             } if first_chunk else {
+                                                 "content": delta
+                                             },
+                                             "index": 0,
+                                             "finish_reason": None
+                                         }]
+                                     }
+                                     yield format_openai_sse_delta(chunk)
+                                     first_chunk = False
+                     yield "data: [DONE]\n\n"
+             yield from with_valid_key(do_once)
+         return Response(generate(), content_type='text/event-stream')
+
+     def nonstream(apikey):
+         # Get or create the session through the KeyManager
+         sid = keymgr.get_session(apikey)
+         url = f"{ONDEMAND_API_BASE}/sessions/{sid}/query"
+         payload = {
+             "query": user_msg,
+             "endpointId": endpoint_id,
+             "pluginIds": [],
+             "responseMode": "sync"
+         }
+         headers = {"apikey": apikey, "Content-Type": "application/json"}
+         resp = requests.post(url, json=payload, headers=headers, timeout=120)
+         if resp.status_code != 200:
+             raise requests.HTTPError(response=resp)
+         ai_response = resp.json()["data"]["answer"]
+         resp_obj = {
+             "id": "chatcmpl-" + str(uuid.uuid4())[:8],
+             "object": "chat.completion",
+             "created": int(time.time()),
+             "model": openai_model,
+             "choices": [
+                 {
+                     "index": 0,
+                     "message": {"role": "assistant", "content": ai_response},
+                     "finish_reason": "stop"
+                 }
+             ],
+             "usage": {}
+         }
+         return jsonify(resp_obj)
+
+     return with_valid_key(nonstream)
+
+ @app.route("/v1/models", methods=["GET"])
+ def models():
+     model_objs = []
+     for mdl in MODEL_MAP.keys():
+         model_objs.append({
+             "id": mdl,
+             "object": "model",
+             "owned_by": "ondemand-proxy"
+         })
+     uniq = {m["id"]: m for m in model_objs}.values()
+     return jsonify({
+         "object": "list",
+         "data": list(uniq)
+     })
+
+ if __name__ == "__main__":
+     log_fmt = '[%(asctime)s] %(levelname)s: %(message)s'
+     logging.basicConfig(level=logging.INFO, format=log_fmt)
+     print("======== OnDemand KEY pool size:", len(ONDEMAND_APIKEYS), "========")
+     app.run(host="0.0.0.0", port=7860, debug=False)
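For reference, a minimal client sketch against the adapter's OpenAI-style endpoints, reusing the same requests dependency; the base URL and key are placeholders, and the payload only carries the fields chat_completions() actually reads (model, messages, stream):

import json
import requests

BASE_URL = "https://your-space.hf.space"  # placeholder Space URL, not part of this commit
HEADERS = {"X-API-KEY": "114514", "Content-Type": "application/json"}

# Non-streaming call: the adapter forwards only the latest user message to OnDemand.
body = {"model": "gpt-4o", "messages": [{"role": "user", "content": "Hello"}], "stream": False}
r = requests.post(f"{BASE_URL}/v1/chat/completions", headers=HEADERS, json=body, timeout=120)
print(r.json()["choices"][0]["message"]["content"])

# Streaming call: read the SSE lines and print each delta until [DONE].
body["stream"] = True
with requests.post(f"{BASE_URL}/v1/chat/completions", headers=HEADERS, json=body,
                   stream=True, timeout=120) as r:
    for line in r.iter_lines():
        if not line or not line.startswith(b"data:"):
            continue
        data = line[len(b"data:"):].strip()
        if data == b"[DONE]":
            break
        chunk = json.loads(data)
        delta = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
        print(delta, end="", flush=True)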
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ Flask
+ requests