isididiidid commited on
Commit
f05ec72
·
verified ·
1 Parent(s): 3a9e1b3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1083 -440
app.py CHANGED
@@ -1,479 +1,1122 @@
1
- from flask import Flask, request, jsonify, Response
2
- import cloudscraper # 替换requests库,专门用于绕过Cloudflare保护
3
- import io
4
  import json
5
- import re
6
  import uuid
7
- import random
8
- import time
 
 
9
  import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
- app = Flask(__name__)
12
-
13
- TARGET_URL = "https://grok.com/rest/app-chat/conversations/new"
14
- MODELS = ["grok-2", "grok-3", "grok-3-thinking"]
15
- COOKIE_NUM = 0
16
- COOKIE_LIST = []
17
- LAST_COOKIE_INDEX = {}
18
- TEMPORARY_MODE = False
19
-
20
- USER_AGENTS = [
21
- # Windows - Chrome
22
- "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
23
- "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36",
24
- # Windows - Firefox
25
- "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:132.0) Gecko/20100101 Firefox/132.0",
26
- # Windows - Edge
27
- "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 Edg/123.0.2420.81",
28
- # Windows - Opera
29
- "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 OPR/109.0.0.0",
30
- # macOS - Chrome
31
- "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
32
- "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36",
33
- # macOS - Safari
34
- "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/18.0.1 Safari/605.1.15",
35
- # macOS - Firefox
36
- "Mozilla/5.0 (Macintosh; Intel Mac OS X 14.7; rv:132.0) Gecko/20100101 Firefox/132.0",
37
- # macOS - Opera
38
- "Mozilla/5.0 (Macintosh; Intel Mac OS X 14_4_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 OPR/109.0.0.0",
39
- "Mozilla/5.0 (Macintosh; Intel Mac OS X 14.4; rv:124.0) Gecko/20100101 Firefox/124.0",
40
- # Linux - Chrome
41
- "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36",
42
- # Linux - Firefox
43
- "Mozilla/5.0 (X11; Linux i686; rv:124.0) Gecko/20100101 Firefox/124.0",
44
- ]
45
-
46
- def create_cf_scraper(cookie_string):
47
- """创建一个配置好的cloudscraper实例"""
48
- # 只使用cloudscraper支持的浏览器
49
- browser = random.choice(['chrome', 'firefox']) # 修复:移除不支持的'edge'
50
- platform = random.choice(['windows', 'darwin', 'linux'])
 
 
 
 
 
 
 
 
51
 
52
- # 创建cloudscraper会话
53
- scraper = cloudscraper.create_scraper(
54
- browser={
55
- 'browser': browser,
56
- 'platform': platform,
57
- 'desktop': True
58
- },
59
- delay=random.uniform(5, 10), # 等待Cloudflare检查
60
- interpreter='js2py', # 使用js2py解释JavaScript挑战
61
- )
62
 
63
- # 设置自定义用户代理
64
- selected_ua = random.choice(USER_AGENTS)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
 
66
- # 设置基本头信息
67
- scraper.headers.update({
68
- "user-agent": selected_ua,
69
- "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
70
- "accept-language": "en-US,en;q=0.5",
71
- "accept-encoding": "gzip, deflate, br",
72
- "dnt": "1",
73
- "sec-fetch-dest": "document",
74
- "sec-fetch-mode": "navigate",
75
- "sec-fetch-site": "none",
76
- "sec-fetch-user": "?1",
77
- "upgrade-insecure-requests": "1",
78
- "cookie": cookie_string
79
- })
80
 
81
- return scraper
 
 
 
 
 
 
 
 
82
 
83
- def resolve_config():
84
- global COOKIE_NUM, COOKIE_LIST, LAST_COOKIE_INDEX, TEMPORARY_MODE
85
- COOKIE_LIST = []
86
- cookie_index = 1
 
 
 
 
 
 
 
 
87
 
88
- while True:
89
- cookie_env_name = f"GROK_COOKIE_{cookie_index}"
90
- cookie_string = os.environ.get(cookie_env_name)
91
- if cookie_string:
92
- try:
93
- print(f"创建Cookie {cookie_index} 的CloudScraper实例...")
94
- scraper = create_cf_scraper(cookie_string)
95
- COOKIE_LIST.append(scraper)
96
- cookie_index += 1
97
- except Exception as e:
98
- print(f"为Cookie {cookie_index} 创建CloudScraper失败: {e}")
99
- cookie_index += 1
100
- else:
101
- break
102
 
103
- COOKIE_NUM = len(COOKIE_LIST)
104
- if COOKIE_NUM == 0:
105
- raise ValueError("未提供Grok cookies,请通过环境变量设置 (GROK_COOKIE_1, GROK_COOKIE_2, ...)")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
 
107
- temporary_mode_str = os.environ.get("GROK_TEMPORARY_MODE", "false").lower()
108
- TEMPORARY_MODE = temporary_mode_str == "true" or temporary_mode_str == "1"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
 
110
- LAST_COOKIE_INDEX = {model: 0 for model in MODELS}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
 
112
- print(f"已从环境变量加载 {COOKIE_NUM} 个Grok cookies。")
113
- print(f"临时模式: {TEMPORARY_MODE}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
 
116
- @app.route("/", methods=["GET"])
117
- def root():
118
- return "Grok Proxy is running (Cloudflare Protected)", 200, {'Content-Type': 'text/plain'}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
119
 
120
 
121
- @app.route("/health", methods=["GET"])
122
- def health_check():
123
- return "OK", 200, {'Content-Type': 'text/plain'}
124
 
 
 
 
125
 
126
- @app.route("/v1/models", methods=["GET"])
127
- def get_models():
128
- model_list = []
129
- for model in MODELS:
130
- model_list.append(
131
  {
132
  "id": model,
133
  "object": "model",
134
- "created": int(time.time()),
135
- "owned_by": "Elbert",
136
- "name": model,
137
- }
138
- )
139
- return jsonify({"object": "list", "data": model_list})
140
-
141
-
142
- @app.route("/v1/chat/completions", methods=["POST"])
143
- def chat_completions():
144
- print("Received request")
145
- openai_request = request.get_json()
146
- print(openai_request)
147
- stream = openai_request.get("stream", False)
148
- messages = openai_request.get("messages")
149
- model = openai_request.get("model")
150
- if model not in MODELS:
151
- return jsonify({"error": "Model not available"}), 500
152
- if messages is None:
153
- return jsonify({"error": "Messages is required"}), 400
154
- disable_search, force_concise, messages = magic(messages)
155
- message = format_message(messages)
156
- is_reasoning = len(model) > 6
157
- model = model[0:6]
158
- return (
159
- send_message(message, model, disable_search, force_concise, is_reasoning)
160
- if stream
161
- else send_message_non_stream(
162
- message, model, disable_search, force_concise, is_reasoning)
163
- )
164
-
165
-
166
- def get_next_account(model):
167
- current = (LAST_COOKIE_INDEX[model] + 1) % COOKIE_NUM
168
- LAST_COOKIE_INDEX[model] = current
169
- print(f"Using account {current+1}/{COOKIE_NUM} for {model}")
170
- return COOKIE_LIST[current]
171
-
172
-
173
- def send_message(message, model, disable_search, force_concise, is_reasoning):
174
- headers = {
175
- "authority": "grok.com",
176
- "accept": "*/*",
177
- "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
178
- "cache-control": "no-cache",
179
- "content-type": "application/json",
180
- "origin": "https://grok.com",
181
- "pragma": "no-cache",
182
- "referer": "https://grok.com/",
183
- "sec-ch-ua": '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
184
- "sec-ch-ua-mobile": "?0",
185
- "sec-ch-ua-platform": '"Windows"',
186
- "sec-fetch-dest": "empty",
187
- "sec-fetch-mode": "cors",
188
- "sec-fetch-site": "same-origin",
189
- }
190
- payload = {
191
- "temporary": TEMPORARY_MODE,
192
- "modelName": "grok-3",
193
- "message": message,
194
- "fileAttachments": [],
195
- "imageAttachments": [],
196
- "disableSearch": disable_search,
197
- "enableImageGeneration": False,
198
- "returnImageBytes": False,
199
- "returnRawGrokInXaiRequest": False,
200
- "enableImageStreaming": True,
201
- "imageGenerationCount": 2,
202
- "forceConcise": force_concise,
203
- "toolOverrides": {},
204
- "enableSideBySide": True,
205
- "isPreset": False,
206
- "sendFinalMetadata": True,
207
- "customInstructions": "",
208
- "deepsearchPreset": "",
209
- "isReasoning": is_reasoning,
210
- }
211
 
212
- try:
213
- scraper = get_next_account(model)
 
 
214
 
215
- # 预热Cloudflare
216
- print("预热Cloudflare权限...")
217
- scraper.get("https://grok.com/", timeout=15)
218
- time.sleep(random.uniform(1.0, 2.0)) # 随机等待以模拟人类
219
-
220
- print("发送消息请求...")
221
- response = scraper.post(TARGET_URL, headers=headers, json=payload, stream=True)
222
- response.raise_for_status()
223
 
224
- def generate():
225
- try:
226
- print("---------- Response ----------")
227
- cnt = 2
228
- thinking = 2
229
- for line in response.iter_lines():
230
- if line:
231
- if cnt != 0:
232
- cnt -= 1
233
- else:
234
- decoded_line = line.decode("utf-8")
235
- data = json.loads(decoded_line)
236
- token = data["result"]["response"]["token"]
237
- content = ""
238
- if is_reasoning:
239
- if thinking == 2:
240
- thinking = 1
241
- content = f"<Thinking>\n{token}"
242
- print(f"{content}", end="")
243
- elif thinking & (
244
- not data["result"]["response"]["isThinking"]
245
- ):
246
- thinking = 0
247
- content = f"\n</Thinking>\n{token}"
248
- print(f"{content}", end="")
249
- else:
250
- content = token
251
- print(content, end="")
252
- else:
253
- content = token
254
- print(content, end="")
255
- openai_chunk = {
256
- "id": "chatcmpl-" + str(uuid.uuid4()),
257
- "object": "chat.completion.chunk",
258
- "created": int(time.time()),
259
- "model": model,
260
- "choices": [
261
- {
262
- "index": 0,
263
- "delta": {"content": content},
264
- "finish_reason": None,
265
- }
266
- ],
267
- }
268
- yield f"data: {json.dumps(openai_chunk)}\n\n"
269
- if data["result"]["response"]["isSoftStop"]:
270
- openai_chunk = {
271
- "id": "chatcmpl-" + str(uuid.uuid4()),
272
- "object": "chat.completion.chunk",
273
- "created": int(time.time()),
274
- "model": model,
275
- "choices": [
276
- {
277
- "index": 0,
278
- "delta": {"content": content},
279
- "finish_reason": "completed",
280
- }
281
- ],
282
- }
283
- yield f"data: {json.dumps(openai_chunk)}\n\n"
284
- break
285
- print("\n---------- Response End ----------")
286
- yield f"data: [DONE]\n\n"
287
- except Exception as e:
288
- print(f"Failed to send message: {e}")
289
- yield f'data: {{"error": "{e}"}}\n\n'
290
-
291
- return Response(generate(), content_type="text/event-stream")
292
- except Exception as e:
293
- print(f"Failed to send message: {e}")
294
- return jsonify({"error": f"Failed to send message: {e}"}), 500
295
-
296
-
297
- def send_message_non_stream(
298
- message, model, disable_search, force_concise, is_reasoning
299
- ):
300
- headers = {
301
- "authority": "grok.com",
302
- "accept": "*/*",
303
- "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
304
- "cache-control": "no-cache",
305
- "content-type": "application/json",
306
- "origin": "https://grok.com",
307
- "pragma": "no-cache",
308
- "referer": "https://grok.com/",
309
- "sec-ch-ua": '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
310
- "sec-ch-ua-mobile": "?0",
311
- "sec-ch-ua-platform": '"Windows"',
312
- "sec-fetch-dest": "empty",
313
- "sec-fetch-mode": "cors",
314
- "sec-fetch-site": "same-origin",
315
- }
316
- payload = {
317
- "temporary": TEMPORARY_MODE,
318
- "modelName": "grok-3",
319
- "message": message,
320
- "fileAttachments": [],
321
- "imageAttachments": [],
322
- "disableSearch": disable_search,
323
- "enableImageGeneration": False,
324
- "returnImageBytes": False,
325
- "returnRawGrokInXaiRequest": False,
326
- "enableImageStreaming": True,
327
- "imageGenerationCount": 2,
328
- "forceConcise": force_concise,
329
- "toolOverrides": {},
330
- "enableSideBySide": True,
331
- "isPreset": False,
332
- "sendFinalMetadata": True,
333
- "customInstructions": "",
334
- "deepsearchPreset": "",
335
- "isReasoning": is_reasoning,
336
- }
337
 
338
- thinking = 2
 
 
 
 
339
  try:
340
- scraper = get_next_account(model)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
341
 
342
- # 预热Cloudflare
343
- print("预热Cloudflare权限...")
344
- scraper.get("https://grok.com/", timeout=15)
345
- time.sleep(random.uniform(1.0, 2.0)) # 随机等待以模拟人类
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
346
 
347
- print("发送非流式消息请求...")
348
- response = scraper.post(TARGET_URL, headers=headers, json=payload, stream=True)
349
- response.raise_for_status()
350
- cnt = 2
351
  try:
352
- print("---------- Response ----------")
353
- buffer = io.StringIO()
354
- for line in response.iter_lines():
355
- if line:
356
- if cnt != 0:
357
- cnt -= 1
358
- else:
359
- decoded_line = line.decode("utf-8")
360
- data = json.loads(decoded_line)
361
- token = data["result"]["response"]["token"]
362
- content = ""
363
- if is_reasoning:
364
- if thinking == 2:
365
- thinking = 1
366
- content = f"<Thinking>\n{token}"
367
- print(f"{content}", end="")
368
- buffer.write(content)
369
- elif thinking & (
370
- not data["result"]["response"]["isThinking"]
371
- ):
372
- thinking = 0
373
- content = f"\n</Thinking>\n{token}"
374
- print(f"{content}", end="")
375
- buffer.write(content)
376
- else:
377
- content = token
378
- print(content, end="")
379
- buffer.write(content)
380
- else:
381
- content = token
382
- print(content, end="")
383
- buffer.write(content)
384
- if data["result"]["response"]["isSoftStop"]:
385
- break
386
- print("\n---------- Response End ----------")
387
- openai_response = {
388
- "id": "chatcmpl-" + str(uuid.uuid4()),
389
- "object": "chat.completion",
390
- "created": int(time.time()),
391
- "model": model,
392
- "choices": [
393
- {
394
- "index": 0,
395
- "message": {"role": "assistant", "content": buffer.getvalue()},
396
- "finish_reason": "completed",
397
  }
398
- ],
399
- }
400
- return jsonify(openai_response)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
401
  except Exception as e:
402
- print(f"Failed to process response: {e}")
403
- return jsonify({"error": f"Failed to process response: {e}"}), 500
 
 
 
 
 
 
404
  except Exception as e:
405
- print(f"Failed to send message: {e}")
406
- return jsonify({"error": f"Failed to send message: {e}"}), 500
407
-
408
-
409
- def format_message(messages):
410
- buffer = io.StringIO()
411
- role_map, prefix, messages = extract_role(messages)
412
- for message in messages:
413
- role = message.get("role")
414
- role = "\b" + role_map[role] if prefix else role_map[role]
415
- content = message.get("content").replace("\\n", "\n")
416
- pattern = re.compile(r"<\|removeRole\|>\n")
417
- if pattern.match(content):
418
- content = pattern.sub("", content)
419
- buffer.write(f"{content}\n")
420
- else:
421
- buffer.write(f"{role}: {content}\n")
422
- return buffer.getvalue()
423
-
424
-
425
- def extract_role(messages):
426
- role_map = {"user": "Human", "assistant": "Assistant", "system": "System"}
427
- prefix = False
428
- first_message = messages[0]["content"]
429
- pattern = re.compile(
430
- r"""
431
- <roleInfo>\s*
432
- user:\s*(?P<user>[^\n]*)\s*
433
- assistant:\s*(?P<assistant>[^\n]*)\s*
434
- system:\s*(?P<system>[^\n]*)\s*
435
- prefix:\s*(?P<prefix>[^\n]*)\s*
436
- </roleInfo>\n
437
- """,
438
- re.VERBOSE,
439
- )
440
- match = pattern.search(first_message)
441
- if match:
442
- role_map = {
443
- "user": match.group("user"),
444
- "assistant": match.group("assistant"),
445
- "system": match.group("system"),
446
- }
447
- prefix = match.group("prefix") == "1"
448
- messages[0]["content"] = pattern.sub("", first_message)
449
- print(f"Extracted role map:")
450
- print(
451
- f"User: {role_map['user']}, {role_map['assistant']}, System: {role_map['system']}"
452
- )
453
- print(f"Using prefix: {prefix}")
454
- return (role_map, prefix, messages)
455
-
456
-
457
- def magic(messages):
458
- first_message = messages[0]["content"]
459
- disable_search = False
460
- if re.search(r"<\|disableSearch\|>", first_message):
461
- disable_search = True
462
- print("Disable search")
463
- first_message = re.sub(r"<\|disableSearch\|>", "", first_message)
464
- force_concise = False
465
- if re.search(r"<\|forceConcise\|>", first_message):
466
- force_concise = True
467
- print("Force concise")
468
- first_message = re.sub(r"<\|forceConcise\|>", "", first_message)
469
- messages[0]["content"] = first_message
470
- return (disable_search, force_concise, messages)
471
-
472
 
473
- # 初始化配置
474
- resolve_config()
 
475
 
476
  if __name__ == "__main__":
477
- app.run(host="0.0.0.0", port=7860)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
478
 
479
 
 
1
+ import os
 
 
2
  import json
 
3
  import uuid
4
+ import base64
5
+ import sys
6
+ import inspect
7
+ from loguru import logger
8
  import os
9
+ import asyncio
10
+ import time
11
+ import aiohttp
12
+ import io
13
+ from datetime import datetime
14
+ from functools import partial
15
+
16
+ from quart import Quart, request, jsonify, Response
17
+ from quart_cors import cors
18
+ import cloudscraper
19
+ from dotenv import load_dotenv
20
+
21
+ load_dotenv()
22
+
23
+ CONFIG = {
24
+ "MODELS": {
25
+ 'grok-2': 'grok-latest',
26
+ 'grok-2-imageGen': 'grok-latest',
27
+ 'grok-2-search': 'grok-latest',
28
+ "grok-3": "grok-3",
29
+ "grok-3-search": "grok-3",
30
+ "grok-3-imageGen": "grok-3",
31
+ "grok-3-deepsearch": "grok-3",
32
+ "grok-3-reasoning": "grok-3"
33
+ },
34
+ "API": {
35
+ "BASE_URL": "https://grok.com",
36
+ "API_KEY": os.getenv("API_KEY", "sk-123456"),
37
+ "IS_TEMP_CONVERSATION": os.getenv("IS_TEMP_CONVERSATION", "false").lower() == "true",
38
+ "PICGO_KEY": os.getenv("PICGO_KEY", None), # 想要流式生图的话需要填入这个PICGO图床的key
39
+ "TUMY_KEY": os.getenv("TUMY_KEY", None), # 想要流式生图的话需要填入这个TUMY图床的key
40
+ "IS_CUSTOM_SSO": os.getenv("IS_CUSTOM_SSO", "false").lower() == "true"
41
+ },
42
+ "SERVER": {
43
+ "PORT": int(os.getenv("PORT", 3000))
44
+ },
45
+ "RETRY": {
46
+ "MAX_ATTEMPTS": 2
47
+ },
48
+ "SHOW_THINKING": os.getenv("SHOW_THINKING", "false").lower() == "true",
49
+ "IS_THINKING": False,
50
+ "IS_IMG_GEN": False,
51
+ "IS_IMG_GEN2": False,
52
+ "ISSHOW_SEARCH_RESULTS": os.getenv("ISSHOW_SEARCH_RESULTS", "true").lower() == "true"
53
+ }
54
+
55
+ class Logger:
56
+ def __init__(self, level="INFO", colorize=True, format=None):
57
+ # 移除默认的日志处理器
58
+ logger.remove()
59
 
60
+ if format is None:
61
+ format = (
62
+ "<green>{time:YYYY-MM-DD HH:mm:ss}</green> | "
63
+ "<level>{level: <8}</level> | "
64
+ "<cyan>{extra[filename]}</cyan>:<cyan>{extra[function]}</cyan>:<cyan>{extra[lineno]}</cyan> | "
65
+ "<level>{message}</level>"
66
+ )
67
+
68
+ logger.add(
69
+ sys.stderr,
70
+ level=level,
71
+ format=format,
72
+ colorize=colorize,
73
+ backtrace=True,
74
+ diagnose=True
75
+ )
76
+
77
+ self.logger = logger
78
+
79
+ def _get_caller_info(self):
80
+ frame = inspect.currentframe()
81
+ try:
82
+ caller_frame = frame.f_back.f_back
83
+ full_path = caller_frame.f_code.co_filename
84
+ function = caller_frame.f_code.co_name
85
+ lineno = caller_frame.f_lineno
86
+
87
+ filename = os.path.basename(full_path)
88
+
89
+ return {
90
+ 'filename': filename,
91
+ 'function': function,
92
+ 'lineno': lineno
93
+ }
94
+ finally:
95
+ del frame
96
+
97
+ def info(self, message, source="API"):
98
+ caller_info = self._get_caller_info()
99
+ self.logger.bind(**caller_info).info(f"[{source}] {message}")
100
+
101
+ def error(self, message, source="API"):
102
+ caller_info = self._get_caller_info()
103
+
104
+ if isinstance(message, Exception):
105
+ self.logger.bind(**caller_info).exception(f"[{source}] {str(message)}")
106
+ else:
107
+ self.logger.bind(**caller_info).error(f"[{source}] {message}")
108
 
109
+ def warning(self, message, source="API"):
110
+ caller_info = self._get_caller_info()
111
+ self.logger.bind(**caller_info).warning(f"[{source}] {message}")
 
 
 
 
 
 
 
112
 
113
+ def debug(self, message, source="API"):
114
+ caller_info = self._get_caller_info()
115
+ self.logger.bind(**caller_info).debug(f"[{source}] {message}")
116
+
117
+ async def request_logger(self, request):
118
+ caller_info = self._get_caller_info()
119
+ self.logger.bind(**caller_info).info(f"请求: {request.method} {request.path}", "Request")
120
+
121
+ logger = Logger(level="INFO")
122
+
123
+ class AuthTokenManager:
124
+ def __init__(self):
125
+ self.token_model_map = {}
126
+ self.expired_tokens = set()
127
+ self.token_status_map = {}
128
+ self.token_reset_switch = False
129
+ self.token_reset_timer = None
130
+ self.is_custom_sso = os.getenv("IS_CUSTOM_SSO", "false").lower() == "true"
131
+
132
+ self.model_config = {
133
+ "grok-2": {
134
+ "RequestFrequency": 2,
135
+ "ExpirationTime": 1 * 60 * 60
136
+ },
137
+ "grok-3": {
138
+ "RequestFrequency": 20,
139
+ "ExpirationTime": 2 * 60 * 60 #
140
+ },
141
+ "grok-3-deepsearch": {
142
+ "RequestFrequency": 10,
143
+ "ExpirationTime": 24 * 60 * 60
144
+ },
145
+ "grok-3-reasoning": {
146
+ "RequestFrequency": 10,
147
+ "ExpirationTime": 24 * 60 * 60
148
+ }
149
+ }
150
+
151
+ async def add_token(self, token):
152
+ sso = token.split("sso=")[1].split(";")[0]
153
+ for model in self.model_config.keys():
154
+ if model not in self.token_model_map:
155
+ self.token_model_map[model] = []
156
+
157
+ if sso not in self.token_status_map:
158
+ self.token_status_map[sso] = {}
159
+
160
+ existing_token_entry = next((entry for entry in self.token_model_map[model]
161
+ if entry.get("token") == token), None)
162
+
163
+ if not existing_token_entry:
164
+ self.token_model_map[model].append({
165
+ "token": token,
166
+ "RequestCount": 0,
167
+ "AddedTime": time.time(),
168
+ "StartCallTime": None
169
+ })
170
+
171
+ if model not in self.token_status_map[sso]:
172
+ self.token_status_map[sso][model] = {
173
+ "isValid": True,
174
+ "invalidatedTime": None,
175
+ "totalRequestCount": 0
176
+ }
177
+ logger.info(f"添加令牌成功: {token}", "TokenManager")
178
+
179
+ async def set_token(self, token):
180
+ models = list(self.model_config.keys())
181
+ for model in models:
182
+ self.token_model_map[model] = [{
183
+ "token": token,
184
+ "RequestCount": 0,
185
+ "AddedTime": time.time(),
186
+ "StartCallTime": None
187
+ }]
188
+
189
+ sso = token.split("sso=")[1].split(";")[0]
190
+ self.token_status_map[sso] = {}
191
+ for model in models:
192
+ self.token_status_map[sso][model] = {
193
+ "isValid": True,
194
+ "invalidatedTime": None,
195
+ "totalRequestCount": 0
196
+ }
197
+ logger.info(f"设置令牌成功: {token}", "TokenManager")
198
+
199
+ async def delete_token(self, token):
200
+ try:
201
+ sso = token.split("sso=")[1].split(";")[0]
202
+
203
+ for model in self.token_model_map:
204
+ self.token_model_map[model] = [
205
+ entry for entry in self.token_model_map[model]
206
+ if entry.get("token") != token
207
+ ]
208
+
209
+ if sso in self.token_status_map:
210
+ del self.token_status_map[sso]
211
+
212
+ logger.info(f"令牌已成功移除: {token}", "TokenManager")
213
+ return True
214
+ except Exception as error:
215
+ logger.error(f"令牌删除失败: {error}", "TokenManager")
216
+ return False
217
+
218
+ def get_next_token_for_model(self, model_id):
219
+ normalized_model = self.normalize_model_name(model_id)
220
+
221
+ if normalized_model not in self.token_model_map or not self.token_model_map[normalized_model]:
222
+ return None
223
+
224
+ token_entry = self.token_model_map[normalized_model][0]
225
+
226
+ if token_entry:
227
+ if self.is_custom_sso:
228
+ return token_entry["token"]
229
+
230
+ if token_entry["StartCallTime"] is None:
231
+ token_entry["StartCallTime"] = time.time()
232
+
233
+ if not self.token_reset_switch:
234
+ self.start_token_reset_process()
235
+ self.token_reset_switch = True
236
+
237
+ token_entry["RequestCount"] += 1
238
+
239
+ if token_entry["RequestCount"] > self.model_config[normalized_model]["RequestFrequency"]:
240
+ self.remove_token_from_model(normalized_model, token_entry["token"])
241
+ if not self.token_model_map[normalized_model]:
242
+ return None
243
+ next_token_entry = self.token_model_map[normalized_model][0]
244
+ return next_token_entry["token"] if next_token_entry else None
245
+
246
+ sso = token_entry["token"].split("sso=")[1].split(";")[0]
247
+ if sso in self.token_status_map and normalized_model in self.token_status_map[sso]:
248
+ if token_entry["RequestCount"] == self.model_config[normalized_model]["RequestFrequency"]:
249
+ self.token_status_map[sso][normalized_model]["isValid"] = False
250
+ self.token_status_map[sso][normalized_model]["invalidatedTime"] = time.time()
251
+
252
+ self.token_status_map[sso][normalized_model]["totalRequestCount"] += 1
253
+
254
+ return token_entry["token"]
255
+
256
+ return None
257
+
258
+ def remove_token_from_model(self, model_id, token):
259
+ normalized_model = self.normalize_model_name(model_id)
260
+
261
+ if normalized_model not in self.token_model_map:
262
+ logger.error(f"模型 {normalized_model} 不存在", "TokenManager")
263
+ return False
264
+
265
+ model_tokens = self.token_model_map[normalized_model]
266
+ token_index = -1
267
+
268
+ for i, entry in enumerate(model_tokens):
269
+ if entry["token"] == token:
270
+ token_index = i
271
+ break
272
+
273
+ if token_index != -1:
274
+ removed_token_entry = model_tokens.pop(token_index)
275
+ self.expired_tokens.add((
276
+ removed_token_entry["token"],
277
+ normalized_model,
278
+ time.time()
279
+ ))
280
+
281
+ if not self.token_reset_switch:
282
+ self.start_token_reset_process()
283
+ self.token_reset_switch = True
284
+
285
+ logger.info(f"模型{model_id}的令牌已失效,已成功移除令牌: {token}", "TokenManager")
286
+ return True
287
+
288
+ logger.error(f"在模型 {normalized_model} 中未找到 token: {token}", "TokenManager")
289
+ return False
290
+
291
+ def get_expired_tokens(self):
292
+ return list(self.expired_tokens)
293
+
294
+ def normalize_model_name(self, model):
295
+ if model.startswith('grok-') and 'deepsearch' not in model and 'reasoning' not in model:
296
+ return '-'.join(model.split('-')[:2])
297
+ return model
298
+
299
+ def get_token_count_for_model(self, model_id):
300
+ normalized_model = self.normalize_model_name(model_id)
301
+ return len(self.token_model_map.get(normalized_model, []))
302
+
303
+ def get_remaining_token_request_capacity(self):
304
+ remaining_capacity_map = {}
305
+
306
+ for model in self.model_config:
307
+ model_tokens = self.token_model_map.get(model, [])
308
+ model_request_frequency = self.model_config[model]["RequestFrequency"]
309
+
310
+ total_used_requests = sum(entry.get("RequestCount", 0) for entry in model_tokens)
311
+ remaining_capacity = (len(model_tokens) * model_request_frequency) - total_used_requests
312
+ remaining_capacity_map[model] = max(0, remaining_capacity)
313
+
314
+ return remaining_capacity_map
315
+
316
+ def get_token_array_for_model(self, model_id):
317
+ normalized_model = self.normalize_model_name(model_id)
318
+ return self.token_model_map.get(normalized_model, [])
319
+
320
+ def start_token_reset_process(self):
321
+ if hasattr(self, '_reset_task') and self._reset_task:
322
+ pass
323
+ else:
324
+ self._reset_task = asyncio.create_task(self._token_reset_worker())
325
+
326
+ async def _token_reset_worker(self):
327
+ while True:
328
+ try:
329
+ current_time = time.time()
330
+
331
+ expired_tokens_to_remove = set()
332
+ for token_info in self.expired_tokens:
333
+ token, model, expired_time = token_info
334
+ expiration_time = self.model_config[model]["ExpirationTime"]
335
+
336
+ if current_time - expired_time >= expiration_time:
337
+ if not any(entry["token"] == token for entry in self.token_model_map[model]):
338
+ self.token_model_map[model].append({
339
+ "token": token,
340
+ "RequestCount": 0,
341
+ "AddedTime": current_time,
342
+ "StartCallTime": None
343
+ })
344
+
345
+ sso = token.split("sso=")[1].split(";")[0]
346
+ if sso in self.token_status_map and model in self.token_status_map[sso]:
347
+ self.token_status_map[sso][model]["isValid"] = True
348
+ self.token_status_map[sso][model]["invalidatedTime"] = None
349
+ self.token_status_map[sso][model]["totalRequestCount"] = 0
350
+
351
+ expired_tokens_to_remove.add(token_info)
352
+
353
+ for token_info in expired_tokens_to_remove:
354
+ self.expired_tokens.remove(token_info)
355
+
356
+ for model in self.model_config:
357
+ if model not in self.token_model_map:
358
+ continue
359
+
360
+ for token_entry in self.token_model_map[model]:
361
+ if token_entry["StartCallTime"] is None:
362
+ continue
363
+
364
+ expiration_time = self.model_config[model]["ExpirationTime"]
365
+ if current_time - token_entry["StartCallTime"] >= expiration_time:
366
+ sso = token_entry["token"].split("sso=")[1].split(";")[0]
367
+ if sso in self.token_status_map and model in self.token_status_map[sso]:
368
+ self.token_status_map[sso][model]["isValid"] = True
369
+ self.token_status_map[sso][model]["invalidatedTime"] = None
370
+ self.token_status_map[sso][model]["totalRequestCount"] = 0
371
+
372
+ token_entry["RequestCount"] = 0
373
+ token_entry["StartCallTime"] = None
374
+
375
+ await asyncio.sleep(3600)
376
+ except Exception as e:
377
+ logger.error(f"令牌重置过程中出错: {e}", "TokenManager")
378
+ await asyncio.sleep(3600)
379
+
380
+ def get_all_tokens(self):
381
+ all_tokens = set()
382
+ for model_tokens in self.token_model_map.values():
383
+ for entry in model_tokens:
384
+ all_tokens.add(entry["token"])
385
+ return list(all_tokens)
386
+
387
+ def get_token_status_map(self):
388
+ return self.token_status_map
389
+
390
+ token_manager = AuthTokenManager()
391
+
392
+ async def initialize_tokens():
393
+ sso_array = os.getenv("SSO", "").split(',')
394
+ logger.info("开始加载令牌", "Server")
395
 
396
+ for sso in sso_array:
397
+ if sso.strip():
398
+ await token_manager.add_token(f"sso-rw={sso};sso={sso}")
 
 
 
 
 
 
 
 
 
 
 
399
 
400
+ logger.info(f"成功加载令牌: {json.dumps(token_manager.get_all_tokens(), indent=2)}", "Server")
401
+ logger.info(f"令牌加载完成,共加载: {len(token_manager.get_all_tokens())}个令牌", "Server")
402
+ logger.info("初始化完成", "Server")
403
+
404
class Utils:
    """Shared helpers: search-result formatting and sync-to-async bridging."""

    @staticmethod
    async def organize_search_results(search_results):
        """Render web-search results as collapsible markdown <details> blocks.

        Returns '' when *search_results* is falsy or lacks a "results" key.
        """
        if not search_results or "results" not in search_results:
            return ''

        def render(index, result):
            title = result.get("title", "未知标题")
            url = result.get("url", "#")
            preview = result.get("preview", "无预览内容")
            return f"\r\n<details><summary>资料[{index}]: {title}</summary>\r\n{preview}\r\n\n[Link]({url})\r\n</details>"

        return '\n\n'.join(
            render(i, r) for i, r in enumerate(search_results["results"])
        )

    @staticmethod
    async def run_in_executor(func, *args, **kwargs):
        """Run a blocking callable on the default executor and await its result."""
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(None, partial(func, *args, **kwargs))
 
 
 
 
 
 
 
 
 
428
 
429
class GrokApiClient:
    """Converts OpenAI-style chat requests into Grok app-chat payloads.

    Also handles uploading inline base64 images to the Grok file RPC so they
    can be referenced as file attachments in the chat request.
    """

    def __init__(self, model_id):
        if model_id not in CONFIG["MODELS"]:
            raise ValueError(f"不支持的模型: {model_id}")
        self.model = model_id
        # Upstream model identifier mapped from the public model name.
        self.model_id = CONFIG["MODELS"][model_id]
        # cloudscraper session: bypasses Cloudflare challenges on grok.com.
        self.scraper = cloudscraper.create_scraper()

    def process_message_content(self, content):
        """Return *content* unchanged if it is already a plain string, else None."""
        if isinstance(content, str):
            return content
        return None

    def get_image_type(self, base64_string):
        """Derive mimeType/fileName from a data-URI prefix; defaults to JPEG."""
        mime_type = 'image/jpeg'
        if 'data:image' in base64_string:
            import re
            matches = re.match(r'data:([a-zA-Z0-9]+\/[a-zA-Z0-9-.+]+);base64,', base64_string)
            if matches:
                mime_type = matches.group(1)

        extension = mime_type.split('/')[1]
        file_name = f"image.{extension}"

        return {
            "mimeType": mime_type,
            "fileName": file_name
        }

    async def upload_base64_image(self, base64_data, url):
        """Upload a base64 image via the uploadFile RPC; return its fileMetadataId.

        Returns '' on any failure (no token, non-200 response, or exception) so
        callers can simply skip the attachment.
        """
        try:
            # Strip a data-URI prefix if present; the RPC wants raw base64.
            if 'data:image' in base64_data:
                image_buffer = base64_data.split(',')[1]
            else:
                image_buffer = base64_data

            image_info = self.get_image_type(base64_data)
            mime_type = image_info["mimeType"]
            file_name = image_info["fileName"]

            upload_data = {
                "rpc": "uploadFile",
                "req": {
                    "fileName": file_name,
                    "fileMimeType": mime_type,
                    "content": image_buffer
                }
            }

            logger.info("发送图片请求", "Server")

            token = token_manager.get_next_token_for_model(self.model)
            if not token:
                logger.error("没有可用的token", "Server")
                return ''

            headers = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
                "Connection": "keep-alive",
                "Accept": "text/event-stream",
                "Content-Type": "text/plain;charset=UTF-8",
                "Cookie": token,
                "baggage": "sentry-public_key=b311e0f2690c81f25e2c4cf6d4f7ce1c"
            }

            # cloudscraper is synchronous; run it on the default executor.
            response = await Utils.run_in_executor(
                self.scraper.post,
                url,
                headers=headers,
                data=json.dumps(upload_data),
            )

            if response.status_code != 200:
                logger.error(f"上传图片失败,状态码:{response.status_code},原因:{response.text}", "Server")
                return ''

            result = response.json()
            logger.info(f'上传图片成功: {result}', "Server")
            return result["fileMetadataId"]

        except Exception as error:
            logger.error(error, "Server")
            return ''

    async def prepare_chat_request(self, request_data):
        """Flatten OpenAI-style messages into Grok's single-message payload.

        Consecutive same-role messages are merged; inline base64 images on the
        last message are uploaded and referenced via fileAttachments (max 4).
        Image-gen and deepsearch models only keep the final user message.
        """
        todo_messages = request_data["messages"]
        if request_data["model"] in ["grok-2-imageGen", "grok-3-imageGen", "grok-3-deepsearch"]:
            last_message = todo_messages[-1]
            if last_message["role"] != "user":
                raise ValueError("画图模型的最后一条消息必须是用户消息!")
            todo_messages = [last_message]

        file_attachments = []
        messages = ''
        last_role = None
        last_content = ''
        search = request_data["model"] in ["grok-2-search", "grok-3-search"]

        def remove_think_tags(text):
            # Strip previous-turn <think> blocks and replace inline base64
            # images with a placeholder so they are not re-sent as text.
            import re
            text = re.sub(r'<think>[\s\S]*?<\/think>', '', text).strip()
            text = re.sub(r'!\[image\]\(data:.*?base64,.*?\)', '[图片]', text)
            return text

        async def process_image_url(content):
            # Upload only inline base64 images; remote URLs are left alone.
            if content["type"] == "image_url" and "data:image" in content["image_url"]["url"]:
                image_response = await self.upload_base64_image(
                    content["image_url"]["url"],
                    f"{CONFIG['API']['BASE_URL']}/api/rpc"
                )
                return image_response
            return None

        async def process_content(content):
            # Normalize list/dict/str message content into one text string,
            # with "[图片]" placeholders standing in for image parts.
            if isinstance(content, list):
                text_content = ''
                for item in content:
                    if item["type"] == "image_url":
                        text_content += ("[图片]" if text_content else '') + "\n" if text_content else "[图片]"
                    elif item["type"] == "text":
                        text_content += ("\n" + remove_think_tags(item["text"]) if text_content else remove_think_tags(item["text"]))
                return text_content
            elif isinstance(content, dict) and content is not None:
                if content["type"] == "image_url":
                    return "[图片]"
                elif content["type"] == "text":
                    return remove_think_tags(content["text"])
            return remove_think_tags(self.process_message_content(content))

        for current in todo_messages:
            # Anything not explicitly "assistant" is treated as user input.
            role = "assistant" if current["role"] == "assistant" else "user"
            is_last_message = current == todo_messages[-1]

            logger.info(json.dumps(current, indent=2, ensure_ascii=False), "Server")
            # Only the final message's images become real file attachments.
            if is_last_message and "content" in current:
                if isinstance(current["content"], list):
                    for item in current["content"]:
                        if item["type"] == "image_url":
                            logger.info("处理图片附件", "Server")
                            processed_image = await process_image_url(item)
                            if processed_image:
                                file_attachments.append(processed_image)
                elif isinstance(current["content"], dict) and current["content"].get("type") == "image_url":
                    processed_image = await process_image_url(current["content"])
                    if processed_image:
                        file_attachments.append(processed_image)

            text_content = await process_content(current["content"])

            if text_content or (is_last_message and file_attachments):
                if role == last_role and text_content:
                    # Same role as previous: splice the merged content back in
                    # place of the last "ROLE: " segment instead of appending.
                    last_content += '\n' + text_content
                    messages = messages[:messages.rindex(f"{role.upper()}: ")] + f"{role.upper()}: {last_content}\n"
                else:
                    messages += f"{role.upper()}: {text_content or '[图片]'}\n"
                    last_content = text_content
                    last_role = role
        return {
            "temporary": CONFIG["API"]["IS_TEMP_CONVERSATION"],
            "modelName": self.model_id,
            "message": messages.strip(),
            "fileAttachments": file_attachments[:4],  # upstream limit: 4 attachments
            "imageAttachments": [],
            "disableSearch": False,
            "enableImageGeneration": True,
            "returnImageBytes": False,
            "returnRawGrokInXaiRequest": False,
            "enableImageStreaming": False,
            "imageGenerationCount": 1,
            "forceConcise": False,
            "toolOverrides": {
                "imageGen": request_data["model"] in ["grok-2-imageGen", "grok-3-imageGen"],
                "webSearch": search,
                "xSearch": search,
                "xMediaSearch": search,
                "trendsSearch": search,
                "xPostAnalyze": search
            },
            "enableSideBySide": True,
            "isPreset": False,
            "sendFinalMetadata": True,
            "customInstructions": "",
            "deepsearchPreset": "default" if request_data["model"] == "grok-3-deepsearch" else "",
            "isReasoning": request_data["model"] == "grok-3-reasoning"
        }
614
 
615
class MessageProcessor:
    """Builds OpenAI-compatible chat-completion response payloads."""

    @staticmethod
    def create_chat_response(message, model, is_stream=False):
        """Wrap *message* in an OpenAI completion dict (stream chunk if is_stream)."""
        response = {
            "id": f"chatcmpl-{str(uuid.uuid4())}",
            "created": int(datetime.now().timestamp()),
            "model": model,
        }

        if is_stream:
            response["object"] = "chat.completion.chunk"
            response["choices"] = [{
                "index": 0,
                "delta": {"content": message},
            }]
            return response

        response["object"] = "chat.completion"
        response["choices"] = [{
            "index": 0,
            "message": {"role": "assistant", "content": message},
            "finish_reason": "stop",
        }]
        response["usage"] = None
        return response
649
 
650
async def process_model_response(response, model):
    """Extract a token chunk and/or image URL from one upstream response object.

    Mutates CONFIG["IS_THINKING"] to track open/close of <think> wrapping for
    the reasoning model. Returns {"token": str|None, "imageUrl": str|None}.
    """
    result = {"token": None, "imageUrl": None}

    # Image-generation mode: never emit text, only the cached image URL once.
    if CONFIG["IS_IMG_GEN"]:
        if response and response.get("cachedImageGenerationResponse") and not CONFIG["IS_IMG_GEN2"]:
            result["imageUrl"] = response["cachedImageGenerationResponse"]["imageUrl"]
        return result

    if model == "grok-2":
        result["token"] = response.get("token")
    elif model in ["grok-2-search", "grok-3-search"]:
        # Search models: optionally surface the search results in a <think> block.
        if response and response.get("webSearchResults") and CONFIG["ISSHOW_SEARCH_RESULTS"]:
            result["token"] = f"\r\n<think>{await Utils.organize_search_results(response['webSearchResults'])}</think>\r\n"
        else:
            result["token"] = response.get("token")
    elif model == "grok-3":
        result["token"] = response.get("token")
    elif model == "grok-3-deepsearch":
        # Deepsearch: only the "final"-tagged messages carry user-visible text.
        if response and response.get("messageTag") == "final":
            result["token"] = response.get("token")
    elif model == "grok-3-reasoning":
        # Hide thinking output entirely when SHOW_THINKING is off.
        if response and response.get("isThinking", False) and not CONFIG["SHOW_THINKING"]:
            return result

        # Open/close <think> tags on transitions of the isThinking flag.
        if response and response.get("isThinking", False) and not CONFIG["IS_THINKING"]:
            result["token"] = "<think>" + response.get("token", "")
            CONFIG["IS_THINKING"] = True
        elif response and not response.get("isThinking", True) and CONFIG["IS_THINKING"]:
            result["token"] = "</think>" + response.get("token", "")
            CONFIG["IS_THINKING"] = False
        else:
            result["token"] = response.get("token")

    return result
684
 
685
async def stream_response_generator(response, model):
    """Yield OpenAI-style SSE "data:" lines from the upstream streaming response.

    Resets the CONFIG IS_THINKING / IS_IMG_GEN* flags at the start of every
    stream. Malformed lines are logged and skipped; a final "data: [DONE]"
    terminates the stream.
    """
    try:
        CONFIG["IS_THINKING"] = False
        CONFIG["IS_IMG_GEN"] = False
        CONFIG["IS_IMG_GEN2"] = False
        logger.info("开始处理流式响应", "Server")

        async def iter_lines():
            # Bridge the blocking iter_lines() iterator into async by pulling
            # one line at a time on the executor.
            line_iter = response.iter_lines()
            while True:
                try:
                    line = await Utils.run_in_executor(lambda: next(line_iter, None))
                    if line is None:
                        break
                    yield line
                except StopIteration:
                    break
                except Exception as e:
                    logger.error(f"迭代行时出错: {str(e)}", "Server")
                    break

        async for line in iter_lines():
            if not line:
                continue

            try:
                try:
                    line_str = line.decode('utf-8', errors='replace')
                    # Cheap sanity check that the decoded line looks like JSON
                    # before attempting to parse it.
                    if not line_str.strip() or not line_str.strip()[0] in ['{', '[']:
                        logger.warning(f"无效的JSON数据: {line_str}", "Server")
                        continue
                except UnicodeDecodeError:
                    logger.warning("解码失败,跳过此行数据", "Server")
                    continue

                try:
                    line_json = json.loads(line_str)
                except json.JSONDecodeError as e:
                    logger.warning(f"JSON解析失败: {e}, 数据: {line_str[:50]}...", "Server")
                    continue

                # NOTE(review): this raise is caught by the per-line handler
                # below, so an upstream error line is logged and skipped rather
                # than aborting the stream — confirm that is intended.
                if line_json and line_json.get("error"):
                    raise ValueError("RateLimitError")

                response_data = line_json.get("result", {}).get("response")
                if not response_data:
                    continue

                if response_data.get("doImgGen") or response_data.get("imageAttachmentInfo"):
                    CONFIG["IS_IMG_GEN"] = True

                result = await process_model_response(response_data, model)

                if result["token"]:
                    yield f"data: {json.dumps(MessageProcessor.create_chat_response(result['token'], model, True))}\n\n"

                if result["imageUrl"]:
                    CONFIG["IS_IMG_GEN2"] = True
                    data_image = await handle_image_response(result["imageUrl"], model)
                    yield f"data: {json.dumps(MessageProcessor.create_chat_response(data_image, model, True))}\n\n"

            except Exception as error:
                logger.error(f"处理行数据错误: {str(error)}", "Server")
                continue

        yield "data: [DONE]\n\n"

    except Exception as error:
        logger.error(f"流式响应总体错误: {str(error)}", "Server")
        raise error
756
 
757
async def handle_normal_response(response, model):
    """Consume the whole upstream stream and return one completion response dict.

    Accumulates text tokens into a single string; if an image was generated,
    returns the image markdown instead of the accumulated text.
    """
    try:
        full_response = ''
        CONFIG["IS_THINKING"] = False
        CONFIG["IS_IMG_GEN"] = False
        CONFIG["IS_IMG_GEN2"] = False
        logger.info("开始处理非流式响应", "Server")
        image_url = None

        async def iter_lines():
            # Bridge the blocking iter_lines() iterator into async by pulling
            # one line at a time on the executor.
            line_iter = response.iter_lines()
            while True:
                try:
                    line = await Utils.run_in_executor(lambda: next(line_iter, None))
                    if line is None:
                        break
                    yield line
                except StopIteration:
                    break
                except Exception as e:
                    logger.error(f"迭代行时出错: {str(e)}", "Server")
                    break

        async for line in iter_lines():
            if not line:
                continue

            try:
                try:
                    line_str = line.decode('utf-8', errors='replace')
                    # Cheap sanity check that the decoded line looks like JSON
                    # before attempting to parse it.
                    if not line_str.strip() or not line_str.strip()[0] in ['{', '[']:
                        logger.warning(f"无效的JSON数据: {line_str}", "Server")
                        continue
                except UnicodeDecodeError:
                    logger.warning("解码失败,跳过此行数据", "Server")
                    continue

                try:
                    line_json = json.loads(line_str)
                except json.JSONDecodeError as e:
                    logger.warning(f"JSON解析失败: {e}, 数据: {line_str[:50]}...", "Server")
                    continue

                # NOTE(review): caught by the per-line handler below, so error
                # lines are logged and skipped rather than aborting — confirm.
                if line_json and line_json.get("error"):
                    raise ValueError("RateLimitError")

                response_data = line_json.get("result", {}).get("response")
                if not response_data:
                    continue

                if response_data.get("doImgGen") or response_data.get("imageAttachmentInfo"):
                    CONFIG["IS_IMG_GEN"] = True

                result = await process_model_response(response_data, model)

                if result["token"]:
                    full_response += result["token"]

                if result["imageUrl"]:
                    CONFIG["IS_IMG_GEN2"] = True
                    image_url = result["imageUrl"]

            except Exception as error:
                logger.error(f"处理行数据错误: {str(error)}", "Server")
                continue

        if CONFIG["IS_IMG_GEN2"] and image_url:
            data_image = await handle_image_response(image_url, model)
            return MessageProcessor.create_chat_response(data_image, model)
        else:
            return MessageProcessor.create_chat_response(full_response, model)

    except Exception as error:
        logger.error(f"非流式响应总体错误: {str(error)}", "Server")
        raise error
833
 
834
async def handle_image_response(image_url, model):
    """Download a generated image and return it as markdown.

    Fetches the asset from assets.grok.com (with up to MAX_RETRIES attempts),
    then uploads it to PicGo or TUMY when a key is configured; otherwise
    returns the image inline as a base64 data URI.
    """
    MAX_RETRIES = 2
    retry_count = 0
    scraper = cloudscraper.create_scraper()

    while retry_count < MAX_RETRIES:
        try:
            token = token_manager.get_next_token_for_model(model)
            if not token:
                raise ValueError("没有可用的token")

            image_response = await Utils.run_in_executor(
                scraper.get,
                f"https://assets.grok.com/{image_url}",
                headers={
                    **CONFIG["DEFAULT_HEADERS"],
                    "cookie": token
                }
            )

            if image_response.status_code == 200:
                break

            retry_count += 1
            if retry_count == MAX_RETRIES:
                raise ValueError(f"上游服务请求失败! status: {image_response.status_code}")

            # Linear backoff between retries.
            await asyncio.sleep(1 * retry_count)

        except Exception as error:
            logger.error(error, "Server")
            retry_count += 1
            if retry_count == MAX_RETRIES:
                raise error

            await asyncio.sleep(1 * retry_count)

    image_content = image_response.content

    if CONFIG["API"]["PICGO_KEY"]:
        # Upload to the PicGo image host and return its hosted URL.
        form = aiohttp.FormData()
        form.add_field('source',
            io.BytesIO(image_content),
            filename=f'image-{int(datetime.now().timestamp())}.jpg',
            content_type='image/jpeg')

        async with aiohttp.ClientSession() as session:
            async with session.post(
                "https://www.picgo.net/api/1/upload",
                data=form,
                headers={"X-API-Key": CONFIG["API"]["PICGO_KEY"]}
            ) as response_url:
                if response_url.status != 200:
                    return "生图失败,请查看PICGO图床密钥是否设置正确"
                else:
                    logger.info("生图成功", "Server")
                    result = await response_url.json()
                    return f"![image]({result['image']['url']})"
    elif CONFIG["API"]["TUMY_KEY"]:
        # Upload to the TUMY image host and return its hosted URL.
        form = aiohttp.FormData()
        form.add_field('file',
            io.BytesIO(image_content),
            filename=f'image-{int(datetime.now().timestamp())}.jpg',
            content_type='image/jpeg')

        async with aiohttp.ClientSession() as session:
            async with session.post(
                "https://tu.my/api/v1/upload",
                data=form,
                headers={
                    "Accept": "application/json",
                    "Authorization": f"Bearer {CONFIG['API']['TUMY_KEY']}"
                }
            ) as response_url:
                if response_url.status != 200:
                    return "生图失败,请查看TUMY图床密钥是否设置正确"
                else:
                    logger.info("生图成功", "Server")
                    result = await response_url.json()
                    return f"![image]({result['image']['url']})"
    # Without a PICGO_KEY or TUMY_KEY, return the image as inline base64.
    image_base64 = base64.b64encode(image_content).decode('utf-8')
    return f"![image](data:image/jpeg;base64,{image_base64})"
917
 
918
 
919
# Quart application with permissive CORS so browser clients can call the
# OpenAI-compatible endpoints directly.
app = Quart(__name__)
app = cors(app, allow_origin="*", allow_methods=["GET", "POST", "OPTIONS"], allow_headers=["Content-Type", "Authorization"])
 
921
 
922
@app.before_request
async def before_request():
    """Log every incoming request before it is routed."""
    await logger.request_logger(request)
925
 
926
@app.route('/v1/models', methods=['GET'])
async def models():
    """List the configured models in OpenAI /v1/models format."""
    entries = [
        {
            "id": model,
            "object": "model",
            "created": int(datetime.now().timestamp()),
            "owned_by": "grok"
        }
        for model in CONFIG["MODELS"].keys()
    ]
    return jsonify({"object": "list", "data": entries})
939
+
940
+
941
@app.route('/get/tokens', methods=['GET'])
async def get_tokens():
    """Return the rotating SSO token status map; requires the API key."""
    auth_token = request.headers.get('Authorization', '').replace('Bearer ', '')

    # Guard clauses: custom-SSO mode has no pool to report; others need auth.
    if CONFIG["API"]["IS_CUSTOM_SSO"]:
        return jsonify({"error": '自定义的SSO令牌模式无法获取轮询sso令牌状态'}), 403
    if auth_token != CONFIG["API"]["API_KEY"]:
        return jsonify({"error": 'Unauthorized'}), 401

    return jsonify(token_manager.get_token_status_map())
 
 
 
 
 
 
 
951
 
952
@app.route('/add/token', methods=['POST'])
async def add_token():
    """Register a new SSO token with the rotating token manager."""
    auth_token = request.headers.get('Authorization', '').replace('Bearer ', '')

    # Guard clauses: not available in custom-SSO mode; API key required.
    if CONFIG["API"]["IS_CUSTOM_SSO"]:
        return jsonify({"error": '自定义的SSO令牌模式无法添加sso令牌'}), 403
    if auth_token != CONFIG["API"]["API_KEY"]:
        return jsonify({"error": 'Unauthorized'}), 401

    try:
        payload = await request.get_json()
        sso = payload.get('sso')
        if not sso:
            return jsonify({"error": 'SSO令牌不能为空'}), 400

        await token_manager.add_token(f"sso-rw={sso};sso={sso}")
        # Echo back this SSO's status entry (empty dict if not yet tracked).
        return jsonify(token_manager.get_token_status_map().get(sso, {}))
    except Exception as error:
        logger.error(error, "Server")
        return jsonify({"error": '添加sso令牌失败'}), 500
972
+
973
@app.route('/delete/token', methods=['POST'])
async def delete_token():
    """Remove an SSO token from the rotating token manager."""
    auth_token = request.headers.get('Authorization', '').replace('Bearer ', '')

    # Guard clauses: not available in custom-SSO mode; API key required.
    if CONFIG["API"]["IS_CUSTOM_SSO"]:
        return jsonify({"error": '自定义的SSO令牌模式无法删除sso令牌'}), 403
    if auth_token != CONFIG["API"]["API_KEY"]:
        return jsonify({"error": 'Unauthorized'}), 401

    try:
        payload = await request.get_json()
        sso = payload.get('sso')
        if not sso:
            return jsonify({"error": 'SSO令牌不能为空'}), 400

        removed = await token_manager.delete_token(f"sso-rw={sso};sso={sso}")
        if removed:
            return jsonify({"message": '删除sso令牌成功'})
        return jsonify({"error": '删除sso令牌失败'}), 500
    except Exception as error:
        logger.error(error, "Server")
        return jsonify({"error": '删除sso令牌失败'}), 500
996
+
997
@app.route('/v1/chat/completions', methods=['POST'])
async def chat_completions():
    """OpenAI-compatible chat endpoint backed by Grok's app-chat API.

    Authenticates via the API key (or, in custom-SSO mode, treats the bearer
    token as the SSO cookie), then retries the upstream request across the
    token pool up to RETRY.MAX_ATTEMPTS, dropping tokens that fail.
    """
    try:
        data = await request.get_json()
        auth_token = request.headers.get('Authorization', '').replace('Bearer ', '')

        if auth_token:
            if CONFIG["API"]["IS_CUSTOM_SSO"]:
                # Custom-SSO mode: the caller's bearer token IS the SSO cookie.
                await token_manager.set_token(f"sso-rw={auth_token};sso={auth_token}")
            elif auth_token != CONFIG["API"]["API_KEY"]:
                return jsonify({"error": "Unauthorized"}), 401
        else:
            return jsonify({"error": "Unauthorized"}), 401

        model = data.get("model")
        stream = data.get("stream", False)
        retry_count = 0

        try:
            grok_client = GrokApiClient(model)
            request_payload = await grok_client.prepare_chat_request(data)

            while retry_count < CONFIG["RETRY"]["MAX_ATTEMPTS"]:
                retry_count += 1
                logger.info(f"开始请求(第{retry_count}次尝试)", "Server")

                token = token_manager.get_next_token_for_model(model)
                if not token:
                    logger.error(f"没有可用的{model}模型令牌", "Server")
                    if retry_count == CONFIG["RETRY"]["MAX_ATTEMPTS"]:
                        raise ValueError(f"没有可用的{model}模型令牌")
                    continue

                # Fresh cloudscraper session per attempt.
                scraper = cloudscraper.create_scraper()

                try:
                    headers = {
                        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
                        "Connection": "keep-alive",
                        "Accept": "text/event-stream",
                        "Content-Type": "text/plain;charset=UTF-8",
                        "Cookie": token,
                        "baggage": "sentry-public_key=b311e0f2690c81f25e2c4cf6d4f7ce1c"
                    }
                    # NOTE(review): logs the raw cookie token — consider redacting.
                    logger.info(f"使用令牌: {token}", "Server")

                    response = await Utils.run_in_executor(
                        scraper.post,
                        f"{CONFIG['API']['BASE_URL']}/rest/app-chat/conversations/new",
                        headers=headers,
                        data=json.dumps(request_payload),
                        stream=True
                    )

                    if response.status_code == 200:
                        logger.info("请求成功", "Server")

                        if stream:
                            return Response(
                                stream_response_generator(response, model),
                                content_type='text/event-stream',
                                headers={
                                    'Cache-Control': 'no-cache',
                                    'Connection': 'keep-alive'
                                }
                            )
                        else:
                            result = await handle_normal_response(response, model)
                            return jsonify(result)
                    else:
                        # Non-200: drop this token from the pool and retry.
                        logger.error(f"请求失败: 状态码 {response.status_code}", "Server")
                        token_manager.remove_token_from_model(model, token)

                except Exception as e:
                    logger.error(f"请求异常: {str(e)}", "Server")
                    token_manager.remove_token_from_model(model, token)

            raise ValueError("请求失败,已达到最大重试次数")

        except Exception as e:
            logger.error(e, "ChatAPI")
            return jsonify({
                "error": {
                    "message": str(e),
                    "type": "server_error"
                }
            }), 500

    except Exception as e:
        logger.error(e, "ChatAPI")
        return jsonify({
            "error": {
                "message": str(e),
                "type": "server_error"
            }
        }), 500
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1093
 
1094
@app.route('/', methods=['GET'])
async def index():
    """Plain-text health-check endpoint."""
    return "api运行正常"
1097
 
1098
if __name__ == "__main__":
    # The original file had TWO "__main__" blocks: the first called the
    # blocking app.run(...), so the hypercorn block below it never executed.
    # They are merged here into a single hypercorn-based entry point.

    # Load SSO tokens once before serving any traffic.
    asyncio.run(initialize_tokens())

    # Port: the PORT environment variable overrides the configured default.
    port = int(os.getenv("PORT", CONFIG["SERVER"]["PORT"]))

    # Serve with hypercorn (async-native ASGI server). The reloader is left
    # off: it is a development feature and restarts would drop loaded tokens.
    from hypercorn.asyncio import serve
    from hypercorn.config import Config as HyperConfig

    config = HyperConfig()
    config.bind = [f"0.0.0.0:{port}"]

    logger.info(f"服务器正在启动,端口:{port}", "Server")
    asyncio.run(serve(app, config))
 
1122