Niansuh committed on
Commit
483f9a9
·
verified ·
1 Parent(s): 67de47e

Upload 11 files

Browse files
Files changed (8) hide show
  1. api/app.py +5 -12
  2. api/config.py +36 -254
  3. api/logger.py +20 -56
  4. api/models.py +16 -16
  5. api/routes.py +62 -61
  6. api/utils.py +158 -241
  7. main.py +1 -1
  8. requirements.txt +7 -6
api/app.py CHANGED
@@ -2,32 +2,25 @@ from fastapi import FastAPI, Request
2
  from starlette.middleware.cors import CORSMiddleware
3
  from fastapi.responses import JSONResponse
4
  from api.logger import setup_logger
5
- from api.routes import router
6
-
7
 
8
  logger = setup_logger(__name__)
9
 
10
  def create_app():
11
- app = FastAPI(
12
- title="NiansuhAI API Gateway",
13
- docs_url=None, # Disable Swagger UI
14
- redoc_url=None, # Disable ReDoc
15
- openapi_url=None, # Disable OpenAPI schema
16
- )
17
 
18
- # CORS settings
19
  app.add_middleware(
20
  CORSMiddleware,
21
- allow_origins=["*"], # Adjust as needed for security
22
  allow_credentials=True,
23
  allow_methods=["*"],
24
  allow_headers=["*"],
25
  )
26
 
27
- # Include routes
28
  app.include_router(router)
29
 
30
- # Global exception handler for better error reporting
31
  @app.exception_handler(Exception)
32
  async def global_exception_handler(request: Request, exc: Exception):
33
  logger.error(f"An error occurred: {str(exc)}")
 
2
  from starlette.middleware.cors import CORSMiddleware
3
  from fastapi.responses import JSONResponse
4
  from api.logger import setup_logger
5
+ from api.routes import router # 导入router而不是单独的函数
 
6
 
7
  logger = setup_logger(__name__)
8
 
9
  def create_app():
10
+ app = FastAPI()
 
 
 
 
 
11
 
12
+ # 配置CORS
13
  app.add_middleware(
14
  CORSMiddleware,
15
+ allow_origins=["*"],
16
  allow_credentials=True,
17
  allow_methods=["*"],
18
  allow_headers=["*"],
19
  )
20
 
21
+ # 添加路由
22
  app.include_router(router)
23
 
 
24
  @app.exception_handler(Exception)
25
  async def global_exception_handler(request: Request, exc: Exception):
26
  logger.error(f"An error occurred: {str(exc)}")
api/config.py CHANGED
@@ -1,254 +1,36 @@
1
- # api/config.py
2
-
3
- import os
4
- from dotenv import load_dotenv
5
-
6
- load_dotenv()
7
-
8
- # Base URL and Common Headers
9
- BASE_URL = "https://www.blackbox.ai"
10
- common_headers = {
11
- 'accept': '*/*',
12
- 'accept-language': 'en-US,en;q=0.9',
13
- 'cache-control': 'no-cache',
14
- 'origin': BASE_URL,
15
- 'pragma': 'no-cache',
16
- 'priority': 'u=1, i',
17
- 'sec-ch-ua': '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
18
- 'sec-ch-ua-mobile': '?0',
19
- 'sec-ch-ua-platform': '"Windows"',
20
- 'sec-fetch-dest': 'empty',
21
- 'sec-fetch-mode': 'cors',
22
- 'sec-fetch-site': 'same-origin',
23
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
24
- 'AppleWebKit/537.36 (KHTML, like Gecko) '
25
- 'Chrome/130.0.0.0 Safari/537.36',
26
- }
27
-
28
- # Header Configurations for Specific API Calls
29
- def get_headers_api_chat(referer_url):
30
- return {**common_headers, 'Content-Type': 'application/json', 'Referer': referer_url}
31
-
32
- def get_headers_chat(chat_url, next_action, next_router_state_tree):
33
- return {
34
- **common_headers,
35
- 'Accept': 'text/x-component',
36
- 'Content-Type': 'text/plain;charset=UTF-8',
37
- 'Referer': chat_url,
38
- 'next-action': next_action,
39
- 'next-router-state-tree': next_router_state_tree,
40
- 'next-url': '/',
41
- }
42
-
43
- APP_SECRET = os.getenv("APP_SECRET")
44
-
45
- # Allowed Models
46
- ALLOWED_MODELS = [
47
- {"id": "blackboxai", "name": "blackboxai"},
48
- {"id": "blackboxai-pro", "name": "blackboxai-pro"},
49
- {"id": "flux", "name": "flux"},
50
- {"id": "llama-3.1-8b", "name": "llama-3.1-8b"},
51
- {"id": "llama-3.1-70b", "name": "llama-3.1-70b"},
52
- {"id": "llama-3.1-405b", "name": "llama-3.1-405"},
53
- {"id": "gpt-4o", "name": "gpt-4o"},
54
- {"id": "gemini-pro", "name": "gemini-pro"},
55
- {"id": "gemini-1.5-flash", "name": "gemini-1.5-flash"},
56
- {"id": "claude-sonnet-3.5", "name": "claude-sonnet-3.5"},
57
- {"id": "PythonAgent", "name": "PythonAgent"},
58
- {"id": "JavaAgent", "name": "JavaAgent"},
59
- {"id": "JavaScriptAgent", "name": "JavaScriptAgent"},
60
- {"id": "HTMLAgent", "name": "HTMLAgent"},
61
- {"id": "GoogleCloudAgent", "name": "GoogleCloudAgent"},
62
- {"id": "AndroidDeveloper", "name": "AndroidDeveloper"},
63
- {"id": "SwiftDeveloper", "name": "SwiftDeveloper"},
64
- {"id": "Next.jsAgent", "name": "Next.jsAgent"},
65
- {"id": "MongoDBAgent", "name": "MongoDBAgent"},
66
- {"id": "PyTorchAgent", "name": "PyTorchAgent"},
67
- {"id": "ReactAgent", "name": "ReactAgent"},
68
- {"id": "XcodeAgent", "name": "XcodeAgent"},
69
- {"id": "AngularJSAgent", "name": "AngularJSAgent"},
70
- {"id": "HerokuAgent", "name": "HerokuAgent"},
71
- {"id": "GodotAgent", "name": "GodotAgent"},
72
- {"id": "GoAgent", "name": "GoAgent"},
73
- {"id": "GitlabAgent", "name": "GitlabAgent"},
74
- {"id": "GitAgent", "name": "GitAgent"},
75
- {"id": "RepoMap", "name": "RepoMap"},
76
- {"id": "gemini-1.5-pro-latest", "name": "gemini-pro"},
77
- {"id": "gemini-1.5-pro", "name": "gemini-1.5-pro"},
78
- {"id": "claude-3-5-sonnet-20240620", "name": "claude-sonnet-3.5"},
79
- {"id": "claude-3-5-sonnet", "name": "claude-sonnet-3.5"},
80
- {"id": "Niansuh", "name": "Niansuh"},
81
- {"id": "o1-preview", "name": "o1-preview"},
82
-
83
- # Added New Agents
84
- {"id": "FlaskAgent", "name": "FlaskAgent"},
85
- {"id": "FirebaseAgent", "name": "FirebaseAgent"},
86
- {"id": "FastAPIAgent", "name": "FastAPIAgent"},
87
- {"id": "ErlangAgent", "name": "ErlangAgent"},
88
- {"id": "ElectronAgent", "name": "ElectronAgent"},
89
- {"id": "DockerAgent", "name": "DockerAgent"},
90
- {"id": "DigitalOceanAgent", "name": "DigitalOceanAgent"},
91
- {"id": "BitbucketAgent", "name": "BitbucketAgent"},
92
- {"id": "AzureAgent", "name": "AzureAgent"},
93
- {"id": "FlutterAgent", "name": "FlutterAgent"},
94
- {"id": "YoutubeAgent", "name": "YoutubeAgent"},
95
- {"id": "builderAgent", "name": "builderAgent"},
96
- ]
97
-
98
- # Model Mapping: Aliases to Primary Models
99
- MODEL_MAPPING = {
100
- "blackboxai": "blackboxai",
101
- "blackboxai-pro": "blackboxai-pro",
102
- "flux": "flux",
103
- "ImageGeneration": "flux",
104
- "llama-3.1-8b": "llama-3.1-8b",
105
- "llama-3.1-70b": "llama-3.1-70b",
106
- "llama-3.1-405b": "llama-3.1-405",
107
- "gpt-4o": "gpt-4o",
108
- "gemini-pro": "gemini-pro",
109
- "gemini-1.5-flash": "gemini-1.5-flash",
110
- "claude-sonnet-3.5": "claude-sonnet-3.5",
111
- "PythonAgent": "PythonAgent",
112
- "JavaAgent": "JavaAgent",
113
- "JavaScriptAgent": "JavaScriptAgent",
114
- "HTMLAgent": "HTMLAgent",
115
- "GoogleCloudAgent": "GoogleCloudAgent",
116
- "AndroidDeveloper": "AndroidDeveloper",
117
- "SwiftDeveloper": "SwiftDeveloper",
118
- "Next.jsAgent": "Next.jsAgent",
119
- "MongoDBAgent": "MongoDBAgent",
120
- "PyTorchAgent": "PyTorchAgent",
121
- "ReactAgent": "ReactAgent",
122
- "XcodeAgent": "XcodeAgent",
123
- "AngularJSAgent": "AngularJSAgent",
124
- "HerokuAgent": "HerokuAgent",
125
- "GodotAgent": "GodotAgent",
126
- "GoAgent": "GoAgent",
127
- "GitlabAgent": "GitlabAgent",
128
- "GitAgent": "GitAgent",
129
- "RepoMap": "RepoMap",
130
- # Additional mappings
131
- "gemini-flash": "gemini-1.5-flash",
132
- "claude-3.5-sonnet": "claude-sonnet-3.5",
133
- "flux": "flux",
134
- "gemini-1.5-pro-latest": "gemini-pro",
135
- "gemini-1.5-pro": "gemini-1.5-pro",
136
- "claude-3-5-sonnet-20240620": "claude-sonnet-3.5",
137
- "claude-3-5-sonnet": "claude-sonnet-3.5",
138
- "Niansuh": "Niansuh",
139
- "o1-preview": "o1-preview",
140
-
141
- # Added New Agents
142
- "FlaskAgent": "FlaskAgent",
143
- "FirebaseAgent": "FirebaseAgent",
144
- "FastAPIAgent": "FastAPIAgent",
145
- "ErlangAgent": "ErlangAgent",
146
- "ElectronAgent": "ElectronAgent",
147
- "DockerAgent": "DockerAgent",
148
- "DigitalOceanAgent": "DigitalOceanAgent",
149
- "BitbucketAgent": "BitbucketAgent",
150
- "AzureAgent": "AzureAgent",
151
- "FlutterAgent": "FlutterAgent",
152
- "YoutubeAgent": "YoutubeAgent",
153
- "builderAgent": "builderAgent",
154
- }
155
-
156
- # Agent Modes
157
- AGENT_MODE = {
158
- 'flux': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "flux"},
159
- 'Niansuh': {'mode': True, 'id': "NiansuhAIk1HgESy", 'name': "Niansuh"},
160
- 'o1-preview': {'mode': True, 'id': "o1Dst8La8", 'name': "o1-preview"},
161
- }
162
-
163
- # Trending Agent Modes
164
- TRENDING_AGENT_MODE = {
165
- "blackboxai": {},
166
- "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
167
- "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
168
- 'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
169
- 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405"},
170
- 'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
171
- 'PythonAgent': {'mode': True, 'id': "Python Agent"},
172
- 'JavaAgent': {'mode': True, 'id': "Java Agent"},
173
- 'JavaScriptAgent': {'mode': True, 'id': "JavaScript Agent"},
174
- 'HTMLAgent': {'mode': True, 'id': "HTML Agent"},
175
- 'GoogleCloudAgent': {'mode': True, 'id': "Google Cloud Agent"},
176
- 'AndroidDeveloper': {'mode': True, 'id': "Android Developer"},
177
- 'SwiftDeveloper': {'mode': True, 'id': "Swift Developer"},
178
- 'Next.jsAgent': {'mode': True, 'id': "Next.js Agent"},
179
- 'MongoDBAgent': {'mode': True, 'id': "MongoDB Agent"},
180
- 'PyTorchAgent': {'mode': True, 'id': "PyTorch Agent"},
181
- 'ReactAgent': {'mode': True, 'id': "React Agent"},
182
- 'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
183
- 'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
184
- 'HerokuAgent': {'mode': True, 'id': "HerokuAgent"},
185
- 'GodotAgent': {'mode': True, 'id': "GodotAgent"},
186
- 'GoAgent': {'mode': True, 'id': "GoAgent"},
187
- 'GitlabAgent': {'mode': True, 'id': "GitlabAgent"},
188
- 'GitAgent': {'mode': True, 'id': "GitAgent"},
189
- 'RepoMap': {'mode': True, 'id': "repomap"},
190
-
191
- # Added New Agents
192
- 'FlaskAgent': {'mode': True, 'id': "FlaskAgent"},
193
- 'FirebaseAgent': {'mode': True, 'id': "FirebaseAgent"},
194
- 'FastAPIAgent': {'mode': True, 'id': "FastAPIAgent"},
195
- 'ErlangAgent': {'mode': True, 'id': "ErlangAgent"},
196
- 'ElectronAgent': {'mode': True, 'id': "ElectronAgent"},
197
- 'DockerAgent': {'mode': True, 'id': "DockerAgent"},
198
- 'DigitalOceanAgent': {'mode': True, 'id': "DigitalOceanAgent"},
199
- 'BitbucketAgent': {'mode': True, 'id': "BitbucketAgent"},
200
- 'AzureAgent': {'mode': True, 'id': "AzureAgent"},
201
- 'FlutterAgent': {'mode': True, 'id': "FlutterAgent"},
202
- 'YoutubeAgent': {'mode': True, 'id': "YoutubeAgent"},
203
- 'builderAgent': {'mode': True, 'id': "builderAgent"},
204
- }
205
-
206
- # Model Prefixes
207
- MODEL_PREFIXES = {
208
- 'gpt-4o': '@GPT-4o',
209
- 'gemini-pro': '@Gemini-PRO',
210
- 'PythonAgent': '@Python Agent',
211
- 'JavaAgent': '@Java Agent',
212
- 'JavaScriptAgent': '@JavaScript Agent',
213
- 'HTMLAgent': '@HTML Agent',
214
- 'GoogleCloudAgent': '@Google Cloud Agent',
215
- 'AndroidDeveloper': '@Android Developer',
216
- 'SwiftDeveloper': '@Swift Developer',
217
- 'Next.jsAgent': '@Next.js Agent',
218
- 'MongoDBAgent': '@MongoDB Agent',
219
- 'PyTorchAgent': '@PyTorch Agent',
220
- 'ReactAgent': '@React Agent',
221
- 'XcodeAgent': '@Xcode Agent',
222
- 'AngularJSAgent': '@AngularJS Agent',
223
- 'HerokuAgent': '@Heroku Agent',
224
- 'GodotAgent': '@Godot Agent',
225
- 'GoAgent': '@Go Agent',
226
- 'GitlabAgent': '@Gitlab Agent',
227
- 'GitAgent': '@GitAgent',
228
- 'blackboxai-pro': '@BLACKBOXAI-PRO',
229
- 'flux': '@Image Generation',
230
- # Add any additional prefixes if necessary
231
-
232
- # Added New Agents
233
- 'FlaskAgent': '@Flask Agent',
234
- 'FirebaseAgent': '@Firebase Agent',
235
- 'FastAPIAgent': '@FastAPI Agent',
236
- 'ErlangAgent': '@Erlang Agent',
237
- 'ElectronAgent': '@Electron Agent',
238
- 'DockerAgent': '@Docker Agent',
239
- 'DigitalOceanAgent': '@DigitalOcean Agent',
240
- 'BitbucketAgent': '@Bitbucket Agent',
241
- 'AzureAgent': '@Azure Agent',
242
- 'FlutterAgent': '@Flutter Agent',
243
- 'YoutubeAgent': '@Youtube Agent',
244
- 'builderAgent': '@builder Agent',
245
- }
246
-
247
- # Model Referers
248
- MODEL_REFERERS = {
249
- "blackboxai": "/?model=blackboxai",
250
- "gpt-4o": "/?model=gpt-4o",
251
- "gemini-pro": "/?model=gemini-pro",
252
- "claude-sonnet-3.5": "/?model=claude-sonnet-3.5",
253
- # Add any additional referers if necessary
254
- }
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+
4
+ load_dotenv()
5
+
6
+ BASE_URL = "https://www.blackbox.ai"
7
+ headers = {
8
+ 'accept': '*/*',
9
+ 'accept-language': 'zh-CN,zh;q=0.9',
10
+ 'origin': 'https://www.blackbox.ai',
11
+ 'priority': 'u=1, i',
12
+ 'sec-ch-ua': '"Google Chrome";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
13
+ 'sec-ch-ua-mobile': '?0',
14
+ 'sec-ch-ua-platform': '"Windows"',
15
+ 'sec-fetch-dest': 'empty',
16
+ 'sec-fetch-mode': 'cors',
17
+ 'sec-fetch-site': 'same-origin',
18
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
19
+ }
20
+ APP_SECRET = os.getenv("APP_SECRET")
21
+ ALLOWED_MODELS = [
22
+ {"id": "gpt-4o", "name": "gpt-4o"},
23
+ {"id": "gemini-1.5-pro-latest", "name": "gemini-pro"},
24
+ {"id": "gemini-1.5-pro", "name": "gemini-pro"},
25
+ {"id": "gemini-pro", "name": "gemini-pro"},
26
+ {"id": "claude-3-5-sonnet-20240620", "name": "claude-sonnet-3.5"},
27
+ {"id": "claude-3-5-sonnet", "name": "claude-sonnet-3.5"},
28
+ ]
29
+ MODEL_MAPPING = {
30
+ "gpt-4o":"gpt-4o",
31
+ "gemini-1.5-pro-latest": "gemini-pro",
32
+ "gemini-1.5-pro":"gemini-1.5-pro",
33
+ "gemini-pro":"gemini-pro",
34
+ "claude-3-5-sonnet-20240620":"claude-sonnet-3.5",
35
+ "claude-3-5-sonnet":"claude-sonnet-3.5",
36
+ }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
api/logger.py CHANGED
@@ -1,56 +1,20 @@
1
- # api/logger.py
2
-
3
- import logging
4
-
5
- # Setup logger with a consistent format
6
- def setup_logger(name):
7
- logger = logging.getLogger(name)
8
- if not logger.handlers:
9
- logger.setLevel(logging.INFO)
10
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
11
-
12
- # Console handler
13
- console_handler = logging.StreamHandler()
14
- console_handler.setFormatter(formatter)
15
- logger.addHandler(console_handler)
16
-
17
- # File Handler - Uncomment if you want to log errors to a file
18
- # error_file_handler = logging.FileHandler('error.log')
19
- # error_file_handler.setFormatter(formatter)
20
- # error_file_handler.setLevel(logging.ERROR)
21
- # logger.addHandler(error_file_handler)
22
-
23
- return logger
24
-
25
- logger = setup_logger(__name__)
26
-
27
- # Log functions to structure specific logs in utils.py
28
- def log_generated_chat_id_with_referer(chat_id, model, referer_url):
29
- """
30
- Log the generated Chat ID with model and referer URL if it exists.
31
- """
32
- logger.info(f"Generated Chat ID: {chat_id} - Model: {model} - URL: {referer_url}")
33
-
34
- def log_model_delay(delay_seconds, model, chat_id):
35
- """
36
- Log the delay introduced for specific models.
37
- """
38
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model '{model}' (Chat ID: {chat_id})")
39
-
40
- def log_http_error(error, chat_id):
41
- """
42
- Log HTTP errors encountered during requests.
43
- """
44
- logger.error(f"HTTP error occurred for Chat ID {chat_id}: {error}")
45
-
46
- def log_request_error(error, chat_id):
47
- """
48
- Log request errors unrelated to HTTP status.
49
- """
50
- logger.error(f"Request error occurred for Chat ID {chat_id}: {error}")
51
-
52
- def log_strip_prefix(model_prefix, content):
53
- """
54
- Log when a model prefix is stripped from the content.
55
- """
56
- logger.debug(f"Stripping prefix '{model_prefix}' from content.")
 
1
+ import logging
2
+
3
+ def setup_logger(name):
4
+ logger = logging.getLogger(name)
5
+ if not logger.handlers:
6
+ logger.setLevel(logging.INFO)
7
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
8
+
9
+ # 控制台处理器
10
+ console_handler = logging.StreamHandler()
11
+ console_handler.setFormatter(formatter)
12
+ logger.addHandler(console_handler)
13
+
14
+ # 文件处理器 - 错误级别
15
+ # error_file_handler = logging.FileHandler('error.log')
16
+ # error_file_handler.setFormatter(formatter)
17
+ # error_file_handler.setLevel(logging.ERROR)
18
+ # logger.addHandler(error_file_handler)
19
+
20
+ return logger
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
api/models.py CHANGED
@@ -1,16 +1,16 @@
1
- # api/models.py
2
-
3
- from typing import List, Optional, Union
4
- from pydantic import BaseModel
5
-
6
- class Message(BaseModel):
7
- role: str
8
- content: Union[str, List[dict]] # Allow content to be a string or a list of dictionaries for images
9
-
10
- class ChatRequest(BaseModel):
11
- model: str
12
- messages: List[Message]
13
- stream: Optional[bool] = False
14
- temperature: Optional[float] = 0.5
15
- top_p: Optional[float] = 0.9
16
- max_tokens: Optional[int] = 1024
 
1
+ from typing import List, Optional
2
+ from pydantic import BaseModel
3
+
4
+
5
+ class Message(BaseModel):
6
+ role: str
7
+ content: str | list
8
+
9
+
10
+ class ChatRequest(BaseModel):
11
+ model: str
12
+ messages: List[Message]
13
+ stream: Optional[bool] = False
14
+ temperature: Optional[float] = 0.7
15
+ top_p: Optional[float] = 0.9
16
+ max_tokens: Optional[int] = 8192
api/routes.py CHANGED
@@ -1,61 +1,62 @@
1
- # api/routes.py
2
-
3
- import json
4
- from fastapi import APIRouter, Depends, HTTPException, Request, Response
5
- from fastapi.responses import StreamingResponse
6
- from api.auth import verify_app_secret
7
- from api.config import ALLOWED_MODELS
8
- from api.models import ChatRequest
9
- from api.utils import process_non_streaming_response, process_streaming_response
10
- from api.logger import setup_logger
11
-
12
- logger = setup_logger(__name__)
13
-
14
- router = APIRouter()
15
-
16
- @router.options("/v1/chat/completions")
17
- @router.options("/api/v1/chat/completions")
18
- async def chat_completions_options():
19
- return Response(
20
- status_code=200,
21
- headers={
22
- "Access-Control-Allow-Origin": "*",
23
- "Access-Control-Allow-Methods": "POST, OPTIONS",
24
- "Access-Control-Allow-Headers": "Content-Type, Authorization",
25
- },
26
- )
27
-
28
- @router.get("/v1/models")
29
- @router.get("/api/v1/models")
30
- async def list_models():
31
- return {"object": "list", "data": ALLOWED_MODELS}
32
-
33
- @router.post("/v1/chat/completions")
34
- @router.post("/api/v1/chat/completions")
35
- async def chat_completions(
36
- request: ChatRequest, app_secret: str = Depends(verify_app_secret)
37
- ):
38
- logger.info("Entering chat_completions route")
39
- logger.info(f"Processing chat completion request for model: {request.model}")
40
-
41
- if request.model not in [model["id"] for model in ALLOWED_MODELS]:
42
- raise HTTPException(
43
- status_code=400,
44
- detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(model['id'] for model in ALLOWED_MODELS)}",
45
- )
46
-
47
- if request.stream:
48
- logger.info("Streaming response")
49
- return StreamingResponse(process_streaming_response(request), media_type="text/event-stream")
50
- else:
51
- logger.info("Non-streaming response")
52
- return await process_non_streaming_response(request)
53
-
54
- @router.route('/')
55
- @router.route('/healthz')
56
- @router.route('/ready')
57
- @router.route('/alive')
58
- @router.route('/status')
59
- @router.get("/health")
60
- def health_check(request: Request):
61
- return Response(content=json.dumps({"status": "ok"}), media_type="application/json")
 
 
1
+ import json
2
+ from fastapi import APIRouter, Depends, HTTPException, Request, Response
3
+ from fastapi.responses import StreamingResponse
4
+ from api.auth import verify_app_secret
5
+ from api.config import ALLOWED_MODELS
6
+ from api.models import ChatRequest
7
+ from api.utils import process_non_streaming_response, process_streaming_response
8
+ from api.logger import setup_logger
9
+
10
+ logger = setup_logger(__name__)
11
+
12
+ router = APIRouter()
13
+
14
+ @router.options("/v1/chat/completions")
15
+ @router.options("/api/v1/chat/completions")
16
+ async def chat_completions_options():
17
+ return Response(
18
+ status_code=200,
19
+ headers={
20
+ "Access-Control-Allow-Origin": "*",
21
+ "Access-Control-Allow-Methods": "POST, OPTIONS",
22
+ "Access-Control-Allow-Headers": "Content-Type, Authorization",
23
+ },
24
+ )
25
+
26
+ @router.get("/v1/models")
27
+ @router.get("/api/v1/models")
28
+ async def list_models():
29
+ return {"object": "list", "data": ALLOWED_MODELS}
30
+
31
+ @router.post("/v1/chat/completions")
32
+ @router.post("/api/v1/chat/completions")
33
+ async def chat_completions(
34
+ request: ChatRequest, app_secret: str = Depends(verify_app_secret)
35
+ ):
36
+ logger.info("Entering chat_completions route")
37
+ logger.info(f"Received request: {request}")
38
+ logger.info(f"App secret: {app_secret}")
39
+ logger.info(f"Received chat completion request for model: {request.model}")
40
+
41
+ if request.model not in [model["id"] for model in ALLOWED_MODELS]:
42
+ raise HTTPException(
43
+ status_code=400,
44
+ detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(model['id'] for model in ALLOWED_MODELS)}",
45
+ )
46
+
47
+ if request.stream:
48
+ logger.info("Streaming response")
49
+ return StreamingResponse(process_streaming_response(request), media_type="text/event-stream")
50
+ else:
51
+ logger.info("Non-streaming response")
52
+ return await process_non_streaming_response(request)
53
+
54
+
55
+ @router.route('/')
56
+ @router.route('/healthz')
57
+ @router.route('/ready')
58
+ @router.route('/alive')
59
+ @router.route('/status')
60
+ @router.get("/health")
61
+ def health_check(request: Request):
62
+ return Response(content=json.dumps({"status": "ok"}), media_type="application/json")
api/utils.py CHANGED
@@ -1,241 +1,158 @@
1
- # api/utils.py
2
-
3
- from datetime import datetime
4
- import json
5
- import uuid
6
- import asyncio
7
- import random
8
- import string
9
- from typing import Any, Dict, Optional, List, Union
10
-
11
- import httpx
12
- from fastapi import HTTPException
13
- from api.config import (
14
- MODEL_MAPPING,
15
- get_headers_api_chat,
16
- get_headers_chat,
17
- BASE_URL,
18
- AGENT_MODE,
19
- TRENDING_AGENT_MODE,
20
- MODEL_PREFIXES,
21
- MODEL_REFERERS
22
- )
23
- from api.models import ChatRequest, Message
24
- from api.logger import setup_logger
25
-
26
- logger = setup_logger(__name__)
27
-
28
- # Helper function to create a random alphanumeric chat ID
29
- def generate_chat_id(length: int = 7) -> str:
30
- characters = string.ascii_letters + string.digits
31
- return ''.join(random.choices(characters, k=length))
32
-
33
- # Helper function to create chat completion data
34
- def create_chat_completion_data(
35
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
36
- ) -> Dict[str, Any]:
37
- return {
38
- "id": f"chatcmpl-{uuid.uuid4()}",
39
- "object": "chat.completion.chunk",
40
- "created": timestamp,
41
- "model": model,
42
- "choices": [
43
- {
44
- "index": 0,
45
- "delta": {"content": content, "role": "assistant"},
46
- "finish_reason": finish_reason,
47
- }
48
- ],
49
- "usage": None,
50
- }
51
-
52
- # Function to convert message to dictionary format, ensuring base64 data and optional model prefix
53
- def message_to_dict(message: Message, model_prefix: Optional[str] = None) -> Dict[str, Any]:
54
- if isinstance(message.content, str):
55
- content = message.content
56
- elif isinstance(message.content, list) and len(message.content) > 0:
57
- content = message.content[0].get("text", "")
58
- else:
59
- content = ""
60
-
61
- if model_prefix:
62
- content = f"{model_prefix} {content}"
63
-
64
- # Handle image content
65
- if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
66
- # Ensure base64 images are always included for all models
67
- return {
68
- "role": message.role,
69
- "content": content,
70
- "data": {
71
- "imageBase64": message.content[1]["image_url"]["url"],
72
- "fileText": "",
73
- "title": "snapshot",
74
- },
75
- }
76
- return {"role": message.role, "content": content}
77
-
78
- # Function to strip model prefix from content if present
79
- def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
80
- """Remove the model prefix from the response content if present."""
81
- if model_prefix and content.startswith(model_prefix):
82
- logger.debug(f"Stripping prefix '{model_prefix}' from content.")
83
- return content[len(model_prefix):].strip()
84
- return content
85
-
86
- # Function to get the correct referer URL for logging
87
- def get_referer_url(chat_id: str, model: str) -> str:
88
- """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
89
- if model in MODEL_REFERERS:
90
- return f"{BASE_URL}/chat/{chat_id}?model={model}"
91
- return BASE_URL
92
-
93
- # Process streaming response with headers from config.py
94
- async def process_streaming_response(request: ChatRequest):
95
- chat_id = generate_chat_id()
96
- referer_url = get_referer_url(chat_id, request.model)
97
- logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
98
-
99
- agent_mode = AGENT_MODE.get(request.model, {})
100
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
101
- model_prefix = MODEL_PREFIXES.get(request.model, "")
102
-
103
- headers_api_chat = get_headers_api_chat(referer_url)
104
-
105
- if request.model == 'o1-preview':
106
- delay_seconds = random.randint(1, 60)
107
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
108
- await asyncio.sleep(delay_seconds)
109
-
110
- json_data = {
111
- "agentMode": agent_mode,
112
- "clickedAnswer2": False,
113
- "clickedAnswer3": False,
114
- "clickedForceWebSearch": False,
115
- "codeModelMode": True,
116
- "githubToken": None,
117
- "id": chat_id,
118
- "isChromeExt": False,
119
- "isMicMode": False,
120
- "maxTokens": request.max_tokens,
121
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
122
- "mobileClient": False,
123
- "playgroundTemperature": request.temperature,
124
- "playgroundTopP": request.top_p,
125
- "previewToken": None,
126
- "trendingAgentMode": trending_agent_mode,
127
- "userId": None,
128
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
129
- "userSystemPrompt": None,
130
- "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
131
- "visitFromDelta": False,
132
- }
133
-
134
- async with httpx.AsyncClient() as client:
135
- try:
136
- async with client.stream(
137
- "POST",
138
- f"{BASE_URL}/api/chat",
139
- headers=headers_api_chat,
140
- json=json_data,
141
- timeout=100,
142
- ) as response:
143
- response.raise_for_status()
144
- async for line in response.aiter_lines():
145
- timestamp = int(datetime.now().timestamp())
146
- if line:
147
- content = line
148
- if content.startswith("$@$v=undefined-rv1$@$"):
149
- content = content[21:]
150
- cleaned_content = strip_model_prefix(content, model_prefix)
151
- yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
152
-
153
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
154
- yield "data: [DONE]\n\n"
155
- except httpx.HTTPStatusError as e:
156
- logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
157
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
158
- except httpx.RequestError as e:
159
- logger.error(f"Request error occurred for Chat ID {chat_id}: {e}")
160
- raise HTTPException(status_code=500, detail=str(e))
161
-
162
- # Process non-streaming response with headers from config.py
163
- async def process_non_streaming_response(request: ChatRequest):
164
- chat_id = generate_chat_id()
165
- referer_url = get_referer_url(chat_id, request.model)
166
- logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
167
-
168
- agent_mode = AGENT_MODE.get(request.model, {})
169
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
170
- model_prefix = MODEL_PREFIXES.get(request.model, "")
171
-
172
- headers_api_chat = get_headers_api_chat(referer_url)
173
- headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
174
-
175
- if request.model == 'o1-preview':
176
- delay_seconds = random.randint(20, 60)
177
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
178
- await asyncio.sleep(delay_seconds)
179
-
180
- json_data = {
181
- "agentMode": agent_mode,
182
- "clickedAnswer2": False,
183
- "clickedAnswer3": False,
184
- "clickedForceWebSearch": False,
185
- "codeModelMode": True,
186
- "githubToken": None,
187
- "id": chat_id,
188
- "isChromeExt": False,
189
- "isMicMode": False,
190
- "maxTokens": request.max_tokens,
191
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
192
- "mobileClient": False,
193
- "playgroundTemperature": request.temperature,
194
- "playgroundTopP": request.top_p,
195
- "previewToken": None,
196
- "trendingAgentMode": trending_agent_mode,
197
- "userId": None,
198
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
199
- "userSystemPrompt": None,
200
- "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
201
- "visitFromDelta": False,
202
- }
203
-
204
- full_response = ""
205
- async with httpx.AsyncClient() as client:
206
- try:
207
- async with client.stream(
208
- method="POST",
209
- url=f"{BASE_URL}/api/chat",
210
- headers=headers_api_chat,
211
- json=json_data
212
- ) as response:
213
- response.raise_for_status()
214
- async for chunk in response.aiter_text():
215
- full_response += chunk
216
- except httpx.HTTPStatusError as e:
217
- logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
218
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
219
- except httpx.RequestError as e:
220
- logger.error(f"Request error occurred for Chat ID {chat_id}: {e}")
221
- raise HTTPException(status_code=500, detail=str(e))
222
-
223
- if full_response.startswith("$@$v=undefined-rv1$@$"):
224
- full_response = full_response[21:]
225
-
226
- cleaned_full_response = strip_model_prefix(full_response, model_prefix)
227
-
228
- return {
229
- "id": f"chatcmpl-{uuid.uuid4()}",
230
- "object": "chat.completion",
231
- "created": int(datetime.now().timestamp()),
232
- "model": request.model,
233
- "choices": [
234
- {
235
- "index": 0,
236
- "message": {"role": "assistant", "content": cleaned_full_response},
237
- "finish_reason": "stop",
238
- }
239
- ],
240
- "usage": None,
241
- }
 
1
+ from datetime import datetime
2
+ from http.client import HTTPException
3
+ import json
4
+ from typing import Any, Dict, Optional
5
+ import uuid
6
+
7
+ import httpx
8
+ from api.config import MODEL_MAPPING, headers
9
+ from fastapi import Depends, security
10
+ from fastapi.security import HTTPAuthorizationCredentials
11
+
12
+ from api.config import APP_SECRET, BASE_URL
13
+ from api.models import ChatRequest
14
+
15
+ from api.logger import setup_logger
16
+
17
+ logger = setup_logger(__name__)
18
+
19
+
20
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one OpenAI-style ``chat.completion.chunk`` payload.

    Args:
        content: Text delta for this chunk (may be empty for the final chunk).
        model: Model name echoed back to the client.
        timestamp: Unix timestamp placed in ``created``.
        finish_reason: ``None`` for intermediate chunks, ``"stop"`` at the end.

    Returns:
        A dict shaped like an OpenAI streaming chunk, with a fresh random id.
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
37
+
38
+
39
def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """FastAPI dependency: validate the bearer token against APP_SECRET.

    Returns the token on success; raises 403 on mismatch.
    """
    token = credentials.credentials
    if token == APP_SECRET:
        return token
    raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
43
+
44
+
45
def message_to_dict(message):
    """Convert one chat message into the upstream request format.

    A two-element content list is treated as a (text, image) pair and the
    image URL (base64 data URI) is carried in a ``data`` field; every other
    content shape (plain strings included) passes through unchanged.
    """
    content = message.content
    if isinstance(content, list) and len(content) == 2:
        return {
            "role": message.role,
            "content": content[0]["text"],
            "data": {
                "imageBase64": content[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshoot",
            },
        }
    return {"role": message.role, "content": content}
60
+
61
+
62
async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the upstream API as SSE events.

    Yields OpenAI-style ``chat.completion.chunk`` payloads (one ``data: ...``
    line per upstream line), then a final ``stop`` chunk and ``data: [DONE]``.

    Raises:
        HTTPException: mirroring the upstream status on HTTP errors, or 500
            on transport errors. (NOTE(review): if the error occurs after the
            first chunk was yielded, the response has already started and the
            status cannot be changed — the exception only aborts the stream.)
    """
    json_data = {
        "messages": [message_to_dict(msg) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": {},
        "trendingAgentMode": {},
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model),
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                # FIX: initialize before the loop — with an empty upstream
                # stream, `timestamp` was previously unbound when building
                # the final "stop" chunk below.
                timestamp = int(datetime.now().timestamp())
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line + "\n"
                        # Upstream sometimes prefixes output with a version
                        # marker; strip it before forwarding.
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            content = content[21:]
                        yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
112
+
113
+
114
async def process_non_streaming_response(request: ChatRequest):
    """Fetch a complete (non-streaming) chat completion from the upstream API.

    Accumulates the streamed upstream body into one string and wraps it in an
    OpenAI-style ``chat.completion`` response dict.

    Raises:
        HTTPException: mirroring the upstream status on HTTP errors, or 500
            on transport errors.
    """
    json_data = {
        "messages": [message_to_dict(msg) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": {},
        "trendingAgentMode": {},
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model),
    }
    full_response = ""
    async with httpx.AsyncClient() as client:
        # FIX: restore status checking, error handling, and a timeout —
        # without raise_for_status() an upstream error page was silently
        # returned to the caller as a successful completion.
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
    # Strip the upstream version marker, if present.
    if full_response.startswith("$@$v=undefined-rv1$@$"):
        full_response = full_response[21:]
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
main.py CHANGED
@@ -2,4 +2,4 @@ import uvicorn
2
  from api.app import app
3
 
4
  if __name__ == "__main__":
5
- uvicorn.run(app, host="0.0.0.0", port=8001)
 
2
  from api.app import app
3
 
4
  if __name__ == "__main__":
5
+ uvicorn.run(app, host="0.0.0.0", port=8001)
requirements.txt CHANGED
@@ -1,6 +1,7 @@
1
- fastapi==0.95.2
2
- httpx==0.23.3
3
- pydantic==1.10.4
4
- python-dotenv==0.21.0
5
- uvicorn==0.21.1
6
- gunicorn==20.1.0
 
 
1
+ fastapi
2
+ httpx
3
+ pydantic
4
+ pyinstaller
5
+ python-dotenv
6
+ starlette
7
+ uvicorn