Update main.py
Browse files
main.py
CHANGED
@@ -1,3 +1,170 @@
|
|
1 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
|
3 |
run_api(host="0.0.0.0", port=7860)
|
|
|
1 |
+
import logging
|
2 |
+
import json
|
3 |
+
import uvicorn
|
4 |
+
import nest_asyncio
|
5 |
+
import random
|
6 |
+
|
7 |
+
from fastapi import FastAPI, Response, Request
|
8 |
+
from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse
|
9 |
+
from fastapi.exceptions import RequestValidationError
|
10 |
+
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
|
11 |
+
from fastapi.encoders import jsonable_encoder
|
12 |
+
from pydantic import BaseModel
|
13 |
+
from typing import List, Union
|
14 |
+
|
15 |
+
import g4f
|
16 |
+
import g4f.debug
|
17 |
+
from g4f.client import Client
|
18 |
+
from g4f.typing import Messages
|
19 |
+
|
20 |
+
class ChatCompletionsConfig(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI-compatible shape).

    Field annotations are the validation contract (pydantic), so they are
    left exactly as-is.
    """

    # Conversation history; g4f's Messages type (list of role/content dicts).
    messages: Messages
    # Model identifier, e.g. "gpt-3.5-turbo".
    model: str
    # Optional g4f provider name; falls back to the ?provider= query param.
    provider: Union[str, None] = None
    # When True the endpoint answers with a server-sent-events stream.
    stream: bool = False
    # Sampling temperature; None lets the provider pick its default.
    temperature: Union[float, None] = None
    # Upper bound on generated tokens; None means provider default.
    max_tokens: Union[int, None] = None
    # Stop sequence(s): a single string or a list of strings.
    stop: Union[list[str], str, None] = None
    # Provider API key; may also arrive via the Authorization header.
    api_key: Union[str, None] = None
|
29 |
+
|
30 |
+
# Load the proxy pool: one "host:port" entry per line in ./p.txt.
# BUGFIX: readlines() keeps the trailing "\n" on every entry, which would
# later be concatenated into an invalid "http://host:port\n" proxy URL in
# Api.mkclient(); strip each line and drop blanks. Also tolerate a missing
# file (empty pool) instead of crashing at import time.
try:
    with open("./p.txt", "r", encoding="utf-8") as f:
        pr = [line.strip() for line in f if line.strip()]
except FileNotFoundError:
    pr = []
|
32 |
+
|
33 |
+
class Api:
    """FastAPI application exposing an OpenAI-style HTTP API backed by g4f.

    Routes:
        GET  /                    -> redirect to /v1
        GET  /v1                  -> HTML index
        GET  /v1/models           -> list of available models
        GET  /v1/models/{name}    -> info about a single model
        POST /v1/chat/completions -> chat completion (optionally streamed)
        POST /v1/completions      -> placeholder, not implemented
    """

    # Number of requests served before rotating to a fresh random proxy.
    PROXY_ROTATION_INTERVAL = 5

    def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
                 list_ignored_providers: List[str] = None) -> None:
        """Build the FastAPI app, create the g4f client and register routes.

        :param engine: the g4f module itself (stored for reference).
        :param debug: enable g4f debug logging.
        :param sentry: unused flag, kept for interface compatibility.
        :param list_ignored_providers: provider names to skip on completion.
        """
        self.engine = engine
        self.debug = debug
        self.sentry = sentry
        self.list_ignored_providers = list_ignored_providers

        if debug:
            g4f.debug.logging = True

        # BUGFIX: mkclient() reads self.n, which was never initialized before
        # the first call (AttributeError in __init__), and no client was ever
        # created until the counter reached 5 (AttributeError on the first
        # request). Seed both attributes before calling it.
        self.n = 0
        self.client = None
        self.mkclient()

        nest_asyncio.apply()
        self.app = FastAPI()

        self.routes()
        self.register_validation_exception_handler()

    def mkclient(self):
        """(Re)create the g4f client, rotating to a random proxy every 5 calls."""
        # Recreate when no client exists yet or the rotation interval elapsed.
        if self.client is None or self.n >= self.PROXY_ROTATION_INTERVAL:
            # .strip() guards against a trailing newline on the proxy entry,
            # which would corrupt the proxy URL.
            self.client = Client(proxies="http://" + random.choice(pr).strip())
            self.n = 0

        self.n += 1

    def register_validation_exception_handler(self):
        """Install a handler that reports validation errors as JSON 422 responses."""
        @self.app.exception_handler(RequestValidationError)
        async def validation_exception_handler(request: Request, exc: RequestValidationError):
            # Re-shape pydantic's error dicts into a stable public format
            # ("msg" key renamed to "message").
            details = exc.errors()
            modified_details = []
            for error in details:
                modified_details.append(
                    {
                        "loc": error["loc"],
                        "message": error["msg"],
                        "type": error["type"],
                    }
                )
            return JSONResponse(
                status_code=HTTP_422_UNPROCESSABLE_ENTITY,
                content=jsonable_encoder({"detail": modified_details}),
            )

    def routes(self):
        """Register all HTTP routes on the FastAPI app."""
        @self.app.get("/")
        async def read_root():
            return RedirectResponse("/v1", 302)

        @self.app.get("/v1")
        async def read_root_v1():
            return HTMLResponse('g4f API: Go to '
                                '<a href="/v1/chat/completions">chat/completions</a> '
                                'or <a href="/v1/models">models</a>.')

        @self.app.get("/v1/models")
        async def models():
            # Map model name -> g4f model object, then flatten to the
            # OpenAI-style list-of-dicts payload.
            model_list = dict(
                (model, g4f.ModelUtils.convert[model])
                for model in g4f.Model.__all__()
            )
            model_list = [{
                'id': model_id,
                'object': 'model',
                'created': 0,
                'owned_by': model.base_provider
            } for model_id, model in model_list.items()]
            return JSONResponse(model_list)

        @self.app.get("/v1/models/{model_name}")
        async def model_info(model_name: str):
            # BUGFIX: catch only the expected KeyError instead of a bare
            # "except:", which also swallowed SystemExit/KeyboardInterrupt.
            try:
                model_info = g4f.ModelUtils.convert[model_name]
                return JSONResponse({
                    'id': model_name,
                    'object': 'model',
                    'created': 0,
                    'owned_by': model_info.base_provider
                })
            except KeyError:
                return JSONResponse({"error": "The model does not exist."})

        @self.app.post("/v1/chat/completions")
        async def chat_completions(config: ChatCompletionsConfig = None, request: Request = None, provider: str = None):
            self.mkclient()  # if reached, change the client
            # BUGFIX: a missing body left config as None, which crashed the
            # handler AND format_exception() inside the error path; reject it
            # up front instead of answering 500.
            if config is None:
                return JSONResponse({"error": "Missing request body."}, status_code=400)
            try:
                config.provider = provider if config.provider is None else config.provider
                # Fall back to the "Authorization: Bearer <key>" header when
                # no api_key was given in the body.
                if config.api_key is None and request is not None:
                    auth_header = request.headers.get("Authorization")
                    if auth_header is not None:
                        auth_header = auth_header.split(None, 1)[-1]
                        if auth_header and auth_header != "Bearer":
                            config.api_key = auth_header
                response = self.client.chat.completions.create(
                    **config.dict(exclude_none=True),
                    ignored=self.list_ignored_providers
                )
            except Exception as e:
                logging.exception(e)
                return Response(content=format_exception(e, config), status_code=500, media_type="application/json")

            if not config.stream:
                return JSONResponse(response.to_json())

            def streaming():
                # Server-sent-events stream: one "data:" line per chunk.
                try:
                    for chunk in response:
                        yield f"data: {json.dumps(chunk.to_json())}\n\n"
                except GeneratorExit:
                    # Client disconnected mid-stream; stop quietly.
                    pass
                except Exception as e:
                    logging.exception(e)
                    yield f'data: {format_exception(e, config)}'

            return StreamingResponse(streaming(), media_type="text/event-stream")

        @self.app.post("/v1/completions")
        async def completions():
            return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json")

    def run(self, ip, use_colors: bool = False):
        """Run uvicorn; *ip* is a single "host:port" string."""
        split_ip = ip.split(":")
        uvicorn.run(app=self.app, host=split_ip[0], port=int(split_ip[1]), use_colors=use_colors)
|
156 |
+
|
157 |
+
def format_exception(e: Exception, config: ChatCompletionsConfig) -> str:
    """Serialize *e* as the JSON error payload returned by the API.

    Prefers the model/provider reported by g4f's last-provider bookkeeping;
    falls back to what the caller asked for in *config*.
    """
    provider_info = g4f.get_last_provider(True)
    if provider_info:
        model_name = provider_info.get("model")
        provider_name = provider_info.get("name")
    else:
        model_name = config.model
        provider_name = config.provider
    payload = {
        "error": {"message": f"{e.__class__.__name__}: {e}"},
        "model": model_name,
        "provider": provider_name
    }
    return json.dumps(payload)
|
164 |
+
|
165 |
+
def run_api(host: str = '0.0.0.0', port: int = 1337, debug: bool = False, use_colors=True) -> None:
    """Construct the Api application and serve it on *host*:*port*.

    :param host: interface to bind.
    :param port: TCP port to listen on.
    :param debug: forwarded to Api (enables g4f debug logging).
    :param use_colors: forwarded to uvicorn's log formatting.
    """
    print(f'Starting server... [g4f v-{g4f.version.utils.current_version}]')
    api = Api(engine=g4f, debug=debug)
    api.run(f"{host}:{port}", use_colors=use_colors)
|
if __name__ == "__main__":
    # Guard the entry point so importing this module does not start the
    # server as a side effect; running the script behaves as before.
    run_api(host="0.0.0.0", port=7860)
|