Synced repo using 'sync_with_huggingface' Github Action
Browse files- Dockerfile +19 -0
- Spacefile +10 -0
- app.py +396 -0
- check.py +48 -0
- check.sh +1 -0
- g4f/Provider/Provider.py +16 -0
- g4f/Provider/Providers/AItianhu.py +39 -0
- g4f/Provider/Providers/Acytoo.py +43 -0
- g4f/Provider/Providers/AiFree.py +41 -0
- g4f/Provider/Providers/AiService.py +43 -0
- g4f/Provider/Providers/Aichat.py +47 -0
- g4f/Provider/Providers/B88.py +47 -0
- g4f/Provider/Providers/Bing.py +395 -0
- g4f/Provider/Providers/Bingo.py +66 -0
- g4f/Provider/Providers/ChatFree.py +48 -0
- g4f/Provider/Providers/ChatGPTunli.py +37 -0
- g4f/Provider/Providers/ChatgptAi.py +53 -0
- g4f/Provider/Providers/Chimera.py +70 -0
- g4f/Provider/Providers/ClaudeAI.py +69 -0
- g4f/Provider/Providers/DeepAi.py +48 -0
- g4f/Provider/Providers/EasyChat.py +52 -0
- g4f/Provider/Providers/EzChat.py +52 -0
- g4f/Provider/Providers/Forefront.py +47 -0
- g4f/Provider/Providers/Free2gpt.py +42 -0
- g4f/Provider/Providers/Fusionbrain.py +58 -0
- g4f/Provider/Providers/Gravityengine.py +33 -0
- g4f/Provider/Providers/H2o.py +94 -0
- g4f/Provider/Providers/PerplexityAI.py +28 -0
- g4f/Provider/Providers/Phind.py +36 -0
- g4f/Provider/Providers/PizzaGPT.py +34 -0
- g4f/Provider/Providers/Poe.py +28 -0
- g4f/Provider/Providers/PowerChat.py +35 -0
- g4f/Provider/Providers/Slack.py +109 -0
- g4f/Provider/Providers/Theb.py +54 -0
- g4f/Provider/Providers/Vercel.py +38 -0
- g4f/Provider/Providers/Wewordle.py +73 -0
- g4f/Provider/Providers/Zhulei.py +31 -0
- g4f/Provider/Providers/helpers/perplexityai.py +25 -0
- g4f/Provider/Providers/helpers/phind.py +88 -0
- g4f/Provider/Providers/helpers/theb.py +48 -0
- g4f/Provider/__init__.py +34 -0
- g4f/__init__.py +42 -0
- g4f/models.py +248 -0
- g4f/typing.py +3 -0
- g4f/utils.py +49 -0
- install.sh +1 -0
- requirements.txt +21 -0
- start.sh +1 -0
- test.py +11 -0
- vercel.json +19 -0
Dockerfile
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# API image: runs the provider health-checker in the background (screen)
# and serves the FastAPI app with uvicorn on port 7860.
FROM python:3.10.12-slim-bullseye

WORKDIR /app

COPY requirements.txt requirements.txt

RUN python3 -m venv venv
ENV PATH="/app/venv/bin:$PATH"

RUN apt-get update && \
    apt-get install -y --no-install-recommends build-essential libffi-dev cmake libcurl4-openssl-dev nodejs screen && \
    pip3 install --no-cache-dir -r requirements.txt

COPY . .
RUN chmod +x ./app.py
RUN chmod +x ./start.sh
RUN chmod -R 777 /app
# BUGFIX: only the LAST CMD in a Dockerfile takes effect, so the original
# two CMD lines silently dropped the background checker. Chain both here:
# screen -d -m detaches immediately, then uvicorn runs in the foreground.
CMD screen -d -m python3 check.py && uvicorn app:app --host 0.0.0.0 --port 7860
Spacefile
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Spacefile Docs: https://go.deta.dev/docs/spacefile/v0
v: 0
micros:
  - name: gpt4free
    src: ./
    engine: python3.9
    primary: true
    # Expose every route publicly (no Deta access control).
    public_routes:
      - "/*"
    run: uvicorn app:app
app.py
ADDED
@@ -0,0 +1,396 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import time
|
3 |
+
import json
|
4 |
+
import random
|
5 |
+
import requests
|
6 |
+
import g4f
|
7 |
+
from fastapi import FastAPI,Response, status
|
8 |
+
from typing import Dict, NewType, Union, Optional, List, get_type_hints
|
9 |
+
from fastapi.middleware.cors import CORSMiddleware
|
10 |
+
from fastapi.responses import StreamingResponse,JSONResponse
|
11 |
+
from fastapi.openapi.utils import get_openapi
|
12 |
+
from starlette.exceptions import HTTPException as StarletteHTTPException
|
13 |
+
from pydantic import BaseModel
|
14 |
+
|
15 |
+
|
16 |
+
|
17 |
+
#app = FastAPI(docs_url=None, redoc_url=None)
# FastAPI application with fully open CORS: any origin, method, and header
# is allowed, since this service is meant to be called from arbitrary web UIs.
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
|
25 |
+
|
26 |
+
class chat_completions_Item(BaseModel):
    # Request body for POST /v1/chat/completions (OpenAI-compatible schema).
    stream : bool = False
    model : str = 'gpt-3.5-turbo'
    messages : list = [{'role': 'user', 'content':"Say 'Hello World'."}]
    # Optional explicit g4f provider name; when None one is auto-selected.
    provider: Union[str, None] = None
    temperature : float = 0.8
    presence_penalty: float = 0
    frequency_penalty: float = 0
    top_p: float = 1
|
35 |
+
|
36 |
+
class completions_Item(BaseModel):
    # Request body for POST /v1/completions (legacy text-completion schema:
    # a single prompt string instead of a messages list).
    stream : bool = False
    model : str = 'gpt-3.5-turbo'
    prompt : str = "Say 'Hello World'."
    # Optional explicit g4f provider name; when None one is auto-selected.
    provider : Union[str, None] = None
    temperature : float = 0.8
    presence_penalty: float = 0
    frequency_penalty: float = 0
    top_p: float = 1
|
45 |
+
|
46 |
+
def auto_select(model: str = 'gpt-3.5-turbo', stream: bool = False):
    """Pick a working g4f provider for *model* from the live status feed.

    Fetches the provider status list, shuffles it, and returns
    ``[provider_module, provider_name]`` for a provider whose entry for
    *model* is ``'Active'`` (skipping non-streaming providers when
    *stream* is true). Returns ``None`` when no provider serves the model.
    """
    r = requests.get('https://gpt.lemonsoftware.eu.org/v1/status')
    data = r.json()['data']
    random.shuffle(data)

    # First pass: look for a provider whose entry for the requested model
    # is Active; remember every provider that knows the model at all.
    model_providers = set()
    for provider_info in data:
        for model_info in provider_info['model']:
            if model not in model_info:
                continue
            model_providers.add(provider_info['provider'])
            if model_info[model]['status'] == 'Active':
                provider_module = getattr(g4f.Provider, provider_info['provider'])
                # Skip providers that cannot stream when streaming was requested.
                if stream and not provider_module.supports_stream:
                    continue
                return [provider_module, provider_info['provider']]

    if not model_providers:
        return None

    # Second pass: collect every provider that has at least one Active model.
    # BUGFIX: the original reused the name ``model`` as the inner loop
    # variable, clobbering the requested model name; use a distinct local.
    active_providers = set()
    for provider_info in data:
        for model_info in provider_info['model']:
            for model_status in model_info.values():
                if model_status['status'] != 'Active':
                    continue
                if stream and not getattr(g4f.Provider, provider_info['provider']).supports_stream:
                    continue
                active_providers.add(provider_info['provider'])

    chooseable_providers = model_providers & active_providers
    if not chooseable_providers:
        return None
    chooseable_provider = random.choice(list(chooseable_providers))
    return [getattr(g4f.Provider, chooseable_provider), chooseable_provider]
|
77 |
+
|
78 |
+
@app.post("/v1/chat/completions")
def chat_completions(item: chat_completions_Item, responses: Response):
    """OpenAI-compatible /v1/chat/completions endpoint.

    Resolves a g4f provider — either the explicit ``item.provider`` or an
    auto-selected one with a single retry on failure — then returns a full
    completion dict, or an SSE ``StreamingResponse`` when ``item.stream``
    is true. Errors are reported as OpenAI-style JSON error bodies.
    """
    stream = item.stream
    model = item.model.lower()
    messages = item.messages
    provider_name = item.provider
    gen_kwargs = dict(
        temperature=item.temperature,
        presence_penalty=item.presence_penalty,
        frequency_penalty=item.frequency_penalty,
        top_p=item.top_p,
    )

    def _fail(message, err_type="invalid_request_error"):
        # OpenAI-style error envelope with HTTP 500.
        return JSONResponse(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            content={"error": {"message": message, "type": err_type,
                               "param": None, "code": 500}})

    if provider_name:
        try:
            response = g4f.ChatCompletion.create(
                model=model, provider=getattr(g4f.Provider, provider_name),
                stream=stream, messages=messages, **gen_kwargs)
        except Exception:
            return _fail("There was an error")
    else:
        # Auto-select a provider; on failure re-select once and retry
        # (same two-attempt behaviour as the original duplicated code).
        for attempt in range(2):
            provider = auto_select(model=model, stream=stream)
            if provider is None:
                return _fail("The model is invalid or not working.")
            if stream and provider[0].supports_stream == False:
                return _fail("Stream is not supported.")
            provider_name = provider[1]
            try:
                response = g4f.ChatCompletion.create(
                    model=model, provider=provider[0],
                    stream=stream, messages=messages, **gen_kwargs)
                break
            except Exception:
                continue
        else:
            return _fail("There was an error.Please try again.", "server_error")

    if not stream:
        completion_timestamp = int(time.time())
        completion_id = ''.join(random.choices(
            'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
        return {
            'id': 'chatcmpl-%s' % completion_id,
            'object': 'chat.completion',
            'created': completion_timestamp,
            'model': model,
            'provider': provider_name,
            # Rough proxies (message count / character length), not real tokens.
            'usage': {
                'prompt_tokens': len(messages),
                'completion_tokens': len(response),
                'total_tokens': len(messages) + len(response)
            },
            'choices': [{
                'message': {
                    'role': 'assistant',
                    'content': response
                },
                'finish_reason': 'stop',
                'index': 0
            }]
        }

    def event_stream():
        for token in response:
            completion_timestamp = int(time.time())
            completion_id = ''.join(random.choices(
                'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
            completion_data = {
                'id': f'chatcmpl-{completion_id}',
                'object': 'chat.completion.chunk',
                'created': completion_timestamp,
                'model': model,
                'provider': provider_name,
                'choices': [
                    {
                        'delta': {
                            'content': token
                        },
                        'index': 0,
                        'finish_reason': None
                    }
                ]
            }
            # BUGFIX: the original passed separators=(',' ':'), which is
            # implicit string concatenation (',:'), not the intended 2-tuple.
            yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',', ':'))
            time.sleep(0.1)

    return StreamingResponse(event_stream(), media_type='text/event-stream')
|
166 |
+
|
167 |
+
@app.post("/v1/completions")
def completions(item: completions_Item, responses: Response):
    """OpenAI-compatible /v1/completions (legacy text-completion) endpoint.

    Wraps the single ``prompt`` into a one-message chat, resolves a g4f
    provider (explicit or auto-selected with a single retry), and returns
    a text-completion dict or an SSE stream when ``item.stream`` is true.
    """
    stream = item.stream
    model = item.model.lower()
    messages = [{'role': 'user', 'content': item.prompt}]
    provider_name = item.provider
    gen_kwargs = dict(
        temperature=item.temperature,
        presence_penalty=item.presence_penalty,
        frequency_penalty=item.frequency_penalty,
        top_p=item.top_p,
    )

    def _fail(message, err_type="invalid_request_error"):
        # OpenAI-style error envelope with HTTP 500.
        return JSONResponse(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            content={"error": {"message": message, "type": err_type,
                               "param": None, "code": 500}})

    if provider_name:
        try:
            response = g4f.ChatCompletion.create(
                model=model, provider=getattr(g4f.Provider, provider_name),
                stream=stream, messages=messages, **gen_kwargs)
        except Exception:
            return _fail("There was an error.")
    else:
        # Auto-select a provider; on failure re-select once and retry
        # (same two-attempt behaviour as the original duplicated code).
        for attempt in range(2):
            provider = auto_select(model=model, stream=stream)
            if provider is None:
                return _fail("The model is invalid or not working.")
            if stream and provider[0].supports_stream == False:
                return _fail("Stream is not supported.")
            provider_name = provider[1]
            try:
                response = g4f.ChatCompletion.create(
                    model=model, provider=provider[0],
                    stream=stream, messages=messages, **gen_kwargs)
                break
            except Exception:
                continue
        else:
            return _fail("There was an error.Please try again.", "server_error")

    if not stream:
        completion_timestamp = int(time.time())
        completion_id = ''.join(random.choices(
            'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
        return {
            'id': 'cmpl-%s' % completion_id,
            'object': 'text.completion',
            'created': completion_timestamp,
            'model': model,
            'provider': provider_name,
            # Rough proxies (message count / character length), not real tokens.
            'usage': {
                'prompt_tokens': len(messages),
                'completion_tokens': len(response),
                'total_tokens': len(messages) + len(response)
            },
            'choices': [{
                'text': response,
                'finish_reason': 'length',
                "logprobs": None,
                'index': 0
            }]
        }

    def event_stream():
        for token in response:
            completion_timestamp = int(time.time())
            completion_id = ''.join(random.choices(
                'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
            completion_data = {
                'id': f'cmpl-{completion_id}',
                'object': 'text.completion.chunk',
                'created': completion_timestamp,
                'model': model,
                'provider': provider_name,
                'choices': [
                    {
                        'delta': {
                            'text': token
                        },
                        'index': 0,
                        'finish_reason': None
                    }
                ]
            }
            # BUGFIX: the original passed separators=(',' ':'), which is
            # implicit string concatenation (',:'), not the intended 2-tuple.
            yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',', ':'))
            time.sleep(0.1)

    return StreamingResponse(event_stream(), media_type='text/event-stream')
|
252 |
+
|
253 |
+
@app.get("/v1/dashboard/billing/subscription")
@app.get("/dashboard/billing/subscription")
async def billing_subscription():
    # Static mock of OpenAI's billing-subscription endpoint so that
    # OpenAI-compatible clients which probe billing info keep working.
    # All values are hard-coded placeholders, not real account data.
    return {
        "object": "billing_subscription",
        "has_payment_method": True,
        "canceled": False,
        "canceled_at": None,
        "delinquent": None,
        "access_until": 2556028800,
        "soft_limit": 6944500,
        "hard_limit": 166666666,
        "system_hard_limit": 166666666,
        "soft_limit_usd": 416.67,
        "hard_limit_usd": 9999.99996,
        "system_hard_limit_usd": 9999.99996,
        "plan": {
            "title": "Pay-as-you-go",
            "id": "payg"
        },
        "primary": True,
        "account_name": "OpenAI",
        "po_number": None,
        "billing_email": None,
        "tax_ids": None,
        "billing_address": {
            "city": "New York",
            "line1": "OpenAI",
            "country": "US",
            "postal_code": "NY10031"
        },
        "business_address": None
    }
|
286 |
+
|
287 |
+
|
288 |
+
@app.get("/v1/dashboard/billing/usage")
@app.get("/dashboard/billing/usage")
async def billing_usage(start_date:str='2023-01-01',end_date:str='2023-01-31'):
    # Static mock of OpenAI's billing-usage endpoint. The date query
    # parameters are accepted for API compatibility but ignored.
    return {
        "object": "list",
        "daily_costs": [
            {
                "timestamp": time.time(),
                "line_items": [
                    {
                        "name": "GPT-4",
                        "cost": 0.0
                    },
                    {
                        "name": "Chat models",
                        "cost": 1.01
                    },
                    {
                        "name": "InstructGPT",
                        "cost": 0.0
                    },
                    {
                        "name": "Fine-tuning models",
                        "cost": 0.0
                    },
                    {
                        "name": "Embedding models",
                        "cost": 0.0
                    },
                    {
                        "name": "Image models",
                        "cost": 16.0
                    },
                    {
                        "name": "Audio models",
                        "cost": 0.0
                    }
                ]
            }
        ],
        "total_usage": 1.01
    }
|
330 |
+
|
331 |
+
@app.get("/v1/models")
def models():
    """OpenAI-compatible model listing built from g4f's model registry."""
    import g4f.models
    model = {"data":[]}
    for i in g4f.models.ModelUtils.convert:
        model['data'].append({
            "id": i,
            "object": "model",
            "owned_by": g4f.models.ModelUtils.convert[i].base_provider,
            # Placeholder metadata; no real token limits are enforced.
            "tokens": 99999,
            "fallbacks": None,
            "endpoints": [
                "/v1/chat/completions"
            ],
            "limits": None,
            "permission": []
        })
    return model
|
349 |
+
|
350 |
+
@app.get("/v1/providers")
async def providers():
    """List every provider module under g4f/Provider/Providers with its metadata.

    Scans the provider directory, resolves each ``*.py`` file to its
    attribute on ``g4f.Provider``, and reports name, models, URL, working
    flag, and stream support. Non-provider modules are skipped.
    """
    files = os.listdir("g4f/Provider/Providers")
    files = [f for f in files if os.path.isfile(os.path.join("g4f/Provider/Providers", f))]
    files.sort(key=str.lower)
    providers_data = {"data": []}
    for file in files:
        if not file.endswith(".py"):
            continue
        name = file[:-3]
        try:
            p = getattr(g4f.Provider, name)
            providers_data["data"].append({
                "provider": str(name),
                "model": list(p.model),
                "url": str(p.url),
                "working": bool(p.working),
                "supports_stream": bool(p.supports_stream)
            })
        except Exception:
            # Helper/non-provider modules lack these attributes (or have
            # model=None); skip them rather than failing the whole listing.
            # (Narrowed from the original bare `except:`.)
            pass
    return providers_data
|
371 |
+
|
372 |
+
def custom_openapi():
    """Build (once) and return the app's OpenAPI schema with custom branding."""
    # Return the cached schema on subsequent calls.
    if app.openapi_schema:
        return app.openapi_schema
    openapi_schema = get_openapi(
        title="GPT API",
        version="1.0.0",
        summary="GPT API",
        description="[Try Online](https://chatgpt-next-web.lemonsoftware.eu.org/)",
        routes=app.routes,
    )
    # ReDoc-style logo extension.
    openapi_schema["info"]["x-logo"] = {
        "url": "https://gpt-status.lemonsoftware.eu.org/icon.svg"
    }
    app.openapi_schema = openapi_schema
    return app.openapi_schema
# Replace FastAPI's default schema generator with the cached custom one.
app.openapi = custom_openapi
388 |
+
|
389 |
+
@app.exception_handler(StarletteHTTPException)

async def custom_http_exception_handler(request, exc):
    # Map every HTTP error (including unknown routes) to an OpenAI-style
    # 404 error body, regardless of the original status code.
    return JSONResponse(status_code=status.HTTP_404_NOT_FOUND, content={"error": {"message": "Invalid URL","type": "invalid_request_error","param": None,"code": 404}})
|
393 |
+
|
394 |
+
if __name__ == '__main__':
    # Dev entry point; the Docker image launches uvicorn via CMD instead.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
|
check.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import g4f
|
3 |
+
import requests
|
4 |
+
import json
|
5 |
+
import time
|
6 |
+
import pytz
|
7 |
+
from datetime import datetime
|
8 |
+
|
9 |
+
|
10 |
+
files = os.listdir("g4f/Provider/Providers")
files = [f for f in files if os.path.isfile(os.path.join("g4f/Provider/Providers", f))]
files.sort(key=str.lower)

# Health-check daemon: probe every provider/model pair, then POST the
# aggregated status to the central status endpoint every 5 minutes.
while True:
    status = {'data': []}
    for file in files:
        if not file.endswith(".py"):
            continue
        name = file[:-3]
        try:
            p = getattr(g4f.Provider, name)
            status['data'].append({
                "provider": name,
                "model": [],
                "url": p.url
            })
            for i in p.model:
                status['data'][-1]['model'].append({i: {'status': ''}})
                try:
                    response = g4f.ChatCompletion.create(
                        model=i, provider=p,
                        messages=[{"role": "user", "content": "Say 'Hello World!'"}],
                        stream=False)
                    # Loose heuristic: any plausible text fragment counts as alive.
                    if ('Hello World' in response or 'Hello' in response
                            or 'hello' in response or 'world' in response
                            or 'th' in response or 'images' in response):
                        status['data'][-1]['model'][-1][i]['status'] = 'Active'
                    else:
                        status['data'][-1]['model'][-1][i]['status'] = 'Inactive'
                except Exception:
                    # Provider call failed — mark inactive, keep probing.
                    # (Narrowed from the original bare `except:`.)
                    status['data'][-1]['model'][-1][i]['status'] = 'Inactive'
        except Exception:
            # Module without provider attributes (helpers, __init__) — skip.
            # (Narrowed from the original bare `except:`.)
            pass

    print(status)
    status['key'] = "test"
    tz = pytz.timezone('Asia/Shanghai')
    now = datetime.now(tz)
    print(now)
    status['time'] = now.strftime("%Y-%m-%d %H:%M:%S")
    r = requests.post("https://gpt.lemonsoftware.eu.org/v1/status",
                      data=json.dumps(status),
                      headers={"content-type": "application/json"})
    print(r.text)
    time.sleep(300)
|
check.sh
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
# BUGFIX: `python3 -m check.py` is invalid — the -m flag takes a module
# name (there is no package "check" with submodule "py"); run the file.
screen -S test python3 check.py
|
g4f/Provider/Provider.py
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
from ..typing import sha256, Dict, get_type_hints

# Template/default provider module: declares the attribute contract that
# every concrete provider under Providers/ is expected to expose.
url = None
model = None
supports_stream = False
needs_auth = False
working = False

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    # Stub: concrete providers yield completion text chunks here.
    return


# Human-readable signature string, e.g. for debugging/introspection:
# "g4f.Providers.Provider supports: (model: str, messages: list, stream: bool)".
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/AItianhu.py
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
from curl_cffi import requests
from ...typing import sha256, Dict, get_type_hints
import json

# g4f provider for www.aitianhu.com's chat-process endpoint.
url = "https://www.aitianhu.com/api/chat-process"
model = ['gpt-3.5-turbo']
supports_stream = False
needs_auth = False
working = True


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    # Flatten the chat history into one "role: content" transcript, since
    # the upstream API takes a single prompt string rather than messages.
    base = ''
    for message in messages:
        base += '%s: %s\n' % (message['role'], message['content'])
    base += 'assistant:'

    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"
    }
    data = {
        "prompt": base,
        "options": {},
        "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
        "temperature": kwargs.get("temperature", 0.8),
        "top_p": kwargs.get("top_p", 1)
    }
    # impersonate='chrome110' makes curl_cffi mimic a Chrome TLS fingerprint.
    response = requests.post(url, headers=headers, json=data,impersonate='chrome110')
    if response.status_code == 200:
        # The endpoint streams one JSON object per line; the last line
        # holds the fully accumulated answer.
        lines = response.text.strip().split('\n')
        res = json.loads(lines[-1])
        yield res['text']
    else:
        print(f"Error Occurred::{response.status_code}")
        return None

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Acytoo.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
from curl_cffi import requests
from ...typing import sha256, Dict, get_type_hints
import json

# g4f provider for chat.acytoo.com's completions endpoint.
url = "https://chat.acytoo.com/api/completions"
model = ['gpt-3.5-turbo']
supports_stream = False
needs_auth = False
working = True

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    # Collapse the conversation into one "role: content" transcript string.
    base = ''
    for message in messages:
        base += '%s: %s\n' % (message['role'], message['content'])
    base += 'assistant:'

    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"
    }
    data = {
        "key": "",
        "model": "gpt-3.5-turbo",
        "messages": [
            {
                "role": "user",
                "content": base,
                # Fixed timestamp copied from a captured browser request.
                "createdAt": 1688518523500
            }
        ],
        "temperature": 1,
        "password": ""
    }

    # impersonate='chrome110' makes curl_cffi mimic a Chrome TLS fingerprint.
    response = requests.post(url, headers=headers,impersonate='chrome110', data=json.dumps(data))
    if response.status_code == 200:
        # The endpoint returns the completion body as plain text.
        yield response.text
    else:
        print(f"Error Occurred::{response.status_code}")
        return None

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/AiFree.py
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import requests
from datetime import datetime
import base64,hashlib,json
from ...typing import sha256, Dict, get_type_hints

# g4f provider for openai.aifree.site; supports streamed responses.
url = 'https://openai.aifree.site/'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
working = True

headers = {
    'Origin':'https://openai.aifree.site',
    'Referer':'https://openai.aifree.site/',
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
    'Content-Type':'text/plain;charset=UTF-8',
}

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    # The API authenticates requests with a SHA-256 of
    # "<ms-timestamp>:<last user message>:" (empty secret after the colon).
    current_time = datetime.now()
    timestamp_in_seconds = current_time.timestamp()
    timestamp_in_milliseconds = int(round(timestamp_in_seconds * 1000))
    sign = str(timestamp_in_milliseconds)+':'+messages[-1]['content']+':'
    sign = hashlib.sha256(sign.encode('utf-8')).hexdigest()
    data = {
        "messages": messages,
        "time": timestamp_in_milliseconds,
        "pass": None,
        "sign": sign
    }
    r = requests.post('https://openai.aifree.site/api/generate',json=data,headers=headers,stream=True)
    # Stream the raw response in 2 KiB chunks, decoded leniently.
    for chunk in r.iter_content(chunk_size=2048):
        if chunk:
            if b'rate_limit_exceeded' in chunk:
                yield 'Rate Limited'
                return
            yield chunk.decode(errors='ignore')

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/AiService.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
from curl_cffi import requests
from ...typing import get_type_hints

# g4f provider for aiservice.vercel.app's answer endpoint.
url = "https://aiservice.vercel.app/api/chat/answer"
model = ['gpt-3.5-turbo']
supports_stream = False
needs_auth = False
working = True


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    # Flatten the chat history into one "role: content" transcript, since
    # the upstream API takes a single "input" string rather than messages.
    base = ''
    for message in messages:
        base += '%s: %s\n' % (message['role'], message['content'])
    base += 'assistant:'

    headers = {
        "accept": "*/*",
        "content-type": "text/plain;charset=UTF-8",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "Referer": "https://aiservice.vercel.app/chat",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',

    }
    data = {
        "input": base
    }
    # impersonate='chrome110' makes curl_cffi mimic a Chrome TLS fingerprint.
    response = requests.post(url, headers=headers, json=data,impersonate='chrome110')
    if response.status_code == 200:
        _json = response.json()
        yield _json['data']
    else:
        print(f"Error Occurred::{response.status_code}")
        return None



params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Aichat.py
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from curl_cffi import requests
|
3 |
+
from ...typing import sha256, Dict, get_type_hints
|
4 |
+
|
5 |
+
# Public page of the chat-gpt.org service (the API lives at /api/text).
url = 'https://chat-gpt.org/chat'
# Model names advertised to the g4f registry.
model = ['gpt-3.5-turbo']
# The reply arrives as one JSON object; no token streaming.
supports_stream = False
# No API key or login required (a hard-coded session cookie is used instead).
needs_auth = False
# Health flag read by the provider registry.
working = True
|
10 |
+
|
11 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Flatten the chat history into one prompt, POST it to chat-gpt.org
    and yield the single reply string.

    Sampling parameters (temperature, presence_penalty, top_p,
    frequency_penalty) were previously hard-coded; they can now be
    overridden via kwargs, with the same defaults as before, matching the
    other providers in this package.
    """
    # Collapse the message list into a "role: content" transcript ending
    # with an open "assistant:" turn for the model to complete.
    base = ''
    for message in messages:
        base += '%s: %s\n' % (message['role'], message['content'])
    base += 'assistant:'

    # Browser-mimicking headers; the Cookie carries a pre-baked session and
    # Cloudflare clearance token for chat-gpt.org.
    # NOTE(review): these cookies are static and will expire — confirm they
    # are still valid when this provider misbehaves.
    headers = {
        'authority': 'chat-gpt.org',
        'accept': '*/*',
        'cache-control': 'no-cache',
        'content-type': 'application/json',
        'origin': 'https://chat-gpt.org',
        'pragma': 'no-cache',
        'referer': 'https://chat-gpt.org/chat',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
        'Cookie':'XSRF-TOKEN=eyJpdiI6InJqNG4xZ05mMW9nRVNvL1p4Nkt3S3c9PSIsInZhbHVlIjoiUVVUS0h0YjlsS2l4Vzg0K3FVcEdpMkRIOWRmOVRnZjF0UHpXZVVNcGR3cVFDK0lJRjVEVHBPZmhRSVZXL1d6SGgwVTFva2gwaHlFaVVTdnNuNElJQ0FzNjB5eHZ0N3NWOUQyenBaMUJ0c1RmY3pKUGxLaHlIbitNR21JMHZ4ZnciLCJtYWMiOiIxZDhkNWIyZmZiODc0MTEzOTdmZjg1OTNjZDMxNzBhNTVlYjc3NzVhMjY4NWUxNGRlMjJlOGYxZjNiMDg2YjkxIn0%3D; chatgptchatorg_session=eyJpdiI6IlpmaUV4dVVZZm9GTHFtNmFEaTlvT1E9PSIsInZhbHVlIjoiZ28wcUpEYXRyNC94VjYwc1BwVnBXNElxMkphajRMRTFlMW43N3dWZ2JMdFhNTEh5YmJFT1AzRTNRbUQxVVo3SnBiN3c5V1N1ZnMrcGtVcGRQT1Jtekd3TWF3NHZDMnMweVlVMDU0SFBJLzFaMTgwSTBiWVA5eWJBTXJ5TXBBRW8iLCJtYWMiOiI2MmMwZTcwZWUzYjk1Yjk2NmNlZWFiYzQ2NzNiYjE4NzQ2YWQ1ZDVjMTljMjIyNzI2ZWM3ZTUxZmYxMzY2NDU5In0%3D; cf_clearance=IERj8_nuFs9IYmUaK39KxwUnoFc9.bFDZ96KbWODjoc-1690707463-0-0.2.1690707463'
    }

    json_data = {
        'message': base,
        # Defaults preserved from the previous hard-coded values.
        'temperature': kwargs.get('temperature', 1),
        'presence_penalty': kwargs.get('presence_penalty', 0),
        'top_p': kwargs.get('top_p', 1),
        'frequency_penalty': kwargs.get('frequency_penalty', 0)
    }

    # curl_cffi's impersonate makes the TLS fingerprint look like Chrome 110.
    response = requests.post('https://chat-gpt.org/api/text', headers=headers, impersonate='chrome110', json=json_data)
    yield response.json()['message']
|
45 |
+
|
46 |
+
# Introspection string listing each _create_completion parameter with its
# annotated type, e.g. "g4f.Providers.Aichat supports: (model: str, ...)".
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/B88.py
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import requests
|
3 |
+
|
4 |
+
from ...typing import sha256, Dict, get_type_hints
|
5 |
+
# Base URL of the B88 (Poe-proxy) service.
url = 'https://1.b88.asia/'
# Map of public model names -> backend codenames expected by the API.
models = {
    "gpt-3.5-turbo": "chinchilla",
    "claude-instant-100k": "a2_100k",
    "claude-instant": "a2",
    "claude-2-100k": "a2_2",
    "palm2": "acouchy"
}
# Fix: was `models.keys()`, a live dict view. Every other provider exposes a
# plain list, and a view is neither indexable nor JSON-serializable the same
# way, so materialize it once here. Iteration order (and hence the list
# contents) is unchanged.
model = list(models)
# Responses are streamed chunk by chunk.
supports_stream = True
# No API key or login required.
needs_auth = False
# Health flag read by the provider registry.
working = True

# Browser-mimicking headers reused by every request to the service.
headers = {
    'Content-Type': 'application/json',
    'origin': 'https://1.b88.asia',
    'referer': 'https://1.b88.asia/',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}
|
24 |
+
|
25 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Send the flattened conversation to the B88 chat-process endpoint and
    yield the decoded response chunks as they stream in.
    """
    # Build the transcript: a fixed Chinese system preamble, then one
    # "role:content" line per message, ending with an open assistant turn.
    lines = ['这是一个人和一个语言模型之间的对话。语言模型应该始终作为助理进行响应,如果需要,可以参考过去的消息历史。\n']
    lines.extend('%s:%s\n' % (m['role'], m['content']) for m in messages)
    lines.append('助理: ')
    transcript = ''.join(lines)

    payload = {
        "prompt": transcript,
        "options": {},
        "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
        "temperature": kwargs.get("temperature", 0.8),
        "top_p": kwargs.get("top_p", 1),
        # Translate the public model name to the backend codename.
        "model": models[model],
        "user": None
    }

    resp = requests.post('https://1.b88.asia/api/chat-process', headers=headers, json=payload, stream=True)
    resp.encoding = 'utf-8'
    # Relay 2 KiB chunks, skipping keep-alive empties; bad bytes are dropped
    # rather than raising.
    for chunk in resp.iter_content(chunk_size=2048):
        if chunk:
            yield chunk.decode('utf-8', errors='ignore')
|
44 |
+
|
45 |
+
|
46 |
+
# Introspection string listing each _create_completion parameter with its
# annotated type, e.g. "g4f.Providers.B88 supports: (model: str, ...)".
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Bing.py
ADDED
@@ -0,0 +1,395 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import json
|
3 |
+
import random
|
4 |
+
import json
|
5 |
+
import os
|
6 |
+
import uuid
|
7 |
+
import ssl
|
8 |
+
import certifi
|
9 |
+
import aiohttp
|
10 |
+
import asyncio
|
11 |
+
import time
|
12 |
+
import urllib.parse as urlparse
|
13 |
+
from bs4 import BeautifulSoup
|
14 |
+
import requests
|
15 |
+
from ...typing import sha256, Dict, get_type_hints
|
16 |
+
|
17 |
+
# Public entry point of the Bing chat service this provider proxies.
url = 'https://bing.com/chat'
# 'bing' routes to the ChatHub websocket; 'dall-e' to image creation.
model = ['bing','dall-e']
# Chat responses stream token by token over the websocket.
supports_stream = True
# No user credentials needed; baked-in cookies are used instead.
needs_auth = False
# Health flag read by the provider registry.
working = True
|
22 |
+
|
23 |
+
cookies = ['MUID=2CDF36CF1BED6F453AA325951AFF6E9A; MUIDB=2CDF36CF1BED6F453AA325951AFF6E9A; _EDGE_V=1; SRCHD=AF=UNKSBD; SRCHUID=V=2&GUID=7E766696152D40CDB1078CC2B9E1CE95&dmnchg=1; _UR=QS=0&TQS=0; MicrosoftApplicationsTelemetryDeviceId=0847c270-ac5f-4d28-af4b-aef32a9d3e34; ANON=A=645B9833C1A53C9364F261D2FFFFFFFF&E=1c9e&W=1; NAP=V=1.9&E=1c44&C=akcivvAqGgPkamNz0ldEfRw_bfFGYEcsNusia3ITSXF8b1W3UWv3tA&W=1; PPLState=1; KievRPSSecAuth=FAByBBRaTOJILtFsMkpLVWSG6AN6C/svRwNmAAAEgAAACE2BEL4egNsbMASW9UIQXfqutBopNvzNle6zaYLlNl/hsL2Jac3h4MBVhCQ88oz+vrbCeEcqxZ368alFZ8g5fhmyr6Xrc6F12hY60z8TatM3T7OnV5MYxTTkhHfKylq36sgGUe6EhZ8uYwsarjq/YJg2lNgyi6tbnPRyXi1+4niuEBI3V3dazwb9y57mJZDU1OOIqL25q2cYI0/LnFpFUcwF54fP1t2dh2OLT/fZVsR8tyKtk9tfGfifyTRG5kbsXih1FnlL3s6V2yFkRDGWe7sgWZncFOpT4tIRCgCYwNHuQ23bkuqSj5Ymk7r2v/tg8UlQuqxM3GabGBasY/VWrFav+kTR/rh0yKQdbkVMQlBVoiFnkHip+uM5yS+qlwIuGSJfEHjZ+HPpgrSsSf3ze7agIA0icWf9bqKOBDz8JIAxQPuPKiaPx1PUyQ7gqmWdR/42tnzMj5PtZmLMw8ku95Z8WtnFlG2L6Er4V3FdbU1Wiyo0oxMmYYc64T8xYxS9dUv1Coq+nvBetzHgC0EZM4JNwPGTtpKg96dNW3aNvWq1iPJiRmgPaku53fw8IfjikJwukWHWi1g2074SSgFx8xY/5iT36ok68uUfuxYCUMezNC+g2NOwcmIsjjnT2W8HmbUWiWVVINohqQVPKd4qJ/82LN3FSoEFbaMJV57jN9J11RaHkDjoVhBmgMFNPLkFNy8sbww+4mjf4xp6Jh6vI1GdvjwHFgcy0zIp+M7jvsb/C2sutvp6yoPw39IhSYiGDhs8QyOqc2vHZBOuvWKa0tEC/Te265FVIwHNKGbT4PiTfzSNI0H/Nxcfm7GfJ6fF4mSPR+HFHD8PFopymbOTrfCgiPfBFAphiG1i3dmll0jgxVeNsat19U7xoDj96bhhQokXSwNzNcO2erYzCmXfVOsywDf6zHF+kjIz8I4+ULy3EVzzEbhCfzGMMnlMEvNoLdfKh0phqGO0qhaI7+LJ3vtTIMSDBy7L1VQdD7baTOxiXhdWsinIxA/RWyPE9il7hsETU43bEw1fj/QuwbZqJoMHu2Rp7EF8Knlum9vsg6NmYsLOEYgt+bkGNszVB/J2SJak8NkkRxXnHP/MIHTjc8ymb0CuNFmyZ4z92nfbcM2ZJcHWQdD+sNEyaChp2QLw4Fhtvpqw9ws2tIOH2V3Ge0Av4NzQ2JYvCvPnVlqaxWrdBc3lN7DH+z5rcy/25MFZjkW7az+784QDJnpjKeHZoE27qOCKCnAXVD8UOklfZMmMitqLtSU3u2M5g+d9UtWtLiIWbPP9AwRXu5CFmRx1RxeHL8PwXyo2+Cn8/nrtgJs0Y7MDBAXFb0mdVg98E6Ma17GCix5X2at8egCzl3EOR7mJcCFAT2NQMrAKSYwIyqHWF8vkddhn9Y0TRUlpIMjtrg5ejxH8dTJWLl/GDGAuZfMvZWerMArH0NC+FADNvDoxBXi+LAQQ0N+6AtAsWcRD3A==; 
_U=1iz6DwIeSr1uHIkU7I-pSFbPfAsMnC7wTKjkT0Cbo4-T-Mk8T8PkzDZDMIS1DsVtXhPi5q_GP_LKV8wlaXVOFGydDRQtLcyyvHDgfFHXLZAJo3kaDglWq4zamZopeM0_AWT0mS3LmzkmMO99ng1OoM4E3yqoJCQhXmVJN_hhXjz1x-1trQyfG4yGNRUoG9i8I9FMchwGIapFSdwmcGBGtbw; WLID=E29T04WkWyMhLD8Qab7oErkSwFZe4vAjSWJy7CtfhTctmsR8B5upFhh0nZ6PDldK/N4bU7cpEVO70kVJ7CntuY+qACiq44rIU6y9YQjkzew=; _HPVN=CS=eyJQbiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiUCJ9LCJTYyI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiSCJ9LCJReiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiVCJ9LCJBcCI6dHJ1ZSwiTXV0ZSI6dHJ1ZSwiTGFkIjoiMjAyMy0wNy0yNlQwMDowMDowMFoiLCJJb3RkIjowLCJHd2IiOjAsIkRmdCI6bnVsbCwiTXZzIjowLCJGbHQiOjAsIkltcCI6NH0=; SnrOvr=X=rebateson; USRLOC=HS=1&ELOC=LAT=43.710304260253906|LON=-98.48551940917969|N=%E6%99%AE%E6%9C%97%E9%87%91%E9%A1%BF%EF%BC%8C%E5%8D%97%E8%BE%BE%E7%A7%91%E4%BB%96|ELT=1|; ABDEF=V=13&ABDV=13&MRNB=1690532475452&MRB=0; cct=AtPNzx1w_FSdkdsToLIYWEAeYOSTBrWagktAUqprkJ4lHzXsnPShx3j9XY40Xosyc3OzoBdRnguN8BPxLnkc9A; SUID=A; _EDGE_S=SID=164EE26E8F14621B0A67F1338E4563C7; WLS=C=0911dcf849c41c02&N=Smith; SRCHUSR=DOB=20230726&T=1690604434000&POEX=W; _RwBf=ilt=1&ihpd=1&ispd=0&rc=67&rb=67&gb=0&rg=0&pc=67&mtu=0&rbb=0.0&g=0&cid=&clo=0&v=4&l=2023-07-28T07:00:00.0000000Z&lft=0001-01-01T00:00:00.0000000&aof=0&o=0&p=BINGCOPILOTWAITLIST&c=MR000T&t=4107&s=2023-07-24T15:46:31.7545551+00:00&ts=2023-07-29T04:20:36.6769802+00:00&rwred=0&wls=2&lka=0&lkt=0&TH=&dci=0&W=1&r=1&mta=0&e=fPeEcp-5tY4SeU5r6IWp5lxG3K3Awj4xbTrEHar-qoblAB9rKGwwgQz2psjo2_zrpeQfoTFCatplWNHqWqaqqA&A=; _SS=SID=164EE26E8F14621B0A67F1338E4563C7&R=67&RB=67&GB=0&RG=0&RP=67; GC=AtPNzx1w_FSdkdsToLIYWEAeYOSTBrWagktAUqprkJ6p7gca286Aa5NNb7vGXDonDWfq0k-qY8f2iUCpcUyGTA; ipv6=hit=1690608040581&t=4; SRCHHPGUSR=SRCHLANG=zh-Hans&IG=241DC1B763D54572AB1481C331F4892F&PV=10.0.0&BRW=HTP&BRH=M&CW=927&CH=754&SCW=927&SCH=5800&DPR=1.3&UTC=480&DM=0&HV=1690604437&WTS=63825944964&PRVCW=927&PRVCH=754&EXLTT=8','MUID=37F7249B8FCD6EF92BB037C08EE36F48; MUIDB=37F7249B8FCD6EF92BB037C08EE36F48; SRCHD=AF=NOFORM; 
SRCHUID=V=2&GUID=95335F81D9C046BCB2754DA32F69B849&dmnchg=1; _UR=QS=0&TQS=0; ANON=A=7F66B4E7DCD4F4008ADA6E1EFFFFFFFF&E=1c9f&W=1; PPLState=1; KievRPSSecAuth=FAByBBRaTOJILtFsMkpLVWSG6AN6C/svRwNmAAAEgAAACCF/Ue3zc09gMAQrV8eDFJQjg32yqeHgBh1mqpdAT+0KuiphUvHoo6NjAfuaZ3YrdSFFABWJNSW65kVwVhb87N/O9+3WOnAVE8NmWYHn05oH7XdU7O0N8Bebouh3cLnW1OmnU0A9/tZAhwdCOiUnyebCiHc/Glm9XqgHt8puX/53k0Aq5/9vNCu2zjHMrnA7FKjaulRnytyE3xsPNlpTRhP4P8nCaig9rcjVp1Eqq61xg4KXmIivdgBjfvk5CBEwwJA7nM+JFEha+TJx51Tj2xuxsqRDOyTvjD8LXf+LwV1DOfOHdiK7l0WJKdEzhDZE1DjPf+Dcp89yn1GVrHsmjdhtRmmkLDpHk0jOZP34VIcOtL7I42Kd/CKh5IJQ4+Vouib7Iuq0vmxemjTnTcbswH48Gnvb2RKsHblh6Xmcb+i0kKpu+2DgnSCiQi74ZZjElY77podiOUuVhnWS0Jc5eioJ33es2kdWMRo3BoWEAW+hnzPV9b0oOUaKANXDTgoTnmqyxV+AWAxADnnL7AmFT19onokRhdsKaM6iS6i9W55o2Sz3vTW84rgLe7pCwuxuEE1g595kSQpat0FZz4OzfvdGP1RihbVM0svLBPWvaeSktJYmVsOQ3bIQOVN+zAY/sNcGVDWskJWCqjpRDomEDXC9HSrMVg+HEr/W/+hk6ZX3oipYFOr7mgRtSz7jHDvjwzYaJLLH0FETRjSKj2N/ohv8MfpIXhEkA5wm8fHk8hw3GrhGuljEAyPO7NftcKr1UhhB23a9ZOwM4bR3u+FIk/A71WVNu+bEytwpKML05wMJy6fGzeMW8UF+epWEfZGDsUAPTsLYayVolpB4e8By6YMCvUIVjLdd4/cc/NLRvs216IxIEFhqra953vC5NzP/Cwz2mkgCmcduh6rgv8yhgICQ5tOy3IPuzJPoBzpWuBfJ51q/Dyf8xF4oSvrLmPZEz30w+uaFLa3wsmztsG+mIUIKpBbujv5FB6uEijcem0Sa4t3sQbMQHPiDUkTPnEZSbzKorMYYwQeIjrWkUojacPGOp95DkfEWlveyjfzX9ArBhWd/uA2Gkml6BQAHdArU7HTGBko0fyefuZ4qD+q7Qyt0+QprvwDFDX0HxwybtQB9zJBB91M+mOCEO/twT9SCI29MeBdjhHYQhhPPJMmbkzb7Nb/aeMtUV+UmKDZdC6Q8VRiTLyHAT+RAOHZaTjTznC+ShzP2g8cX8Cmi8QHcIxmazAZor6ZEjBSysM/7EBmw0aRZ6VhTiFGbFp34QvR42dZvI/xJav5vDETkUkA4Yfzep8UScNgJXjZqu+r1b7VPfmMGuOprhGn7criB9XyLoabsvmz7hoLyb030Y7Y+CljHF3v4+bSoGcL/YPG1mXoiMtOYEKoNaiA1dN876/eL1X5XJBZKkY+G6sDfPJfZH3Zp9OntsDdxnYEJFADDlTGRTqRKLopRgvmMPveoYj/7tg==; _U=18zdJpcOM6t1jfs6O7pPyJtaF1Pcla2T-xnsUFKmFGpENRX3wpzRKavRSfYLpJdb9OxDGD5fqv8eRTd5YMM0IbTPFH0RoW2E8E8YbbI8QEYgx9-EGh2BCpbQhE0ataHVnO4kN3QZj-flMnj5YK-xbbfg4-faSeBgIm4l12jVGz06WPI1IOwU2EU37ap8gusm6zdACsyTNY_qN-KK7gnQwjeoEHcw0lJAvh7N2fEcZTvI; 
WLID=F7198T5aDAqsbhT1lyRdcQX2oDHdv674MHw09DTfeIbZ3ynu8AxwEFvIG4MZaIUw15A+sluvqJsrYQGb2N/cvjr5FmPjjs9LTK6b5atYG8M=; _HPVN=CS=eyJQbiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiUCJ9LCJTYyI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiSCJ9LCJReiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiVCJ9LCJBcCI6dHJ1ZSwiTXV0ZSI6dHJ1ZSwiTGFkIjoiMjAyMy0wNy0yN1QwMDowMDowMFoiLCJJb3RkIjowLCJHd2IiOjAsIkRmdCI6bnVsbCwiTXZzIjowLCJGbHQiOjAsIkltcCI6M30=; USRLOC=HS=1&ELOC=LAT=43.710304260253906|LON=-98.48551940917969|N=%E6%99%AE%E6%9C%97%E9%87%91%E9%A1%BF%EF%BC%8C%E5%8D%97%E8%BE%BE%E7%A7%91%E4%BB%96|ELT=1|; SnrOvr=X=rebateson; ABDEF=V=13&ABDV=13&MRNB=1690532539329&MRB=0; SUID=A; WLS=C=84f89dfd83bfd741&N=Smith; _EDGE_S=SID=027784E1E18263852BF897BCE09062E0; SRCHUSR=DOB=20230727&T=1690604562000&POEX=W; _RwBf=ilt=2&ihpd=0&ispd=1&rc=0&rb=0&gb=0&rg=0&pc=0&mtu=0&rbb=0.0&g=0&cid=&clo=0&v=3&l=2023-07-28T07:00:00.0000000Z&lft=0001-01-01T00:00:00.0000000&aof=0&o=0&p=BINGCOPILOTWAITLIST&c=MR000T&t=1799&s=2023-07-27T07:10:22.7171259+00:00&ts=2023-07-29T04:22:46.6627522+00:00&rwred=0&wls=2&lka=0&lkt=0&TH=&dci=0&W=1&r=1&mta=0&e=GJe87IrXcmQRHN3bICBR-gNbDsx4TP7q-rQrUEYM0cuvsL8EgWetIvrL5Xz9dZdIhUKT27u8U56UwMYR4KwRRQ&A=; _SS=SID=3C8878AC6DB76CD822456BF16C006DBC&R=0&RB=0&GB=0&RG=0&RP=0; ipv6=hit=1690608168189&t=4; GC=-tMw9SSxs052FGdJoWta7F_KycXU7Jm-ax_eoNXKn8RzeDY9lFHndGmr7l2SaxzUQWXdH2fySLau-GZwKHnh1Q; cct=-tMw9SSxs052FGdJoWta7F_KycXU7Jm-ax_eoNXKn8TxYPHORoTMPn9k_VjT7OkxpJqm1JqTFZSNJQoIdLFqeQ; SRCHHPGUSR=SRCHLANG=zh-Hans&IG=177DB3DD380D4799B33BFB549A75E2A1&PV=10.0.0&BRW=HTP&BRH=M&CW=927&CH=754&SCW=1164&SCH=3795&DPR=1.3&UTC=480&DM=0&WTS=63826037984&HV=1690604567&PRVCW=1482&PRVCH=754&EXLTT=6','MUID=1AB8B5F154E660D1186CA6AD557F6112; MUIDB=1AB8B5F154E660D1186CA6AD557F6112; _EDGE_V=1; SRCHD=AF=NOFORM; SRCHUID=V=2&GUID=AF3645A9D6044A6EB5EA6C23A00AEAFC&dmnchg=1; _UR=QS=0&TQS=0; MicrosoftApplicationsTelemetryDeviceId=54b32a89-dd37-4f54-8616-7df466a80a3b; ANON=A=4648389BAD678A4DC268B12FFFFFFFFF&E=1ca0&W=1; 
NAP=V=1.9&E=1c46&C=QYC-TsNC5BDgfCTL8z2ibQf0BftAOaZ2EL-fRA6OmPWW4RCx-V175g&W=1; PPLState=1; KievRPSSecAuth=FAB6BBRaTOJILtFsMkpLVWSG6AN6C/svRwNmAAAEgAAACKbjerwSPUbLOAS2nVP9Zbn9iq0ygXK1vrHWLrvU9hUtPkd48YK/JVNcoFgZg1vpV9xCv87kRs72ryCSX7vIRR9kIvtPMKR+ShvRLB1Ty5EI/mrKw6dPgtyZ5aaO4xL/gXi1YXubz+Z2dIgeTO5dCMrMXVYTz3mpFaXIG3p9Q0GYSLf6yfLqJh2VhDo4XzLSIIK/sd8QsxdngxCUS9pKHuOusk3OWVUMOV2szgkyOFNaewkem30UY0WPO5m/L5BTy/0hBEkTXMbD19Q4rDTmVKnUVuEcTNC34mm7DdHZ5WIZsWt53o6nAdqbf7R6Rl2wGUUFCG6avMDkcMQMRfI69+6bXPaB88umo5nZAR3IJhhHy+F5e6HNOzH1IlqrTQAqtmmjrRg6tINKPXqDaq3c+a7jM3yZ82f9d2wkpb/Q8b2E6j72dDJBhQ9+mJoqJERHKW8oLz1IIVH7HiASjFAzhq0q3nreQVHrc8rffB9aHSOLYL3eh8hqkHg2vW5CZijZrHcawfm/bp2e74QxMZVXX7nzfLtfZgUEWpkFa06p77irDZCSQa409yolXkXYhtfNz4uKHbmPilBImUYhufU/J4oFEDBZ6qv0B9BnXyPS7EF5HL7w6gN5C1bdRPQuXLd+J7DNikLV+uAlLnAf7XE21eZWoT4eUlJc7Woj//4rwoAieh3cXfbIYs5iHhLExCzqu87ksBIMGgkqQuHUTJT5DvPP6orAIXXIFAsr/W9T1QtxIsLZFEk8gFpjMbfGHffYthj0R30lmKrwpVRI84g6+DxzhutNMBvthqapSdrxjNYOZHk5vflERE/j1EaPuN/szp41xZL80B1LRO5XQJrolVP/9Hg2jdRk4MNREq3dLVwQM3MmHsfe/SgHqnyvjqwPFdehpVbvPak7JPh7PVFWofsUc3l2D1ijdWQMTZ5neLVjt2lEVlxW99VCllnlvUSyqUe51QCCnNt1+XGCE8b9oFct+FyG+V+zRGGVfUDTGPWaqcyN+eBaZAH7tiBaZCtAarNUGs/+4DUanbZ64S5mJqvkB7Rfck5cUgayXDMiMNsDwMhhWOOliAqq7UwDCRnRZ5dpk0cuBxHNzoUtAOloOZDPuKRC/nWlhV1s8DDP/e+RF00AWcHhLDub3VKWix2LD9eZmLB7kaiIapaGxQ7famTXP3EBp2nwbhlMbYzRbLxi4XgO/4DLuPUo+dxpQQotQ693fJr67KqiSmA+qjBzPegPUlOoNhyGzIuA1S/ogjJDxVsdL/I1JYUxGag8rW/A7SdGWunwzuspd/Lxj70FDER4G5/JOPPPmu22HSQwjjQmdkHDANJkhk1dZ2Mnw4FDQxlqNIVgRUmsIXTgte7smnhQnq++FfcsLcLlFLj+UvI3WYRTQESmmxZMLAKJiHaun+AOCksSWxuXn6fklncJakTQ+uadoZvpGNZA2SWA/GprNB4UAJOf/b/m58BhVp8FkAHqfwtvJ//I; _U=1RRT3N9897Unc7lqwcayUJRg0weqoRK6E-SaDZRxU1pKFMclk5dWrUDf6-x3SR65fFc6E6rjcAjteB9mVB7VdDIzmBnYVJAOqaGSEYye779KvpIUg3_eT0o3E1fuAYYjD-iiQxMwd8FCKLUAEND3lix-FhQ6EW3xCnMH5ofgI8Sy_yEX9g4Ehr-Fp6DUINSYIewZx_B_b1ax96fCKIHtoIJFPqWDJExCU9V20QP0vShA; WLID=5HKjNyzHgwVTvOJiZUJHhRCklFwQ4ZXLJJceSE+qUcLyP9u3YEBfXva8bI3wUs+JM5iGsAycmq0o1trJrLmvq/GMUcYntHG5viNrJPTwm5M=; 
_HPVN=CS=eyJQbiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiUCJ9LCJTYyI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiSCJ9LCJReiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiVCJ9LCJBcCI6dHJ1ZSwiTXV0ZSI6dHJ1ZSwiTGFkIjoiMjAyMy0wNy0yOFQwMDowMDowMFoiLCJJb3RkIjowLCJHd2IiOjAsIkRmdCI6bnVsbCwiTXZzIjowLCJGbHQiOjAsIkltcCI6NH0=; USRLOC=HS=1&ELOC=LAT=43.710304260253906|LON=-98.48551940917969|N=%E6%99%AE%E6%9C%97%E9%87%91%E9%A1%BF%EF%BC%8C%E5%8D%97%E8%BE%BE%E7%A7%91%E4%BB%96|ELT=1|; SnrOvr=X=rebateson; SUID=A; _EDGE_S=SID=13A3D83641176C13042DCB6B40BC6D4E; WLS=C=d5accf9efdba999e&N=Smith; SRCHUSR=DOB=20230728&T=1690604674000&POEX=W; _SS=SID=13A3D83641176C13042DCB6B40BC6D4E&R=0&RB=0&GB=0&RG=0&RP=0; ipv6=hit=1690608283881&t=4; cct=Km5OnV7eaN4oiWil_hvlJzV-PnLeJYnPXh9Vqy9Dyf__yp_OzsfQ_63jJKvCib8K74n1_9eGhUW8xIe7ESJm_A; ABDEF=V=13&ABDV=13&MRNB=1690604710476&MRB=0; _RwBf=ilt=3&ihpd=2&ispd=1&rc=0&rb=0&gb=0&rg=0&pc=0&mtu=0&rbb=0.0&g=0&cid=&clo=0&v=4&l=2023-07-28T07:00:00.0000000Z&lft=0001-01-01T00:00:00.0000000&aof=0&o=0&p=BINGCOPILOTWAITLIST&c=MR000T&t=5015&s=2023-07-28T03:49:25.0212949+00:00&ts=2023-07-29T04:25:12.3038594+00:00&rwred=0&wls=2&lka=0&lkt=0&TH=&dci=0&W=1&r=1&mta=0&e=5hGuvH-hgROgFrAFCyoprGS3tojdQpS-9dprbxKJNPJf5adOY1BMnPReepJYVlCsjKVDlVgwBQSEBQYXrjykKNh4LPcOEPxOqKQ9fMQDo_w&A=; SRCHHPGUSR=SRCHLANG=zh-Hans&IG=A6F1DD36E2E4449090EBC3B4F244A9B0&PV=10.0.0&BRW=HTP&BRH=M&CW=927&CH=754&SCW=1164&SCH=2643&DPR=1.3&UTC=480&DM=0&WTS=63826112199&HV=1690604714&PRVCW=927&PRVCH=754&EXLTT=7; GC=Km5OnV7eaN4oiWil_hvlJzV-PnLeJYnPXh9Vqy9Dyf-XQIULVApCO45kebIJBakbfE_P68YcvgNWtVzth_FNZg']
|
24 |
+
# TLS context for the websocket connection, backed by certifi's CA bundle so
# certificate verification does not depend on the host system's trust store.
ssl_context = ssl.create_default_context()
ssl_context.load_verify_locations(certifi.where())
|
26 |
+
|
27 |
+
|
28 |
+
class optionsSets:
    """Option payloads sent with the first ChatHub message."""

    # Shape description of an option set: a tone plus a list of flag strings.
    optionSet: dict = {
        'tone': str,
        'optionsSets': list
    }

    # Flag set used for every request (the "jailbreak" preset).
    jailbreak: dict = {
        "optionsSets": [
            'saharasugg',
            'enablenewsfc',
            'clgalileo',
            'gencontentv3',
            "nlu_direct_response_filter",
            "deepleo",
            "disable_emoji_spoken_text",
            "responsible_ai_policy_235",
            "enablemm",
            # Fix: a missing comma after "h3precise" made Python implicitly
            # concatenate it with "dtappid" into one bogus flag
            # "h3precisedtappid", silently dropping both intended flags.
            "h3precise",
            # "harmonyv3",
            "dtappid",
            "cricinfo",
            "cricinfov2",
            "dv3sugg",
            "nojbfedge"
        ]
    }
|
54 |
+
|
55 |
+
|
56 |
+
class Defaults:
    """Static request parameters for the ChatHub protocol."""

    # Record separator placed between JSON frames on the websocket.
    delimiter = '\x1e'
    # Random address in 13.104-107.x.x, sent via x-forwarded-for.
    # NOTE(review): presumably chosen to look like a Microsoft-range IP —
    # confirm the range is still accepted.
    ip_address = f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'

    # Message types the client declares it will accept.
    allowedMessageTypes = [
        'Chat',
        'Disengaged',
        'AdsQuery',
        'SemanticSerp',
        'GenerateContentQuery',
        'SearchQuery',
        'ActionRequest',
        'Context',
        'Progress',
        'AdsQuery',
        'SemanticSerp'
    ]

    # Opaque A/B-test slice identifiers forwarded verbatim to the service.
    sliceIds = [

        # "222dtappid",
        # "225cricinfo",
        # "224locals0"

        'winmuid3tf',
        'osbsdusgreccf',
        'ttstmout',
        'crchatrev',
        'winlongmsgtf',
        'ctrlworkpay',
        'norespwtf',
        'tempcacheread',
        'temptacache',
        '505scss0',
        '508jbcars0',
        '515enbotdets0',
        '5082tsports',
        '515vaoprvs',
        '424dagslnv1s0',
        'kcimgattcf',
        '427startpms0'
    ]

    # Spoofed geolocation merged into the outgoing user message.
    location = {
        'locale': 'en-US',
        'market': 'en-US',
        'region': 'US',
        'locationHints': [
            {
                'country': 'United States',
                'state': 'California',
                'city': 'Los Angeles',
                'timezoneoffset': 8,
                'countryConfidence': 8,
                'Center': {
                    'Latitude': 34.0536909,
                    'Longitude': -118.242766
                },
                'RegionType': 2,
                'SourceType': 1
            }
        ],
    }
|
119 |
+
|
120 |
+
|
121 |
+
def _format(msg: dict) -> str:
    """Serialize *msg* as JSON (non-ASCII kept literal) and append the
    ChatHub record separator so frames can be split on the wire."""
    serialized = json.dumps(msg, ensure_ascii=False)
    return serialized + Defaults.delimiter
|
123 |
+
|
124 |
+
|
125 |
+
async def create_conversation():
    """Create a new Bing conversation via the proxy endpoint.

    Returns:
        (conversationId, clientId, conversationSignature) on success.

    Raises:
        Exception: when no attempt out of 5 yields all three fields.

    Fix: the original returned unconditionally at the end of the first loop
    iteration (the retry loop was dead code), and its failure check
    `not a or not b or not c and _ == 4` bound `and` tighter than `or`, so it
    raised on the first missing conversationId instead of retrying. The loop
    now genuinely retries until a complete triple is obtained.
    """
    for attempt in range(5):
        create = requests.get('https://bing.lemonsoftware.eu.org/turing/conversation/create',
            headers={
                'authority': 'edgeservices.bing.com',
                'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
                'accept-language': 'en-US,en;q=0.9',
                'cache-control': 'max-age=0',
                'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
                'sec-ch-ua-arch': '"x86"',
                'sec-ch-ua-bitness': '"64"',
                'sec-ch-ua-full-version': '"110.0.1587.69"',
                'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-model': '""',
                'sec-ch-ua-platform': '"Windows"',
                'sec-ch-ua-platform-version': '"15.0.0"',
                'sec-fetch-dest': 'document',
                'sec-fetch-mode': 'navigate',
                'sec-fetch-site': 'none',
                'sec-fetch-user': '?1',
                'upgrade-insecure-requests': '1',
                'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
                'x-edge-shopping-flag': '1',
                'x-forwarded-for': Defaults.ip_address,
                # Rotate through the baked-in account cookies.
                'Cookie': random.choice(cookies)
            })

        payload = create.json()
        conversationId = payload.get('conversationId')
        clientId = payload.get('clientId')
        conversationSignature = payload.get('conversationSignature')

        if conversationId and clientId and conversationSignature:
            return conversationId, clientId, conversationSignature

    raise Exception('Failed to create conversation.')
|
161 |
+
|
162 |
+
|
163 |
+
async def stream_generate(prompt: str, mode: optionsSets.optionSet = optionsSets.jailbreak, context: bool or str = False):
    """Stream a Bing chat answer for *prompt* over the ChatHub websocket.

    Yields incremental text deltas (each yield is the newly received text
    since the previous yield). *context* may be a pre-formatted string of
    prior messages (see convert()) or False for a fresh conversation.
    """
    timeout = aiohttp.ClientTimeout(total=900)
    session = aiohttp.ClientSession(timeout=timeout)

    conversationId, clientId, conversationSignature = await create_conversation()

    # Connect to the Sydney ChatHub proxy with browser-mimicking headers.
    wss = await session.ws_connect('wss://sydney.lemonsoftware.eu.org/sydney/ChatHub', ssl=ssl_context, autoping=False,
        headers={
            'accept': 'application/json',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'sec-ch-ua': '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"',
            'sec-ch-ua-arch': '"x86"',
            'sec-ch-ua-bitness': '"64"',
            'sec-ch-ua-full-version': '"109.0.1518.78"',
            'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-model': '',
            'sec-ch-ua-platform': '"Windows"',
            'sec-ch-ua-platform-version': '"15.0.0"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'x-ms-client-request-id': str(uuid.uuid4()),
            'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
            'Referer': 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx',
            'Referrer-Policy': 'origin-when-cross-origin',
            'x-forwarded-for': Defaults.ip_address
        })

    # SignalR protocol handshake; the first frame back is discarded.
    await wss.send_str(_format({'protocol': 'json', 'version': 1}))
    await wss.receive(timeout=900)

    # Initial chat invocation: option flags, spoofed location, and the
    # user's prompt, all tied to the freshly created conversation.
    struct = {
        'arguments': [
            {
                **mode,
                'source': 'cib',
                'allowedMessageTypes': Defaults.allowedMessageTypes,
                'sliceIds': Defaults.sliceIds,
                'traceId': os.urandom(16).hex(),
                'isStartOfSession': True,
                'message': Defaults.location | {
                    'author': 'user',
                    'inputMethod': 'Keyboard',
                    'text': prompt,
                    'messageType': 'Chat'
                },
                'conversationSignature': conversationSignature,
                'participant': {
                    'id': clientId
                },
                'conversationId': conversationId
            }
        ],
        'invocationId': '0',
        'target': 'chat',
        'type': 4
    }

    # Prior conversation turns are injected as a single WebPage context blob.
    if context:
        struct['arguments'][0]['previousMessages'] = [
            {
                "author": "user",
                "description": context,
                "contextType": "WebPage",
                "messageType": "Context",
                "messageId": "discover-web--page-ping-mriduna-----"
            }
        ]

    await wss.send_str(_format(struct))

    final = False            # set once a type-2 (final) frame arrives
    draw = False             # image-drawing mode flag (never set True here)
    resp_txt = ''            # full text accumulated so far (with links)
    result_text = ''         # accumulated non-chat (e.g. progress) lines
    resp_txt_no_link = ''    # full text without adaptive-card link markup
    cache_text = ''          # what has already been yielded to the caller

    while not final:
        msg = await wss.receive(timeout=900)
        # Each websocket message can carry several \x1e-separated JSON frames.
        objects = msg.data.split(Defaults.delimiter)

        for obj in objects:
            if obj is None or not obj:
                continue

            response = json.loads(obj)
            # type 1: incremental update frames carrying partial message text.
            if response.get('type') == 1 and response['arguments'][0].get('messages',):
                if not draw:
                    # Apology frames mean the message was censored; skip them.
                    if (response['arguments'][0]['messages'][0]['contentOrigin'] != 'Apology') and not draw:
                        resp_txt = result_text + \
                            response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get(
                                'text', '')
                        resp_txt_no_link = result_text + \
                            response['arguments'][0]['messages'][0].get(
                                'text', '')

                        # Non-chat frames (e.g. progress notices) append their
                        # inline text permanently to result_text.
                        if response['arguments'][0]['messages'][0].get('messageType',):
                            resp_txt = (
                                resp_txt
                                + response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
                                + '\n'
                            )
                            result_text = (
                                result_text
                                + response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
                                + '\n'
                            )

                # Heuristic early stop when the already-yielded text ends in a
                # space. NOTE(review): looks fragile — confirm intent.
                if cache_text.endswith(' '):
                    final = True
                    if wss and not wss.closed:
                        await wss.close()
                    if session and not session.closed:
                        await session.close()

                # Yield only the delta since the last yield.
                yield (resp_txt.replace(cache_text, ''))
                cache_text = resp_txt

            # type 2: final frame with the complete result (or an error).
            elif response.get('type') == 2:
                if response['item']['result'].get('error'):
                    if wss and not wss.closed:
                        await wss.close()
                    if session and not session.closed:
                        await session.close()

                    raise Exception(
                        f"{response['item']['result']['value']}: {response['item']['result']['message']}")

                if draw:
                    cache = response['item']['messages'][1]['adaptiveCards'][0]['body'][0]['text']
                    response['item']['messages'][1]['adaptiveCards'][0]['body'][0]['text'] = (
                        cache + resp_txt)

                # If the final frame retracts the answer (Apology) but we
                # already streamed text, restore the streamed content.
                if (response['item']['messages'][-1]['contentOrigin'] == 'Apology' and resp_txt):
                    response['item']['messages'][-1]['text'] = resp_txt_no_link
                    response['item']['messages'][-1]['adaptiveCards'][0]['body'][0]['text'] = resp_txt

                    # print('Preserved the message from being deleted', file=sys.stderr)

                final = True
                if wss and not wss.closed:
                    await wss.close()
                if session and not session.closed:
                    await session.close()
|
310 |
+
|
311 |
+
|
312 |
+
def run(generator):
    """Drive an async generator from synchronous code, yielding each item.

    Creates a private event loop and steps the async generator one item at
    a time with run_until_complete.

    Fix: the original never closed the loop it created with
    asyncio.new_event_loop(), leaking the loop (and its selector fd) on
    every call; the loop is now closed in a finally block, which also runs
    if the consumer abandons the generator early.
    """
    loop = asyncio.new_event_loop()
    gen = generator.__aiter__()

    try:
        while True:
            try:
                yield loop.run_until_complete(gen.__anext__())
            except StopAsyncIteration:
                break
    finally:
        loop.close()
|
325 |
+
|
326 |
+
|
327 |
+
def convert(messages):
    """Flatten chat messages into Bing's "[role](#message)\\ncontent" context
    string, one blank-line-separated entry per message."""
    parts = ["[%s](#message)\n%s\n\n" % (m['role'], m['content'])
             for m in messages]
    return "".join(parts)
|
335 |
+
|
336 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Dispatch to image generation ('dall-e') or chat (any other model).

    For 'dall-e': submits the last message as an image prompt and polls the
    async results page for up to 60 s. For chat: streams text deltas from
    stream_generate(), using all but the last message as context.
    """
    if model == 'dall-e':
        HEADERS = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "max-age=0",
            "content-type": "application/x-www-form-urlencoded",
            "referrer": "https://www.bing.com/images/create/",
            "origin": "https://www.bing.com",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edge/110.0.1587.63",
        }
        # Image-creation query string; 'q' is the prompt (last user message).
        params = {
            't':int(round(time.time() * 1000)),
            're': 1,
            'showselective': 1,
            'sude': 1,
            'kseed': 7500,
            'SFX': 2,
            'q': messages[-1]['content']
        }
        r = requests.get("https://bing.lemonsoftware.eu.org/images/create",params=params,headers=HEADERS)
        try:
            # The redirect URL carries the generation job id as ?id=...
            # NOTE(review): bare except + shadowing of builtin `id` — any
            # failure (network included) is reported as a content rejection.
            id = urlparse.parse_qs(urlparse.urlparse(r.url).query)['id'][0]
        except:
            yield 'Image generation error. This may be because your image is illegal or our service has malfunctioned.'
            return
        image_urls = set()
        t = 0
        # Poll the async results page every 0.5 s until 4 images are found
        # or 60 s elapse.
        while len(image_urls)<4 and t<60:
            time.sleep(0.5)
            t += 0.5
            r = requests.get("https://bing.lemonsoftware.eu.org/images/create/async/results/"+id+"?q="+params['q'],headers=HEADERS)
            soup = BeautifulSoup(r.text, 'html.parser')
            img_tags = soup.find_all('img')
            for img_tag in img_tags:
                src = img_tag.get('src')
                if src:
                    image_urls.add(src)
        if not image_urls:
            yield 'Image generation error. This is because our service has malfunctioned.'
            return
        for img in image_urls:
            # NOTE(review): yields empty strings and never uses `img` — the
            # collected URLs are discarded. Likely should yield the URL (or a
            # markdown image); confirm the intended output format upstream.
            yield ''
    else:
        # Chat path: first message alone means no context; otherwise all but
        # the last message are flattened into Bing's context format.
        if len(messages) < 2:
            prompt = messages[0]['content']
            context = False

        else:
            prompt = messages[-1]['content']
            context = convert(messages[:-1])

        # Bridge the async stream into this sync generator.
        response = run(stream_generate(prompt, optionsSets.jailbreak, context))
        for token in response:
            yield (token)
|
391 |
+
|
392 |
+
|
393 |
+
# Introspection string listing each _create_completion parameter with its
# annotated type, e.g. "g4f.Providers.Bing supports: (model: str, ...)".
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Bingo.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import requests
import uuid
import json
import re
import os
from ...typing import sha256, Dict, get_type_hints

url = 'https://hf4all-bingo.hf.space/'
model = ['bing']
supports_stream = True
needs_auth = False
working = True

# Bing's chathub protocol delimits JSON frames with the ASCII record
# separator (0x1E).  The previous inline literal was an invisible control
# character; ``str.split('')`` with a truly empty separator raises
# ValueError, so the delimiter must be explicit.
# NOTE(review): restored from the chathub protocol — confirm against a
# live response.
_RECORD_SEPARATOR = '\x1e'

_UA = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.183'
_COOKIE_TEMPLATE = ('BING_COOKIE=%s; BING_UA=Mozilla%%2F5.0%%20(Windows%%20NT%%2010.0%%3B%%20Win64%%3B%%20x64)%%20AppleWebKit%%2F537.36%%20'
                    '(KHTML%%2C%%20like%%20Gecko)%%20Chrome%%2F115.0.0.0%%20Safari%%2F537.36%%20Edg%%2F115.0.1901.183; BING_IP=11.105.176.100')


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Stream a Bing ("Sydney") reply for the last message in *messages*.

    Creates a conversation on the hf4all proxy (retrying once, as the
    endpoint intermittently returns a non-JSON body), posts the prompt,
    then yields incremental text deltas, skipping search-status frames.
    """
    bing_cookie = str(uuid.uuid4())
    base_headers = {
        'user-agent': _UA,
        'Cookie': _COOKIE_TEMPLATE % bing_cookie,
    }

    def _new_conversation():
        return requests.post('https://hf4all-bingo.hf.space/api/create',
                             headers=base_headers)

    create = _new_conversation()
    try:
        conversation = create.json()
    except ValueError:
        # The create endpoint occasionally answers with an empty/HTML
        # body; a single retry was the original recovery strategy.
        create = _new_conversation()
        conversation = create.json()

    payload = {
        "conversationId": conversation.get('conversationId'),
        "conversationSignature": conversation.get('conversationSignature'),
        "clientId": conversation.get('clientId'),
        "invocationId": 0,
        "conversationStyle": "Creative",
        "prompt": messages[-1]['content'],
    }
    sydney_headers = dict(base_headers)
    sydney_headers['Content-Type'] = 'application/json'
    sydney = requests.post('https://hf4all-bingo.hf.space/api/sydney',
                           data=json.dumps(payload), stream=True,
                           headers=sydney_headers)

    last_text = ''
    for frame in sydney.text.split(_RECORD_SEPARATOR):
        if not frame:
            continue
        try:
            text = json.loads(frame)['arguments'][0]['messages'][0]['text']
        except (ValueError, KeyError, IndexError, TypeError):
            # Keep-alive / type-2 frames carry no delta text.
            continue
        delta = text[len(last_text):]
        last_text = text
        if 'Searching the web for:' not in delta:
            yield delta


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/ChatFree.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os, requests
from ...typing import sha256, Dict, get_type_hints
import json

url = "https://v.chatfree.cc"
model = ['gpt-3.5-turbo','gpt-3.5-turbo-0301','gpt-3.5-turbo-0613','gpt-3.5-turbo-16k','gpt-3.5-turbo-16k-0613']
supports_stream = True
needs_auth = False
working = True

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Stream completion deltas from the chatfree.cc OpenAI-style endpoint.

    Sampling parameters are now taken from **kwargs (consistent with the
    EasyChat/EzChat providers) with the previously hard-coded values as
    defaults, so existing callers see identical requests.
    """
    headers = {
        'authority': 'v.chatfree.cc',
        'accept': '*/*',
        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'content-type': 'application/json',
        'origin': 'https://v.chatfree.cc',
        'referer': 'https://v.chatfree.cc/',
        'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        'x-requested-with': 'XMLHttpRequest',
    }

    json_data = {
        'messages': messages,
        'stream': True,
        'model': model,
        # Defaults preserve the previous hard-coded request exactly.
        'temperature': kwargs.get('temperature', 0.5),
        'presence_penalty': kwargs.get('presence_penalty', 0),
        'frequency_penalty': kwargs.get('frequency_penalty', 0),
        'top_p': kwargs.get('top_p', 1),
    }

    response = requests.post('https://v.chatfree.cc/api/openai/v1/chat/completions',
                             headers=headers, stream=True, json=json_data)

    # SSE stream: each useful line looks like `data: {...}` with a delta.
    for chunk in response.iter_lines():
        if b'content' in chunk:
            data = json.loads(chunk.decode().split('data: ')[1])
            yield (data['choices'][0]['delta']['content'])

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/ChatGPTunli.py
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import requests,json
from ...typing import sha256, Dict, get_type_hints

url = 'https://www.chatgptunli.com/chatgpt/'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
working = True

headers = {
    'Origin':'https://www.chatgptunli.com',
    'Referer':'https://www.chatgptunli.com/chatgpt/',
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
    'Content-Type':'application/json',
}

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Stream tokens from the chatgptunli.com chat widget endpoint.

    Posts the full message history plus the newest message, then relays
    every SSE event of type 'live' as a text delta.
    """
    request_body = {
        "id": "default",
        "botId": "default",
        "session": "N/A",
        "clientId": "wc6c7y0t45",
        "contextId": 382,
        "messages": messages,
        "newMessage": messages[-1]['content'],
        "stream": True,
    }
    resp = requests.post(
        'https://www.chatgptunli.com/wp-json/mwai-ui/v1/chats/submit',
        json=request_body, headers=headers, stream=True)

    for raw_line in resp.iter_lines():
        # Skip keep-alives and anything that is not an SSE data frame.
        if not raw_line or not raw_line.startswith(b'data:'):
            continue
        event = json.loads(raw_line.decode()[5:])
        if event['type'] == 'live':
            yield event['data']

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/ChatgptAi.py
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
from curl_cffi import requests
import re
from ...typing import sha256, Dict, get_type_hints

url = 'https://chatgpt.ai/gpt-4/'
model = ['gpt-4']
supports_stream = False
needs_auth = False
working = True


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Yield a single GPT-4 reply from the chatgpt.ai WordPress widget.

    The page is scraped for the AJAX nonce/post/bot identifiers before
    the conversation transcript is submitted in one shot (no streaming).
    """
    # Flatten the message history into a role-prefixed transcript.
    transcript = '\n'.join(
        '%s: %s' % (m['role'], m['content']) for m in messages)
    transcript += '\nassistant: '

    landing = requests.get('https://chatgpt.ai/')
    # The widget embeds its AJAX credentials as data-* attributes.
    nonce, post_id, _, bot_id = re.findall(r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width', landing.text)[0]

    request_headers = {
        'authority': 'chatgpt.ai',
        'accept': '*/*',
        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'cache-control': 'no-cache',
        'origin': 'https://chatgpt.ai',
        'pragma': 'no-cache',
        'referer': 'https://chatgpt.ai/gpt-4/',
        'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
    }
    form = {
        '_wpnonce': nonce,
        'post_id': post_id,
        'url': 'https://chatgpt.ai/gpt-4',
        'action': 'wpaicg_chat_shortcode_message',
        'message': transcript,
        'bot_id': bot_id,
    }

    answer = requests.post('https://chatgpt.ai/wp-admin/admin-ajax.php',
                           headers=request_headers, data=form, impersonate='chrome110')

    yield (answer.json()['data'])

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Chimera.py
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import re
import os
from translate import Translator
import openai
import openai.error
from dotenv import load_dotenv
from ...typing import sha256, Dict, get_type_hints

load_dotenv()
openai.api_key = '28MJ7F_t5nxPj9dgyGUJXj8Yb1L4Y1ZdxCU06CVa5Uw'
openai.api_base = "https://chimeragpt.adventblocks.cc/api/v1"

url = 'https://chimeragpt.adventblocks.cc/'
model = [
    'gpt-3.5-turbo',
    'gpt-3.5-turbo-0301',
    'gpt-3.5-turbo-16k',
    'gpt-4',
    'gpt-4-0314',
    'gpt-4-32k',
    'llama-2-70b-chat',
    'kandinsky'
]

supports_stream = True
needs_auth = False
working = True

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Proxy chat (or 'kandinsky' image) requests through the Chimera API.

    For 'kandinsky' the prompt is machine-translated to English first;
    chat models are forwarded verbatim.  429 responses are unwrapped into
    their human-readable detail message.
    """
    try:
        if model == 'kandinsky':
            prompt = messages[-1]['content']
            translated = Translator(from_lang='autodetect', to_lang="en").translate(prompt)
            # The translate package signals a same-language request with
            # this sentinel string; fall back to the raw prompt then.
            if translated == 'PLEASE SELECT TWO DISTINCT LANGUAGES':
                translated = prompt
            # BUG FIX: the translated prompt was previously computed and
            # then discarded (the raw prompt was sent instead).
            response = openai.Image.create(prompt=translated, n=1, size="1024x1024")
            # NOTE(review): an empty string is yielded on success, as
            # before — presumably the caller renders the image elsewhere.
            yield ''
        else:
            response = openai.ChatCompletion.create(
                model=model,
                messages=messages,
                stream=stream
            )
            if stream:
                for chunk in response:
                    yield chunk.choices[0].delta.get("content", "")
            else:
                yield response.choices[0]['message'].get("content", "")

    except openai.error.APIError as e:
        if e.http_status == 429:
            # Rate-limit bodies look like {"detail":"..."}; surface just
            # the detail text when present.
            match = re.search(r'{"detail":"(.*?)"}', e.user_message)
            message = match.group(1) if match else e.user_message
            print(message)
            yield message
        else:
            raise


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/ClaudeAI.py
ADDED
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import requests
import uuid
import json
import os
from ...typing import sha256, Dict, get_type_hints

url = 'https://claude.ai/chats'
supports_stream = True
needs_auth = False
working = True
model = ['claude-2','claude-2-100k']

# Hard-coded session cookie and organization id for the shared account.
cookies = 'sessionKey=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; intercom-device-id-lupk8zyo=723e5224-2cf5-499d-94f0-a544809f80c0; intercom-session-lupk8zyo=YzRFV1FXTFBBNDZsVVdxbFpoVWRGeTVmMmNqRVhlZFpKaVJ3YkMyM21saVFVRG9rYmdwTTE4cWxsZFRlZFhUUS0tYkJPZHBMMkkzZDV6dnM2eGVKZk9LZz09--2fc7d343a082574166926853f201bc418b2f11d2; __cf_bm=0IpkLTxsKKInsJb2G7Cb4MT9yQQvVY.iWWrAL2O3Vgo-1690472331-0-AZCY331k6nkFO0lMtzi6ljJI4AoSPi0MBqKxc65PJd6aC5DoJJ3/MSWYgdM4WNg36LPMzAAkzSJLxnQzwo5nNf4='
user = '7491288a-406a-490b-acf2-ef374b375c61'
h1 = {
    'Authority': 'claude.ai',
    'Scheme': 'https',
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en-US,en;q=0.5',
    'Content-Type': 'application/json',
    'Cookie': cookies,
    'Origin': 'https://claude.ai',
    'Referer': 'https://claude.ai/chats',
    'Sec-Ch-Ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
    'Sec-Ch-Ua-Mobile': '?0',
    'Sec-Ch-Ua-Platform': '"Windows"',
    'Sec-Fetch-Dest': 'empty',
    'Sec-Fetch-Mode': 'cors',
    'Sec-Fetch-Site': 'same-origin',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
}


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Stream a Claude 2 reply for the flattened conversation.

    Creates a throw-away conversation, streams incremental completions
    from the SSE response, and deletes the conversation afterwards.
    """
    conversation = 'This is a conversation between a human and a language model. The language model should always respond as the assistant, referring to the past history of messages if needed.\n'
    for message in messages:
        conversation += '%s: %s\n' % (message['role'], message['content'])
    conversation += 'assistant: '
    _uuid = str(uuid.uuid4())
    session = requests.Session()
    r = session.post("https://claude.ai/api/organizations/"+user+"/chat_conversations",data=json.dumps({"uuid":_uuid ,"name": ""}),headers=h1)
    r = session.post("https://claude.ai/api/append_message",data=json.dumps({
        "completion": {
            "prompt": conversation,
            "timezone": "Asia/Shanghai",
            "model": "claude-2",
            "incremental": True
        },
        "organization_uuid": user,
        "conversation_uuid": _uuid,
        "text": conversation,
        "attachments": []
    }),headers=h1,stream=True)
    r.encoding='utf-8'
    for line in r.iter_lines():
        line = line.decode()
        if line.startswith('data:'):
            # BUG FIX: json.loads previously ran outside the try block, so
            # a single malformed/ping `data:` frame aborted the generator.
            try:
                data = json.loads(line[5:])
                yield str(data['completion'])
            except (ValueError, KeyError):
                pass
    # Clean up the server-side conversation (best effort).
    r = session.delete("https://claude.ai/api/organizations/"+user+"/chat_conversations/"+_uuid,headers=h1)

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/DeepAi.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import json
import random
import hashlib
import requests

from ...typing import sha256, Dict, get_type_hints

url = 'https://deepai.org'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
working = True


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Stream a chat reply from the deepai.org trial endpoint.

    A throw-away "tryit" API key is derived from the user agent with the
    site's obfuscation scheme (nested reversed MD5 digests).
    """
    def md5(text: str) -> str:
        # DeepAI's client reverses the hex digest; replicate exactly.
        return hashlib.md5(text.encode()).hexdigest()[::-1]

    def get_api_key(user_agent: str) -> str:
        """Recreate the site's client-side trial-key derivation."""
        part1 = str(random.randint(0, 10**11))
        part2 = md5(user_agent + md5(user_agent + md5(user_agent + part1 + "x")))
        return f"tryit-{part1}-{part2}"

    user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'

    headers = {
        "api-key": get_api_key(user_agent),
        "user-agent": user_agent
    }

    files = {
        "chat_style": (None, "chat"),
        "chatHistory": (None, json.dumps(messages))
    }

    r = requests.post("https://api.deepai.org/chat_response", headers=headers, files=files, stream=True)

    # BUG FIX: raise_for_status() used to be called inside the loop, so
    # HTTP errors were only detected after the first chunk (and never for
    # an empty error body).  Check once, before consuming the stream.
    r.raise_for_status()

    for chunk in r.iter_content(chunk_size=None):
        yield chunk.decode()


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/EasyChat.py
ADDED
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os, requests
from ...typing import sha256, Dict, get_type_hints
import json

url = "https://free.easychat.work"
model = ['gpt-3.5-turbo','gpt-3.5-turbo-0301','gpt-3.5-turbo-0613','gpt-3.5-turbo-16k']
supports_stream = True
needs_auth = False
working = True


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Stream completion deltas from the EasyChat OpenAI-compatible proxy.

    Sampling parameters are taken from **kwargs with the site's defaults.
    """
    request_headers = {
        'authority': 'cf1.easychat.work',
        'accept': 'text/event-stream',
        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'content-type': 'application/json',
        'endpoint': '',
        'origin': 'https://cf1.easychat.work',
        'plugins': '0',
        'referer': 'https://cf1.easychat.work/',
        'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        'usesearch': 'false',
        'x-requested-with': 'XMLHttpRequest',
        'Cookie': 'Hm_lvt_563fb31e93813a8a7094966df6671d3f=1690081938; cf_clearance=1jRhucaEPxeT0LtYHhC6.LJrhbVeksv38wGsFoCd1Rk-1690368274-0-250.2.1690368274'
    }

    body = {
        'messages': messages,
        'stream': True,
        'model': model,
        'temperature': kwargs.get('temperature', 0.5),
        'presence_penalty': kwargs.get('presence_penalty', 0),
        'frequency_penalty': kwargs.get('frequency_penalty', 0),
        'top_p': kwargs.get('top_p', 1),
    }

    resp = requests.post(
        'https://easychat.provider.lemonsoftware.eu.org/api/openai/v1/chat/completions',
        headers=request_headers, json=body, stream=True)

    # Relay only the SSE frames that actually carry a content delta.
    for line in resp.iter_lines():
        if b'content' not in line:
            continue
        payload = json.loads(line.decode().split('data: ')[1])
        yield (payload['choices'][0]['delta']['content'])

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/EzChat.py
ADDED
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os, requests
from ...typing import sha256, Dict, get_type_hints
import json

url = "https://gpt4.ezchat.top/"
model = ['gpt-3.5-turbo','gpt-3.5-turbo-0301','gpt-3.5-turbo-0613','gpt-3.5-turbo-16k','gpt-3.5-turbo-16k-0613']
supports_stream = True
needs_auth = False
working = True


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Stream completion deltas from the ezchat.top OpenAI-compatible API."""
    request_headers = {
        'authority': 'gpt4.ezchat.top',
        'accept': 'text/event-stream',
        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'content-type': 'application/json',
        'endpoint': '',
        'origin': 'https://gpt4.ezchat.top',
        'plugins': '0',
        'referer': 'https://gpt4.ezchat.top/',
        'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        'usesearch': 'false',
        'x-requested-with': 'XMLHttpRequest',
        'Authorization':'Bearer ak-EZGPT'
    }

    body = {
        'messages': messages,
        'stream': True,
        'model': model,
        'temperature': kwargs.get('temperature', 0.5),
        'presence_penalty': kwargs.get('presence_penalty', 0),
        'frequency_penalty': kwargs.get('frequency_penalty', 0),
        'top_p': kwargs.get('top_p', 1),
    }

    resp = requests.post('https://gpt4.ezchat.top/api/openai/v1/chat/completions',
                         headers=request_headers, json=body, stream=True)

    # Only SSE frames carrying a content delta are relayed to the caller.
    for line in resp.iter_lines():
        if b'content' not in line:
            continue
        payload = json.loads(line.decode().split('data: ')[1])
        yield (payload['choices'][0]['delta']['content'])

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Forefront.py
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import json
import requests
from ...typing import sha256, Dict, get_type_hints

url = 'https://chat.forefront.ai/'
model = ['gpt-3.5-turbo','claude-instant']
supports_stream = True
needs_auth = False
working = True


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Stream tokens from Forefront's worker endpoint.

    The transcript is flattened into a single prompt; the response mixes
    incremental `delta` frames with cumulative `text` frames, so a
    running prefix is tracked to emit only the new suffix of the latter.
    """
    transcript = 'This is a conversation between a human and a language model. The language model should always respond as the assistant, referring to the past history of messages if needed.\n'
    for msg in messages:
        transcript += '%s: %s\n' % (msg['role'], msg['content'])
    transcript += 'assistant: '

    body = {
        'text': transcript,
        'hidden': True,
        'action': 'new',
        'id': '379e1b22-18da-b4b9-00da-830cdc2a9210',
        'parentId': '79dcec9e-3b7b-4c39-b967-00285b1cd22b',
        'workspaceId': '79dcec9e-3b7b-4c39-b967-00285b1cd22b',
        'messagePersona': 'default',
        'model': model,
        'messages': messages[:-1] if len(messages) > 1 else [],
        'internetMode': 'never',
    }

    # Hard-coded (already expired-looking) session JWT for the worker.
    auth = {'Authorization':'Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6Imluc18yTzZ3UTFYd3dxVFdXUWUyQ1VYZHZ2bnNaY2UiLCJ0eXAiOiJKV1QifQ.eyJhenAiOiJodHRwczovL2NoYXQuZm9yZWZyb250LmFpIiwiZXhwIjoxNjkwMzU0ODYyLCJpYXQiOjE2OTAzNTQ4MDIsImlzcyI6Imh0dHBzOi8vY2xlcmsuZm9yZWZyb250LmFpIiwibmJmIjoxNjkwMzU0NzkyLCJzaWQiOiJzZXNzXzJUNjlQWFo4VEpyb2lmMldmMHlpeU1TOXlPdiIsInN1YiI6InVzZXJfMlQ2OVBZdGNYeXBBa0Q3UDl1a1ZQTkdGWDRGIn0.bxp-NUcGfGvKHqTl6FYCVUrPVtLTSNfwAKCDVEesp2by2y2UFhTX7iDwbkNh4OzJtJglsdIo2sKvyiuBCoVDGHmPKSjD3D62FXC7xEaXaJ6EQuhgDgpMin4qlAoCUvYWSy9KQRW0YCcIqhJ65-u3XvKT610G2RSt70vf4Bwhu9q-LRdd4YEIXvtBd2BIQOm9daLG1w5qTb0xwegDJaWp4rocf5ey64XvxJVoXEMgIgOW6LxfMl9n6hUe7artfkVNiEFnNWp9lc-zW-h8uSk9u6DTCQuOJEURnRcrV55PJJXLa7lxgt70bNsABCt60ewo8JLjL9iDdK3kGgtvFaj3eQ'}

    resp = requests.post('https://streaming-worker.forefront.workers.dev/chat',
                         json=body, stream=True, headers=auth)

    seen_text = ''
    for raw in resp.iter_lines():
        if b'delta' in raw:
            yield json.loads(raw.decode().split('data: ')[1])['delta']
        if b'text' in raw:
            frame = json.loads(raw.decode().split('data: ')[1])
            yield frame['text'][len(seen_text):]
            seen_text = frame['text']
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Free2gpt.py
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import requests
from datetime import datetime
import base64,hashlib,json
from ...typing import sha256, Dict, get_type_hints

url = 'https://chatf.free2gpt.xyz/'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
working = True

headers = {
    'Origin':'https://chatf.free2gpt.xyz',
    'Referer':'https://chatf.free2gpt.xyz/',
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
    'Content-Type':'text/plain;charset=UTF-8',
}

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Stream raw text chunks from the free2gpt generate endpoint.

    The request is signed with sha256("<ms-timestamp>:<last message>:"),
    matching the site's client-side signing scheme.
    """
    millis = int(round(datetime.now().timestamp() * 1000))
    signature_input = '%d:%s:' % (millis, messages[-1]['content'])
    signature = hashlib.sha256(signature_input.encode('utf-8')).hexdigest()

    body = {
        "messages": messages,
        "time": millis,
        "pass": None,
        "sign": signature,
    }
    resp = requests.post('https://chatf.free2gpt.xyz/api/generate',
                         json=body, headers=headers, stream=True)
    resp.encoding = 'utf-8'
    for piece in resp.iter_content(chunk_size=2048):
        if not piece:
            continue
        # The backend reports throttling in-band rather than via status.
        if b'rate_limit_exceeded' in piece:
            yield 'Rate Limited'
            return
        yield piece.decode('utf-8', errors='ignore')

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Fusionbrain.py
ADDED
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import requests
import time
from translate import Translator
import os
from ...typing import sha256, Dict, get_type_hints

url = 'https://editor.fusionbrain.ai/'
model = ['kandinsky']
supports_stream = True
needs_auth = False
working = True

HEADERS = {
    "accept-language": "en-US,en;q=0.9",
    "referrer": "https://editor.fusionbrain.ai/",
    "origin": "https://editor.fusionbrain.ai",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
}

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Submit a Kandinsky text-to-image job and poll until it finishes.

    The prompt is translated to English first (the model expects English);
    the multipart body is assembled by hand to match the editor's exact
    boundary and part layout.
    """
    raw_prompt = messages[-1]['content']
    english_prompt = Translator(from_lang='autodetect', to_lang="en").translate(raw_prompt)
    # The translate package returns this sentinel when source == target.
    if english_prompt == 'PLEASE SELECT TWO DISTINCT LANGUAGES':
        english_prompt = raw_prompt

    boundary = '----WebKitFormBoundaryAvV9KCrOGx8dToxn'
    body = (
        f'--{boundary}\r\n'
        'Content-Disposition: form-data; name="params"; filename="blob"\r\n'
        'Content-Type: application/json\r\n'
        '\r\n'
        '{"type":"GENERATE","style":"DEFAULT","width":1024,"height":1024,"generateParams":{"query":"' + english_prompt + '"}}\r\n'
        f'--{boundary}--'
    )
    HEADERS['Content-Type'] = f'multipart/form-data; boundary={boundary}'

    submit = requests.post("https://api.fusionbrain.ai/web/api/v1/text2image/run?model_id=1",
                           headers=HEADERS, data=body)
    try:
        job_id = submit.json()['uuid']
    except:
        yield 'Image generation error. This may be because your image is illegal or our service has malfunctioned.'
        return

    # Poll the status endpoint every 0.5 s for up to 30 s.
    status = requests.get("https://api.fusionbrain.ai/web/api/v1/text2image/status/" + job_id,
                          headers=HEADERS)
    images = status.json()['images']
    waited = 0
    while images is None and waited < 30:
        time.sleep(0.5)
        waited += 0.5
        status = requests.get("https://api.fusionbrain.ai/web/api/v1/text2image/status/" + job_id,
                              headers=HEADERS)
        images = status.json()['images']
    if not images:
        yield 'Image generation error. This is because our service has malfunctioned.'
        return
    # NOTE(review): an empty string is yielded on success, as before —
    # presumably the caller retrieves the image elsewhere.
    yield ''


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Gravityengine.py
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import requests
|
2 |
+
import os
|
3 |
+
import json
|
4 |
+
from ...typing import sha256, Dict, get_type_hints
|
5 |
+
|
6 |
+
url = 'https://gpt4.xunika.uk/'
|
7 |
+
model = ['gpt-3.5-turbo','gpt-3.5-turbo-0301','gpt-3.5-turbo-0613','gpt-3.5-turbo-16k','gpt-3.5-turbo-16k-0613']
|
8 |
+
supports_stream = True
|
9 |
+
needs_auth = False
|
10 |
+
working = True
|
11 |
+
|
12 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Proxy one chat completion through the OpenAI-compatible endpoint at
    gpt4.xunika.uk.

    Yields the complete answer as a single string; the upstream response
    is consumed in full regardless of `stream`.
    """
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ak-chatgptorguk',
        'Cookie': 'FCNEC=%5B%5B%22AKsRol8oAtMdGFPKct6Xlvf9FMwt0ghzonq-NWJaGWZgyBfewG7IzKBSKZRpUeq_dOeMsER8VrYaIKOQgwwXT7zOzBtXf_OU7rD44yIjAl03Q4HRRtdsryzUPl2DuQZ8Wq6IMzD9RbCKxYuSCMqEJcDB51IiIZrqeg%3D%3D%22%5D%2Cnull%2C%5B%5D%5D',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        'Origin': 'https://gpt4.xunika.uk',
        'Referer': 'https://gpt4.xunika.uk/'
    }
    data = {
        'model': model,
        'temperature': kwargs.get("temperature", 0.7),
        'presence_penalty': kwargs.get('presence_penalty', 0),
        'messages': messages,
    }
    # The old code requested stream=True and then called response.json(),
    # which reads the whole body anyway — fetch it plainly instead.
    response = requests.post(url + '/api/openai/v1/chat/completions',
                             json=data, headers=headers)

    yield response.json()['choices'][0]['message']['content']


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/H2o.py
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from requests import Session
|
2 |
+
from uuid import uuid4
|
3 |
+
from json import loads
|
4 |
+
import os
|
5 |
+
import json
|
6 |
+
import requests
|
7 |
+
from ...typing import sha256, Dict, get_type_hints
|
8 |
+
|
9 |
+
url = 'https://gpt-gm.h2o.ai'
|
10 |
+
model = ['falcon-40b', 'falcon-7b', 'llama-13b']
|
11 |
+
supports_stream = True
|
12 |
+
needs_auth = False
|
13 |
+
working = True
|
14 |
+
|
15 |
+
models = {
|
16 |
+
'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
|
17 |
+
'falcon-40b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
|
18 |
+
'llama-13b': 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b'
|
19 |
+
}
|
20 |
+
|
21 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Run one completion against the public h2oGPT demo at gpt-gm.h2o.ai.

    Flattens `messages` into a "role: content" transcript, accepts the
    site's ethics/settings form, opens a conversation for the mapped model
    and posts the transcript.  Returns the final generated text as a single
    string (the request asks for streaming but the response is read whole).
    """
    # Flatten the chat history into a plain-text transcript.
    conversation = ''
    for message in messages:
        conversation += '%s: %s\n' % (message['role'], message['content'])

    conversation += 'assistant: '
    session = requests.Session()

    # Initial GET picks up the session cookies needed by the later POSTs.
    response = session.get("https://gpt-gm.h2o.ai/")
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
        "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
        "Content-Type": "application/x-www-form-urlencoded",
        "Upgrade-Insecure-Requests": "1",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "same-origin",
        "Sec-Fetch-User": "?1",
        "Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"
    }
    data = {
        "ethicsModalAccepted": "true",
        "shareConversationsWithModelAuthors": "true",
        "ethicsModalAcceptedAt": "",
        # NOTE(review): always sets falcon-40b here regardless of `model`;
        # the real model choice happens in the /conversation POST below.
        "activeModel": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
        "searchEnabled": "true"
    }
    # Accept the ethics modal / save settings (form-encoded POST).
    response = session.post("https://gpt-gm.h2o.ai/settings", headers=headers, data=data)

    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
        "Accept": "*/*",
        "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
        "Content-Type": "application/json",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "Referer": "https://gpt-gm.h2o.ai/"
    }
    data = {
        # Map the short model alias to the full HF model id.
        "model": models[model]
    }

    # Create a conversation; the JSON response carries its id.
    conversation_id = session.post("https://gpt-gm.h2o.ai/conversation", headers=headers, json=data)
    data = {
        "inputs": conversation,
        "parameters": {
            "temperature": kwargs.get('temperature', 0.4),
            "truncate": kwargs.get('truncate', 2048),
            "max_new_tokens": kwargs.get('max_new_tokens', 1024),
            "do_sample": kwargs.get('do_sample', True),
            "repetition_penalty": kwargs.get('repetition_penalty', 1.2),
            "return_full_text": kwargs.get('return_full_text', False)
        },
        "stream": True,
        "options": {
            "id": kwargs.get('id', str(uuid4())),
            "response_id": kwargs.get('response_id', str(uuid4())),
            "is_retry": False,
            "use_cache": False,
            "web_search_id": ""
        }
    }

    response = session.post(f"https://gpt-gm.h2o.ai/conversation/{conversation_id.json()['conversationId']}", headers=headers, json=data)
    # The endpoint answers with SSE-style "data:" records; the last record
    # is a JSON object holding the full generation.
    generated_text = response.text.replace("\n", "").split("data:")
    generated_text = json.loads(generated_text[-1])

    return generated_text["generated_text"]


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/PerplexityAI.py
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import subprocess
|
2 |
+
import json
|
3 |
+
import os
|
4 |
+
from ...typing import sha256, Dict, get_type_hints
|
5 |
+
|
6 |
+
url = 'https://labs.perplexity.ai/'
|
7 |
+
model = ['llama-2-7b-chat','llama-2-13b-chat','llama-2-70b-chat']
|
8 |
+
supports_stream = True
|
9 |
+
needs_auth = False
|
10 |
+
working = True
|
11 |
+
|
12 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Stream a completion by running helpers/perplexityai.py as a child
    process and relaying its stdout line by line.

    Yields decoded output lines; if Cloudflare serves its challenge page,
    yields the literal string 'Error' and stops.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({
        'model': model,
        'messages': messages}, separators=(',', ':'))
    cmd = ['python3', f'{path}/helpers/perplexityai.py', config]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    try:
        for line in iter(p.stdout.readline, b''):
            if b'<title>Just a moment...</title>' in line:
                # Cloudflare challenge: abort this request instead of
                # killing the whole interpreter (the old code called
                # os._exit(0) here, taking the host process down with it).
                os.system('clear' if os.name == 'posix' else 'cls')
                yield 'Error'
                p.terminate()
                return
            yield line.decode('unicode_escape', errors='ignore')
    finally:
        # Always close the pipe and reap the child so it never lingers.
        p.stdout.close()
        p.wait()


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Phind.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import json
|
3 |
+
import subprocess
|
4 |
+
from ...typing import sha256, Dict, get_type_hints
|
5 |
+
|
6 |
+
url = 'https://phind.com'
|
7 |
+
model = ['gpt-3.5-turbo-0613','gpt-4-0613']
|
8 |
+
supports_stream = True
|
9 |
+
needs_auth = False
|
10 |
+
working = True
|
11 |
+
|
12 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Stream a completion by running helpers/phind.py as a child process
    and relaying its stdout line by line.

    Skips keep-alive "ping" lines; if Cloudflare serves its challenge
    page, yields the literal string 'Error' and stops.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({
        'model': model,
        'messages': messages}, separators=(',', ':'))

    cmd = ['python3', f'{path}/helpers/phind.py', config]

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    try:
        for line in iter(p.stdout.readline, b''):
            if b'<title>Just a moment...</title>' in line:
                # Cloudflare challenge: abort this request instead of
                # killing the whole interpreter (the old code called
                # os._exit(0) here, taking the host process down with it).
                os.system('clear' if os.name == 'posix' else 'cls')
                yield 'Error'
                p.terminate()
                return
            if b'ping - 2023-' in line:
                continue  # keep-alive heartbeat, not completion content
            yield line.decode('unicode_escape', errors='ignore')
    finally:
        # Always close the pipe and reap the child so it never lingers.
        p.stdout.close()
        p.wait()


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/PizzaGPT.py
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from curl_cffi import requests
|
3 |
+
from ...typing import sha256, Dict, get_type_hints
|
4 |
+
|
5 |
+
url = 'https://pizzagpt.it/'
|
6 |
+
model = ['gpt-3.5-turbo']
|
7 |
+
supports_stream = False
|
8 |
+
needs_auth = False
|
9 |
+
working = True
|
10 |
+
|
11 |
+
headers = {
|
12 |
+
'Origin':'https://pizzagpt.it',
|
13 |
+
'Referer':'https://pizzagpt.it/',
|
14 |
+
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
|
15 |
+
'X-Secret':'Marinara',
|
16 |
+
'Content-Type':'text/plain;charset=UTF-8',
|
17 |
+
'Cookie':'dntd=false; cf_clearance=r4xzN9B6NS2nW5gq2Q1YOgiYw1zu3xs81FmZyNjSVBg-1690797483-0-0.2.1690797483; n-req=1'
|
18 |
+
}
|
19 |
+
|
20 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Send the flattened conversation to pizzagpt.it and yield its answer."""
    transcript_lines = [
        'This is a conversation between a human and a language model. The language model should always respond as the assistant, referring to the past history of messages if needed.'
    ]
    transcript_lines.extend(f"{entry['role']}: {entry['content']}" for entry in messages)
    question = '\n'.join(transcript_lines) + '\nassistant: '
    payload = {
        "question": question
    }
    reply = requests.post('https://pizzagpt.it/api/chat-completion',
                          json=payload, headers=headers, impersonate='chrome110')
    yield reply.json()['answer']['content']


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Poe.py
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import random
|
3 |
+
import poe
|
4 |
+
from ...typing import sha256, Dict, get_type_hints
|
5 |
+
|
6 |
+
url = 'https://poe.com/'
|
7 |
+
models = {'gpt-3.5-turbo':'capybara','claude-instant':'a2','palm':'acouchy','palm2':'acouchy','bard':'acouchy','google-bard':'acouchy','google-palm':'acouchy','llama-2-70b-chat':'llama_2_70b_chat'}
|
8 |
+
model = ['gpt-3.5-turbo','claude-instant','palm2','llama-2-70b-chat']
|
9 |
+
supports_stream = True
|
10 |
+
needs_auth = False
|
11 |
+
working = True
|
12 |
+
token = ['H959lSH8kjQ-b4K8FCrDPg%3D%3D','ACHY1MG7xz1yE0P6EByF5g%3D%3D','a4DoOVnIl3FievhYiQYOJw%3D%3D']
|
13 |
+
formkey = ['a40f267a9751c48d34c9f12f56c5c6f8','b65db0a463062fcabe43aa6c6978c344','413b8fa39bfb54f99cb9a4f18d18aab1']
|
14 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Proxy the conversation through poe.com using a randomly chosen
    hard-coded account, yielding incremental text chunks.

    The preamble and the "assistant" marker are deliberately Chinese —
    they are sent to the bot verbatim and must not be translated.
    """
    path = os.path.dirname(os.path.realpath(__file__))  # NOTE(review): unused
    # Chinese system prompt: "This is a conversation between a human and a
    # language model; respond as the assistant, using the history if needed."
    conversation = '这是一个人和一个语言模型之间的对话。语言模型应该始终作为助理进行响应,如果需要,可以参考过去的消息历史。\n'
    for message in messages:
        conversation += '%s:%s\n' % (message['role'], message['content'])
    conversation += '助理: '  # "assistant: "
    # Rotate between the hard-coded Poe accounts to spread the load.
    index = random.randrange(len(token))
    client = poe.Client(token[index], formkey=formkey[index])
    for chunk in client.send_message(models[model], conversation, with_chat_break=True):
        yield chunk["text_new"]  # only the text generated since last chunk
    # Trim the bot's history so the shared account does not accumulate state.
    client.purge_conversation(models[model], count=3)


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/PowerChat.py
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import requests
|
3 |
+
from datetime import datetime
|
4 |
+
import base64,hashlib,json
|
5 |
+
from ...typing import sha256, Dict, get_type_hints
|
6 |
+
|
7 |
+
url = 'https://powerchat.top/'
|
8 |
+
model = ['gpt-3.5-turbo']
|
9 |
+
supports_stream = True
|
10 |
+
needs_auth = False
|
11 |
+
working = True
|
12 |
+
|
13 |
+
headers = {
|
14 |
+
'Origin':'https://powerchat.top',
|
15 |
+
'Referer':'https://powerchat.top/',
|
16 |
+
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
|
17 |
+
'Content-Type':'text/plain;charset=UTF-8',
|
18 |
+
'Version':'1.0'
|
19 |
+
}
|
20 |
+
|
21 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Stream a completion from powerchat.top.

    The API expects a base64-encoded JSON payload whose `special.sign`
    field is the SHA-256 hex digest of "<ms-timestamp>:question:<contact>".
    Yields raw decoded response chunks as they arrive.
    """
    current_time = datetime.now()
    timestamp_in_milliseconds = int(round(current_time.timestamp() * 1000))
    sign_source = str(timestamp_in_milliseconds) + ':question:contact_me_to_work_together_hello@promptboom.com'
    sign = hashlib.sha256(sign_source.encode('utf-8')).hexdigest()
    # Build the payload as a dict and serialize once, instead of hand-gluing
    # a JSON string (which breaks as soon as any field needs escaping).
    payload = {
        "did": "060ca8eaa0625da25d61ae94d4a2cf99",
        "chatList": messages,
        "special": {
            "time": timestamp_in_milliseconds,
            "sign": sign,
            "referer": "https://github.com/",
            "path": "https://powerchat.top/",
        },
    }
    data = base64.b64encode(json.dumps(payload).encode('utf-8')).decode()
    r = requests.post('https://api.powerchat.top/requestPowerChat',
                      json={'data': data}, headers=headers, stream=True)
    for chunk in r.iter_content(chunk_size=2048):
        if chunk:
            yield chunk.decode(errors='ignore')


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Slack.py
ADDED
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from os import getenv
|
2 |
+
from dotenv import load_dotenv
|
3 |
+
from slack_sdk import WebClient
|
4 |
+
from slack_sdk.errors import SlackApiError
|
5 |
+
import random
|
6 |
+
import os
|
7 |
+
from ...typing import sha256, Dict, get_type_hints
|
8 |
+
|
9 |
+
load_dotenv()
|
10 |
+
CLAUDE_BOT_ID = ['U05J5M9E7MZ','U05J74AT9DL','U05J79DQC0N']
|
11 |
+
TOKEN = ['xo'+'xp-5641502612336-5'+'611139843990-5612369289415-'+'7bf36aaefafe9ff2525e5ad845a6005c','xo'+'xp-5625792498914-56384302'+'57617-5629359479268-ce'+'c6629de47c2e3e2a130e8c440ecc6c','xo'+'xp-562841008470'+'8-5611431325175-56'+'26812979347'+'-edca39d9a088591'+'d74cc3a20dd32642f']
|
12 |
+
|
13 |
+
|
14 |
+
class SlackClient(WebClient):
    """Thin wrapper around slack_sdk.WebClient for talking to the
    claude-in-slack bot over a direct-message channel.

    NOTE(review): relies on the module-level global `index` (set by
    _create_completion) to select which CLAUDE_BOT_ID/TOKEN pair to use.
    """

    # Id of the open DM channel with the bot; set by open_channel().
    CHANNEL_ID = None
    # Timestamp of our last posted message; used as the history cursor.
    LAST_TS = None

    def chat(self, text):
        """Post `text` to the bot's channel and remember the message ts."""
        if not self.CHANNEL_ID:
            raise Exception("Channel not found.")

        resp = self.chat_postMessage(channel=self.CHANNEL_ID, text=text)
        #print("c: ", resp)
        self.LAST_TS = resp["ts"]

    def open_channel(self):
        """Open (at most once) a DM conversation with the selected bot."""
        if not self.CHANNEL_ID:
            response = self.conversations_open(users=CLAUDE_BOT_ID[index])
            self.CHANNEL_ID = response["channel"]["id"]

    def get_reply(self):
        """Poll history until the bot's answer is complete, then return it.

        Retries up to 150 times; a message still ending in "Typing…_" is
        treated as unfinished.  Raises if no finished reply ever appears.
        """
        for _ in range(150):
            try:
                resp = self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=2)
                #print("r: ", resp)
                msg = [msg["text"] for msg in resp["messages"] if msg["user"] == CLAUDE_BOT_ID[index]]
                if msg and not msg[-1].endswith("Typing…_"):
                    return msg[-1]
            except (SlackApiError, KeyError) as e:
                print(f"Get reply error: {e}")

        raise Exception("Get replay timeout")

    def get_stream_reply(self):
        """Yield the bot's answer incrementally while it is being typed.

        Polls history up to 150 times and yields only the suffix that is
        new since the previous poll (`l` tracks how much was yielded);
        stops once a message no longer ends with the "Typing…_" marker.
        """
        l = 0
        for _ in range(150):
            try:
                resp = self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=2)
                msg = [msg["text"] for msg in resp["messages"] if msg["user"] == CLAUDE_BOT_ID[index]]
                if msg:
                    last_msg = msg[-1]
                    more = False
                    if msg[-1].endswith("Typing…_"):
                        last_msg = str(msg[-1])[:-11]  # remove typing…
                        more = True
                    diff = last_msg[l:]
                    if diff == "":
                        continue
                    l = len(last_msg)
                    yield diff
                    if not more:
                        break
            except (SlackApiError, KeyError) as e:
                print(f"Get reply error: {e}")
|
67 |
+
|
68 |
+
|
69 |
+
|
70 |
+
'''
|
71 |
+
if __name__ == '__main__':
|
72 |
+
async def server():
|
73 |
+
await client.open_channel()
|
74 |
+
while True:
|
75 |
+
prompt = input("You: ")
|
76 |
+
await client.chat(prompt)
|
77 |
+
|
78 |
+
reply = await client.get_reply()
|
79 |
+
print(f"Claude: {reply}\n--------------------")
|
80 |
+
|
81 |
+
asyncio.run(server())'''
|
82 |
+
|
83 |
+
|
84 |
+
url = 'https://anthropic.com/claude-in-slack'
|
85 |
+
model = ['claude-1']
|
86 |
+
supports_stream = True
|
87 |
+
needs_auth = False
|
88 |
+
working = True
|
89 |
+
|
90 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Relay the conversation to Claude through the claude-in-slack bot.

    Opens a DM channel with a randomly selected hard-coded account, posts
    the flattened transcript, and yields either the streamed diffs or the
    single finished reply depending on `stream`.
    """
    global index
    # `index` must be a module-level global: SlackClient's methods read it
    # to pick the matching bot id for the chosen account token.
    index = random.randrange(len(CLAUDE_BOT_ID))
    client = SlackClient(token=TOKEN[index])
    client.open_channel()
    conversation = 'Please forget the conversation content above.This is a conversation between a human and a language model. The language model should always respond as the assistant, referring to the past history of messages if needed.\n'

    for message in messages:
        conversation += '%s: %s\n' % (message['role'], message['content'])

    conversation += 'assistant: '
    client.chat(conversation)
    if stream:
        yield from client.get_stream_reply()
    else:
        yield client.get_reply()


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Theb.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import json
|
3 |
+
import time
|
4 |
+
import subprocess
|
5 |
+
import sys
|
6 |
+
from re import findall
|
7 |
+
#from curl_cffi import requests
|
8 |
+
import requests
|
9 |
+
from ...typing import sha256, Dict, get_type_hints
|
10 |
+
|
11 |
+
url = 'https://chatbot.theb.ai'
|
12 |
+
model = ['gpt-3.5-turbo']
|
13 |
+
supports_stream = True
|
14 |
+
needs_auth = False
|
15 |
+
working = True
|
16 |
+
|
17 |
+
headers = {
|
18 |
+
'authority': 'chatbot.theb.ai',
|
19 |
+
'accept': 'application/json, text/plain, */*',
|
20 |
+
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
|
21 |
+
'content-type': 'application/json',
|
22 |
+
'origin': 'https://chatbot.theb.ai',
|
23 |
+
'referer': 'https://chatbot.theb.ai/',
|
24 |
+
'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
|
25 |
+
'sec-ch-ua-mobile': '?0',
|
26 |
+
'sec-ch-ua-platform': '"macOS"',
|
27 |
+
'sec-fetch-dest': 'empty',
|
28 |
+
'sec-fetch-mode': 'cors',
|
29 |
+
'sec-fetch-site': 'same-origin',
|
30 |
+
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
|
31 |
+
}
|
32 |
+
|
33 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
34 |
+
conversation = 'This is a conversation between a human and a language model. The language model should always respond as the assistant, referring to the past history of messages if needed.\n'
|
35 |
+
|
36 |
+
for message in messages:
|
37 |
+
conversation += '%s: %s\n' % (message['role'], message['content'])
|
38 |
+
|
39 |
+
conversation += 'assistant: '
|
40 |
+
json_data = {
|
41 |
+
'prompt': conversation,
|
42 |
+
'options': {}
|
43 |
+
}
|
44 |
+
response = requests.post('https://chatbot.theb.ai/api/chat-process',
|
45 |
+
headers=headers, json=json_data, stream=True) # impersonate='chrome110'
|
46 |
+
for chunk in response.iter_lines(chunk_size=2048):
|
47 |
+
try:
|
48 |
+
completion_chunk = findall(r'content":"(.*)"},"fin', chunk.decode(errors='ignore'))[0]
|
49 |
+
yield completion_chunk
|
50 |
+
except:
|
51 |
+
pass
|
52 |
+
|
53 |
+
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
54 |
+
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Vercel.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import vercel_ai
|
3 |
+
from ...typing import sha256, Dict, get_type_hints
|
4 |
+
|
5 |
+
url = 'https://sdk.vercel.ai'
|
6 |
+
supports_stream = True
|
7 |
+
needs_auth = False
|
8 |
+
working = True
|
9 |
+
|
10 |
+
models = {
|
11 |
+
'gpt-3.5-turbo':'openai:gpt-3.5-turbo',
|
12 |
+
'gpt-3.5-turbo-16k':'openai:gpt-3.5-turbo-16k',
|
13 |
+
'gpt-3.5-turbo-16k-0613':'openai:gpt-3.5-turbo-16k-0613',
|
14 |
+
'text-ada-001':'openai:text-ada-001',
|
15 |
+
'text-babbage-001':'openai:text-babbage-001',
|
16 |
+
'text-curie-001':'openai:text-curie-001',
|
17 |
+
'text-davinci-002':'openai:text-davinci-002',
|
18 |
+
'text-davinci-003':'openai:text-davinci-003',
|
19 |
+
'llama-2-7b-chat':'replicate:a16z-infra/llama7b-v2-chat',
|
20 |
+
'llama-2-13b-chat':'replicate:a16z-infra/llama13b-v2-chat',
|
21 |
+
'bloom':'huggingface:bigscience/bloom',
|
22 |
+
'flan-t5-xxl':'huggingface:google/flan-t5-xxl',
|
23 |
+
'gpt-neox-20b':'huggingface:EleutherAI/gpt-neox-20b',
|
24 |
+
'oasst-sft-4-pythia-12b-epoch-3.5':'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
|
25 |
+
'oasst-sft-1-pythia-12b':'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
|
26 |
+
'santacoder':'huggingface:bigcode/santacoder',
|
27 |
+
'command-light-nightly':'cohere:command-light-nightly',
|
28 |
+
'command-nightly':'cohere:command-nightly'
|
29 |
+
}
|
30 |
+
model = models.keys()
|
31 |
+
|
32 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Relay streamed completion chunks from the Vercel AI SDK playground."""
    yield from vercel_ai.Client().chat(models[model], messages)


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Wewordle.py
ADDED
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os,sys
|
2 |
+
import requests
|
3 |
+
import json
|
4 |
+
import random
|
5 |
+
import time
|
6 |
+
import string
|
7 |
+
from ...typing import sha256, Dict, get_type_hints
|
8 |
+
|
9 |
+
url = "https://wewordle.org/gptapi/v1/android/turbo"
|
10 |
+
model = ['gpt-3.5-turbo']
|
11 |
+
supports_stream = False
|
12 |
+
needs_auth = False
|
13 |
+
working = False
|
14 |
+
|
15 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Ask wewordle.org's android GPT endpoint for a completion.

    The request impersonates a fresh android install: user id, app id and
    RevenueCat-style subscriber metadata are randomised per call.  Yields
    the assistant message on HTTP 200; otherwise logs the status code and
    yields nothing.
    """
    # Flatten the chat history into a single "role: content" transcript.
    base = ''
    for message in messages:
        base += '%s: %s\n' % (message['role'], message['content'])
    base += 'assistant:'
    # Randomise user id and app id so every call looks like a new client.
    _user_id = ''.join(random.choices(f'{string.ascii_lowercase}{string.digits}', k=16))
    _app_id = ''.join(random.choices(f'{string.ascii_lowercase}{string.digits}', k=31))
    # Current UTC timestamp in the ISO format the API expects.
    _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
    headers = {
        'accept': '*/*',
        'pragma': 'no-cache',
        'Content-Type': 'application/json',
        'Connection': 'keep-alive'
        # user agent android client
        # 'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 10; SM-G975F Build/QP1A.190711.020)',
    }
    data = {
        "user": _user_id,
        "messages": [
            {"role": "user", "content": base}
        ],
        # Mimics a RevenueCat anonymous subscriber with no purchases.
        "subscriber": {
            "originalPurchaseDate": None,
            "originalApplicationVersion": None,
            "allPurchaseDatesMillis": {},
            "entitlements": {
                "active": {},
                "all": {}
            },
            "allPurchaseDates": {},
            "allExpirationDatesMillis": {},
            "allExpirationDates": {},
            "originalAppUserId": f"$RCAnonymousID:{_app_id}",
            "latestExpirationDate": None,
            "requestDate": _request_date,
            "latestExpirationDateMillis": None,
            "nonSubscriptionTransactions": [],
            "originalPurchaseDateMillis": None,
            "managementURL": None,
            "allPurchasedProductIdentifiers": [],
            "firstSeen": _request_date,
            "activeSubscriptions": []
        }
    }
    # json= serialises the payload for us; equivalent to the old
    # data=json.dumps(data) but idiomatic for the requests library.
    response = requests.post(url, headers=headers, json=data)
    if response.status_code == 200:
        _json = response.json()
        if 'message' in _json:
            yield _json['message']['content']
    else:
        print(f"Error Occurred::{response.status_code}")


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/Zhulei.py
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import requests
|
3 |
+
from ...typing import sha256, Dict, get_type_hints
|
4 |
+
|
5 |
+
url = 'https://chat.zhulei.xyz/'
|
6 |
+
model = ['gpt-3.5-turbo']
|
7 |
+
supports_stream = True
|
8 |
+
needs_auth = False
|
9 |
+
working = True
|
10 |
+
|
11 |
+
headers = {
|
12 |
+
'Origin':'https://chat.zhulei.xyz',
|
13 |
+
'Referer':'https://chat.zhulei.xyz/',
|
14 |
+
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
|
15 |
+
'Content-Type':'text/plain;charset=UTF-8',
|
16 |
+
}
|
17 |
+
|
18 |
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Stream raw response chunks from chat.zhulei.xyz's chat API."""
    payload = {
        "messages": messages,
        "temperature": kwargs.get("temperature", 0.6),
        "password": "",
        "model": "gpt-3.5-turbo"
    }
    resp = requests.post('https://chat.zhulei.xyz/api', json=payload, headers=headers, stream=True)
    for piece in resp.iter_content(chunk_size=2048):
        if not piece:
            continue
        yield piece.decode('utf-8', errors='ignore')


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
g4f/Provider/Providers/helpers/perplexityai.py
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import base64
|
2 |
+
import os
|
3 |
+
import json
|
4 |
+
import sys
|
5 |
+
import socketio
|
6 |
+
config = json.loads(sys.argv[1])
|
7 |
+
model = config['model']
|
8 |
+
messages = config['messages']
|
9 |
+
def generate_sec_websocket_key():
    """Return a random base64-encoded 16-byte Sec-WebSocket-Key value."""
    return base64.b64encode(os.urandom(16)).decode('utf-8')
|
13 |
+
# Text streamed so far; each progress event is diffed against it so only
# the newly generated suffix is printed.
output = ''

key = generate_sec_websocket_key()
sio = socketio.Client()

@sio.on(model + '_query_progress')
def chat(data):
    """Handle one progress event: print the new portion of the model
    output and disconnect when the server marks the result final."""
    global output
    if data['final']:
        sio.disconnect()
    # Print only the suffix that was not printed on previous events.
    print(data['output'][len(output):], flush=True, end='')
    output = data['output']

# Browser-like handshake headers; the Sec-WebSocket-Key is randomised above.
sio.connect("https://labs-api.perplexity.ai", headers={"Accept-Language": "q=0.9,en-US;q=0.8,en;q=0.7", "Cache-Control": "no-cache", "Pragma": "no-cache", "Sec-WebSocket-Extensions": "client_max_window_bits", "Sec-WebSocket-Key": key, "Sec-WebSocket-Version": "13", "Upgrade": "websocket", "Connection": "Upgrade", "Origin": "https://labs.perplexity.ai", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"})
sio.emit('perplexity_playground', {'model': model, 'messages': messages})
|
g4f/Provider/Providers/helpers/phind.py
ADDED
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
import json
import re
from curl_cffi import requests

# Subprocess helper: invoked with a single JSON CLI argument describing the
# request; streams completion chunks to stdout for the parent to capture.
config = json.loads(sys.argv[1])
model = config['model']
prompt = config['messages'][-1]['content']

if model == 'gpt-4-0613':
    # GPT-4 path uses the captured session below.
    # NOTE(review): userInput is hard-coded to "Hi" and messages is empty —
    # looks like leftover capture data rather than the real prompt; confirm.
    json_data = json.dumps({
        "userInput": "Hi",
        "messages": [],
        "anonUserID": ""
    }, separators=(',', ':'))
    # Captured browser headers/cookies; the mixed-case duplicate keys
    # ("content-type" vs 'Content-Type') are distinct dict entries and both
    # get sent.
    headers = {
        "accept": "*/*",
        "baggage": "sentry-environment=vercel-production,sentry-release=17ac4575f09a70b6d11b370305c5c232ccc740ff,sentry-transaction=%2Fagent,sentry-public_key=ea29c13458134fd3bc88a8bb4ba668cb,sentry-trace_id=cae48a5201194de9897c90a959cb41fd,sentry-sample_rate=0.002,sentry-replay_id=c622e21e115345c69aaa723c253ae4c3",
        "content-type": "application/json",
        "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\"",
        "sec-fetch-dest": "empty",
        "sec-fetch-site": "same-origin",
        "sentry-trace": "cae48a5201194de9897c90a959cb41fd-89e44f68ac2353d8-0",
        'Content-Type': 'application/json',
        'Pragma': 'no-cache',
        'Sec-Fetch-Site': 'same-origin',
        'Accept-Language': 'en-GB,en;q=0.9',
        'Cache-Control': 'no-cache',
        'Sec-Fetch-Mode': 'cors',
        'Origin': 'https://www.phind.com',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
        'Referer': 'https://www.phind.com/agent',
        'Connection': 'keep-alive',
        'Sec-Fetch-Dest': 'empty',
        'Cookie': '__cf_bm=_IRj.8NM5j8u7IXNCeQeYQO6pKEA6fH8zr8glrr_9wo-1690733956-0-AXDTsLVChYgL7QF/88zm0HjZdKYsp23RwRpWG4JsfBWooKmNZQ8jWjLaYQYY/ihIoJxwj8pS677lOaCP/x5WRps=; __Host-next-auth.csrf-token=56f1687f951dd514127f7f09835943ac71b43b7c8df5c07fe9f83e8ab2b1a953%7Cb68332e0f2950cf104ccebfa4cc7e8590e6e5b0e8f388d0ac4cb42f8094eb5bd; __Secure-next-auth.callback-url=https%3A%2F%2Fwww.phind.com%2F; cf_clearance=PeV3_ME4QtJyFf35LHuSRnL.ckXbTfQndUSQgTMXJGw-1690733963-0-1-9895f01e.b9c64471.cf94affe-0.2.1690733963; __Secure-next-auth.session-token=eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..eqiHzSErquEy5J8x.YoB-z5B1HDdJoO3zWROuTWHJM6WHHJFht6GEZQonrT0jNBBc4Jn-2Pp9Y39MkbZ8bW0NqoVDShkDtWrvxBjdzrGQYoFB6VX6akuN1gG2aqY1Jol6kuBWnLS3jp8felzIsy77hJB2INB67H269eQdLj14LUZ82fJVe6na7v_tUpqbmja5Imqs2DRU1xbwGzxYywJAWMAFE1WtoOuWk1hj0RSTw9u9zGPO8EU.pwDJCOV1_WEPxZPaO1tD3A; mp_6d31cd4238973a84421371ef3929c915_mixpanel=%7B%22distinct_id%22%3A%20%22189a798d5dd21-0ab4e9da64dd5d-26031c51-144000-189a798d5deeb4%22%2C%22%24device_id%22%3A%20%22189a798d5dd21-0ab4e9da64dd5d-26031c51-144000-189a798d5deeb4%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%2C%22%24user_id%22%3A%20%22clkpndx060021mq08b65lvanf%22%7D'
    }
else:
    # Non-GPT-4 path: send the real conversation; shouldRunGPT4=False keeps
    # the request on the default model.
    json_data = json.dumps({
        "userInput": prompt,
        "messages": config['messages'],
        "shouldRunGPT4": False
    }, separators=(',', ':'))

    headers = {
        'Content-Type': 'application/json',
        'Pragma': 'no-cache',
        'Accept': '*/*',
        'Sec-Fetch-Site': 'same-origin',
        'Accept-Language': 'en-GB,en;q=0.9',
        'Cache-Control': 'no-cache',
        'Sec-Fetch-Mode': 'cors',
        'Origin': 'https://www.phind.com',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
        'Referer': 'https://www.phind.com/agent',
        'Connection': 'keep-alive',
        'Sec-Fetch-Dest': 'empty',
        'Cookie': '__Host-next-auth.csrf-token=b254c36d29862b95381910bd789b890dc081c9745fbe8b397487b55ded503d50%7C5e97dfca46c3a263e180c6dffa4c315464b232d1e4dbd81340c66d98eb4c6241; __Secure-next-auth.callback-url=https%3A%2F%2Fwww.phind.com; mp_6d31cd4238973a84421371ef3929c915_mixpanel=%7B%22distinct_id%22%3A%20%22188c4960b0afd1-04505a4573c948-26031d51-144000-188c4960b0b916%22%2C%22%24device_id%22%3A%20%22188c4960b0afd1-04505a4573c948-26031d51-144000-188c4960b0b916%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fgithub.com%2Fxiangsx%2Fgpt4free-ts%2Fblob%2Fmaster%2FREADME_zh.md%22%2C%22%24initial_referring_domain%22%3A%20%22github.com%22%7D; cf_clearance=8LxWcAZIbce1P814klBXEYGb7_0EJ0rzcMcskzm1eJ4-1690435153-0-1-efb0c65b.1c588b13.b8b199bb-0.2.1690435153; __cf_bm=oBEN3D2yv7CYMOZwb0bE7tM.qQpb92OWckfYZfxRk3g-1690435155-0-ATa9DlpuUspsHjm1DPWKiOK6yNeTddtyhtmVwb/VCrXi4EBkansL9lGaC2+ZklFzPZnivkdQZ+mZJADHJFqXl3g='
    }

def output(chunk):
    """Streaming callback for curl_cffi: normalize one raw SSE chunk and
    print the extracted completion text, if any, to stdout.

    The replace chains below depend on the exact CRLF framing Phind emits;
    do not reorder them.
    """
    try:
        # Trailing metadata event — not part of the completion text.
        if b'PHIND_METADATA' in chunk:
            return

        # Special-case an all-blank SSE frame so it becomes a newline.
        if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
            chunk = b'data: \n\r\n\r\n'

        chunk = chunk.decode().strip()
        chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
        chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\r\n\r\n')
        chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')
        # Only frames carrying a delta payload are printed.
        if chunk.count('[{"index": 0, "delta": {"content":') > 0:
            for completion_chunk in re.findall(r'"model": "'+model+'", "choices": \[{"index": 0, "delta": {"content": "(.*?)"}, "fin', chunk):
                print(completion_chunk.replace('\\n','\n'),flush=True,end="")

    except Exception as e:
        # Best-effort streaming: a malformed chunk is dropped silently so
        # one bad frame doesn't kill the whole stream.
        pass


try:
    # timeout is effectively "no timeout"; the stream is consumed via the
    # content callback above rather than the returned response body.
    response = requests.post('https://www.phind.com/api/agent',
        headers=headers, data=json_data, content_callback=output, timeout=999999, impersonate='safari15_5')

    exit(0)

except Exception as e:
    # The parent process treats the literal string 'Error' as failure.
    print('Error')
|
g4f/Provider/Providers/helpers/theb.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
import sys
from re import findall
from curl_cffi import requests

# Subprocess helper: invoked with a single JSON CLI argument describing the
# request; streams completion chunks to stdout for the parent to capture.
config = json.loads(sys.argv[1])
prompt = config['messages'][-1]['content']

# Browser-like headers captured from chatbot.theb.ai.
headers = {
    'authority': 'chatbot.theb.ai',
    'accept': 'application/json, text/plain, */*',
    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
    'content-type': 'application/json',
    'origin': 'https://chatbot.theb.ai',
    'referer': 'https://chatbot.theb.ai/',
    'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
}

# Only the last user message is sent; the API keeps no explicit history here.
json_data = {
    'prompt': prompt,
    'options': {}
}

# NOTE: shadows the built-in format(); kept because curl_cffi only needs the
# callable, but a rename would be safer in a future cleanup.
def format(chunk):
    """Streaming callback for curl_cffi: extract the completion text from
    one raw chunk and print it to stdout."""
    try:
        # Pull the "content" field out of the streamed JSON fragment.
        completion_chunk = findall(r'content":"(.*)"},"fin', chunk.decode())[0]
        print(completion_chunk, flush=True, end='')

    except Exception as e:
        # Unparseable chunk: surface it to the parent, which retries.
        print(f'[ERROR] an error occured, retrying... | [[{chunk.decode()}]]', flush=True)
        return

# Retry forever until a request completes; exit(0) ends the subprocess on
# the first successful stream.
while True:
    try:
        response = requests.post('https://chatbot.theb.ai/api/chat-process',
            headers=headers, json=json_data, content_callback=format, impersonate='chrome110')

        exit(0)

    except Exception as e:
        print('[ERROR] an error occured, retrying... |', e, flush=True)
        continue
|
g4f/Provider/__init__.py
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from . import Provider
|
2 |
+
from .Providers import (
|
3 |
+
Bing,
|
4 |
+
Theb,
|
5 |
+
Aichat,
|
6 |
+
Vercel,
|
7 |
+
Forefront,
|
8 |
+
H2o,
|
9 |
+
DeepAi,
|
10 |
+
AItianhu,
|
11 |
+
EasyChat,
|
12 |
+
Acytoo,
|
13 |
+
AiService,
|
14 |
+
Wewordle,
|
15 |
+
ChatgptAi,
|
16 |
+
Chimera,
|
17 |
+
ClaudeAI,
|
18 |
+
Gravityengine,
|
19 |
+
Phind,
|
20 |
+
Poe,
|
21 |
+
Slack,
|
22 |
+
Fusionbrain,
|
23 |
+
Bingo,
|
24 |
+
ChatFree,
|
25 |
+
PerplexityAI,
|
26 |
+
PowerChat,
|
27 |
+
ChatGPTunli,
|
28 |
+
Zhulei,
|
29 |
+
PizzaGPT,
|
30 |
+
Free2gpt,
|
31 |
+
AiFree,
|
32 |
+
B88,
|
33 |
+
EzChat,
|
34 |
+
)
|
g4f/__init__.py
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
|
2 |
+
from . import Provider
|
3 |
+
from g4f.models import Model, ModelUtils
|
4 |
+
|
5 |
+
# Debug flag: set to True to print which provider handles each request.
logging = False


class ChatCompletion:
    """Public entry point mirroring the OpenAI ``ChatCompletion`` API.

    Dispatches a chat request either to an explicitly chosen provider or to
    the model's default (``best_provider``) and returns the completion as a
    generator (``stream=True``) or a joined string.
    """

    @staticmethod
    def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs):
        """Create a chat completion.

        Args:
            model: A ``Model`` entry, or a model-name string resolved via
                ``ModelUtils.convert``.
            messages: OpenAI-style list of ``{"role": ..., "content": ...}``.
            provider: Optional provider module; defaults to the model's
                ``best_provider``.
            stream: If True, return the provider's generator of chunks;
                otherwise join all chunks into one string.
            auth: Credential (cookie/token/jwt) forwarded to the provider.
            **kwargs: Extra provider-specific arguments.

        Note: on several error paths this exits the process (``sys.exit``),
        matching the library's original behavior.
        """
        kwargs['auth'] = auth
        # Refuse providers flagged as broken instead of failing mid-request.
        if provider and not provider.working:
            return f'{provider.__name__} is not working'

        if provider and provider.needs_auth and not auth:
            print(
                f'ValueError: {provider.__name__} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
            sys.exit(1)

        try:
            # Accept plain model-name strings and resolve them to entries.
            if isinstance(model, str):
                try:
                    model = ModelUtils.convert[model]
                except KeyError:
                    raise Exception(f'The model: {model} does not exist')

            # An explicit provider always wins over the model default.
            engine = model.best_provider if not provider else provider

            if not engine.supports_stream and stream:
                print(
                    f"ValueError: {engine.__name__} does not support 'stream' argument", file=sys.stderr)
                sys.exit(1)

            if logging: print(f'Using {engine.__name__} provider')

            # Providers always yield chunks; non-streaming mode joins them.
            return (engine._create_completion(model.name, messages, stream, **kwargs)
                    if stream else ''.join(engine._create_completion(model.name, messages, stream, **kwargs)))
        except TypeError as e:
            # A TypeError from the provider call usually means an unsupported
            # keyword argument; its name is the quoted token in the message.
            print(e)
            arg: str = str(e).split("'")[1]
            print(
                f"ValueError: {engine.__name__} does not support '{arg}' argument", file=sys.stderr)
            sys.exit(1)
|
g4f/models.py
ADDED
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from g4f import Provider
|
2 |
+
|
3 |
+
|
4 |
+
class Model:
    """Registry of supported models.

    Each nested class is a lightweight record with three class attributes:
    ``name`` (the wire-format model id string), ``base_provider`` (the
    upstream service family), and ``best_provider`` (the Provider module
    used by default when the caller does not choose one explicitly).
    """

    class model:
        # Schema declaration only: documents the attributes each entry
        # below defines; never instantiated.
        name: str
        base_provider: str
        best_provider: str

    # ---- OpenAI chat models ------------------------------------------
    class gpt_35_turbo:
        name: str = 'gpt-3.5-turbo'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.Chimera

    class gpt_35_turbo_0301:
        name: str = 'gpt-3.5-turbo-0301'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.Chimera

    class gpt_35_turbo_0613:
        name: str = 'gpt-3.5-turbo-0613'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.Phind

    class gpt_35_turbo_16k:
        name: str = 'gpt-3.5-turbo-16k'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.Chimera

    class gpt_35_turbo_16k_0613:
        name: str = 'gpt-3.5-turbo-16k-0613'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.Vercel

    class gpt_4:
        name: str = 'gpt-4'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.Chimera

    class gpt_4_0314:
        name: str = 'gpt-4-0314'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.Chimera

    class gpt_4_0613:
        name: str = 'gpt-4-0613'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.Phind

    class gpt_4_32k:
        name: str = 'gpt-4-32k'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.Chimera

    # ---- Anthropic models --------------------------------------------
    class claude_1:
        name: str = 'claude-1'
        base_provider: str = 'anthropic'
        best_provider: Provider.Provider = Provider.Slack

    class claude_instant_100k:
        name: str = 'claude-instant-100k'
        base_provider: str = 'anthropic'
        best_provider: Provider.Provider = Provider.B88

    class claude_instant:
        name: str = 'claude-instant'
        base_provider: str = 'anthropic'
        best_provider: Provider.Provider = Provider.Poe

    class claude_2:
        name: str = 'claude-2'
        base_provider: str = 'anthropic'
        best_provider: Provider.Provider = Provider.ClaudeAI

    class claude_2_100k:
        name: str = 'claude-2-100k'
        base_provider: str = 'anthropic'
        best_provider: Provider.Provider = Provider.ClaudeAI

    # ---- HuggingFace-hosted models -----------------------------------
    class bloom:
        name: str = 'bloom'
        base_provider: str = 'huggingface'
        best_provider: Provider.Provider = Provider.Vercel

    class flan_t5_xxl:
        name: str = 'flan-t5-xxl'
        base_provider: str = 'huggingface'
        best_provider: Provider.Provider = Provider.Vercel

    class gpt_neox_20b:
        name: str = 'gpt-neox-20b'
        base_provider: str = 'huggingface'
        best_provider: Provider.Provider = Provider.Vercel

    class oasst_sft_4_pythia_12b_epoch_35:
        name: str = 'oasst-sft-4-pythia-12b-epoch-3.5'
        base_provider: str = 'huggingface'
        best_provider: Provider.Provider = Provider.Vercel

    class oasst_sft_1_pythia_12b:
        name: str = 'oasst-sft-1-pythia-12b'
        base_provider: str = 'huggingface'
        best_provider: Provider.Provider = Provider.Vercel

    class santacoder:
        name: str = 'santacoder'
        base_provider: str = 'huggingface'
        best_provider: Provider.Provider = Provider.Vercel

    # ---- Cohere models -----------------------------------------------
    class command_light_nightly:
        name: str = 'command-light-nightly'
        base_provider: str = 'cohere'
        best_provider: Provider.Provider = Provider.Vercel

    class command_nightly:
        name: str = 'command-nightly'
        base_provider: str = 'cohere'
        best_provider: Provider.Provider = Provider.Vercel

    # ---- Legacy OpenAI completion models -----------------------------
    class text_ada_001:
        name: str = 'text-ada-001'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.Vercel

    class text_babbage_001:
        name: str = 'text-babbage-001'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.Vercel

    class text_curie_001:
        name: str = 'text-curie-001'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.Vercel

    class text_davinci_002:
        name: str = 'text-davinci-002'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.Vercel

    class text_davinci_003:
        name: str = 'text-davinci-003'
        base_provider: str = 'openai'
        best_provider: Provider.Provider = Provider.Vercel

    # ---- Google ------------------------------------------------------
    class palm2:
        name: str = 'palm2'
        base_provider: str = 'google'
        best_provider: Provider.Provider = Provider.Poe

    # ---- Open-weight models via H2o ----------------------------------
    class falcon_40b:
        name: str = 'falcon-40b'
        base_provider: str = 'huggingface'
        best_provider: Provider.Provider = Provider.H2o

    class falcon_7b:
        name: str = 'falcon-7b'
        base_provider: str = 'huggingface'
        best_provider: Provider.Provider = Provider.H2o

    class llama_13b:
        name: str = 'llama-13b'
        base_provider: str = 'huggingface'
        best_provider: Provider.Provider = Provider.H2o

    # ---- Llama 2 via PerplexityAI ------------------------------------
    class llama_2_7b_chat:
        name: str = 'llama-2-7b-chat'
        base_provider: str = 'replicate'
        best_provider: Provider.Provider = Provider.PerplexityAI

    class llama_2_13b_chat:
        name: str = 'llama-2-13b-chat'
        base_provider: str = 'replicate'
        best_provider: Provider.Provider = Provider.PerplexityAI

    class llama_2_70b_chat:
        name: str = 'llama-2-70b-chat'
        base_provider: str = 'replicate'
        best_provider: Provider.Provider = Provider.PerplexityAI

    # ---- Image generation & search-chat ------------------------------
    class dall_e:
        name: str = 'dall-e'
        base_provider: str = 'Bing'
        best_provider: Provider.Provider = Provider.Bing

    class kandinsky:
        name: str = 'kandinsky'
        base_provider: str = 'Fusionbrain'
        best_provider: Provider.Provider = Provider.Fusionbrain

    class bing:
        name: str = 'bing'
        base_provider: str = 'Bing'
        best_provider: Provider.Provider = Provider.Bing
|
194 |
+
|
195 |
+
class ModelUtils:
    """Lookup table from model-name strings to ``Model`` entries.

    ``convert`` is consulted by ``ChatCompletion.create`` when the caller
    passes a model name as a string. Several aliases map to the same entry
    (e.g. 'palm', 'bard' and 'chat-bison-001' all resolve to ``palm2``).
    """
    convert: dict = {
        'gpt-3.5-turbo': Model.gpt_35_turbo,
        'gpt-3.5-turbo-0301': Model.gpt_35_turbo_0301,
        'gpt-3.5-turbo-0613': Model.gpt_35_turbo_0613,
        'gpt-3.5-turbo-16k': Model.gpt_35_turbo_16k,
        'gpt-3.5-turbo-16k-0613': Model.gpt_35_turbo_16k_0613,

        'gpt-4': Model.gpt_4,
        'gpt-4-0314': Model.gpt_4_0314,
        'gpt-4-0613': Model.gpt_4_0613,
        'gpt-4-32k': Model.gpt_4_32k,

        'claude-1': Model.claude_1,

        'claude-instant-100k': Model.claude_instant_100k,
        'claude-instant': Model.claude_instant,
        'claude-2': Model.claude_2,
        'claude-2-100k': Model.claude_2_100k,

        'bloom': Model.bloom,

        'flan-t5-xxl': Model.flan_t5_xxl,

        'gpt-neox-20b': Model.gpt_neox_20b,
        'oasst-sft-4-pythia-12b-epoch-3.5': Model.oasst_sft_4_pythia_12b_epoch_35,
        'oasst-sft-1-pythia-12b': Model.oasst_sft_1_pythia_12b,
        'santacoder': Model.santacoder,

        'command-light-nightly': Model.command_light_nightly,
        'command-nightly': Model.command_nightly,

        'text-ada-001': Model.text_ada_001,
        'text-babbage-001': Model.text_babbage_001,
        'text-curie-001': Model.text_curie_001,
        'text-davinci-002': Model.text_davinci_002,
        'text-davinci-003': Model.text_davinci_003,

        # Aliases: all PaLM names resolve to the same entry.
        'palm2': Model.palm2,
        'palm': Model.palm2,
        'chat-bison-001': Model.palm2,
        'bard': Model.palm2,

        'falcon-40b': Model.falcon_40b,
        'falcon-7b': Model.falcon_7b,
        'llama-13b': Model.llama_13b,

        'llama-2-7b-chat': Model.llama_2_7b_chat,
        'llama-2-13b-chat': Model.llama_2_13b_chat,
        'llama-2-70b-chat': Model.llama_2_70b_chat,
        'dall-e': Model.dall_e,
        'kandinsky': Model.kandinsky,
        'bing': Model.bing
    }
|
g4f/typing.py
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Dict, NewType, Union, Optional, List, get_type_hints
|
2 |
+
|
3 |
+
# Distinct type marker for SHA-256 hash values carried as plain strings.
sha256 = NewType('sha_256_hash', str)
|
g4f/utils.py
ADDED
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import browser_cookie3
|
2 |
+
|
3 |
+
|
4 |
+
class Utils:
    """Cookie-extraction helpers built on browser_cookie3."""

    # Candidate cookie stores, ordered by desktop market share so the most
    # likely browser is tried first.
    browsers = [
        browser_cookie3.chrome,    # 62.74% market share
        browser_cookie3.safari,    # 24.12% market share
        browser_cookie3.firefox,   #  4.56% market share
        browser_cookie3.edge,      #  2.85% market share
        browser_cookie3.opera,     #  1.69% market share
        browser_cookie3.brave,     #  0.96% market share
        browser_cookie3.opera_gx,  #  0.64% market share
        browser_cookie3.vivaldi,   #  0.32% market share
    ]

    @staticmethod
    def get_cookies(domain: str, setName: str = None, setBrowser: str = False) -> dict:
        """Collect cookies for *domain* from the locally installed browsers.

        Args:
            domain: Domain name to filter cookies by.
            setName: If given, return only that single cookie as
                ``{setName: value}``; exits the process if it is not found.
            setBrowser: If given (a browser function name such as
                ``'chrome'``), read cookies only from that browser;
                otherwise all known browsers are scanned.

        Returns:
            Mapping of cookie name to value. When several browsers define
            the same cookie, the first browser scanned wins.
        """
        cookies = {}

        # Restrict the scan when a specific browser was requested; the
        # ``!= False`` comparison preserves the original truth semantics.
        if setBrowser != False:
            candidates = [b for b in Utils.browsers if b.__name__ == setBrowser]
        else:
            candidates = Utils.browsers

        for browser in candidates:
            try:
                for c in browser(domain_name=domain):
                    # First browser to supply a cookie name wins.
                    if c.name not in cookies:
                        cookies[c.name] = c.value
            except Exception:
                # Browser not installed / store locked — best-effort, skip.
                pass

        if setName:
            try:
                return {setName: cookies[setName]}
            # Bug fix: a missing dict key raises KeyError, not ValueError —
            # the original handler never fired and the KeyError escaped.
            except KeyError:
                print(f'Error: could not find {setName} cookie in any browser.')
                exit(1)

        else:
            return cookies
|
install.sh
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
pip install -r requirements.txt
|
requirements.txt
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
requests
|
2 |
+
pycryptodome
|
3 |
+
curl_cffi
|
4 |
+
aiohttp
|
5 |
+
certifi
|
6 |
+
browser_cookie3
|
7 |
+
websockets
|
8 |
+
pyexecjs
|
9 |
+
pytz
|
10 |
+
openai
|
11 |
+
python-dotenv
|
12 |
+
websocket-client
|
13 |
+
tls-client
|
14 |
+
free-proxy
|
15 |
+
fastapi[all]==0.100.0
|
16 |
+
slack-sdk==3.21.3
|
17 |
+
poe-api==0.4.17
|
18 |
+
vercel-llm-api==0.3.0
|
19 |
+
beautifulsoup4==4.12.2
|
20 |
+
translate
|
21 |
+
python-socketio
|
start.sh
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
uvicorn app:app --host 0.0.0.0 --port 7860
|
test.py
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import g4f

# Smoke test: send a single message through the EasyChat provider.
# Toggle between streamed (chunk-by-chunk) and single-string responses.
stream = True
response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Provider.EasyChat, messages=[
    {"role": "user", "content": "Hi"}], stream=stream)

if stream:
    # Streaming mode yields chunks; print them as they arrive.
    for message in response:
        print(message, end="")
else:
    # Non-streaming mode returns the joined completion string.
    print(response)
|
vercel.json
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"name": "gpt4free",
|
3 |
+
"version": 2,
|
4 |
+
"framework": null,
|
5 |
+
"builds": [
|
6 |
+
{
|
7 |
+
"src": "app.py",
|
8 |
+
"use": "@vercel/python"
|
9 |
+
}
|
10 |
+
],
|
11 |
+
"rewrites": [
|
12 |
+
{
|
13 |
+
"source": "/(.*)",
|
14 |
+
"destination": "app.py"
|
15 |
+
}
|
16 |
+
],
|
17 |
+
"env": {
|
18 |
+
}
|
19 |
+
}
|