Merge branch 'dev-testing'
- client/html/index.html +3 -3
- g4f/Provider/Providers/Better.py +20 -2
- g4f/Provider/Providers/Fakeopen.py +1 -1
- g4f/Provider/Providers/Lsdev.py +54 -0
- g4f/Provider/__init__.py +1 -0
- g4f/models.py +2 -2
client/html/index.html
CHANGED
@@ -41,7 +41,7 @@
             <i class="fa-brands fa-github"></i>
             <span class="conversation-title">
                 Author: @ramonvc<br />
-                Version: 0.0.
+                Version: 0.0.8-Alpha<br />
             </span>
         </a>
     </div>
@@ -74,8 +74,8 @@
         <option value="gpt-3.5-turbo">GPT-3.5</option>
         <option value="gpt-3.5-turbo-0613">GPT-3.5-0613</option>
         <option value="gpt-3.5-turbo-16k">GPT-3.5-turbo-16k</option>
-        <option value="gpt-3.5-turbo-16k-0613"
-        <option value="gpt-4"
+        <option value="gpt-3.5-turbo-16k-0613">GPT-3.5-turbo-16k-0613</option>
+        <option value="gpt-4-0613" selected>GPT-4</option>
     </select>
 </div>
 <div class="field">
g4f/Provider/Providers/Better.py
CHANGED
@@ -18,8 +18,26 @@ needs_auth = False
 
 def _create_completion(model: str, messages: list, stream: bool, **kwargs):
     headers = {
-        '
-        '
+        'authority': 'edgeservices.bing.com',
+        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+        'accept-language': 'en-US,en;q=0.9',
+        'cache-control': 'max-age=0',
+        'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+        'sec-ch-ua-arch': '"x86"',
+        'sec-ch-ua-bitness': '"64"',
+        'sec-ch-ua-full-version': '"110.0.1587.69"',
+        'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-model': '""',
+        'sec-ch-ua-platform': '"Windows"',
+        'sec-ch-ua-platform-version': '"15.0.0"',
+        'sec-fetch-dest': 'document',
+        'sec-fetch-mode': 'navigate',
+        'sec-fetch-site': 'none',
+        'sec-fetch-user': '?1',
+        'upgrade-insecure-requests': '1',
+        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
+        'x-edge-shopping-flag': '1',
         'Referer': 'https://chat.ylokh.xyz/',
         'Origin': 'https://chat.ylokh.xyz',
     }
g4f/Provider/Providers/Fakeopen.py
CHANGED
@@ -22,7 +22,7 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
         'accept': 'text/event-stream',
         'Cache-Control': 'no-cache',
         'Proxy-Connection': 'keep-alive',
-        'Authorization': f"Bearer {os.environ.get('FAKE_OPEN_KEY', '
+        'Authorization': f"Bearer {os.environ.get('FAKE_OPEN_KEY', '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')}",
     }
 
     json_data = {
g4f/Provider/Providers/Lsdev.py
ADDED
@@ -0,0 +1,54 @@
+import os, uuid, requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://gpt.free.lsdev.me'
+model = ['gpt-4-0613', 'gpt-4-poe']
+supports_stream = True
+needs_auth = True
+
+models = {
+    'gpt-4-0613': {
+        "id":"gpt-4-0613",
+        "name":"GPT-4-0613",
+        "maxLength":24000,
+        "tokenLimit":8192
+    },
+    'claude-instant-100k': {
+        "id":"claude-instant-100k",
+        "name":"CLAUDE-INSTANT-100K"
+    },
+    'gpt-4-poe': {
+        "id":"gpt-4-poe",
+        "name":"GPT-4-POE"
+    },
+}
+
+def _create_completion(model: str, messages: list, stream: bool, chatId: str, **kwargs):
+
+    print(kwargs)
+
+    headers = {
+        'authority': 'gpt.free.lsdev.me',
+        'content-type': 'application/json',
+        'origin': 'https://gpt.free.lsdev.me',
+        'referer': 'https://gpt.free.lsdev.me/zh',
+        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
+    }
+
+    json_data = {
+        'conversationId': chatId,
+        'model': models[model],
+        'messages': messages,
+        'auth': 'oVy1CLB25mA43',
+        'key': '',
+        'prompt': "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+    }
+
+    response = requests.post('https://gpt.free.lsdev.me/api/chat',
+                             headers=headers, json=json_data, stream=stream)
+
+    for token in response.iter_content(chunk_size=2046):
+        yield (token.decode('utf-8'))
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/__init__.py
CHANGED
@@ -16,6 +16,7 @@ from .Providers import (
     Gravityengine,
     H2o,
     hteyun,
+    Lsdev,
     Liaobots,
     Lockchat,
     Mishalsgpt,
g4f/models.py
CHANGED
@@ -21,7 +21,7 @@ class Model:
     class gpt_35_turbo_16k_0613:
         name: str = 'gpt-3.5-turbo-16k-0613'
         base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.
+        best_provider: Provider.Provider = Provider.Gravityengine
         best_providers: list = [Provider.Easychat, Provider.Ezcht, Provider.Better]
 
     class gpt_35_turbo_16k:
@@ -43,7 +43,7 @@ class Model:
     class gpt_4_0613:
         name: str = 'gpt-4-0613'
         base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.
+        best_provider: Provider.Provider = Provider.Lsdev
         best_providers: list = [Provider.Bing, Provider.Lockchat]
 
     class claude_instant_v1_100k:
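
A rough note on why the best_provider lines above matter: when a caller does not pass a provider explicitly, the library is expected to fall back to the model's best_provider, so gpt-4-0613 now defaults to the new Lsdev provider and gpt-3.5-turbo-16k-0613 to Gravityengine. The fallback below is an illustrative assumption about that dispatch, not a quote of this repo's g4f code.

    from g4f import models

    # Illustrative fallback (assumed): prefer an explicit provider, otherwise
    # use the model's best_provider as set in g4f/models.py.
    def resolve_provider(model, provider=None):
        return provider if provider is not None else model.best_provider

    engine = resolve_provider(models.Model.gpt_4_0613)             # -> Provider.Lsdev
    engine = resolve_provider(models.Model.gpt_35_turbo_16k_0613)  # -> Provider.Gravityengine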