Fix: could not build wheels for quickjs
- g4f/Provider/Providers/Vercel.py +0 -157
- g4f/Provider/__init__.py +0 -1
- g4f/__init__.py +1 -1
- requirements.txt +0 -1
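
quickjs ships as a C extension, so when pip finds no prebuilt wheel for the host platform it falls back to compiling the module from source, which is presumably the build that failed here. Since the Vercel provider was the only code importing quickjs, this commit deletes the provider and drops the pin from requirements.txt. A hypothetical pre-flight check (not part of the commit, run from the repo root) to confirm nothing else references the package:

import pathlib

# hypothetical check: list every module that still references quickjs;
# before this change only the Vercel provider should show up, and after
# it the list should be empty
hits = [str(p) for p in pathlib.Path('g4f').rglob('*.py')
        if 'quickjs' in p.read_text(encoding='utf-8')]
print(hits)
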
g4f/Provider/Providers/Vercel.py
DELETED
@@ -1,157 +0,0 @@
-import os
-import json
-import base64
-import quickjs
-import queue
-import threading
-
-from curl_cffi import requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://play.vercel.ai'
-model = None
-supports_stream = True
-
-models = {
-    'claude-instant-v1': 'anthropic:claude-instant-v1',
-    'claude-v1': 'anthropic:claude-v1',
-    'alpaca-7b': 'replicate:replicate/alpaca-7b',
-    'stablelm-tuned-alpha-7b': 'replicate:stability-ai/stablelm-tuned-alpha-7b',
-    'bloom': 'huggingface:bigscience/bloom',
-    'bloomz': 'huggingface:bigscience/bloomz',
-    'flan-t5-xxl': 'huggingface:google/flan-t5-xxl',
-    'flan-ul2': 'huggingface:google/flan-ul2',
-    'gpt-neox-20b': 'huggingface:EleutherAI/gpt-neox-20b',
-    'oasst-sft-4-pythia-12b-epoch-3.5': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
-    'santacoder': 'huggingface:bigcode/santacoder',
-    'command-medium-nightly': 'cohere:command-medium-nightly',
-    'command-xlarge-nightly': 'cohere:command-xlarge-nightly',
-    'gpt-4': 'openai:gpt-4',
-    'code-cushman-001': 'openai:code-cushman-001',
-    'code-davinci-002': 'openai:code-davinci-002',
-    'gpt-3.5-turbo': 'openai:gpt-3.5-turbo',
-    'text-ada-001': 'openai:text-ada-001',
-    'text-babbage-001': 'openai:text-babbage-001',
-    'text-curie-001': 'openai:text-curie-001',
-    'text-davinci-002': 'openai:text-davinci-002',
-    'text-davinci-003': 'openai:text-davinci-003'
-}
-
-vercel_models = {'anthropic:claude-instant-v1': {'id': 'anthropic:claude-instant-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-instant-v1'}, 'anthropic:claude-v1': {'id': 'anthropic:claude-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-v1'}, 'replicate:replicate/alpaca-7b': {'id': 'replicate:replicate/alpaca-7b', 'provider': 'replicate', 'providerHumanName': 'Replicate', 'makerHumanName': 'Stanford', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '2014ee1247354f2e81c0b3650d71ca715bc1e610189855f134c30ecb841fae21', 'name': 'alpaca-7b'}, 'replicate:stability-ai/stablelm-tuned-alpha-7b': {'id': 'replicate:stability-ai/stablelm-tuned-alpha-7b', 'provider': 'replicate', 'makerHumanName': 'StabilityAI', 'providerHumanName': 'Replicate', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '4a9a32b4fd86c2d047f1d271fa93972683ec6ef1cf82f402bd021f267330b50b', 'name': 'stablelm-tuned-alpha-7b'}, 'huggingface:bigscience/bloom': {'id': 'huggingface:bigscience/bloom', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': "Do NOT talk to Bloom as an entity, it's not a chatbot but a webpage/blog/article completion model. For the best results: mimic a few words of a webpage similar to the content you want to generate. Start a sentence as if YOU were writing a blog, webpage, math post, coding article and Bloom will generate a coherent follow-up.", 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloom'}, 'huggingface:bigscience/bloomz': {'id': 'huggingface:bigscience/bloomz', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': 'We recommend using the model to perform tasks expressed in natural language. For example, given the prompt "Translate to English: Je t\'aime.", the model will most likely answer "I love you.".', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloomz'}, 'huggingface:google/flan-t5-xxl': {'id': 'huggingface:google/flan-t5-xxl', 'provider': 'huggingface', 'makerHumanName': 'Google', 'providerHumanName': 'HuggingFace', 'name': 'flan-t5-xxl', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}}, 'huggingface:google/flan-ul2': {'id': 'huggingface:google/flan-ul2', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'Google', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'flan-ul2'}, 'huggingface:EleutherAI/gpt-neox-20b': {'id': 'huggingface:EleutherAI/gpt-neox-20b', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'EleutherAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-neox-20b'}, 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'OpenAssistant', 'parameters': {'maximumLength': {'value': 200, 'range': [50, 1024]}, 'typicalP': {'value': 0.2, 'range': [0.1, 0.99]}, 'repetitionPenalty': {'value': 1, 'range': [0.1, 2]}}, 'name': 'oasst-sft-4-pythia-12b-epoch-3.5'}, 'huggingface:bigcode/santacoder': {
-'id': 'huggingface:bigcode/santacoder', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigCode', 'instructions': 'The model was trained on GitHub code. As such it is not an instruction model and commands like "Write a function that computes the square root." do not work well. You should phrase commands like they occur in source code such as comments (e.g. # the following function computes the sqrt) or write a function signature and docstring and let the model complete the function body.', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'santacoder'}, 'cohere:command-medium-nightly': {'id': 'cohere:command-medium-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-medium-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'cohere:command-xlarge-nightly': {'id': 'cohere:command-xlarge-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-xlarge-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:gpt-4': {'id': 'openai:gpt-4', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'gpt-4', 'minBillingTier': 'pro', 'parameters': {'temperature': {'value': 0.7, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:code-cushman-001': {'id': 'openai:code-cushman-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-cushman-001'}, 'openai:code-davinci-002': {'id': 'openai:code-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-davinci-002'}, 'openai:gpt-3.5-turbo': {'id': 'openai:gpt-3.5-turbo', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.7, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-3.5-turbo'}, 'openai:text-ada-001': {'id': 'openai:text-ada-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-ada-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-babbage-001': {'id': 'openai:text-babbage-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-babbage-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-curie-001': {'id': 'openai:text-curie-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-curie-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-002': {'id': 'openai:text-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-002', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-003': {'id': 'openai:text-davinci-003', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-003', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}}
-
-
-# based on https://github.com/ading2210/vercel-llm-api // modified
-class Client:
-    def __init__(self):
-        self.session = requests.Session(impersonate='chrome110')
-        self.headers = {
-            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110 Safari/537.36',
-            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
-            'Accept-Encoding': 'gzip, deflate, br',
-            'Accept-Language': 'en-US,en;q=0.5',
-            'Te': 'trailers',
-            'Upgrade-Insecure-Requests': '1'
-        }
-        self.session.headers.update(self.headers)
-
-    def get_token(self):
-        b64 = self.session.get('https://play.vercel.ai/openai.jpeg').text
-        data = json.loads(base64.b64decode(b64))
-
-        script = 'const globalThis = {data: `sentinel`}; (%s)(%s)' % (
-            data['c'], data['a'])
-
-        token_data = json.loads(quickjs.Context().eval(script).json())
-        token_string = json.dumps(separators=(',', ':'),
-            obj={'r': token_data, 't': data['t']})
-
-        return base64.b64encode(token_string.encode()).decode()
-
-    def get_default_params(self, model_id):
-        return {key: param['value'] for key, param in vercel_models[model_id]['parameters'].items()}
-
-    def generate(self, model_id: str, prompt: str, params: dict = {}):
-        if ':' not in model_id:
-            model_id = models[model_id]
-
-        defaults = self.get_default_params(model_id)
-
-        payload = defaults | params | {
-            'prompt': prompt,
-            'model': model_id,
-        }
-
-        headers = self.headers | {
-            'Accept-Encoding': 'gzip, deflate, br',
-            'Custom-Encoding': self.get_token(),
-            'Host': 'play.vercel.ai',
-            'Origin': 'https://play.vercel.ai',
-            'Referrer': 'https://play.vercel.ai',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-origin',
-        }
-
-        chunks_queue = queue.Queue()
-        error = None
-        response = None
-
-        def callback(data):
-            chunks_queue.put(data.decode())
-
-        def request_thread():
-            nonlocal response, error
-            for _ in range(3):
-                try:
-                    response = self.session.post('https://play.vercel.ai/api/generate',
-                        json=payload, headers=headers, content_callback=callback)
-                    response.raise_for_status()
-
-                except Exception as e:
-                    if _ == 2:
-                        error = e
-
-                    else:
-                        continue
-
-        thread = threading.Thread(target=request_thread, daemon=True)
-        thread.start()
-
-        text = ''
-        index = 0
-        while True:
-            try:
-                chunk = chunks_queue.get(block=True, timeout=0.1)
-
-            except queue.Empty:
-                if error:
-                    raise error
-
-                elif response:
-                    break
-
-                else:
-                    continue
-
-            text += chunk
-            lines = text.split('\n')
-
-            if len(lines) - 1 > index:
-                new = lines[index:-1]
-                for word in new:
-                    yield json.loads(word)
-                index = len(lines) - 1
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-    conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
-
-    for message in messages:
-        conversation += '%s: %s\n' % (message['role'], message['content'])
-
-    conversation += 'assistant: '
-
-    yield from Client().generate(model, conversation)
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/__init__.py
CHANGED
@@ -10,7 +10,6 @@ from .Providers import (
     Ora,
     Aws,
     Bard,
-    Vercel,
     Pierangelo,
     Forefront
 )
g4f/__init__.py
CHANGED
@@ -23,7 +23,7 @@ class Model(metaclass=MetaModels):
     class davinvi_003:
         name: str = 'davinvi-003'
         base_provider: str = 'openai'
-        best_site: Provider.Provider = Provider.
+        best_site: Provider.Provider = Provider.Forefront
 
 class Utils:
     convert: dict = {
requirements.txt
CHANGED
@@ -59,7 +59,6 @@ pyrsistent==0.19.3
 PySocks==1.7.1
 python-dateutil==2.8.2
 pytz==2023.3
-quickjs==1.19.2
 random-username==1.0.2
 requests==2.31.0
 rfc3986==1.5.0
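
After applying the change, a minimal smoke test (a sketch, not part of the commit, assuming g4f is importable from the repo root and that Model exposes its nested classes as plain attributes):

import g4f

# hypothetical smoke test: the package should now import without quickjs
# installed, the Vercel provider should no longer be exported, and
# davinvi_003 should point at its new best_site
assert not hasattr(g4f.Provider, 'Vercel')
assert g4f.Model.davinvi_003.best_site is g4f.Provider.Forefront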