monra committed on
Commit
355bae1
1 Parent(s): 1e2f57f

Fix: 'g4f.Provider.Providers' (unknown location)

Browse files
g4f/Provider/Providers/Aichat.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os, requests
2
+ from ...typing import sha256, Dict, get_type_hints
3
+
4
+ url = 'https://chat-gpt.org/chat'
5
+ model = ['gpt-3.5-turbo']
6
+ supports_stream = False
7
+
8
+ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
9
+ headers = {
10
+ 'authority': 'chat-gpt.org',
11
+ 'accept': '*/*',
12
+ 'cache-control': 'no-cache',
13
+ 'content-type': 'application/json',
14
+ 'origin': 'https://chat-gpt.org',
15
+ 'pragma': 'no-cache',
16
+ 'referer': 'https://chat-gpt.org/chat',
17
+ 'sec-ch-ua-mobile': '?0',
18
+ 'sec-ch-ua-platform': '"macOS"',
19
+ 'sec-fetch-dest': 'empty',
20
+ 'sec-fetch-mode': 'cors',
21
+ 'sec-fetch-site': 'same-origin',
22
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
23
+ }
24
+
25
+ json_data = {
26
+ 'message': messages[-1]['content'],
27
+ 'temperature': 1,
28
+ 'presence_penalty': 0,
29
+ 'top_p': 1,
30
+ 'frequency_penalty': 0
31
+ }
32
+
33
+ response = requests.post('https://chat-gpt.org/api/text', headers=headers, json=json_data)
34
+ yield response.json()['message']
35
+
36
+ params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
37
+ '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/Providers/Aws.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import requests

from ...typing import sha256, Dict, get_type_hints

# Provider metadata consumed by the g4f loader.
url = 'https://4aiu6ctrknfxkoaigkigzh5lwm0cciuc.lambda-url.ap-east-1.on.aws/chat/completions'
model = ['gpt-3.5-turbo', 'gpt-4']
supports_stream = False

class Auth(requests.auth.AuthBase):
    """requests auth hook that attaches a bearer token to every request."""

    def __init__(self):
        # NOTE(review): hard-coded token — presumably a shared/placeholder key
        # that this lambda endpoint accepts; confirm it is not a real secret.
        self.token = 'sk-1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL'

    def __call__(self, r):
        r.headers["authorization"] = "Bearer " + self.token
        return r

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """POST the full message list to the lambda endpoint and yield the single
    completion string from the OpenAI-style response payload."""

    response = requests.post(url,
        auth=Auth(), json={"model": model,"messages": messages})

    yield (response.json()['choices'][0]['message']['content'])

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/Providers/Bard.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# implement proxy argument

import os, requests, json, browser_cookie3, re, random
from ...typing import sha256, Dict, get_type_hints

# Provider metadata consumed by the g4f loader.
url = 'https://bard.google.com'
model = ['Palm2']
supports_stream = False

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Query Google Bard using the local Chrome __Secure-1PSID cookie.

    NOTE(review): `proxy` is hard-coded to None below, so as written this
    function ALWAYS raises before making any request — a proxy must be
    filled in by hand.
    """
    # Pull the Bard session cookie straight out of the local Chrome profile.
    psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
        domain_name='.google.com')}['__Secure-1PSID']

    # Flatten the chat history into a single "role: content" transcript.
    formatted = '\n'.join([
        '%s: %s' % (message['role'], message['content']) for message in messages
    ])
    prompt = f'{formatted}\nAssistant:'

    proxy = None

    if proxy == None:
        raise Exception('Proxy is required for Bard (set in g4f/Provider/Providers/Bard.py line 18)')

    # Fresh conversation: no SNlM0e token or conversation ids yet.
    snlm0e = False
    conversation_id = None
    response_id = None
    choice_id = None

    client = requests.Session()
    client.proxies = {
        'http': f'https://{proxy}',
        'https': f'https://{proxy}'} if proxy else None

    client.headers = {
        'authority': 'bard.google.com',
        'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
        'origin': 'https://bard.google.com',
        'referer': 'https://bard.google.com/',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
        'x-same-domain': '1',
        'cookie': f'__Secure-1PSID={psid}'
    }

    # Scrape the per-session SNlM0e anti-CSRF token from the Bard home page.
    snlm0e = re.search(r'SNlM0e\":\"(.*?)\"',
                    client.get('https://bard.google.com/').text).group(1) if not snlm0e else snlm0e

    params = {
        'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
        '_reqid': random.randint(1111, 9999),
        'rt': 'c'
    }

    # Bard expects the prompt wrapped in doubly JSON-encoded "f.req" data.
    data = {
        'at': snlm0e,
        'f.req': json.dumps([None, json.dumps([[prompt], None, [conversation_id, response_id, choice_id]])])}

    intents = '.'.join([
        'assistant',
        'lamda',
        'BardFrontendService'
    ])

    response = client.post(f'https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate',
                        data=data, params=params)

    # The batched response encodes the chat payload on the 4th line.
    chat_data = json.loads(response.content.splitlines()[3])[0][2]
    if chat_data:
        json_chat_data = json.loads(chat_data)

        yield json_chat_data[0][0]

    else:
        yield 'error'

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/Providers/Bing.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import json
import time
import subprocess

from ...typing import sha256, Dict, get_type_hints

# Provider metadata consumed by the g4f loader.
url = 'https://bing.com/chat'
model = ['gpt-3.5-turbo', 'gpt-4']
supports_stream = True


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Run helpers/bing.py in a subprocess and stream its stdout lines."""
    helper_dir = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({
        'messages': messages,
        'model': model}, separators=(',', ':'))

    # Prefer an explicit `python3` interpreter when one is available.
    try:
        subprocess.run(["python3", "--version"], capture_output=True, check=True)
        interpreter = "python3"
    except subprocess.CalledProcessError:
        interpreter = "python"

    proc = subprocess.Popen([interpreter, f"{helper_dir}/helpers/bing.py", config],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    for raw_line in iter(proc.stdout.readline, b''):
        yield raw_line.decode('utf-8', errors='ignore')


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(f"{name}: {get_type_hints(_create_completion)[name].__name__}"
                       for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount])
g4f/Provider/Providers/Openai.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import json
import time
import subprocess

from ...typing import sha256, Dict, get_type_hints

# Provider metadata consumed by the g4f loader.
url = 'https://chat.openai.com/chat'
model = ['gpt-3.5-turbo']
supports_stream = True


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Run a helper script in a subprocess and stream its stdout lines.

    NOTE(review): this is the OpenAI provider but it invokes
    helpers/bing.py — looks copy-pasted from Bing.py; confirm which
    helper script was intended.
    """

    path = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({
        'messages': messages,
        'model': model}, separators=(',', ':'))

    # Prefer `python3` when available; fall back to `python`.
    try:
        subprocess.run(["python3", "--version"], capture_output=True, check=True)
        cmd = ["python3", f"{path}/helpers/bing.py", config]
    except subprocess.CalledProcessError:
        cmd = ["python", f"{path}/helpers/bing.py", config]

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    for line in iter(p.stdout.readline, b''):
        #print(line)
        yield line.decode('utf-8', errors='ignore') #[:-1]


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/Providers/Ora.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os, requests, uuid
from ...typing import sha256, Dict, get_type_hints

# Provider metadata consumed by the g4f loader.
url = 'https://ora.ai'
model = ['gpt-3.5-turbo', 'gpt-4']
supports_stream = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Send the latest message to the ora.ai conversation API and yield the reply."""
    browser_headers = {
        'authority': 'ora.ai',
        'accept': '*/*',
        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'cache-control': 'no-cache',
        'content-type': 'application/json',
        'origin': 'https://ora.ai',
        'pragma': 'no-cache',
        'referer': 'https://ora.ai/chat/',
        'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
    }

    # Only the last message is sent; a throwaway per-call user id is generated.
    payload = {
        'chatbotId': 'adb2b793-e667-46b9-8d80-114eaa9a4c40',
        'input': messages[-1]['content'],
        'userId': f'auto:{uuid.uuid4()}',
        'provider': 'OPEN_AI',
        'config': False,
        'includeHistory': False
    }

    reply = requests.post('https://ora.ai/api/conversation',
                          headers=browser_headers, json=payload)

    yield reply.json()['response']


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(f"{name}: {get_type_hints(_create_completion)[name].__name__}"
                       for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount])
g4f/Provider/Providers/Phind.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import json
import time
import subprocess

from ...typing import sha256, Dict, get_type_hints

# Provider metadata consumed by the g4f loader.
url = 'https://phind.com'
model = ['gpt-3.5-turbo', 'gpt-4']
supports_stream = True

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Run helpers/phind.py in a subprocess and stream its stdout as tokens.

    Yields decoded output lines. If Cloudflare serves its challenge page
    instead of the API response, yields an error message and hard-exits.
    """

    path = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({
        'model': model,
        'messages': messages}, separators=(',', ':'))

    cmd = ['python', f'{path}/helpers/phind.py', config]

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    for line in iter(p.stdout.readline, b''):
        if b'<title>Just a moment...</title>' in line:
            # Cloudflare challenge page detected instead of the API stream.
            os.system('clear' if os.name == 'posix' else 'cls')
            # Fixed typo in the user-facing message: 'Clouflare' -> 'Cloudflare'.
            yield 'Cloudflare error, please try again...'
            os._exit(0)  # hard-exit: the helper cannot recover mid-stream

        else:
            if b'ping - 2023-' in line:
                # Keep-alive ping lines from the helper, not content.
                continue

            yield line.decode('utf-8', errors='ignore') #[:-1]


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/Providers/Pierangelo.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import requests
from ...typing import sha256, Dict, get_type_hints

# Provider metadata consumed by the g4f loader.
url = 'https://chat.pierangelo.info'
model = ['gpt-4', 'gpt-3.5-turbo']
supports_stream = True

# Maps g4f model names to the {id, name} objects the backend expects.
models = {
    'gpt-4': {
        'id':'gpt-4',
        'name':'GPT-4'
    },
    'gpt-3.5-turbo': {
        'id':'gpt-3.5-turbo',
        'name':'GPT-3.5'
    }
}

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Stream a completion from the chat.pierangelo.info API.

    NOTE(review): iterating a `requests.Response` directly yields raw *bytes*
    chunks, not decoded text — callers appear to accept that; confirm before
    changing to `iter_content`/decoding.
    """

    headers = {
        'authority': 'chat.pierangelo.info',
        'accept': '*/*',
        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'cache-control': 'no-cache',
        'content-type': 'application/json',
        'origin': 'https://chat.pierangelo.info',
        'pragma': 'no-cache',
        'referer': 'https://chat.pierangelo.info/',
        'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
    }

    json_data = {
        'model': models[model],
        'messages': messages,
        'key': '',
        'prompt': "You are ChatGPT, a large language model trained by OpenAI. Answer consisely",
        'temperature': 0.7
    }

    response = requests.post('https://chat.pierangelo.info/api/chat',
        headers=headers, json=json_data, stream=True)

    for token in response:
        yield (token)

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f'{name}: {get_type_hints(_create_completion)[name].__name__}' for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/Providers/Theb.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import json
import time
import subprocess

from ...typing import sha256, Dict, get_type_hints

# Provider metadata consumed by the g4f loader.
url = 'https://theb.ai'
model = ['gpt-3.5-turbo']
supports_stream = True

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Run a helper script in a subprocess and stream its stdout lines.

    NOTE(review): this is the Theb provider but it invokes helpers/bing.py —
    looks copy-pasted from Bing.py; confirm which helper script was intended.
    """

    path = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({
        'messages': messages,
        'model': model}, separators=(',', ':'))

    # Prefer `python3` when available; fall back to `python`.
    try:
        subprocess.run(["python3", "--version"], capture_output=True, check=True)
        cmd = ["python3", f"{path}/helpers/bing.py", config]
    except subprocess.CalledProcessError:
        cmd = ["python", f"{path}/helpers/bing.py", config]

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    for line in iter(p.stdout.readline, b''):
        #print(line)
        yield line.decode('utf-8', errors='ignore') #[:-1]


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/Providers/Vercel.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import base64
4
+ import quickjs
5
+ import queue
6
+ import threading
7
+
8
+ from curl_cffi import requests
9
+ from ...typing import sha256, Dict, get_type_hints
10
+
11
+ url = 'https://play.vercel.ai'
12
+ model = None
13
+ supports_stream = True
14
+
15
# Short model aliases -> fully-qualified "provider:model" ids accepted by the
# Vercel playground API (per-model parameter defaults live in `vercel_models`).
models = {
    'claude-instant-v1': 'anthropic:claude-instant-v1',
    'claude-v1': 'anthropic:claude-v1',
    'alpaca-7b': 'replicate:replicate/alpaca-7b',
    'stablelm-tuned-alpha-7b': 'replicate:stability-ai/stablelm-tuned-alpha-7b',
    'bloom': 'huggingface:bigscience/bloom',
    'bloomz': 'huggingface:bigscience/bloomz',
    'flan-t5-xxl': 'huggingface:google/flan-t5-xxl',
    'flan-ul2': 'huggingface:google/flan-ul2',
    'gpt-neox-20b': 'huggingface:EleutherAI/gpt-neox-20b',
    'oasst-sft-4-pythia-12b-epoch-3.5': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
    'santacoder': 'huggingface:bigcode/santacoder',
    'command-medium-nightly': 'cohere:command-medium-nightly',
    'command-xlarge-nightly': 'cohere:command-xlarge-nightly',
    'gpt-4': 'openai:gpt-4',
    'code-cushman-001': 'openai:code-cushman-001',
    'code-davinci-002': 'openai:code-davinci-002',
    'gpt-3.5-turbo': 'openai:gpt-3.5-turbo',
    'text-ada-001': 'openai:text-ada-001',
    'text-babbage-001': 'openai:text-babbage-001',
    'text-curie-001': 'openai:text-curie-001',
    'text-davinci-002': 'openai:text-davinci-002',
    'text-davinci-003': 'openai:text-davinci-003'
}
39
+
40
+ vercel_models = {'anthropic:claude-instant-v1': {'id': 'anthropic:claude-instant-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-instant-v1'}, 'anthropic:claude-v1': {'id': 'anthropic:claude-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-v1'}, 'replicate:replicate/alpaca-7b': {'id': 'replicate:replicate/alpaca-7b', 'provider': 'replicate', 'providerHumanName': 'Replicate', 'makerHumanName': 'Stanford', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '2014ee1247354f2e81c0b3650d71ca715bc1e610189855f134c30ecb841fae21', 'name': 'alpaca-7b'}, 'replicate:stability-ai/stablelm-tuned-alpha-7b': {'id': 'replicate:stability-ai/stablelm-tuned-alpha-7b', 'provider': 'replicate', 'makerHumanName': 'StabilityAI', 'providerHumanName': 'Replicate', 'parameters': 
{'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '4a9a32b4fd86c2d047f1d271fa93972683ec6ef1cf82f402bd021f267330b50b', 'name': 'stablelm-tuned-alpha-7b'}, 'huggingface:bigscience/bloom': {'id': 'huggingface:bigscience/bloom', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': "Do NOT talk to Bloom as an entity, it's not a chatbot but a webpage/blog/article completion model. For the best results: mimic a few words of a webpage similar to the content you want to generate. Start a sentence as if YOU were writing a blog, webpage, math post, coding article and Bloom will generate a coherent follow-up.", 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloom'}, 'huggingface:bigscience/bloomz': {'id': 'huggingface:bigscience/bloomz', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': 'We recommend using the model to perform tasks expressed in natural language. 
For example, given the prompt "Translate to English: Je t\'aime.", the model will most likely answer "I love you.".', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloomz'}, 'huggingface:google/flan-t5-xxl': {'id': 'huggingface:google/flan-t5-xxl', 'provider': 'huggingface', 'makerHumanName': 'Google', 'providerHumanName': 'HuggingFace', 'name': 'flan-t5-xxl', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}}, 'huggingface:google/flan-ul2': {'id': 'huggingface:google/flan-ul2', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'Google', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'flan-ul2'}, 'huggingface:EleutherAI/gpt-neox-20b': {'id': 'huggingface:EleutherAI/gpt-neox-20b', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'EleutherAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-neox-20b'}, 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', 'provider': 'huggingface', 
'providerHumanName': 'HuggingFace', 'makerHumanName': 'OpenAssistant', 'parameters': {'maximumLength': {'value': 200, 'range': [50, 1024]}, 'typicalP': {'value': 0.2, 'range': [0.1, 0.99]}, 'repetitionPenalty': {'value': 1, 'range': [0.1, 2]}}, 'name': 'oasst-sft-4-pythia-12b-epoch-3.5'}, 'huggingface:bigcode/santacoder': {
41
+ 'id': 'huggingface:bigcode/santacoder', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigCode', 'instructions': 'The model was trained on GitHub code. As such it is not an instruction model and commands like "Write a function that computes the square root." do not work well. You should phrase commands like they occur in source code such as comments (e.g. # the following function computes the sqrt) or write a function signature and docstring and let the model complete the function body.', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'santacoder'}, 'cohere:command-medium-nightly': {'id': 'cohere:command-medium-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-medium-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'cohere:command-xlarge-nightly': {'id': 'cohere:command-xlarge-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-xlarge-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:gpt-4': {'id': 'openai:gpt-4', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 
'OpenAI', 'name': 'gpt-4', 'minBillingTier': 'pro', 'parameters': {'temperature': {'value': 0.7, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:code-cushman-001': {'id': 'openai:code-cushman-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-cushman-001'}, 'openai:code-davinci-002': {'id': 'openai:code-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-davinci-002'}, 'openai:gpt-3.5-turbo': {'id': 'openai:gpt-3.5-turbo', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.7, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-3.5-turbo'}, 'openai:text-ada-001': {'id': 'openai:text-ada-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-ada-001', 'parameters': 
{'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-babbage-001': {'id': 'openai:text-babbage-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-babbage-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-curie-001': {'id': 'openai:text-curie-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-curie-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-002': {'id': 'openai:text-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-002', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-003': {'id': 'openai:text-davinci-003', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-003', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': 
[50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}}
42
+
43
+
44
# based on https://github.com/ading2210/vercel-llm-api // modified
class Client:
    """Minimal client for the play.vercel.ai playground API."""

    def __init__(self):
        # curl_cffi session impersonating Chrome 110 (TLS fingerprint).
        self.session = requests.Session(impersonate='chrome110')
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US,en;q=0.5',
            'Te': 'trailers',
            'Upgrade-Insecure-Requests': '1'
        }
        self.session.headers.update(self.headers)

    def get_token(self):
        """Fetch and solve the anti-bot challenge; return the base64 token
        later sent in the `Custom-Encoding` request header."""
        # The "jpeg" endpoint actually returns base64-encoded JSON holding a
        # JS function source ('c') and its argument ('a').
        b64 = self.session.get('https://play.vercel.ai/openai.jpeg').text
        data = json.loads(base64.b64decode(b64))

        # Evaluate the challenge function in an embedded JS engine.
        script = 'const globalThis = {data: `sentinel`}; (%s)(%s)' % (
            data['c'], data['a'])

        token_data = json.loads(quickjs.Context().eval(script).json())
        token_string = json.dumps(separators=(',', ':'),
            obj={'r': token_data, 't': data['t']})

        return base64.b64encode(token_string.encode()).decode()

    def get_default_params(self, model_id):
        # Default value of every tunable parameter for the given model id.
        return {key: param['value'] for key, param in vercel_models[model_id]['parameters'].items()}

    def generate(self, model_id: str, prompt: str, params: dict = {}):
        """Yield decoded JSON chunks streamed from /api/generate.

        NOTE(review): `params` has a mutable default; it is only read
        (merged via `|`), never mutated, so this is safe as written.
        """
        # Accept short aliases ('gpt-4') as well as full ids ('openai:gpt-4').
        if not ':' in model_id:
            model_id = models[model_id]

        defaults = self.get_default_params(model_id)

        # Caller-supplied params override defaults; prompt/model always win.
        payload = defaults | params | {
            'prompt': prompt,
            'model': model_id,
        }

        headers = self.headers | {
            'Accept-Encoding': 'gzip, deflate, br',
            'Custom-Encoding': self.get_token(),
            'Host': 'play.vercel.ai',
            'Origin': 'https://play.vercel.ai',
            'Referrer': 'https://play.vercel.ai',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
        }

        # Response body chunks arrive via a worker thread and a queue so this
        # generator can yield tokens as soon as they are received.
        chunks_queue = queue.Queue()
        error = None
        response = None

        def callback(data):
            # Invoked by curl_cffi with each raw body chunk.
            chunks_queue.put(data.decode())

        def request_thread():
            nonlocal response, error
            # Up to 3 attempts; only the final failure is surfaced.
            for _ in range(3):
                try:
                    response = self.session.post('https://play.vercel.ai/api/generate',
                        json=payload, headers=headers, content_callback=callback)
                    response.raise_for_status()

                except Exception as e:
                    if _ == 2:
                        error = e

                    else:
                        continue

        thread = threading.Thread(target=request_thread, daemon=True)
        thread.start()

        text = ''
        index = 0
        while True:
            try:
                chunk = chunks_queue.get(block=True, timeout=0.1)

            except queue.Empty:
                # No new data: re-raise a terminal error, stop once the
                # request finished, otherwise keep polling.
                if error:
                    raise error

                elif response:
                    break

                else:
                    continue

            # Chunks are newline-delimited JSON; yield each complete line
            # exactly once, tracking how many lines were already emitted.
            text += chunk
            lines = text.split('\n')

            if len(lines) - 1 > index:
                new = lines[index:-1]
                for word in new:
                    yield json.loads(word)
                index = len(lines) - 1
145
+
146
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Flatten the chat history into one prompt and stream tokens from Vercel."""
    preamble = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
    history = ''.join('%s: %s\n' % (msg['role'], msg['content']) for msg in messages)
    prompt = preamble + history + 'assistant: '

    # Delegate token-by-token streaming to the playground client.
    yield from Client().generate(model, prompt)

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(f"{name}: {get_type_hints(_create_completion)[name].__name__}"
                       for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount])
g4f/Provider/Providers/You.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import time
4
+ import subprocess
5
+
6
+ from ...typing import sha256, Dict, get_type_hints
7
+
8
# Target site and the single model this provider exposes.
url = 'https://you.com'
model = 'gpt-3.5-turbo'
# The helper subprocess prints tokens as they arrive, so streaming works.
supports_stream = True
11
+
12
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Spawn the you.com helper script and yield its stdout line by line.

    The helper performs the actual HTTP work; *messages* are passed to it as
    a compact JSON blob on the command line.  stderr is merged into stdout so
    helper errors surface in the stream.
    """
    import sys  # local import: keeps this module's top-level imports untouched

    path = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({
        'messages': messages}, separators=(',', ':'))

    # sys.executable instead of a hard-coded 'python3': works on Windows and
    # inside virtualenvs where 'python3' may not be on PATH.
    cmd = [sys.executable, f'{path}/helpers/you.py', config]

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    try:
        for line in iter(p.stdout.readline, b''):
            yield line.decode('utf-8')
    finally:
        # Release the pipe and reap the child even if the consumer stops early.
        p.stdout.close()
        p.wait()
g4f/Provider/Providers/Yqcloud.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ import requests
4
+
5
+ from ...typing import sha256, Dict, get_type_hints
6
# Responses are relayed chunk-by-chunk from the streaming endpoint.
supports_stream = True
7
+
8
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Stream a reply for the last user message from the yqcloud API.

    Yields decoded text chunks; chunks containing the service's English-only
    nag line are dropped, matching the original filter.
    """
    import codecs  # local import: keeps this module's top-level imports untouched

    headers = {
        'authority': 'api.aichatos.cloud',
        'origin': 'https://chat9.yqcloud.top',
        'referer': 'https://chat9.yqcloud.top/',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
    }

    json_data = {
        'prompt': '%s' % messages[-1]['content'],
        # The site keys conversations by a timestamp-based pseudo user id.
        'userId': f'#/chat/{int(time.time() * 1000)}',
        'network': True,
        'apikey': '',
        'system': '',
        'withoutContext': False,
    }

    response = requests.post('https://api.aichatos.cloud/api/generateStream',
                             headers=headers, json=json_data, stream=True)

    # Bug fix: a fixed-size chunk can end in the middle of a multi-byte UTF-8
    # sequence, which a plain bytes.decode('utf-8') would raise on.  An
    # incremental decoder carries partial sequences over to the next chunk.
    decoder = codecs.getincrementaldecoder('utf-8')()
    for token in response.iter_content(chunk_size=2046):
        if not b'always respond in english' in token:
            yield decoder.decode(token)

    # Flush any trailing bytes the decoder is still holding.
    tail = decoder.decode(b'', final=True)
    if tail:
        yield tail
30
+
31
# Introspected signature string used when listing providers, e.g.
# "g4f.Providers.Yqcloud supports: (model: str, messages: list, stream: bool)".
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/Providers/helpers/bing.py ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import ssl
3
+ import uuid
4
+ import json
5
+ import time
6
+ import random
7
+ import asyncio
8
+ import certifi
9
+ import requests
10
+ import websockets
11
+ import browser_cookie3
12
+
13
# Request config (model + messages) passed by the parent provider as a JSON
# blob in argv[1].
config = json.loads(sys.argv[1])

# TLS context backed by certifi's CA bundle, used for the ChatHub websocket.
ssl_context = ssl.create_default_context()
ssl_context.load_verify_locations(certifi.where())
17
+
18
+
19
+
20
# Bing ChatHub optionsSets per requested model.  NOTE(review): the labels are
# loose — 'gpt-4' actually carries the creative ('h3imaginative') flag set,
# while 'gpt-3.5-turbo' carries the precise ('h3precise'/'clgalileo') set and
# 'balanced' sits in between.  Duplicate flags ('dv3sugg', 'travelansgnd')
# are preserved exactly as sent by the real client.
conversationstyles = {
    'gpt-4': [
        "nlu_direct_response_filter",
        "deepleo",
        "disable_emoji_spoken_text",
        "responsible_ai_policy_235",
        "enablemm",
        "h3imaginative",
        "travelansgnd",
        "dv3sugg",
        "clgalileo",
        "gencontentv3",
        "dv3sugg",
        "responseos",
        "e2ecachewrite",
        "cachewriteext",
        "nodlcpcwrite",
        "travelansgnd",
        "nojbfedge",
    ],
    'balanced': [
        "nlu_direct_response_filter",
        "deepleo",
        "disable_emoji_spoken_text",
        "responsible_ai_policy_235",
        "enablemm",
        "galileo",
        "dv3sugg",
        "responseos",
        "e2ecachewrite",
        "cachewriteext",
        "nodlcpcwrite",
        "travelansgnd",
        "nojbfedge",
    ],
    'gpt-3.5-turbo': [
        "nlu_direct_response_filter",
        "deepleo",
        "disable_emoji_spoken_text",
        "responsible_ai_policy_235",
        "enablemm",
        "galileo",
        "dv3sugg",
        "responseos",
        "e2ecachewrite",
        "cachewriteext",
        "nodlcpcwrite",
        "travelansgnd",
        "h3precise",
        "clgalileo",
        "nojbfedge",
    ]
}
73
+
74
def format(msg: dict) -> str:
    """Serialise *msg* as JSON and append the 0x1E record separator that the
    Bing ChatHub protocol uses to delimit websocket frames."""
    encoded = json.dumps(msg)
    return '%s\x1e' % encoded
76
+
77
def get_token():
    """Return the Bing auth token — always None.

    Conversation creation no longer requires authentication, so this is a
    stub.  The previous implementation read the '_U' cookie from the Edge
    browser via browser_cookie3, but that code sat after a bare `return` and
    was unreachable dead code; it has been removed.
    """
    return None
86
+
87
class AsyncCompletion:
    """Minimal async client for Bing's Sydney/ChatHub websocket chat."""

    async def create(
            prompt : str = None,
            optionSets : list = None,
            token : str = None): # No auth required anymore
        """Async-generate incremental answer text for *prompt*.

        optionSets: one of the flag lists from `conversationstyles`.
        token: unused — kept only for backward compatibility.
        """

        # Step 1: create a server-side conversation over HTTPS.
        # NOTE(review): there is no `break` after a successful attempt, so all
        # 5 iterations always run and the last successful response wins.
        create = None
        for _ in range(5):
            try:
                create = requests.get('https://edgeservices.bing.com/edgesvc/turing/conversation/create',
                    headers = {
                        'host': 'edgeservices.bing.com',
                        'accept-encoding': 'gzip, deflate, br',
                        'connection': 'keep-alive',
                        'authority': 'edgeservices.bing.com',
                        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
                        'accept-language': 'en-US,en;q=0.9',
                        'cache-control': 'max-age=0',
                        'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
                        'sec-ch-ua-arch': '"x86"',
                        'sec-ch-ua-bitness': '"64"',
                        'sec-ch-ua-full-version': '"110.0.1587.69"',
                        'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
                        'sec-ch-ua-mobile': '?0',
                        'sec-ch-ua-model': '""',
                        'sec-ch-ua-platform': '"Windows"',
                        'sec-ch-ua-platform-version': '"15.0.0"',
                        'sec-fetch-dest': 'document',
                        'sec-fetch-mode': 'navigate',
                        'sec-fetch-site': 'none',
                        'sec-fetch-user': '?1',
                        'upgrade-insecure-requests': '1',
                        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
                        'x-edge-shopping-flag': '1',
                        # Spoofed forwarding address in an Azure-looking range.
                        'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
                    }
                )

                conversationId = create.json()['conversationId']
                clientId = create.json()['clientId']
                conversationSignature = create.json()['conversationSignature']

            except Exception as e:
                time.sleep(0.5)
                continue

        # NOTE(review): `is None` would be idiomatic; left as-is.
        if create == None: raise Exception('Failed to create conversation.')

        # Step 2: open the ChatHub websocket and do the JSON protocol handshake.
        wss: websockets.WebSocketClientProtocol or None = None

        wss = await websockets.connect('wss://sydney.bing.com/sydney/ChatHub', max_size = None, ssl = ssl_context,
            extra_headers = {
                'accept': 'application/json',
                'accept-language': 'en-US,en;q=0.9',
                'content-type': 'application/json',
                'sec-ch-ua': '"Not_A Brand";v="99", Microsoft Edge";v="110", "Chromium";v="110"',
                'sec-ch-ua-arch': '"x86"',
                'sec-ch-ua-bitness': '"64"',
                'sec-ch-ua-full-version': '"109.0.1518.78"',
                'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-model': "",
                'sec-ch-ua-platform': '"Windows"',
                'sec-ch-ua-platform-version': '"15.0.0"',
                'sec-fetch-dest': 'empty',
                'sec-fetch-mode': 'cors',
                'sec-fetch-site': 'same-origin',
                'x-ms-client-request-id': str(uuid.uuid4()),
                'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
                'Referer': 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx',
                'Referrer-Policy': 'origin-when-cross-origin',
                'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
            }
        )

        await wss.send(format({'protocol': 'json', 'version': 1}))
        await wss.recv()

        # Step 3: send the actual chat request (a type-4 invocation frame).
        struct = {
            'arguments': [
                {
                    'source': 'cib',
                    'optionsSets': optionSets,
                    'isStartOfSession': True,
                    'message': {
                        'author': 'user',
                        'inputMethod': 'Keyboard',
                        'text': prompt,
                        'messageType': 'Chat'
                    },
                    'conversationSignature': conversationSignature,
                    'participant': {
                        'id': clientId
                    },
                    'conversationId': conversationId
                }
            ],
            'invocationId': '0',
            'target': 'chat',
            'type': 4
        }

        await wss.send(format(struct))

        # The server repeats the full answer-so-far in every type-1 frame;
        # base_string tracks what was already yielded so only the new suffix
        # is emitted each time.
        base_string = ''

        final = False
        while not final:
            # Frames are 0x1E-separated JSON objects; a recv may carry several.
            objects = str(await wss.recv()).split('\x1e')
            for obj in objects:
                if obj is None or obj == '':
                    continue

                response = json.loads(obj)
                if response.get('type') == 1 and response['arguments'][0].get('messages',):
                    response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text')

                    yield (response_text.replace(base_string, ''))
                    base_string = response_text

                # A type-2 frame marks the end of the answer.
                elif response.get('type') == 2:
                    final = True

        await wss.close()
212
+
213
async def run(optionSets, messages):
    """Drive AsyncCompletion.create with the last message and stream every
    received delta straight to stdout."""
    last_prompt = messages[-1]['content']
    stream = AsyncCompletion.create(prompt=last_prompt, optionSets=optionSets)
    async for delta in stream:
        print(delta, flush=True, end='')
218
+
219
# Entry point: look up the flag set for the requested model and stream the
# whole answer to stdout.
optionSet = conversationstyles[config['model']]
asyncio.run(run(optionSet, config['messages']))
g4f/Provider/Providers/helpers/openai.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import uuid
3
+ import json
4
+ import browser_cookie3
5
+
6
+ from curl_cffi import requests
7
+
8
# Request config (messages) passed by the parent provider as JSON in argv[1].
config = json.loads(sys.argv[1])
9
+
10
def session_auth(cookies):
    """Return the chat.openai.com session JSON (incl. 'accessToken') for *cookies*."""
    headers = {
        'authority': 'chat.openai.com',
        'accept': '*/*',
        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'cache-control': 'no-cache',
        'pragma': 'no-cache',
        'referer': 'https://chat.openai.com/chat',
        'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
    }

    # curl_cffi impersonates a real Chrome TLS fingerprint for this call.
    response = requests.get('https://chat.openai.com/api/auth/session',
                            cookies=cookies, headers=headers, impersonate='chrome110')
    return response.json()
29
+
30
# Pull every chat.openai.com cookie from the local Chrome profile; only the
# next-auth session token is actually forwarded to the API.
all_cookies = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
    domain_name='chat.openai.com')}

try:
    cookies = {
        '__Secure-next-auth.session-token': all_cookies['__Secure-next-auth.session-token'],
    }
except Exception:
    print('Failed to get "__Secure-next-auth.session-token" in chrome, please make sure you are authenticated on openai.com')
    exit(0)
40
+
41
# Headers for the streaming conversation call; the bearer token is fetched
# synchronously from the session endpoint at import time.
headers = {
    'authority': 'chat.openai.com',
    'accept': 'text/event-stream',
    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
    'authorization': 'Bearer ' + session_auth(cookies)['accessToken'],
    'cache-control': 'no-cache',
    'content-type': 'application/json',
    'origin': 'https://chat.openai.com',
    'pragma': 'no-cache',
    'referer': 'https://chat.openai.com/chat',
    'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}

# Single-turn payload: only the most recent user message is forwarded, with
# fresh UUIDs for the message and its (synthetic) parent.
payload = {
    'action': 'next',
    'history_and_training_disabled': False,
    'messages': [
        {
            'id': str(uuid.uuid4()),
            'author': {
                'role': 'user',
            },
            'content': {
                'content_type': 'text',
                'parts': [
                    config['messages'][-1]['content']
                ]
            }
        }
    ],
    'model': 'text-davinci-002-render-sha',
    'parent_message_id': str(uuid.uuid4()),
    'supports_modapi': True,
    'timezone_offset_min': -60
}
82
+
83
# Running transcript of everything printed so far; each SSE event carries the
# cumulative answer, so this is used to strip the already-emitted prefix.
completion = ''

def format(chunk):
    """curl_cffi content callback: print only the new text of each SSE event."""
    global completion
    try:
        if b'parts' not in chunk:
            return

        event_body = chunk.decode('utf-8').split('data: ')[1]
        parts = json.loads(event_body)['message']['content']['parts']
        delta = parts[0].replace(completion, '')
        completion += delta

        print(delta, flush=True, end='')

    except Exception:
        # Keep-alives and partial frames are expected; ignore them silently.
        pass
99
+
100
# POST the conversation; curl_cffi streams the SSE response through format().
# Up to 3 attempts.  NOTE(review): failures are swallowed by a bare except
# and the script exits silently if all attempts fail.
for _ in range(3):
    try:
        response = requests.post('https://chat.openai.com/backend-api/conversation',
            json=payload, headers=headers, content_callback=format, impersonate='chrome110')
        break
    except:
        continue
g4f/Provider/Providers/helpers/phind.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import json
3
+ import datetime
4
+ import urllib.parse
5
+
6
+ from curl_cffi import requests
7
+
8
# Request config passed by the parent provider as a JSON blob in argv[1].
config = json.loads(sys.argv[1])
prompt = config['messages'][-1]['content']

# Phind's "skill" knob: 'expert' answers when gpt-4 was requested.
skill = 'expert' if config['model'] == 'gpt-4' else 'intermediate'

# Body is pre-serialised so its exact length can go in Content-Length below.
json_data = json.dumps({
    'question': prompt,
    'options': {
        'skill': skill,
        'date': datetime.datetime.now().strftime('%d/%m/%Y'),
        'language': 'en',
        'detailed': True,
        'creative': True,
        'customLinks': []}}, separators=(',', ':'))

# Safari-like browser headers expected by the endpoint.
headers = {
    'Content-Type': 'application/json',
    'Pragma': 'no-cache',
    'Accept': '*/*',
    'Sec-Fetch-Site': 'same-origin',
    'Accept-Language': 'en-GB,en;q=0.9',
    'Cache-Control': 'no-cache',
    'Sec-Fetch-Mode': 'cors',
    'Content-Length': str(len(json_data)),
    'Origin': 'https://www.phind.com',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
    'Referer': f'https://www.phind.com/search?q={urllib.parse.quote(prompt)}&source=searchbox',
    'Connection': 'keep-alive',
    'Host': 'www.phind.com',
    'Sec-Fetch-Dest': 'empty'
}
39
+
40
+
41
def output(chunk):
    """curl_cffi content callback: unwrap Phind's SSE framing and print the text."""
    try:
        # Metadata frames carry no answer text.
        if b'PHIND_METADATA' in chunk:
            return

        # A bare triple-blank event encodes a single newline in the answer.
        if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
            chunk = b'data: \n\r\n\r\n'

        text = chunk.decode()

        # Collapse SSE continuation frames back into plain text, then strip
        # the 'data: ' prefixes and the event separators (order matters).
        for old, new in (('data: \r\n\r\ndata: ', 'data: \n'),
                         ('\r\ndata: \r\ndata: \r\n\r\n', '\n\r\n\r\n'),
                         ('data: ', ''),
                         ('\r\n\r\n', '')):
            text = text.replace(old, new)

        print(text, flush=True, end='')

    except json.decoder.JSONDecodeError:
        pass
59
+
60
# Fire the request; answer tokens are printed by output() as they stream in.
# timeout=999999 because Phind holds the connection open for the full answer.
# Retries forever on any failure.
while True:
    try:
        response = requests.post('https://www.phind.com/api/infer/answer',
            headers=headers, data=json_data, content_callback=output, timeout=999999, impersonate='safari15_5')

        exit(0)

    except Exception as e:
        print('an error occured, retrying... |', e, flush=True)
        continue
g4f/Provider/Providers/helpers/theb.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import sys
3
+ from curl_cffi import requests
4
+
5
# Request config passed by the parent provider as a JSON blob in argv[1].
config = json.loads(sys.argv[1])
prompt = config['messages'][-1]['content']

# Chrome-like browser headers expected by chatbot.theb.ai.
headers = {
    'authority': 'chatbot.theb.ai',
    'accept': 'application/json, text/plain, */*',
    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
    'content-type': 'application/json',
    'origin': 'https://chatbot.theb.ai',
    'referer': 'https://chatbot.theb.ai/',
    'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
}

# Only the latest user message is sent; no conversation options.
json_data = {
    'prompt': prompt,
    'options': {}
}
28
+
29
def format(chunk):
    """curl_cffi content callback: print the delta text carried by *chunk*."""
    try:
        payload = json.loads(chunk.decode('utf-8'))
        delta = payload['detail']['choices'][0]['delta']['content']
        print(delta, flush=True, end='')

    except Exception as e:
        # Malformed or partial frames: report and keep streaming.
        print('[ERROR] an error occured, retrying... |', e, flush=True)
        return
39
+
40
# Stream the reply; format() prints each delta.  Retries forever on failure.
while True:
    try:
        response = requests.post('https://chatbot.theb.ai/api/chat-process',
            headers=headers, json=json_data, content_callback=format, impersonate='chrome110')

        exit(0)

    except Exception as e:
        print('[ERROR] an error occured, retrying... |', e, flush=True)
        continue
g4f/Provider/Providers/helpers/you.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import json
3
+ import urllib.parse
4
+
5
+ from curl_cffi import requests
6
+
7
# Request config passed by the parent provider as a JSON blob in argv[1].
config = json.loads(sys.argv[1])
messages = config['messages']
# Filled in below from the trailing user message (if any).
prompt = ''
10
+
11
+
12
def transform(messages: list) -> list:
    """Convert an OpenAI-style message list into you.com's chat history.

    Each entry of the result is a {'question': ..., 'answer': ...} pair:
    a user message is paired with the assistant reply that immediately
    follows it (or an empty answer), a lone assistant message becomes an
    answer-only pair, and a system message becomes an answer-less question.
    Messages with any other role are skipped.
    """
    result = []
    i = 0

    while i < len(messages):
        if messages[i]['role'] == 'user':
            question = messages[i]['content']
            i += 1

            # Pair with the immediately following assistant reply, if any.
            if i < len(messages) and messages[i]['role'] == 'assistant':
                answer = messages[i]['content']
                i += 1
            else:
                answer = ''

            result.append({'question': question, 'answer': answer})

        elif messages[i]['role'] == 'assistant':
            result.append({'question': '', 'answer': messages[i]['content']})
            i += 1

        elif messages[i]['role'] == 'system':
            result.append({'question': messages[i]['content'], 'answer': ''})
            i += 1

        else:
            # Bug fix: an unrecognised role previously left `i` untouched,
            # making this loop spin forever.  Skip such messages instead.
            i += 1

    return result
38
+
39
# Safari-like headers; the Referer mirrors a real streamingSearch request.
headers = {
    'Content-Type': 'application/x-www-form-urlencoded',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Sec-Fetch-Site': 'same-origin',
    'Accept-Language': 'en-GB,en;q=0.9',
    'Sec-Fetch-Mode': 'navigate',
    'Host': 'you.com',
    'Origin': 'https://you.com',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
    'Referer': 'https://you.com/api/streamingSearch?q=nice&safeSearch=Moderate&onShoppingPage=false&mkt=&responseFilter=WebPages,Translations,TimeZone,Computation,RelatedSearches&domain=youchat&queryTraceId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&chat=%5B%7B%22question%22%3A%22hi%22%2C%22answer%22%3A%22Hello!%20How%20can%20I%20assist%20you%20today%3F%22%7D%5D&chatId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&__cf_chl_tk=ex2bw6vn5vbLsUm8J5rDYUC0Bjzc1XZqka6vUl6765A-1684108495-0-gaNycGzNDtA',
    'Connection': 'keep-alive',
    'Sec-Fetch-Dest': 'document',
    'Priority': 'u=0, i',
}

# The trailing user message becomes the live query; everything before it is
# serialised into you.com's question/answer history format by transform().
if messages[-1]['role'] == 'user':
    prompt = messages[-1]['content']
    messages = messages[:-1]

params = urllib.parse.urlencode({
    'q': prompt,
    'domain': 'youchat',
    'chat': transform(messages)
})
63
+
64
def output(chunk):
    """curl_cffi content callback: print the token of a youChatToken SSE event."""
    if b'"youChatToken"' not in chunk:
        return
    event_body = chunk.decode().split('data: ')[1]
    token = json.loads(event_body)['youChatToken']
    print(token, flush=True, end='')
69
+
70
# Stream the search; output() prints each youChatToken as it arrives.
# Retries forever on any failure.
while True:
    try:
        response = requests.get(f'https://you.com/api/streamingSearch?{params}',
            headers=headers, content_callback=output, impersonate='safari15_5')

        exit(0)

    except Exception as e:
        print('an error occured, retrying... |', e, flush=True)
        continue