ka1kuk committed
Commit f0c15d5
Parent: 9506dfb

Delete Providers

Providers/Ava.py DELETED
@@ -1,61 +0,0 @@
- import requests
- import json
- import os
- from ...typing import sha256, Dict, get_type_hints
-
- url = "https://ava-alpha-api.codelink.io/api/chat"
- model = ["gpt-4"]
- supports_stream = True
- needs_auth = False
-
- class Model:
-     def __init__(self):
-         self.url = "https://ava-alpha-api.codelink.io/api/chat"
-         self.headers = {
-             "content-type": "application/json"
-         }
-         self.payload = {
-             "model": "gpt-4",
-             "temperature": 0.6,
-             "stream": True
-         }
-         self.accumulated_content = ""
-
-     def _process_line(self, line):
-         line_text = line.decode("utf-8").strip()
-         if line_text.startswith("data:"):
-             data = line_text[len("data:"):]
-             try:
-                 data_json = json.loads(data)
-                 if "choices" in data_json:
-                     choices = data_json["choices"]
-                     for choice in choices:
-                         if "finish_reason" in choice and choice["finish_reason"] == "stop":
-                             break
-                         if "delta" in choice and "content" in choice["delta"]:
-                             content = choice["delta"]["content"]
-                             self.accumulated_content += content
-             except json.JSONDecodeError as e:
-                 return
-
-     def ChatCompletion(self, messages):
-         self.payload["messages"] = messages
-
-         with requests.post(self.url, headers=self.headers, data=json.dumps(self.payload), stream=True) as response:
-             for line in response.iter_lines():
-                 self._process_line(line)
-
-         accumulated_content = self.accumulated_content
-         self.accumulated_content = ""
-
-         return accumulated_content
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-     model = Model()
-
-     # Call the chat completion method
-     response = model.ChatCompletion(messages)
-     return response
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
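The deleted Ava provider builds its answer by reading the SSE response line by line, keeping only `data:` lines and concatenating the `delta.content` fields until a `finish_reason` of `"stop"`. A minimal standalone sketch of that accumulation step, with synthetic chunks (the `parse_sse_lines` helper name is illustrative, not part of the original module):

import json

def parse_sse_lines(lines) -> str:
    # Mirrors Model._process_line: collect delta content until finish_reason == "stop".
    accumulated = ""
    for raw in lines:
        text = raw.decode("utf-8").strip() if isinstance(raw, bytes) else raw.strip()
        if not text.startswith("data:"):
            continue
        try:
            payload = json.loads(text[len("data:"):])
        except json.JSONDecodeError:
            continue
        for choice in payload.get("choices", []):
            if choice.get("finish_reason") == "stop":
                break
            accumulated += choice.get("delta", {}).get("content", "")
    return accumulated

# Two synthetic chunks in the OpenAI streaming shape:
chunks = [
    b'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    b'data: {"choices": [{"delta": {"content": "lo"}, "finish_reason": null}]}',
]
assert parse_sse_lines(chunks) == "Hello"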
Providers/ChatFree.py DELETED
@@ -1,48 +0,0 @@
- import os, requests
- from ...typing import sha256, Dict, get_type_hints
- import json
-
- url = "https://v.chatfree.cc"
- model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k']
- supports_stream = False
- needs_auth = False
-
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-     headers = {
-         'authority': 'chat.dfehub.com',
-         'accept': '*/*',
-         'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-         'content-type': 'application/json',
-         'origin': 'https://v.chatfree.cc',
-         'referer': 'https://v.chatfree.cc/',
-         'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-         'sec-ch-ua-mobile': '?0',
-         'sec-ch-ua-platform': '"macOS"',
-         'sec-fetch-dest': 'empty',
-         'sec-fetch-mode': 'cors',
-         'sec-fetch-site': 'same-origin',
-         'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
-         'x-requested-with': 'XMLHttpRequest',
-     }
-
-     json_data = {
-         'messages': messages,
-         'stream': True,
-         'model': model,
-         'temperature': 0.5,
-         'presence_penalty': 0,
-         'frequency_penalty': 0,
-         'top_p': 1,
-     }
-
-     response = requests.post('https://v.chatfree.cc/api/openai/v1/chat/completions',
-                              headers=headers, json=json_data)
-
-     for chunk in response.iter_lines():
-         if b'content' in chunk:
-             data = json.loads(chunk.decode().split('data: ')[1])
-             yield (data['choices'][0]['delta']['content'])
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
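The deleted ChatFree provider posts to an OpenAI-compatible endpoint with `stream: True` and pulls each token out of lines shaped like `data: {...}`. A minimal sketch of that per-line extraction, assuming the same OpenAI-style chunk format (the `extract_token` helper name is illustrative):

import json

def extract_token(chunk: bytes):
    # As in the deleted loop: only lines carrying 'content' are parsed,
    # and the JSON payload follows the 'data: ' prefix.
    if b'content' not in chunk:
        return None
    data = json.loads(chunk.decode().split('data: ')[1])
    return data['choices'][0]['delta']['content']

line = b'data: {"choices": [{"delta": {"content": "Hi"}}]}'
assert extract_token(line) == "Hi"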
Providers/ChatgptAi.py DELETED
@@ -1,51 +0,0 @@
- import os
- import requests, re
- from ...typing import sha256, Dict, get_type_hints
-
- url = 'https://chatgpt.ai/gpt-4/'
- model = ['gpt-4']
- supports_stream = True
- needs_auth = False
-
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-     chat = ''
-     for message in messages:
-         chat += '%s: %s\n' % (message['role'], message['content'])
-     chat += 'user: '
-
-     response = requests.get('https://chatgpt.ai/')
-     nonce, post_id, _, bot_id = re.findall(r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width', response.text)[0]
-
-     headers = {
-         'authority': 'chatgpt.ai',
-         'accept': '*/*',
-         'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-         'cache-control': 'no-cache',
-         'origin': 'https://chatgpt.ai',
-         'pragma': 'no-cache',
-         'referer': 'https://chatgpt.ai/gpt-4/',
-         'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-         'sec-ch-ua-mobile': '?0',
-         'sec-ch-ua-platform': '"Windows"',
-         'sec-fetch-dest': 'empty',
-         'sec-fetch-mode': 'cors',
-         'sec-fetch-site': 'same-origin',
-         'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
-     }
-     data = {
-         '_wpnonce': nonce,
-         'post_id': post_id,
-         'url': 'https://chatgpt.ai/gpt-4',
-         'action': 'wpaicg_chat_shortcode_message',
-         'message': chat,
-         'bot_id': bot_id
-     }
-
-     response = requests.post('https://chatgpt.ai/wp-admin/admin-ajax.php',
-                              headers=headers, data=data)
-
-     yield (response.json()['data'])
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
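The deleted ChatgptAi provider scrapes the WordPress chat widget's `data-nonce`, `data-post-id`, and `data-bot-id` attributes from the landing page and replays them against `admin-ajax.php`. A small sketch of that extraction step against synthetic markup (the sample HTML and its exact spacing are illustrative; the live page may differ):

import re

# Synthetic attribute block in the layout the deleted regex expects.
sample = ('data-nonce="abc123"\n data-post-id="404"\n'
          ' data-url="https://chatgpt.ai/gpt-4"\n data-bot-id="16"\n data-width')

pattern = r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width'
nonce, post_id, _, bot_id = re.findall(pattern, sample)[0]
assert (nonce, post_id, bot_id) == ("abc123", "404", "16")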
Providers/DeepAi.py DELETED
@@ -1,46 +0,0 @@
- import os
- import json
- import random
- import hashlib
- import requests
-
- from ...typing import sha256, Dict, get_type_hints
-
- url = 'https://deepai.org'
- model = ['gpt-3.5-turbo']
- supports_stream = True
- needs_auth = False
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-     def md5(text: str) -> str:
-         return hashlib.md5(text.encode()).hexdigest()[::-1]
-
-
-     def get_api_key(user_agent: str) -> str:
-         part1 = str(random.randint(0, 10**11))
-         part2 = md5(user_agent + md5(user_agent + md5(user_agent + part1 + "x")))
-
-         return f"tryit-{part1}-{part2}"
-
-     user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
-
-     headers = {
-         "api-key": get_api_key(user_agent),
-         "user-agent": user_agent
-     }
-
-     files = {
-         "chat_style": (None, "chat"),
-         "chatHistory": (None, json.dumps(messages))
-     }
-
-     r = requests.post("https://api.deepai.org/chat_response", headers=headers, files=files, stream=True)
-
-     for chunk in r.iter_content(chunk_size=None):
-         r.raise_for_status()
-         yield chunk.decode()
-
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join(
-         [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
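The deleted DeepAi provider authenticates with a client-derived `tryit-` key: a random number plus a triple-nested, reversed MD5 of the user agent. A standalone sketch of that derivation, reconstructed from the deleted helpers:

import hashlib
import random

def md5(text: str) -> str:
    # Reversed hex digest, as in the deleted provider.
    return hashlib.md5(text.encode()).hexdigest()[::-1]

def get_api_key(user_agent: str) -> str:
    part1 = str(random.randint(0, 10**11))
    part2 = md5(user_agent + md5(user_agent + md5(user_agent + part1 + "x")))
    return f"tryit-{part1}-{part2}"

print(get_api_key("Mozilla/5.0"))  # e.g. tryit-53096051903-<reversed md5>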
Providers/helpers/phind.py DELETED
@@ -1,69 +0,0 @@
- import sys
- import json
- import datetime
- import urllib.parse
-
- from curl_cffi import requests
-
- config = json.loads(sys.argv[1])
- prompt = config['messages'][-1]['content']
-
- skill = 'expert' if config['model'] == 'gpt-4' else 'intermediate'
-
- json_data = json.dumps({
-     'question': prompt,
-     'options': {
-         'skill': skill,
-         'date': datetime.datetime.now().strftime('%d/%m/%Y'),
-         'language': 'en',
-         'detailed': True,
-         'creative': True,
-         'customLinks': []}}, separators=(',', ':'))
-
- headers = {
-     'Content-Type': 'application/json',
-     'Pragma': 'no-cache',
-     'Accept': '*/*',
-     'Sec-Fetch-Site': 'same-origin',
-     'Accept-Language': 'en-GB,en;q=0.9',
-     'Cache-Control': 'no-cache',
-     'Sec-Fetch-Mode': 'cors',
-     'Content-Length': str(len(json_data)),
-     'Origin': 'https://www.phind.com',
-     'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
-     'Referer': f'https://www.phind.com/search?q={urllib.parse.quote(prompt)}&source=searchbox',
-     'Connection': 'keep-alive',
-     'Host': 'www.phind.com',
-     'Sec-Fetch-Dest': 'empty'
- }
-
-
- def output(chunk):
-     try:
-         if b'PHIND_METADATA' in chunk:
-             return
-
-         if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
-             chunk = b'data: \n\r\n\r\n'
-
-         chunk = chunk.decode()
-
-         chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
-         chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\r\n\r\n')
-         chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')
-
-         print(chunk, flush=True, end='')
-
-     except json.decoder.JSONDecodeError:
-         pass
-
- while True:
-     try:
-         response = requests.post('https://www.phind.com/api/infer/answer',
-                                  headers=headers, data=json_data, content_callback=output, timeout=999999, impersonate='safari15_5')
-
-         exit(0)
-
-     except Exception as e:
-         print('an error occurred, retrying... |', e, flush=True)
-         continue
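This deleted phind helper is not imported as a module: it reads its configuration as a JSON string from `sys.argv[1]` and streams the cleaned answer to stdout, which suggests the parent provider (not part of this diff) ran it as a subprocess. A hedged sketch of that invocation pattern, assuming the helper's now-deleted path and a placeholder message:

import json
import subprocess
import sys

config = {
    "model": "gpt-4",
    "messages": [{"role": "user", "content": "hello world"}],
}

# Forward the helper's stdout line by line; the helper itself prints
# already-cleaned chunks, so the parent only relays them.
proc = subprocess.Popen(
    [sys.executable, "Providers/helpers/phind.py", json.dumps(config)],
    stdout=subprocess.PIPE,
)
for line in iter(proc.stdout.readline, b""):
    print(line.decode(errors="ignore"), end="")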