monra committed on
Commit
9d9c069
1 Parent(s): ad4e5d7

Some adjustments to the API providers

Browse files
g4f/Provider/Providers/Bing.py CHANGED
@@ -24,7 +24,6 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
24
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
25
 
26
  for line in iter(p.stdout.readline, b''):
27
- #print(line)
28
  yield line.decode('utf-8', errors='ignore') #[:-1]
29
 
30
 
 
24
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
25
 
26
  for line in iter(p.stdout.readline, b''):
 
27
  yield line.decode('utf-8', errors='ignore') #[:-1]
28
 
29
 
g4f/Provider/Providers/Forefront.py CHANGED
@@ -7,6 +7,7 @@ url = 'forefront.com'
7
  model = ['gpt-3.5-turbo']
8
  supports_stream = True
9
 
 
10
  def _create_completion(model: str, messages: list, stream: bool, **kwargs):
11
  json_data = {
12
  'text': messages[-1]['content'],
@@ -19,18 +20,17 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
19
  'messages': messages[:-1] if len(messages) > 1 else [],
20
  'internetMode': 'auto'
21
  }
22
- print(json_data)
23
- response = requests.post( 'https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat',
24
- json=json_data, stream=True)
25
 
26
- for token in response.iter_lines():
 
 
 
27
  if b'delta' in token:
28
  token = json.loads(token.decode().split('data: ')[1])['delta']
29
  yield (token)
30
 
31
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
32
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
33
-
34
-
35
-
36
 
 
 
 
 
 
7
  model = ['gpt-3.5-turbo']
8
  supports_stream = True
9
 
10
+
11
  def _create_completion(model: str, messages: list, stream: bool, **kwargs):
12
  json_data = {
13
  'text': messages[-1]['content'],
 
20
  'messages': messages[:-1] if len(messages) > 1 else [],
21
  'internetMode': 'auto'
22
  }
 
 
 
23
 
24
+ response = requests.post('https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat',
25
+ json=json_data, stream=True)
26
+
27
+ for token in response.iter_lines():
28
  if b'delta' in token:
29
  token = json.loads(token.decode().split('data: ')[1])['delta']
30
  yield (token)
31
 
 
 
 
 
 
32
 
33
+ params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
34
+ '(%s)' % ', '.join(
35
+ [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in
36
+ _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/Providers/Openai.py CHANGED
@@ -14,19 +14,19 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
14
 
15
  path = os.path.dirname(os.path.realpath(__file__))
16
  config = json.dumps({
17
- 'messages': messages,
18
- 'model': model}, separators=(',', ':'))
 
19
 
20
  try:
21
  subprocess.run(["python3", "--version"], capture_output=True, check=True)
22
- cmd = ["python3", f"{path}/helpers/bing.py", config]
23
  except subprocess.CalledProcessError:
24
- cmd = ["python", f"{path}/helpers/bing.py", config]
25
 
26
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
27
 
28
  for line in iter(p.stdout.readline, b''):
29
- #print(line)
30
  yield line.decode('utf-8', errors='ignore') #[:-1]
31
 
32
  params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
 
14
 
15
  path = os.path.dirname(os.path.realpath(__file__))
16
  config = json.dumps({
17
+ 'model': model,
18
+ 'messages': messages[:-1] if len(messages) > 1 else [],
19
+ })
20
 
21
  try:
22
  subprocess.run(["python3", "--version"], capture_output=True, check=True)
23
+ cmd = ["python3", f"{path}/helpers/openai.py", config]
24
  except subprocess.CalledProcessError:
25
+ cmd = ["python", f"{path}/helpers/openai.py", config]
26
 
27
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
28
 
29
  for line in iter(p.stdout.readline, b''):
 
30
  yield line.decode('utf-8', errors='ignore') #[:-1]
31
 
32
  params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
g4f/Provider/Providers/Phind.py CHANGED
@@ -9,14 +9,21 @@ url = 'https://phind.com'
9
  model = ['gpt-3.5-turbo', 'gpt-4']
10
  supports_stream = True
11
 
 
12
  def _create_completion(model: str, messages: list, stream: bool, **kwargs):
13
 
14
  path = os.path.dirname(os.path.realpath(__file__))
15
  config = json.dumps({
16
  'model': model,
17
- 'messages': messages}, separators=(',', ':'))
 
18
 
19
- cmd = ['python', f'{path}/helpers/phind.py', config]
 
 
 
 
 
20
 
21
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
22
 
@@ -25,13 +32,13 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
25
  os.system('clear' if os.name == 'posix' else 'cls')
26
  yield 'Clouflare error, please try again...'
27
  os._exit(0)
28
-
29
  else:
30
  if b'ping - 2023-' in line:
31
  continue
32
-
33
- yield line.decode('utf-8', errors='ignore') #[:-1]
34
 
35
-
36
  params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
37
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
 
9
  model = ['gpt-3.5-turbo', 'gpt-4']
10
  supports_stream = True
11
 
12
+
13
  def _create_completion(model: str, messages: list, stream: bool, **kwargs):
14
 
15
  path = os.path.dirname(os.path.realpath(__file__))
16
  config = json.dumps({
17
  'model': model,
18
+ 'messages': messages[:-1] if len(messages) > 1 else [],
19
+ })
20
 
21
+ try:
22
+ subprocess.run(["python3", "--version"],
23
+ capture_output=True, check=True)
24
+ cmd = ["python3", f"{path}/helpers/phind.py", config]
25
+ except subprocess.CalledProcessError:
26
+ cmd = ["python", f"{path}/helpers/phind.py", config]
27
 
28
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
29
 
 
32
  os.system('clear' if os.name == 'posix' else 'cls')
33
  yield 'Clouflare error, please try again...'
34
  os._exit(0)
35
+
36
  else:
37
  if b'ping - 2023-' in line:
38
  continue
39
+ yield line.decode('utf-8', errors='ignore') # [:-1]
40
+
41
 
 
42
  params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
43
+ '(%s)' % ', '.join(
44
+ [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/Providers/Theb.py CHANGED
@@ -13,19 +13,19 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
13
 
14
  path = os.path.dirname(os.path.realpath(__file__))
15
  config = json.dumps({
16
- 'messages': messages,
17
- 'model': model}, separators=(',', ':'))
 
18
 
19
  try:
20
  subprocess.run(["python3", "--version"], capture_output=True, check=True)
21
- cmd = ["python3", f"{path}/helpers/bing.py", config]
22
  except subprocess.CalledProcessError:
23
- cmd = ["python", f"{path}/helpers/bing.py", config]
24
 
25
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
26
 
27
  for line in iter(p.stdout.readline, b''):
28
- #print(line)
29
  yield line.decode('utf-8', errors='ignore') #[:-1]
30
 
31
 
 
13
 
14
  path = os.path.dirname(os.path.realpath(__file__))
15
  config = json.dumps({
16
+ 'model': model,
17
+ 'messages': messages[:-1] if len(messages) > 1 else [],
18
+ })
19
 
20
  try:
21
  subprocess.run(["python3", "--version"], capture_output=True, check=True)
22
+ cmd = ["python3", f"{path}/helpers/theb.py", config]
23
  except subprocess.CalledProcessError:
24
+ cmd = ["python", f"{path}/helpers/theb.py", config]
25
 
26
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
27
 
28
  for line in iter(p.stdout.readline, b''):
 
29
  yield line.decode('utf-8', errors='ignore') #[:-1]
30
 
31
 
g4f/Provider/Providers/You.py CHANGED
@@ -9,15 +9,22 @@ url = 'https://you.com'
9
  model = 'gpt-3.5-turbo'
10
  supports_stream = True
11
 
 
12
  def _create_completion(model: str, messages: list, stream: bool, **kwargs):
13
 
14
  path = os.path.dirname(os.path.realpath(__file__))
15
  config = json.dumps({
16
- 'messages': messages}, separators=(',', ':'))
17
-
18
- cmd = ['python3', f'{path}/helpers/you.py', config]
 
 
 
 
 
 
19
 
20
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
21
 
22
  for line in iter(p.stdout.readline, b''):
23
- yield line.decode('utf-8') #[:-1]
 
9
  model = 'gpt-3.5-turbo'
10
  supports_stream = True
11
 
12
+
13
  def _create_completion(model: str, messages: list, stream: bool, **kwargs):
14
 
15
  path = os.path.dirname(os.path.realpath(__file__))
16
  config = json.dumps({
17
+ 'messages': messages[:-1] if len(messages) > 1 else [],
18
+ })
19
+
20
+ try:
21
+ subprocess.run(["python3", "--version"],
22
+ capture_output=True, check=True)
23
+ cmd = ["python3", f"{path}/helpers/you.py", config]
24
+ except subprocess.CalledProcessError:
25
+ cmd = ["python", f"{path}/helpers/you.py", config]
26
 
27
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
28
 
29
  for line in iter(p.stdout.readline, b''):
30
+ yield line.decode('utf-8', errors='ignore') # [:-1]
g4f/Provider/Providers/helpers/bing.py CHANGED
@@ -198,7 +198,6 @@ class AsyncCompletion:
198
  continue
199
 
200
  response = json.loads(obj)
201
- #print(response, flush=True, end='')
202
  if response.get('type') == 1 and response['arguments'][0].get('messages',):
203
  response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text')
204
 
 
198
  continue
199
 
200
  response = json.loads(obj)
 
201
  if response.get('type') == 1 and response['arguments'][0].get('messages',):
202
  response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text')
203
 
server/backend.py CHANGED
@@ -9,8 +9,6 @@ from requests import get
9
  from server.auto_proxy import get_random_proxy, update_working_proxies
10
  from server.config import special_instructions
11
 
12
- # Backend_Api class definition
13
-
14
 
15
  class Backend_Api:
16
  def __init__(self, app, config: dict) -> None:
@@ -47,7 +45,7 @@ class Backend_Api:
47
 
48
  # Generate response
49
  response = ChatCompletion.create(model=model, stream=True,
50
- messages=messages, provider=g4f.Provider.Forefront)
51
 
52
  return self.app.response_class(generate_stream(response, jailbreak), mimetype='text/event-stream')
53
 
 
9
  from server.auto_proxy import get_random_proxy, update_working_proxies
10
  from server.config import special_instructions
11
 
 
 
12
 
13
  class Backend_Api:
14
  def __init__(self, app, config: dict) -> None:
 
45
 
46
  # Generate response
47
  response = ChatCompletion.create(model=model, stream=True,
48
+ messages=messages, provider=g4f.Provider.Yqcloud)
49
 
50
  return self.app.response_class(generate_stream(response, jailbreak), mimetype='text/event-stream')
51