monra committed
Commit d941263 • 2 Parent(s): e1e0237 c144290

Merge branch 'main' into huggingface-space

README.md CHANGED
@@ -29,11 +29,11 @@ Check out the project here: [FreeGPT WebUI - Chimera Version](https://github.com
 
 ## Project Hosting and Demonstration 🌐🚀
 The project is hosted on multiple platforms to be tested and modified.
-|Platform|Status|Repo|Demo|
-|--|--|--|--|
-|[replit](https://replit.com/)|![Active](https://img.shields.io/badge/Active-brightgreen)|[FreeGPT WebUI](https://replit.com/@ramonvc/freegpt-webui)|[Chat](https://freegpt-webui.ramonvc.repl.co/chat/)
-|[hugging face](https://huggingface.co)|![Active](https://img.shields.io/badge/Active-brightgreen)|[FreeGPT WebUI](https://huggingface.co/spaces/monra/freegpt-webui/tree/main)|[Chat](https://huggingface.co/spaces/monra/freegpt-webui)
-|[replit](https://replit.com/)|![Active](https://img.shields.io/badge/Active-brightgreen)|[FreeGPT WebUI - Chimera Version](https://replit.com/@ramonvc/freegpt-webui-chimera)|[Chat](https://freegpt-webui-chimera.ramonvc.repl.co/chat/)
+|Platform|Status|API Key|Free|Repo|Demo|
+|--|--|--|--|--|--|
+|[replit](https://replit.com/)|![Active](https://img.shields.io/badge/Active-brightgreen)|◼️|☑️|[FreeGPT WebUI](https://replit.com/@ramonvc/freegpt-webui)|[Chat](https://freegpt-webui.ramonvc.repl.co/chat/)
+|[hugging face](https://huggingface.co)|![Active](https://img.shields.io/badge/Active-brightgreen)|◼️|☑️|[FreeGPT WebUI](https://huggingface.co/spaces/monra/freegpt-webui/tree/main)|[Chat](https://huggingface.co/spaces/monra/freegpt-webui)
+|[replit](https://replit.com/)|![Active](https://img.shields.io/badge/Active-brightgreen)|☑️|☑️|[FreeGPT WebUI - Chimera Version](https://replit.com/@ramonvc/freegpt-webui-chimera)|[Chat](https://freegpt-webui-chimera.ramonvc.repl.co/chat/)
 
 ## Note ℹ️
 <p>
client/css/conversation.css CHANGED
@@ -60,13 +60,21 @@
     border: 1px solid var(--blur-border);
 }
 
-.input-box {
-    display: flex;
+.box.input-box {
+    position: relative;
     align-items: center;
     padding: 8px;
     cursor: pointer;
 }
 
+#send-button {
+    position: absolute;
+    bottom: 25%;
+    right: 10px;
+    z-index: 1;
+    padding: 16px;
+}
+
 #cursor {
     line-height: 17px;
     margin-left: 3px;
@@ -119,13 +127,6 @@
     border-radius: 10px;
 }
 
-@media screen and (max-width: 990px) {
-    .conversation {
-        width: 100%;
-        height: 90%;
-    }
-}
-
 @media screen and (max-height: 720px) {
     .conversation.box {
         height: 70%;
client/css/message.css CHANGED
@@ -28,6 +28,7 @@
 .message .content {
     display: flex;
     flex-direction: column;
+    width: 90%;
     gap: 18px;
 }
 
client/html/index.html CHANGED
@@ -75,10 +75,10 @@
 <div class="buttons">
     <div class="field">
         <select class="dropdown" name="model" id="model">
-            <option value="gpt-3.5-turbo">GPT-3.5</option>
+            <option value="gpt-3.5-turbo" selected>GPT-3.5</option>
             <option value="gpt-3.5-turbo-0613">GPT-3.5-0613</option>
             <option value="gpt-3.5-turbo-16k">GPT-3.5-turbo-16k</option>
-            <option value="gpt-3.5-turbo-16k-0613" selected>
+            <option value="gpt-3.5-turbo-16k-0613">
                 GPT-3.5-turbo-16k-0613
             </option>
             <option value="gpt-4">GPT-4</option>
client/js/chat.js CHANGED
@@ -56,7 +56,7 @@ const ask_gpt = async (message) => {
     message_input.innerHTML = ``;
     message_input.innerText = ``;
 
-    add_conversation(window.conversation_id, message.substr(0, 20));
+    add_conversation(window.conversation_id, message.substr(0, 16));
     window.scrollTo(0, 0);
     window.controller = new AbortController();
 
g4f/Provider/Providers/Ails.py ADDED
@@ -0,0 +1,87 @@
+import os
+import time
+import json
+import uuid
+import hashlib
+import requests
+
+from ...typing import sha256, Dict, get_type_hints
+from datetime import datetime
+
+url: str = 'https://ai.ls'
+model: str = 'gpt-3.5-turbo'
+supports_stream = True
+needs_auth = False
+working = True
+
+
+class Utils:
+    def hash(json_data: Dict[str, str]) -> sha256:
+
+        base_string: str = '%s:%s:%s:%s' % (
+            json_data['t'],
+            json_data['m'],
+            'WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf',
+            len(json_data['m'])
+        )
+
+        return hashlib.sha256(base_string.encode()).hexdigest()
+
+    def format_timestamp(timestamp: int) -> str:
+
+        e = timestamp
+        n = e % 10
+        r = n + 1 if n % 2 == 0 else n
+        return str(e - n + r)
+
+
+def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False, **kwargs):
+
+    headers = {
+        'authority': 'api.caipacity.com',
+        'accept': '*/*',
+        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+        'authorization': 'Bearer free',
+        'client-id': str(uuid.uuid4()),
+        'client-v': '0.1.249',
+        'content-type': 'application/json',
+        'origin': 'https://ai.ls',
+        'referer': 'https://ai.ls/',
+        'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"Windows"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'cross-site',
+        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+    }
+
+    timestamp = Utils.format_timestamp(int(time.time() * 1000))
+
+    sig = {
+        'd': datetime.now().strftime('%Y-%m-%d'),
+        't': timestamp,
+        's': Utils.hash({
+            't': timestamp,
+            'm': messages[-1]['content']})}
+
+    json_data = json.dumps(separators=(',', ':'), obj={
+        'model': 'gpt-3.5-turbo',
+        'temperature': 0.6,
+        'stream': True,
+        'messages': messages} | sig)
+
+    response = requests.post('https://api.caipacity.com/v1/chat/completions',
+                             headers=headers, data=json_data, stream=True)
+
+    for token in response.iter_lines():
+        if b'content' in token:
+            completion_chunk = json.loads(token.decode().replace('data: ', ''))
+            token = completion_chunk['choices'][0]['delta'].get('content')
+            if token != None:
+                yield token
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join(
+        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/Providers/Easychat.py CHANGED
@@ -4,24 +4,52 @@ import json
 from ...typing import sha256, Dict, get_type_hints
 
 url = 'https://free.easychat.work'
-model = ['gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613']
+model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k',
+         'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613']
 supports_stream = True
 needs_auth = False
 
-def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
     headers = {
-        'Content-Type': 'application/json',
+        'authority': 'free.easychat.work',
+        'accept': 'text/event-stream',
+        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+        'content-type': 'application/json',
+        'endpoint': '',
+        'origin': 'https://free.easychat.work',
+        'plugins': '0',
+        'referer': 'https://free.easychat.work/',
+        'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'same-origin',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+        'usesearch': 'false',
+        'x-requested-with': 'XMLHttpRequest',
     }
-    data = {
-        'model': model,
-        'temperature': 0.7,
-        'presence_penalty': 0,
+
+    json_data = {
         'messages': messages,
+        'stream': True,
+        'model': model,
+        'temperature': 0.5,
+        'presence_penalty': 0,
+        'frequency_penalty': 0,
+        'top_p': 1,
     }
-    response = requests.post(url + '/api/openai/v1/chat/completions',
-        json=data, stream=stream)
-
-    yield response.json()['choices'][0]['message']['content']
+
+    response = requests.post('https://free.easychat.work/api/openai/v1/chat/completions',
+                             headers=headers, json=json_data)
+
+    for chunk in response.iter_lines():
+        if b'content' in chunk:
+            data = json.loads(chunk.decode().split('data: ')[1])
+            yield (data['choices'][0]['delta']['content'])
+
 
 params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
+    '(%s)' % ', '.join(
+        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/__init__.py CHANGED
@@ -1,6 +1,7 @@
 from . import Provider
 from .Providers import (
     Aichat,
+    Ails,
     Bard,
     Better,
     Bing,
g4f/models.py CHANGED
@@ -10,22 +10,22 @@ class Model:
     class gpt_35_turbo:
         name: str = 'gpt-3.5-turbo'
         base_provider: str = 'openai'
-        best_provider: Provider.Provider = random.choice([Provider.DeepAi, Provider.Easychat])
+        best_provider: Provider.Provider = Provider.DeepAi
 
     class gpt_35_turbo_0613:
         name: str = 'gpt-3.5-turbo-0613'
         base_provider: str = 'openai'
-        best_provider: Provider.Provider = random.choice([Provider.Gravityengine, Provider.Easychat])
+        best_provider: Provider.Provider = Provider.Gravityengine
 
     class gpt_35_turbo_16k_0613:
         name: str = 'gpt-3.5-turbo-16k-0613'
         base_provider: str = 'openai'
-        best_provider: Provider.Provider = random.choice([Provider.Gravityengine, Provider.Easychat])
+        best_provider: Provider.Provider = Provider.Gravityengine
 
     class gpt_35_turbo_16k:
         name: str = 'gpt-3.5-turbo-16k'
         base_provider: str = 'openai'
-        best_provider: Provider.Provider = random.choice([Provider.Gravityengine, Provider.Easychat])
+        best_provider: Provider.Provider = Provider.Gravityengine
 
     class gpt_4_dev:
         name: str = 'gpt-4-for-dev'
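After this change each gpt-3.5-turbo variant is pinned to one fixed provider instead of a `random.choice` between two, so routing is deterministic. A minimal sketch of reading that routing table (run from the repository root; the attribute names are taken straight from the diff):

```python
# Sketch only: inspect the static model-to-provider mapping defined in g4f/models.py.
from g4f import models

cfg = models.Model.gpt_35_turbo
print(cfg.name)           # 'gpt-3.5-turbo'
print(cfg.base_provider)  # 'openai'
print(cfg.best_provider)  # the Provider.DeepAi module after this commit
```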