CostalCry committed
Commit 295c7b2
1 Parent(s): 46ec4c3

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.
Files changed (50)
  1. .github/README.md +35 -28
  2. .github/README_EN.md +75 -0
  3. backend/backend.py +20 -37
  4. backend/embedding_processing.py +1 -1
  5. docker-compose/docker-compose.traefik.yml +0 -2
  6. docker-compose/docker-compose.yml +2 -4
  7. endpoint.py +11 -10
  8. g4f/Provider/AItianhu.py +34 -38
  9. g4f/Provider/AItianhu2.py +54 -0
  10. g4f/Provider/Acytoo.py +6 -5
  11. g4f/Provider/AiService.py +36 -0
  12. g4f/Provider/Aichat.py +1 -2
  13. g4f/Provider/Ails.py +3 -3
  14. g4f/Provider/Aivvm.py +56 -62
  15. g4f/Provider/Aiwang.py +105 -0
  16. g4f/Provider/Bard.py +91 -0
  17. g4f/Provider/Bing.py +72 -91
  18. g4f/Provider/CaffCat.py +101 -0
  19. g4f/Provider/ChatBase.py +4 -5
  20. g4f/Provider/ChatgptAi.py +31 -31
  21. g4f/Provider/ChatgptLogin.py +67 -0
  22. g4f/Provider/CodeLinkAva.py +63 -0
  23. g4f/Provider/DeepAi.py +63 -0
  24. g4f/Provider/DfeHub.py +77 -0
  25. g4f/Provider/EasyChat.py +111 -0
  26. g4f/Provider/Equing.py +81 -0
  27. g4f/Provider/FastGpt.py +86 -0
  28. g4f/Provider/Forefront.py +40 -0
  29. g4f/Provider/Freet.py +109 -0
  30. g4f/Provider/GetGpt.py +88 -0
  31. g4f/Provider/H2o.py +8 -16
  32. g4f/Provider/HuggingChat.py +106 -0
  33. g4f/Provider/I207m.py +73 -0
  34. g4f/Provider/Liaobots.py +12 -27
  35. g4f/Provider/Lockchat.py +64 -0
  36. g4f/Provider/Opchatgpts.py +8 -0
  37. g4f/Provider/OpenAssistant.py +102 -0
  38. g4f/Provider/OpenaiChat.py +94 -0
  39. g4f/Provider/Providers/Forefront.py +33 -0
  40. g4f/Provider/Qidinam.py +57 -0
  41. g4f/Provider/Raycast.py +72 -0
  42. g4f/Provider/Theb.py +97 -0
  43. g4f/Provider/V50.py +67 -0
  44. g4f/Provider/Vercel.py +286 -309
  45. g4f/Provider/Vitalentum.py +5 -8
  46. g4f/Provider/Wewordle.py +65 -0
  47. g4f/Provider/Wuguokai.py +68 -0
  48. g4f/Provider/Ylokh.py +18 -19
  49. g4f/Provider/You.py +16 -16
  50. g4f/Provider/Yqcloud.py +10 -21
.github/README.md CHANGED
@@ -1,45 +1,48 @@
  <div align="center">
  <a href="https://t.me/neurogen_news">
- <img src="https://readme-typing-svg.herokuapp.com?font=Jura&weight=700&size=30&duration=4000&pause=1000&color=1BED29&center=true&width=435&lines=NeuroGPT+by+NeurogenAI" alt="NeuroGPT" />
+ <img src="https://readme-typing-svg.herokuapp.com?font=Inconsolata&weight=700&size=30&duration=4000&pause=1000&color=1BED29&center=true&width=435&lines=NeuroGPT+by+Neurogen...;Opening..." alt="NeuroGPT" />
  </a>

- <strong> <a href="https://github.com/Em1tSan/NeuroGPT/blob/main/.github/README_RU.md">Русский</a> | English </strong>
+ <strong> Русский | <a href="https://github.com/Em1tSan/NeuroGPT/blob/main/.github/README_EN.md">English </a></strong>

- > ### The project is currently undergoing a transition to another client and reconstruction of the API. Technical work is continuing. We apologize for any inconvenience.
-
- <p> Free API service providing access to GPT-3.5, GPT-4, and other language models. Before using it, please make sure you check out our <a href="https://github.com/Em1tSan/NeuroGPT/wiki#english-language">wiki</a>. The project utilizes a modified version of <a href="https://github.com/xtekky/gpt4free">gpt4free</a>, as well as <a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT">ChuanhuChatGPT</a> as a web interface. We extend our gratitude to the authors.</p>
+ <p> NeuroGPT позволяет простому пользователю получить бесплатный доступ к gpt-3.5, gpt-4 и другим языковым моделям без VPN и регистрации аккаунта. В стабильном режиме на данный момент функционирует только gpt-3.5-turbo 4097. Постоянный доступ к остальным моделям не гарантируется.
+
+ Проект основан на модифицированных версиях <a href="https://github.com/xtekky/gpt4free">gpt4free</a> и <a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT">ChuanhuChatGPT</a></p>
+ Благодарность авторам.

- <p>
  <a href="https://github.com/Em1tSan/NeuroGPT/blob/main/LICENSE">
- <img src="https://img.shields.io/badge/license-GPL_3.0-darkgreen.svg" alt="license"/>
+ <img src="https://img.shields.io/badge/license-GPL_3.0-indigo.svg" alt="license"/>
  </a>
  <a href="https://github.com/Em1tSan/NeuroGPT/commits/main">
- <img src="https://img.shields.io/badge/latest-v1.5.4-darkgreen.svg" alt="latest"/>
- </a></p>
+ <img src="https://img.shields.io/badge/latest-v1.4.6-indigo.svg" alt="latest"/>
+ </a>
+
+ <br> Перед использованием обязательно ознакомьтесь с <a href="https://github.com/Em1tSan/NeuroGPT/wiki#%D1%80%D1%83%D1%81%D1%81%D0%BA%D0%B8%D0%B9-%D1%8F%D0%B7%D1%8B%D0%BA">wiki проекта</a><br/>
+ <br> Инструкции по установке: <br/>

- <a href="https://github.com/Em1tSan/NeuroGPT/wiki/Installing-and-running#windows">
+ <a href="https://github.com/Em1tSan/NeuroGPT/wiki/%D0%A3%D1%81%D1%82%D0%B0%D0%BD%D0%BE%D0%B2%D0%BA%D0%B0-%D0%B8-%D0%B7%D0%B0%D0%BF%D1%83%D1%81%D0%BA#windows">
  <img src="https://img.shields.io/badge/-Windows-1371c3?logo=windows" alt="windows"/>
  </a>
- <a href="https://github.com/Em1tSan/NeuroGPT/wiki/Installing-and-running#linux">
+ <a href="https://github.com/Em1tSan/NeuroGPT/wiki/%D0%A3%D1%81%D1%82%D0%B0%D0%BD%D0%BE%D0%B2%D0%BA%D0%B0-%D0%B8-%D0%B7%D0%B0%D0%BF%D1%83%D1%81%D0%BA#linux">
  <img src="https://img.shields.io/badge/-Linux-F1502F?logo=linux" alt="linux"/>
  </a>
- <a href="https://github.com/Em1tSan/NeuroGPT/wiki/Installing-and-running#macos">
+ <a href="https://github.com/Em1tSan/NeuroGPT/wiki/%D0%A3%D1%81%D1%82%D0%B0%D0%BD%D0%BE%D0%B2%D0%BA%D0%B0-%D0%B8-%D0%B7%D0%B0%D0%BF%D1%83%D1%81%D0%BA#macos">
  <img src="https://img.shields.io/badge/-MacOS-C0BFC0?logo=apple" alt="macos"/>
- </a>
- <a href="https://github.com/Em1tSan/NeuroGPT/wiki/Installing-and-running#portable-version">
- <img src="https://img.shields.io/badge/-Portable version-8080ff?logo=portable" alt="portable"/>
+ </a> </p>
+ <a href="https://github.com/Em1tSan/NeuroGPT/wiki/%D0%A3%D1%81%D1%82%D0%B0%D0%BD%D0%BE%D0%B2%D0%BA%D0%B0-%D0%B8-%D0%B7%D0%B0%D0%BF%D1%83%D1%81%D0%BA#%D0%BF%D0%BE%D1%80%D1%82%D0%B0%D1%82%D0%B8%D0%B2%D0%BD%D0%B0%D1%8F-%D0%B2%D0%B5%D1%80%D1%81%D0%B8%D1%8F">
+ <img src="https://img.shields.io/badge/-Портативная версия-008000?logo=portable" alt="portable"/>
  </a>

- <br> News and feedback: <br/>
+ <br> Новости и обратная связь: <br/>

  <a href="https://t.me/neurogen_news">
- <img src="https://img.shields.io/badge/-Telegram channel-0088CC?logo=telegram" alt="telegram"/>
+ <img src="https://img.shields.io/badge/-Telegram канал-0088CC?logo=telegram" alt="telegram"/>
  </a>
  <a href="https://t.me/neurogen_chat">
- <img src="https://img.shields.io/badge/-Telegram chat-0088CC?logo=telegram" alt="telegram_chat"/>
+ <img src="https://img.shields.io/badge/-Telegram чат-0088CC?logo=telegram" alt="telegram_chat"/>
  </a>

- <br> Support the project: <br/>
+ <br> Поддержать проект: <br/>

  <a href="https://boosty.to/neurogen">
  <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/9/92/Boosty_logo.svg/512px-Boosty_logo.svg.png?20230209172145" alt="neurogen_boosty" width="20%">
@@ -47,16 +50,20 @@

  </div>

- ## Features
+ ## Дисклеймер:
+
+ Поскольку данный проект функционирует не через официальное API, а благодаря доступу, полученному путем обратной инженерии, то API провайдеры могут падать, а различные модели отключаться. Пожалуйста, учтите это. Если вам необходима высокая стабильность для работы, то стоит обойти этот проект стороной. Также важно помнить, что поддержка осуществляется на чистом энтузиазме.
+
+ ## Возможности:

- - Web search
- - [Model list](https://status.neuroapi.host/v1/status)
- - Dialog context
- - No-logs
- - API endpoint
- - Dialog history
- - Setting generation parameters for GPT models
- - Built-in prompt templates and jailbreaks for various tasks
+ - Веб-поиск
+ - [Список моделей](https://status.neuroapi.host/v1/status)
+ - Встроенные шаблоны промптов под разные задачи
+ - Встроенные джейлбрейки для снятия цензуры
+ - Контекст беседы
+ - Режим endpoint для работы с API
+ - Изменение параметров генерации для gpt-моделей
+ - Сохранение и загрузка истории диалогов

  <div align="center">
  <img src="https://github.com/NealBelov/screenshots/blob/main/img_03.png?raw=true" width="100%">
.github/README_EN.md ADDED
@@ -0,0 +1,75 @@
+ <div align="center">
+ <a href="https://t.me/neurogen_news">
+ <img src="https://readme-typing-svg.herokuapp.com?font=Inconsolata&weight=700&size=30&duration=4000&pause=1000&color=1BED29&center=true&width=435&lines=NeuroGPT+by+Neurogen...;Opening..." alt="NeuroGPT" />
+ </a>
+
+ <strong> <a href="https://github.com/Em1tSan/NeuroGPT#readme">Русский</a> | English </strong>
+
+ <p> NeuroGPT allows free use of gpt-3.5, gpt-4, and other language models without VPN and account registration. It operates through API reverse engineering. At the moment, only gpt-3.5-turbo 4097 is functioning in stable mode. Continuous access to other models is not guaranteed.
+
+ The project is based on modified versions of <a href="https://github.com/xtekky/gpt4free">gpt4free</a> and <a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT">ChuanhuChatGPT</a></p>
+ We extend our gratitude to the authors.
+
+ <a href="https://github.com/Em1tSan/NeuroGPT/blob/main/LICENSE">
+ <img src="https://img.shields.io/badge/license-GPL_3.0-indigo.svg" alt="license"/>
+ </a>
+ <a href="https://github.com/Em1tSan/NeuroGPT/commits/main">
+ <img src="https://img.shields.io/badge/latest-v1.4.6-indigo.svg" alt="latest"/>
+ </a>
+
+ <br> Before using NeuroGPT, be sure to check out <a href="https://github.com/Em1tSan/NeuroGPT/wiki#english-language">our project's wiki</a><br/>
+ <br> Installation instructions: <br/>
+
+ <a href="https://github.com/Em1tSan/NeuroGPT/wiki/Installing-and-running#windows">
+ <img src="https://img.shields.io/badge/-Windows-1371c3?logo=windows" alt="windows"/>
+ </a>
+ <a href="https://github.com/Em1tSan/NeuroGPT/wiki/Installing-and-running#linux">
+ <img src="https://img.shields.io/badge/-Linux-F1502F?logo=linux" alt="linux"/>
+ </a>
+ <a href="https://github.com/Em1tSan/NeuroGPT/wiki/Installing-and-running#macos">
+ <img src="https://img.shields.io/badge/-MacOS-C0BFC0?logo=apple" alt="macos"/>
+ </a> </p>
+ <a href="https://github.com/Em1tSan/NeuroGPT/wiki/Installing-and-running#portable-version">
+ <img src="https://img.shields.io/badge/-Portable version-008000?logo=portable" alt="portable"/>
+ </a>
+
+ <br> News and feedback: <br/>
+
+ <a href="https://t.me/neurogen_news">
+ <img src="https://img.shields.io/badge/-Telegram channel-0088CC?logo=telegram" alt="telegram"/>
+ </a>
+ <a href="https://t.me/neurogen_chat">
+ <img src="https://img.shields.io/badge/-Telegram chat-0088CC?logo=telegram" alt="telegram_chat"/>
+ </a>
+
+ <br> Support the project: <br/>
+
+ <a href="https://boosty.to/neurogen">
+ <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/9/92/Boosty_logo.svg/512px-Boosty_logo.svg.png?20230209172145" alt="neurogen_boosty" width="20%">
+ </a>
+
+ </div>
+
+ ## Disclaimer:
+
+ Given that this project doesn't use an official API but relies on a reverse-engineered method, there's a risk of API providers failing and certain models disconnecting. Consider this if you need high stability for your work. Remember, support is provided solely out of enthusiasm.
+
+ ## Features:
+
+ - Web search
+ - [List of models](https://status.neuroapi.host/v1/status)
+ - Built-in prompt templates for various tasks
+ - Built-in jailbreaks for removing censorship
+ - Conversation context
+ - API endpoint
+ - Setting generation parameters for GPT models
+ - Saving and loading dialogue history
+
+ <div align="center">
+ <img src="https://github.com/NealBelov/screenshots/blob/main/img_03.png?raw=true" width="100%">
+
+ ***
+
+ [![Star History Chart](https://api.star-history.com/svg?repos=Em1tSan/NeuroGPT&type=Date)](https://star-history.com/#Em1tSan/NeuroGPT&Date)
+
+ </div>
backend/backend.py CHANGED
@@ -4,13 +4,12 @@ import json
  import random
  import string
  import socket
- import nest_asyncio

  import requests
- from typing import Any

- from flask import Flask, request, jsonify, Response
+ from flask import Flask, request, jsonify, Response, stream_with_context
  from flask_cors import CORS
+ from threading import Thread
  import logging
  from .embedding_processing import embedding_processing

@@ -18,11 +17,8 @@ import g4f
  from g4f import ChatCompletion, Provider, BaseProvider, models
  from g4f.models import ModelUtils

- nest_asyncio.apply()
-
  app = Flask(__name__)
  CORS(app)
-
  LOG = logging.getLogger(__name__)
  embedding_proc = embedding_processing()

@@ -71,48 +67,35 @@ def chat_completions():
          },
      }

-     def streaming():
-         for chunk in response:
+     def stream():
+         nonlocal response
+         for token in response:
+             completion_timestamp = int(time.time())
+             completion_id = ''.join(random.choices(
+                 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
+
              completion_data = {
                  'id': f'chatcmpl-{completion_id}',
                  'object': 'chat.completion.chunk',
                  'created': completion_timestamp,
-                 'model': model,
                  'choices': [
                      {
-                         'index': 0,
                          'delta': {
-                             'content': chunk,
+                             'content': token
                          },
-                         'finish_reason': None,
+                         'index': 0,
+                         'finish_reason': None
                      }
-                 ],
+                 ]
              }
-
-             content = json.dumps(completion_data, separators=(',', ':'))
-             yield f'data: {content}\n\n'
-             time.sleep(0.05)
-
-         end_completion_data: dict[str, Any] = {
-             'id': f'chatcmpl-{completion_id}',
-             'object': 'chat.completion.chunk',
-             'created': completion_timestamp,
-             'model': model,
-             'choices': [
-                 {
-                     'index': 0,
-                     'delta': {},
-                     'finish_reason': 'stop',
-                 }
-             ],
-         }
-         content = json.dumps(end_completion_data, separators=(',', ':'))
-         yield f'data: {content}\n\n'
-
-     return app.response_class(streaming(), mimetype='text/event-stream')
-
- @app.route("/engines/text-embedding-ada-002/embeddings", methods=["POST"])
- @app.route("/engines/text-similarity-davinci-001/embeddings", methods=["POST"])
+             #print(token)
+             #print(completion_data)
+             #print('data: %s\n\n' % json.dumps(completion_data, separators=(',' ':')))
+             yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',' ':'))
+             time.sleep(0.02)
+     print('===Start Streaming===')
+     return app.response_class(stream(), mimetype='text/event-stream')

  @app.route('/v1/embeddings', methods=['POST'])
  @app.route('/embeddings', methods=['POST'])
  def create_embedding():
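
Note on the rewritten stream() generator above: it emits OpenAI-style chat.completion.chunk objects as server-sent events. A minimal consumer sketch in Python (the local host and port are assumptions taken from endpoint.py further down; nothing in this snippet is part of the commit itself):

import json
import requests

# Read the /v1/chat/completions SSE stream and print tokens as they arrive.
resp = requests.post(
    "http://127.0.0.1:1337/v1/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "stream": True,
        "messages": [{"role": "user", "content": "Hello"}],
    },
    stream=True,
)
for raw in resp.iter_lines():
    if raw.startswith(b"data: "):
        chunk = json.loads(raw[len(b"data: "):])
        print(chunk["choices"][0]["delta"].get("content", ""), end="", flush=True)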
backend/embedding_processing.py CHANGED
@@ -49,7 +49,7 @@ class embedding_processing:
          response_embedding = self.transform_embedding_to_dict(embeddings_list,text_list)
          return response_embedding

-     def transform_embedding_to_dict(self, embedding_list, text_list, model_name="text-embedding-ada-002"):
+     def transform_embedding_to_dict(self, embedding_list, text_list, model_name="text-embedding-elmo-002"):
          prompt_tokens = sum(len(text) for text in text_list)
          total_tokens = sum(len(embedding) for embedding in embedding_list)
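
Only the default model name changes here; transform_embedding_to_dict keeps an OpenAI-compatible response shape (note that prompt_tokens above is a character count, not a true token count). A hypothetical request sketch, assuming the /v1/embeddings route accepts the OpenAI payload schema and the endpoint.py server below is listening locally:

import requests

resp = requests.post(
    "http://127.0.0.1:1337/v1/embeddings",
    json={"input": ["hello world"], "model": "text-embedding-elmo-002"},
)
body = resp.json()
print(body["usage"])                     # token counts approximated from string lengths
print(body["data"][0]["embedding"][:5])  # first few dimensions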
 
docker-compose/docker-compose.traefik.yml CHANGED
@@ -39,8 +39,6 @@ services:
        dockerfile: Dockerfile
      container_name: neurogengpt
      hostname: neurogengpt
-     environment:
-       - SUBF=/sub
      env_file:
        - .env
      networks:
docker-compose/docker-compose.yml CHANGED
@@ -6,11 +6,9 @@ services:
        context: ../
        dockerfile: Dockerfile
      ports:
-       - "7860:7860"
-       - "1337:1337"
+       - "7861:7860"
+       - "1338:1337"
      volumes:
        - ../config.json:/app/config.json
-     environment:
-       - SUBF=/sub
      env_file:
        - .env
endpoint.py CHANGED
@@ -1,21 +1,22 @@
+ import fastwsgi
  import socket
- import nest_asyncio
  from backend.backend import app
- from waitress import serve
+ from multiprocessing import Process

- nest_asyncio.apply()
+ def run_api_server():
+     fastwsgi.run(wsgi_app=app, host='0.0.0.0', port=1337)

- site_config = {
-     'host': '0.0.0.0',
-     'port': 1337,
-     'debug': False
- }

  if __name__ == "__main__":
      hostname = socket.gethostname()
      ip_address = socket.gethostbyname(hostname)
-
+     site_config = {
+         'host': '0.0.0.0',
+         'port': 1337,
+         'debug': False
+     }
      print(f"Running on http://127.0.0.1:{site_config['port']}")
      print(f"Running on http://{ip_address}:{site_config['port']}")

-     serve(app, host='0.0.0.0', port=site_config['port'],threads=4)
+     api_process = Process(target=run_api_server)
+     api_process.start()
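
One caveat with the new launcher: the fastwsgi server runs in a child process that is started but never joined, so the parent exits right after printing the URLs. A possible tightening, shown only as a sketch and not part of this commit:

from multiprocessing import Process

import fastwsgi
from backend.backend import app

def run_api_server():
    fastwsgi.run(wsgi_app=app, host="0.0.0.0", port=1337)

if __name__ == "__main__":
    api_process = Process(target=run_api_server)
    api_process.start()
    try:
        api_process.join()          # keep the parent alive while the server runs
    except KeyboardInterrupt:
        api_process.terminate()     # forward Ctrl+C as a clean shutdown
        api_process.join()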
g4f/Provider/AItianhu.py CHANGED
@@ -1,10 +1,10 @@
  from __future__ import annotations

  import json
+ from aiohttp import ClientSession, http

- from ..typing import AsyncResult, Messages
- from ..requests import StreamSession
- from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+ from ..typing import AsyncGenerator
+ from .base_provider import AsyncGeneratorProvider, format_prompt


  class AItianhu(AsyncGeneratorProvider):
@@ -16,50 +16,46 @@ class AItianhu(AsyncGeneratorProvider):
      async def create_async_generator(
          cls,
          model: str,
-         messages: Messages,
+         messages: list[dict[str, str]],
          proxy: str = None,
-         cookies: dict = None,
-         timeout: int = 120,
          **kwargs
-     ) -> AsyncResult:
-         if not cookies:
-             cookies = get_cookies("www.aitianhu.com")
-         data = {
-             "prompt": format_prompt(messages),
-             "options": {},
-             "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
-             "temperature": 0.8,
-             "top_p": 1,
-             **kwargs
-         }
+     ) -> AsyncGenerator:
          headers = {
-             "Authority": cls.url,
+             "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
              "Accept": "application/json, text/plain, */*",
+             "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+             "Content-Type": "application/json",
              "Origin": cls.url,
-             "Referer": f"{cls.url}/"
+             "Connection": "keep-alive",
+             "Referer": cls.url + "/",
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin",
          }
-         async with StreamSession(
+         async with ClientSession(
              headers=headers,
-             cookies=cookies,
-             timeout=timeout,
-             proxies={"https": proxy},
-             impersonate="chrome107",
-             verify=False
+             version=http.HttpVersion10
          ) as session:
-             async with session.post(f"{cls.url}/api/chat-process", json=data) as response:
+             data = {
+                 "prompt": format_prompt(messages),
+                 "options": {},
+                 "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
+                 "temperature": 0.8,
+                 "top_p": 1,
+                 **kwargs
+             }
+             async with session.post(
+                 cls.url + "/api/chat-process",
+                 proxy=proxy,
+                 json=data,
+                 ssl=False,
+             ) as response:
                  response.raise_for_status()
-                 async for line in response.iter_lines():
-                     if line == b"<script>":
-                         raise RuntimeError("Solve challenge and pass cookies")
-                     if b"platform's risk control" in line:
-                         raise RuntimeError("Platform's Risk Control")
-                     line = json.loads(line)
-                     if "detail" in line:
-                         content = line["detail"]["choices"][0]["delta"].get("content")
-                         if content:
-                             yield content
-                     else:
-                         raise RuntimeError(f"Response: {line}")
+                 async for line in response.content:
+                     line = json.loads(line.decode('utf-8'))
+                     token = line["detail"]["choices"][0]["delta"].get("content")
+                     if token:
+                         yield token


      @classmethod
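
For context, providers such as the rewritten AItianhu are consumed through the bundled g4f client. A usage sketch, assuming the vendored g4f API in this repository matches upstream gpt4free of the same era:

import g4f
from g4f.Provider import AItianhu

# Stream tokens from one specific provider instead of letting g4f pick one.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=AItianhu,
    messages=[{"role": "user", "content": "Say hi"}],
    stream=True,
)
for token in response:
    print(token, end="", flush=True)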
g4f/Provider/AItianhu2.py ADDED
@@ -0,0 +1,54 @@
+ import json
+
+ import requests
+
+ from ..typing import Any, CreateResult
+ from .base_provider import BaseProvider
+
+
+ class AItianhu2(BaseProvider):
+     url = "https://0vbix.aitianhu.site/api/chat-proces"
+     working = True
+     supports_gpt_4 = True
+
+     @staticmethod
+     def create_completion(
+         model: str,
+         messages: list[dict[str, str]],
+         stream: bool,
+         **kwargs: Any,
+     ) -> CreateResult:
+         base = ""
+         for message in messages:
+             base += "%s: %s\n" % (message["role"], message["content"])
+         base += "assistant:"
+
+         headers = {
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+         }
+         data: dict[str, Any] = {
+             "prompt": base,
+             "options": {},
+             "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+             "temperature": kwargs.get("temperature", 0.8),
+             "top_p": kwargs.get("top_p", 1),
+         }
+         url = "https://0vbix.aitianhu.site/api/chat-proces"
+         response = requests.post(url, headers=headers, json=data)
+         response.raise_for_status()
+         lines = response.text.strip().split("\n")
+         res = json.loads(lines[-1])
+         yield res["text"]
+
+     @classmethod
+     @property
+     def params(cls):
+         params = [
+             ("model", "str"),
+             ("messages", "list[dict[str, str]]"),
+             ("stream", "bool"),
+             ("temperature", "float"),
+             ("top_p", "int"),
+         ]
+         param = ", ".join([": ".join(p) for p in params])
+         return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/Acytoo.py CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations

  from aiohttp import ClientSession

- from ..typing import AsyncResult, Messages
+ from ..typing import AsyncGenerator
  from .base_provider import AsyncGeneratorProvider


@@ -15,15 +15,16 @@ class Acytoo(AsyncGeneratorProvider):
      async def create_async_generator(
          cls,
          model: str,
-         messages: Messages,
+         messages: list[dict[str, str]],
          proxy: str = None,
          **kwargs
-     ) -> AsyncResult:
+     ) -> AsyncGenerator:
+
          async with ClientSession(
              headers=_create_header()
          ) as session:
              async with session.post(
-                 f'{cls.url}/api/completions',
+                 cls.url + '/api/completions',
                  proxy=proxy,
                  json=_create_payload(messages, **kwargs)
              ) as response:
@@ -40,7 +41,7 @@
      }


- def _create_payload(messages: Messages, temperature: float = 0.5, **kwargs):
+ def _create_payload(messages: list[dict[str, str]], temperature: float = 0.5, **kwargs):
      return {
          'key'   : '',
          'model' : 'gpt-3.5-turbo',
g4f/Provider/AiService.py ADDED
@@ -0,0 +1,36 @@
+ from __future__ import annotations
+
+ import requests
+
+ from ..typing import Any, CreateResult
+ from .base_provider import BaseProvider
+
+
+ class AiService(BaseProvider):
+     url = "https://aiservice.vercel.app/"
+     working = False
+     supports_gpt_35_turbo = True
+
+     @staticmethod
+     def create_completion(
+         model: str,
+         messages: list[dict[str, str]],
+         stream: bool,
+         **kwargs: Any,
+     ) -> CreateResult:
+         base = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
+         base += "\nassistant: "
+
+         headers = {
+             "accept": "*/*",
+             "content-type": "text/plain;charset=UTF-8",
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-origin",
+             "Referer": "https://aiservice.vercel.app/chat",
+         }
+         data = {"input": base}
+         url = "https://aiservice.vercel.app/api/chat/answer"
+         response = requests.post(url, headers=headers, json=data)
+         response.raise_for_status()
+         yield response.json()["data"]
g4f/Provider/Aichat.py CHANGED
@@ -2,7 +2,6 @@ from __future__ import annotations

  from aiohttp import ClientSession

- from ..typing import Messages
  from .base_provider import AsyncProvider, format_prompt


@@ -14,7 +13,7 @@ class Aichat(AsyncProvider):
      @staticmethod
      async def create_async(
          model: str,
-         messages: Messages,
+         messages: list[dict[str, str]],
          proxy: str = None,
          **kwargs
      ) -> str:
g4f/Provider/Ails.py CHANGED
@@ -7,7 +7,7 @@ import json
  from datetime import datetime
  from aiohttp import ClientSession

- from ..typing import SHA256, AsyncResult, Messages
+ from ..typing import SHA256, AsyncGenerator
  from .base_provider import AsyncGeneratorProvider


@@ -19,11 +19,11 @@ class Ails(AsyncGeneratorProvider):
      @staticmethod
      async def create_async_generator(
          model: str,
-         messages: Messages,
+         messages: list[dict[str, str]],
          stream: bool,
          proxy: str = None,
          **kwargs
-     ) -> AsyncResult:
+     ) -> AsyncGenerator:
          headers = {
              "authority": "api.caipacity.com",
              "accept": "*/*",
g4f/Provider/Aivvm.py CHANGED
@@ -1,84 +1,78 @@
  from __future__ import annotations
- import requests

- from .base_provider import BaseProvider
- from ..typing import CreateResult
- from json import dumps
+ from aiohttp import ClientSession
+
+ from .base_provider import AsyncGeneratorProvider
+ from ..typing import AsyncGenerator

- # to recreate this easily, send a post request to https://chat.aivvm.com/api/models
  models = {
-     'gpt-3.5-turbo': {'id': 'gpt-3.5-turbo', 'name': 'GPT-3.5'},
-     'gpt-3.5-turbo-0613': {'id': 'gpt-3.5-turbo-0613', 'name': 'GPT-3.5-0613'},
-     'gpt-3.5-turbo-16k': {'id': 'gpt-3.5-turbo-16k', 'name': 'GPT-3.5-16K'},
-     'gpt-3.5-turbo-16k-0613': {'id': 'gpt-3.5-turbo-16k-0613', 'name': 'GPT-3.5-16K-0613'},
-     'gpt-4': {'id': 'gpt-4', 'name': 'GPT-4'},
-     'gpt-4-0613': {'id': 'gpt-4-0613', 'name': 'GPT-4-0613'},
-     'gpt-4-32k': {'id': 'gpt-4-32k', 'name': 'GPT-4-32K'},
-     'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
+     "gpt-4": {
+         "id": "gpt-4",
+         "name": "GPT-4",
+     },
+     "gpt-3.5-turbo": {
+         "id": "gpt-3.5-turbo",
+         "name": "GPT-3.5",
+     },
+     "gpt-3.5-turbo-16k": {
+         "id": "gpt-3.5-turbo-16k",
+         "name": "GPT-3.5-16k",
+     },
  }

- class Aivvm(BaseProvider):
-     url = 'https://chat.aivvm.com'
-     supports_stream = True
-     working = False
+ class Aivvm(AsyncGeneratorProvider):
+     url = "https://chat.aivvm.com"
+     working = True
      supports_gpt_35_turbo = True
      supports_gpt_4 = True

+
      @classmethod
-     def create_completion(cls,
+     async def create_async_generator(
+         cls,
          model: str,
          messages: list[dict[str, str]],
-         stream: bool,
+         proxy: str = None,
          **kwargs
-     ) -> CreateResult:
-         if not model:
-             model = "gpt-3.5-turbo"
-         elif model not in models:
-             raise ValueError(f"Model is not supported: {model}")
-
-         json_data = {
-             "model"       : models[model],
-             "messages"    : messages,
-             "key"         : "",
-             "prompt"      : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
-             "temperature" : kwargs.get("temperature", 0.7)
-         }
-
-         data = dumps(json_data)
-
+     ) -> AsyncGenerator:
+         model = model if model else "gpt-3.5-turbo"
+         if model not in models:
+             raise ValueError(f"Model are not supported: {model}")
          headers = {
-             "accept"            : "text/event-stream",
-             "accept-language"   : "en-US,en;q=0.9",
-             "content-type"      : "application/json",
-             "content-length"    : str(len(data)),
-             "sec-ch-ua"         : "\"Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
-             "sec-ch-ua-mobile"  : "?0",
-             "sec-ch-ua-platform": "\"Windows\"",
-             "sec-fetch-dest"    : "empty",
-             "sec-fetch-mode"    : "cors",
-             "sec-fetch-site"    : "same-origin",
-             "sec-gpc"           : "1",
-             "referrer"          : "https://chat.aivvm.com/",
-             "user-agent"        : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
+             "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+             "Accept"          : "*/*",
+             "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+             "Origin"          : cls.url,
+             "Referer"         : cls.url + "/",
+             "Sec-Fetch-Dest"  : "empty",
+             "Sec-Fetch-Mode"  : "cors",
+             "Sec-Fetch-Site"  : "same-origin",
          }
-
-         response = requests.post("https://chat.aivvm.com/api/chat", headers=headers, data=data, stream=True)
-         response.raise_for_status()
-
-         for chunk in response.iter_content(chunk_size=4096):
-             try:
-                 yield chunk.decode("utf-8")
-             except UnicodeDecodeError:
-                 yield chunk.decode("unicode-escape")
+         async with ClientSession(
+             headers=headers
+         ) as session:
+             data = {
+                 "temperature": 1,
+                 "key": "",
+                 "messages": messages,
+                 "model": models[model],
+                 "prompt": "",
+                 **kwargs
+             }
+             async with session.post(cls.url + "/api/chat", json=data, proxy=proxy) as response:
+                 response.raise_for_status()
+                 async for stream in response.content.iter_any():
+                     yield stream.decode()

      @classmethod
      @property
      def params(cls):
          params = [
-             ('model', 'str'),
-             ('messages', 'list[dict[str, str]]'),
-             ('stream', 'bool'),
-             ('temperature', 'float'),
+             ("model", "str"),
+             ("messages", "list[dict[str, str]]"),
+             ("stream", "bool"),
+             ("temperature", "float"),
          ]
-         param = ', '.join([': '.join(p) for p in params])
-         return f'g4f.provider.{cls.__name__} supports: ({param})'
+         param = ", ".join([": ".join(p) for p in params])
+         return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/Aiwang.py ADDED
@@ -0,0 +1,105 @@
+ import random, requests, json
+ from ..typing import Any, CreateResult
+ from .base_provider import BaseProvider
+
+
+ class Aiwang(BaseProvider):
+     url = "https://ai-wang.vercel.app"
+     supports_stream = True
+     supports_gpt_35_turbo = True
+     supports_gpt_35_turbo_16k = True
+     supports_gpt_35_turbo_16k_0613 = True
+     supports_gpt_4 = True
+     supports_gpt_4_0613 = True
+     supports_gpt_4_32k = True
+     supports_gpt_4_32k_0613 = True
+
+     working = False
+
+     @staticmethod
+     def create_completion(
+         model: str,
+         messages: list[dict[str, str]],
+         stream: bool,
+         **kwargs: Any,
+     ) -> CreateResult:
+         base = ''
+         for message in messages:
+             base += '%s: %s\n' % (message['role'], message['content'])
+         base += 'assistant:'
+
+         headers = {
+             "authority": "ai-wang.vercel.app",
+             "accept": "*/*",
+             "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
+             "content-type": "application/json",
+             "path": "v1/chat/completions",
+             "origin": f"{server}",
+             "referer": f"{server}/",
+             "x-requested-with": "XMLHttpRequest",
+             'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+             'sec-ch-ua-mobile': '?0',
+             'sec-ch-ua-platform': '"Windows"',
+             'sec-fetch-dest': 'empty',
+             'sec-fetch-mode': 'cors',
+             'sec-fetch-site': 'same-origin',
+             'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
+         }
+
+         json_data = {
+             "messages": messages,
+             "stream": stream,
+             "model": model,
+             "temperature": kwargs.get("temperature", 0.5),
+             "presence_penalty": kwargs.get("presence_penalty", 0),
+             "frequency_penalty": kwargs.get("frequency_penalty", 0),
+             "top_p": kwargs.get("top_p", 1),
+             "max_tokens": kwargs.get("max_tokens", 4096),
+         }
+
+         session = requests.Session()
+         # init cookies from server
+         session.get(f"{server}/")
+
+         response = session.post(
+             f"{server}/api/chat-stream",
+             headers=headers,
+             json=json_data,
+             stream=stream,
+         )
+         response.encoding = "utf-8"
+
+         try:
+             # Find the first opening brace and the last closing brace
+             start = response.text.find('{')
+             end = response.text.rfind('}') + 1  # +1 to include the brace itself
+
+             # Extract the JSON text
+             json_text = response.text[start:end]
+
+             # Convert the cleaned text to a Python dictionary
+             response_dict = json.loads(json_text)
+         except json.JSONDecodeError:
+             print(f"Failed to decode JSON. Response text was: {response.text}")
+             raise
+
+         # Extract the desired message
+         yield response_dict['choices'][0]['message']['content']
+
+
+     @classmethod
+     @property
+     def params(cls):
+         params = [
+             ("model", "str"),
+             ("messages", "list[dict[str, str]]"),
+             ("stream", "bool"),
+             ("temperature", "float"),
+             ("presence_penalty", "int"),
+             ("frequency_penalty", "int"),
+             ("top_p", "int"),
+             ("max_tokens", "int"),
+             ("active_server", "int"),
+         ]
+         param = ", ".join([": ".join(p) for p in params])
+         return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/Bard.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import random
5
+ import re
6
+
7
+ from aiohttp import ClientSession
8
+
9
+ from .base_provider import AsyncProvider, format_prompt, get_cookies
10
+
11
+
12
+ class Bard(AsyncProvider):
13
+ url = "https://bard.google.com"
14
+ needs_auth = True
15
+ working = True
16
+
17
+ @classmethod
18
+ async def create_async(
19
+ cls,
20
+ model: str,
21
+ messages: list[dict[str, str]],
22
+ proxy: str = None,
23
+ cookies: dict = None,
24
+ **kwargs
25
+ ) -> str:
26
+ prompt = format_prompt(messages)
27
+ if proxy and "://" not in proxy:
28
+ proxy = f"http://{proxy}"
29
+ if not cookies:
30
+ cookies = get_cookies(".google.com")
31
+
32
+ headers = {
33
+ 'authority': 'bard.google.com',
34
+ 'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
35
+ 'origin': 'https://bard.google.com',
36
+ 'referer': 'https://bard.google.com/',
37
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
38
+ 'x-same-domain': '1',
39
+ }
40
+
41
+ async with ClientSession(
42
+ cookies=cookies,
43
+ headers=headers
44
+ ) as session:
45
+ async with session.get(cls.url, proxy=proxy) as response:
46
+ text = await response.text()
47
+
48
+ match = re.search(r'SNlM0e\":\"(.*?)\"', text)
49
+ if not match:
50
+ raise RuntimeError("No snlm0e value.")
51
+ snlm0e = match.group(1)
52
+
53
+ params = {
54
+ 'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
55
+ '_reqid': random.randint(1111, 9999),
56
+ 'rt': 'c'
57
+ }
58
+
59
+ data = {
60
+ 'at': snlm0e,
61
+ 'f.req': json.dumps([None, json.dumps([[prompt]])])
62
+ }
63
+
64
+ intents = '.'.join([
65
+ 'assistant',
66
+ 'lamda',
67
+ 'BardFrontendService'
68
+ ])
69
+
70
+ async with session.post(
71
+ f'{cls.url}/_/BardChatUi/data/{intents}/StreamGenerate',
72
+ data=data,
73
+ params=params,
74
+ proxy=proxy
75
+ ) as response:
76
+ response = await response.text()
77
+ response = json.loads(response.splitlines()[3])[0][2]
78
+ response = json.loads(response)[4][0][1][0]
79
+ return response
80
+
81
+ @classmethod
82
+ @property
83
+ def params(cls):
84
+ params = [
85
+ ("model", "str"),
86
+ ("messages", "list[dict[str, str]]"),
87
+ ("stream", "bool"),
88
+ ("proxy", "str"),
89
+ ]
90
+ param = ", ".join([": ".join(p) for p in params])
91
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/Bing.py CHANGED
@@ -3,25 +3,10 @@ from __future__ import annotations
  import random
  import json
  import os
- import uuid
- import urllib.parse
  from aiohttp import ClientSession, ClientTimeout
- from ..typing import AsyncResult, Messages
- from .base_provider import AsyncGeneratorProvider
+ from ..typing import AsyncGenerator
+ from .base_provider import AsyncGeneratorProvider, get_cookies

- class Tones():
-     creative = "Creative"
-     balanced = "Balanced"
-     precise = "Precise"
-
- default_cookies = {
-     'SRCHD'         : 'AF=NOFORM',
-     'PPLState'      : '1',
-     'KievRPSSecAuth': '',
-     'SUID'          : '',
-     'SRCHUSR'       : '',
-     'SRCHHPGUSR'    : '',
- }

  class Bing(AsyncGeneratorProvider):
      url = "https://bing.com/chat"
@@ -30,13 +15,12 @@ class Bing(AsyncGeneratorProvider):

      @staticmethod
      def create_async_generator(
-         model: str,
-         messages: Messages,
-         proxy: str = None,
-         cookies: dict = None,
-         tone: str = Tones.creative,
-         **kwargs
-     ) -> AsyncResult:
+         model: str,
+         messages: list[dict[str, str]],
+         cookies: dict = None, **kwargs) -> AsyncGenerator:
+
+         if not cookies:
+             cookies = get_cookies(".bing.com")
          if len(messages) < 2:
              prompt = messages[0]["content"]
              context = None
@@ -45,10 +29,17 @@ class Bing(AsyncGeneratorProvider):
              context = create_context(messages[:-1])

          if not cookies or "SRCHD" not in cookies:
-             cookies = default_cookies
-         return stream_generate(prompt, tone, context, proxy, cookies)
+             cookies = {
+                 'SRCHD'         : 'AF=NOFORM',
+                 'PPLState'      : '1',
+                 'KievRPSSecAuth': '',
+                 'SUID'          : '',
+                 'SRCHUSR'       : '',
+                 'SRCHHPGUSR'    : '',
+             }
+         return stream_generate(prompt, context, cookies)

- def create_context(messages: Messages):
+ def create_context(messages: list[dict[str, str]]):
      context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages)

      return context
@@ -59,15 +50,13 @@ class Conversation():
          self.clientId = clientId
          self.conversationSignature = conversationSignature

- async def create_conversation(session: ClientSession, proxy: str = None) -> Conversation:
-     url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1150.3'
-
-     async with await session.get(url, proxy=proxy) as response:
-         data = await response.json()
-
-         conversationId = data.get('conversationId')
-         clientId = data.get('clientId')
-         conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')
+ async def create_conversation(session: ClientSession) -> Conversation:
+     url = 'https://www.bing.com/turing/conversation/create'
+     async with await session.get(url) as response:
+         response = await response.json()
+         conversationId = response.get('conversationId')
+         clientId = response.get('clientId')
+         conversationSignature = response.get('conversationSignature')

          if not conversationId or not clientId or not conversationSignature:
              raise Exception('Failed to create conversation.')
@@ -80,7 +69,7 @@ async def list_conversations(session: ClientSession) -> list:
          response = await response.json()
          return response["chats"]

- async def delete_conversation(session: ClientSession, conversation: Conversation, proxy: str = None) -> list:
+ async def delete_conversation(session: ClientSession, conversation: Conversation) -> list:
      url = "https://sydney.bing.com/sydney/DeleteSingleConversation"
      json = {
          "conversationId": conversation.conversationId,
@@ -89,7 +78,7 @@ async def delete_conversation(session: ClientSession, conversation: Conversation
          "source": "cib",
          "optionsSets": ["autosave"]
      }
-     async with session.post(url, json=json, proxy=proxy) as response:
+     async with session.post(url, json=json) as response:
          response = await response.json()
          return response["result"]["value"] == "Success"

@@ -172,56 +161,53 @@ class Defaults:
          'x-forwarded-for': ip_address,
      }

-     optionsSets = [
-         'saharasugg',
-         'enablenewsfc',
-         'clgalileo',
-         'gencontentv3',
-         "nlu_direct_response_filter",
-         "deepleo",
-         "disable_emoji_spoken_text",
-         "responsible_ai_policy_235",
-         "enablemm",
-         "h3precise"
-         "dtappid",
-         "cricinfo",
-         "cricinfov2",
-         "dv3sugg",
-         "nojbfedge"
-     ]
+     optionsSets = {
+         "optionsSets": [
+             'saharasugg',
+             'enablenewsfc',
+             'clgalileo',
+             'gencontentv3',
+             "nlu_direct_response_filter",
+             "deepleo",
+             "disable_emoji_spoken_text",
+             "responsible_ai_policy_235",
+             "enablemm",
+             "h3precise"
+             "dtappid",
+             "cricinfo",
+             "cricinfov2",
+             "dv3sugg",
+             "nojbfedge"
+         ]
+     }

  def format_message(msg: dict) -> str:
      return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter

- def create_message(conversation: Conversation, prompt: str, tone: str, context: str=None) -> str:
-     request_id = str(uuid.uuid4())
+ def create_message(conversation: Conversation, prompt: str, context: str=None) -> str:
      struct = {
          'arguments': [
              {
+                 **Defaults.optionsSets,
                  'source': 'cib',
-                 'optionsSets': Defaults.optionsSets,
                  'allowedMessageTypes': Defaults.allowedMessageTypes,
                  'sliceIds': Defaults.sliceIds,
                  'traceId': os.urandom(16).hex(),
                  'isStartOfSession': True,
-                 'requestId': request_id,
                  'message': Defaults.location | {
                      'author': 'user',
                      'inputMethod': 'Keyboard',
                      'text': prompt,
-                     'messageType': 'Chat',
-                     'requestId': request_id,
-                     'messageId': request_id,
+                     'messageType': 'Chat'
                  },
-                 'tone': tone,
-                 'spokenTextMode': 'None',
-                 'conversationId': conversation.conversationId,
+                 'conversationSignature': conversation.conversationSignature,
                  'participant': {
                      'id': conversation.clientId
                  },
+                 'conversationId': conversation.conversationId
              }
          ],
-         'invocationId': '1',
+         'invocationId': '0',
          'target': 'chat',
          'type': 4
      }
@@ -238,30 +224,28 @@ def create_message(conversation: Conversation, prompt: str, tone: str, context:

  async def stream_generate(
          prompt: str,
-         tone: str,
-         context: str = None,
-         proxy: str = None,
-         cookies: dict = None
+         context: str=None,
+         cookies: dict=None
      ):
      async with ClientSession(
          timeout=ClientTimeout(total=900),
          cookies=cookies,
          headers=Defaults.headers,
      ) as session:
-         conversation = await create_conversation(session, proxy)
+         conversation = await create_conversation(session)
          try:
              async with session.ws_connect(
-                 f'wss://sydney.bing.com/sydney/ChatHub',
+                 'wss://sydney.bing.com/sydney/ChatHub',
                  autoping=False,
-                 params={'sec_access_token': conversation.conversationSignature},
-                 proxy=proxy
              ) as wss:

                  await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
-                 await wss.receive(timeout=900)
-                 await wss.send_str(create_message(conversation, prompt, tone, context))
+                 msg = await wss.receive(timeout=900)
+
+                 await wss.send_str(create_message(conversation, prompt, context))

                  response_txt = ''
+                 result_text = ''
                  returned_text = ''
                  final = False
@@ -271,23 +255,19 @@ async def stream_generate(
                      for obj in objects:
                          if obj is None or not obj:
                              continue

                          response = json.loads(obj)
                          if response.get('type') == 1 and response['arguments'][0].get('messages'):
                              message = response['arguments'][0]['messages'][0]
                              if (message['contentOrigin'] != 'Apology'):
-                                 if 'adaptiveCards' in message:
-                                     card = message['adaptiveCards'][0]['body'][0]
-                                     if "text" in card:
-                                         response_txt = card.get('text')
-                                     if message.get('messageType'):
-                                         inline_txt = card['inlines'][0].get('text')
-                                         response_txt += inline_txt + '\n'
-                                 elif message.get('contentType') == "IMAGE":
-                                     query = urllib.parse.quote(message.get('text'))
-                                     url = f"\nhttps://www.bing.com/images/create?q={query}"
-                                     response_txt += url
-                                     final = True
+                                 response_txt = result_text + \
+                                     message['adaptiveCards'][0]['body'][0].get('text', '')
+
+                                 if message.get('messageType'):
+                                     inline_txt = message['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
+                                     response_txt += inline_txt + '\n'
+                                     result_text += inline_txt + '\n'
+
                              if response_txt.startswith(returned_text):
                                  new = response_txt[len(returned_text):]
                                  if new != "\n":
@@ -297,6 +277,7 @@ async def stream_generate(
                          result = response['item']['result']
                          if result.get('error'):
                              raise Exception(f"{result['value']}: {result['message']}")
-                         return
+                         final = True
+                         break
          finally:
-             await delete_conversation(session, conversation, proxy)
+             await delete_conversation(session, conversation)
g4f/Provider/CaffCat.py ADDED
@@ -0,0 +1,101 @@
+import json
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class CaffCat(BaseProvider):
+    url = "https://caffcat.com"
+    supports_stream = True
+    supports_gpt_35_turbo = True
+    supports_gpt_35_turbo_16k = True
+    working = True
+
+    @staticmethod
+    def create_completion(
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool,
+        **kwargs: Any,
+    ) -> CreateResult:
+        active_servers = [
+            "https://coffeecat.ai",
+            "https://caffcat.com",
+            "https://www.jinwangyile.xyz",
+        ]
+        server = active_servers[kwargs.get("active_server", 2)]
+        headers = {
+            "authority": server.replace("https://", ""),
+            "accept": "text/event-stream",
+            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
+            "content-type": "application/json",
+            "origin": server,
+            "referer": f"{server}/",
+            "plugins": "0",
+            "sec-ch-ua": '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "usesearch": "false",
+            "x-requested-with": "XMLHttpRequest",
+        }
+
+        json_data = {
+            "messages": messages,
+            "stream": stream,
+            "model": model,
+            "temperature": kwargs.get("temperature", 0.5),
+            "presence_penalty": kwargs.get("presence_penalty", 0),
+            "frequency_penalty": kwargs.get("frequency_penalty", 0),
+            "top_p": kwargs.get("top_p", 1),
+        }
+
+        session = requests.Session()
+        # init cookies from server
+        session.get(f"{server}/")
+
+        response = session.post(
+            f"{server}/api/openai/v1/chat/completions",
+            headers=headers,
+            json=json_data,
+            stream=stream,
+        )
+        if response.status_code == 200:
+            if not stream:
+                json_data = response.json()
+                if "choices" in json_data:
+                    yield json_data["choices"][0]["message"]["content"]
+                else:
+                    raise Exception("No response from server")
+            else:
+                for chunk in response.iter_lines():
+                    if b"content" in chunk:
+                        splitData = chunk.decode().split("data: ")
+                        if len(splitData) > 1:
+                            yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
+        else:
+            raise Exception(f"Error {response.status_code} from server")
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+            ("presence_penalty", "int"),
+            ("frequency_penalty", "int"),
+            ("top_p", "int"),
+            ("active_server", "int"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
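Note: CaffCat, EasyChat, Freet and several other providers in this commit each hand-split raw SSE lines on "data: ". A slightly more defensive shared helper could look like the sketch below (the iter_sse_content name is ours, not part of this repo):

import json
from typing import Iterable, Iterator

def iter_sse_content(lines: Iterable[bytes]) -> Iterator[str]:
    # yield the delta content of OpenAI-style SSE chunks, skipping [DONE] and keep-alives
    for raw in lines:
        line = raw.decode("utf-8", errors="ignore").strip()
        if not line.startswith("data: ") or line == "data: [DONE]":
            continue
        payload = json.loads(line[len("data: "):])
        content = payload["choices"][0]["delta"].get("content")
        if content:
            yield content

Usage inside a provider would simply be: for token in iter_sse_content(response.iter_lines()): yield token.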
g4f/Provider/ChatBase.py CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from aiohttp import ClientSession
 
-from ..typing import AsyncResult, Messages
+from ..typing import AsyncGenerator
 from .base_provider import AsyncGeneratorProvider
 
 
@@ -16,10 +16,9 @@ class ChatBase(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: Messages,
-        proxy: str = None,
+        messages: list[dict[str, str]],
         **kwargs
-    ) -> AsyncResult:
+    ) -> AsyncGenerator:
         if model == "gpt-4":
             chat_id = "quran---tafseer-saadi-pdf-wbgknt7zn"
         elif model == "gpt-3.5-turbo" or not model:
@@ -45,7 +44,7 @@ class ChatBase(AsyncGeneratorProvider):
             "chatId": chat_id,
             "conversationId": f"kcXpqEnqUie3dnJlsRi_O-{chat_id}"
         }
-        async with session.post("https://www.chatbase.co/api/fe/chat", json=data, proxy=proxy) as response:
+        async with session.post("https://www.chatbase.co/api/fe/chat", json=data) as response:
             response.raise_for_status()
             async for stream in response.content.iter_any():
                 yield stream.decode()
g4f/Provider/ChatgptAi.py CHANGED
@@ -1,28 +1,28 @@
 from __future__ import annotations
 
 import re
+import html
+import json
 from aiohttp import ClientSession
 
-from ..typing import Messages
-from .base_provider import AsyncProvider, format_prompt
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider
 
 
-class ChatgptAi(AsyncProvider):
+class ChatgptAi(AsyncGeneratorProvider):
     url: str = "https://chatgpt.ai/"
     working = True
     supports_gpt_35_turbo = True
-    _nonce = None
-    _post_id = None
-    _bot_id = None
+    _system_data = None
 
     @classmethod
-    async def create_async(
+    async def create_async_generator(
         cls,
         model: str,
-        messages: Messages,
+        messages: list[dict[str, str]],
         proxy: str = None,
         **kwargs
-    ) -> str:
+    ) -> AsyncGenerator:
         headers = {
             "authority" : "chatgpt.ai",
             "accept" : "*/*",
@@ -42,34 +42,34 @@ class ChatgptAi(AsyncProvider):
         async with ClientSession(
             headers=headers
         ) as session:
-            if not cls._nonce:
+            if not cls._system_data:
                 async with session.get(cls.url, proxy=proxy) as response:
                     response.raise_for_status()
-                    text = await response.text()
-                    result = re.search(r'data-nonce="(.*?)"', text)
-                    if result:
-                        cls._nonce = result.group(1)
-                    result = re.search(r'data-post-id="(.*?)"', text)
-                    if result:
-                        cls._post_id = result.group(1)
-                    result = re.search(r'data-bot-id="(.*?)"', text)
-                    if result:
-                        cls._bot_id = result.group(1)
-                    if not cls._nonce or not cls._post_id or not cls._bot_id:
-                        raise RuntimeError("Nonce, post-id or bot-id not found")
+                    match = re.findall(r"data-system='([^']+)'", await response.text())
+                    if not match:
+                        raise RuntimeError("No system data")
+                    cls._system_data = json.loads(html.unescape(match[0]))
 
             data = {
-                "_wpnonce": cls._nonce,
-                "post_id": cls._post_id,
-                "url": "https://chatgpt.ai",
-                "action": "wpaicg_chat_shortcode_message",
-                "message": format_prompt(messages),
-                "bot_id": cls._bot_id
+                "botId": cls._system_data["botId"],
+                "clientId": "",
+                "contextId": cls._system_data["contextId"],
+                "id": cls._system_data["id"],
+                "messages": messages[:-1],
+                "newMessage": messages[-1]["content"],
+                "session": cls._system_data["sessionId"],
+                "stream": True
             }
             async with session.post(
-                "https://chatgpt.ai/wp-admin/admin-ajax.php",
+                "https://chatgpt.ai/wp-json/mwai-ui/v1/chats/submit",
                 proxy=proxy,
-                data=data
+                json=data
             ) as response:
                 response.raise_for_status()
-                return (await response.json())["data"]
+                start = "data: "
+                async for line in response.content:
+                    line = line.decode('utf-8')
+                    if line.startswith(start):
+                        line = json.loads(line[len(start):-1])
+                        if line["type"] == "live":
+                            yield line["data"]
g4f/Provider/ChatgptLogin.py ADDED
@@ -0,0 +1,67 @@
+from __future__ import annotations
+
+import os, re
+from aiohttp import ClientSession
+
+from .base_provider import AsyncProvider, format_prompt
+
+
+class ChatgptLogin(AsyncProvider):
+    url = "https://opchatgpts.net"
+    supports_gpt_35_turbo = True
+    working = True
+    _nonce = None
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> str:
+        headers = {
+            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "Accept"          : "*/*",
+            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "Origin"          : "https://opchatgpts.net",
+            "Alt-Used"        : "opchatgpts.net",
+            "Referer"         : "https://opchatgpts.net/chatgpt-free-use/",
+            "Sec-Fetch-Dest"  : "empty",
+            "Sec-Fetch-Mode"  : "cors",
+            "Sec-Fetch-Site"  : "same-origin",
+        }
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            if not cls._nonce:
+                async with session.get(
+                    "https://opchatgpts.net/chatgpt-free-use/",
+                    params={"id": os.urandom(6).hex()},
+                ) as response:
+                    result = re.search(r'data-nonce="(.*?)"', await response.text())
+                    if not result:
+                        raise RuntimeError("No nonce value")
+                    cls._nonce = result.group(1)
+            data = {
+                "_wpnonce": cls._nonce,
+                "post_id": 28,
+                "url": "https://opchatgpts.net/chatgpt-free-use",
+                "action": "wpaicg_chat_shortcode_message",
+                "message": format_prompt(messages),
+                "bot_id": 0
+            }
+            async with session.post("https://opchatgpts.net/wp-admin/admin-ajax.php", data=data) as response:
+                response.raise_for_status()
+                return (await response.json())["data"]
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/CodeLinkAva.py ADDED
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider
+
+
+class CodeLinkAva(AsyncGeneratorProvider):
+    url = "https://ava-ai-ef611.web.app"
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> AsyncGenerator:
+        headers = {
+            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "Accept"          : "*/*",
+            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "Origin"          : cls.url,
+            "Referer"         : cls.url + "/",
+            "Sec-Fetch-Dest"  : "empty",
+            "Sec-Fetch-Mode"  : "cors",
+            "Sec-Fetch-Site"  : "same-origin",
+        }
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            data = {
+                "messages": messages,
+                "temperature": 0.6,
+                "stream": True,
+                **kwargs
+            }
+            async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response:
+                response.raise_for_status()
+                start = "data: "
+                async for line in response.content:
+                    line = line.decode()
+                    if line.startswith(start) and not line.startswith("data: [DONE]"):
+                        line = json.loads(line[len(start):-1])
+                        content = line["choices"][0]["delta"].get("content")
+                        if content:
+                            yield content
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/DeepAi.py ADDED
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+import json
+import js2py
+from aiohttp import ClientSession
+
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider
+
+
+class DeepAi(AsyncGeneratorProvider):
+    url: str = "https://deepai.org"
+    working = True
+    supports_gpt_35_turbo = True
+
+    @staticmethod
+    async def create_async_generator(
+        model: str,
+        messages: list[dict[str, str]],
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncGenerator:
+
+        token_js = """
+var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
+var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y;
+h = Math.round(1E11 * Math.random()) + "";
+f = function () {
+    for (var p = [], q = 0; 64 > q;) p[q] = 0 | 4294967296 * Math.sin(++q % Math.PI);
+
+    return function (t) {
+        var v, y, H, ea = [v = 1732584193, y = 4023233417, ~v, ~y],
+            Z = [],
+            A = unescape(encodeURI(t)) + "\u0080",
+            z = A.length;
+        t = --z / 4 + 2 | 15;
+        for (Z[--t] = 8 * z; ~z;) Z[z >> 2] |= A.charCodeAt(z) << 8 * z--;
+        for (q = A = 0; q < t; q += 16) {
+            for (z = ea; 64 > A; z = [H = z[3], v + ((H = z[0] + [v & y | ~v & H, H & v | ~H & y, v ^ y ^ H, y ^ (v | ~H)][z = A >> 4] + p[A] + ~~Z[q | [A, 5 * A + 1, 3 * A + 5, 7 * A][z] & 15]) << (z = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * z + A++ % 4]) | H >>> -z), v, y]) v = z[1] | 0, y = z[2];
+            for (A = 4; A;) ea[--A] += z[A]
+        }
+        for (t = ""; 32 > A;) t += (ea[A >> 3] >> 4 * (1 ^ A++) & 15).toString(16);
+        return t.split("").reverse().join("")
+    }
+}();
+
+"tryit-" + h + "-" + f(agent + f(agent + f(agent + h + "x")));
+"""
+
+        payload = {"chas_style": "chat", "chatHistory": json.dumps(messages)}
+        api_key = js2py.eval_js(token_js)
+        headers = {
+            "api-key": api_key,
+            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
+        }
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            async with session.post("https://api.deepai.org/make_me_a_pizza", proxy=proxy, data=payload) as response:
+                response.raise_for_status()
+                async for stream in response.content.iter_any():
+                    if stream:
+                        yield stream.decode()
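Note: the token_js string above is a compact JavaScript MD5 routine (the digest is emitted reversed) evaluated through js2py solely to build the "tryit-…" API key. Assuming the JS really is plain MD5, the same key can be derived in pure Python without the js2py dependency; a minimal sketch:

import hashlib
import random

def reversed_md5(text: str) -> str:
    # hex MD5 digest reversed, mirroring t.split("").reverse().join("") in the JS
    return hashlib.md5(text.encode()).hexdigest()[::-1]

def get_api_key(user_agent: str) -> str:
    part1 = str(random.randint(0, 10 ** 11))  # h = Math.round(1E11 * Math.random()) + ""
    part2 = reversed_md5(user_agent + reversed_md5(user_agent + reversed_md5(user_agent + part1 + "x")))
    return f"tryit-{part1}-{part2}"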
g4f/Provider/DfeHub.py ADDED
@@ -0,0 +1,77 @@
+from __future__ import annotations
+
+import json
+import re
+import time
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class DfeHub(BaseProvider):
+    url = "https://chat.dfehub.com/"
+    supports_stream = True
+    supports_gpt_35_turbo = True
+
+    @staticmethod
+    def create_completion(
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool, **kwargs: Any) -> CreateResult:
+
+        headers = {
+            "authority"         : "chat.dfehub.com",
+            "accept"            : "*/*",
+            "accept-language"   : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "content-type"      : "application/json",
+            "origin"            : "https://chat.dfehub.com",
+            "referer"           : "https://chat.dfehub.com/",
+            "sec-ch-ua"         : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+            "sec-ch-ua-mobile"  : "?0",
+            "sec-ch-ua-platform": '"macOS"',
+            "sec-fetch-dest"    : "empty",
+            "sec-fetch-mode"    : "cors",
+            "sec-fetch-site"    : "same-origin",
+            "user-agent"        : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+            "x-requested-with"  : "XMLHttpRequest",
+        }
+
+        json_data = {
+            "messages"          : messages,
+            "model"             : "gpt-3.5-turbo",
+            "temperature"       : kwargs.get("temperature", 0.5),
+            "presence_penalty"  : kwargs.get("presence_penalty", 0),
+            "frequency_penalty" : kwargs.get("frequency_penalty", 0),
+            "top_p"             : kwargs.get("top_p", 1),
+            "stream"            : True
+        }
+
+        response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions",
+            headers=headers, json=json_data, timeout=3)
+
+        for chunk in response.iter_lines():
+            if b"detail" in chunk:
+                delay = re.findall(r"\d+\.\d+", chunk.decode())
+                delay = float(delay[-1])
+                time.sleep(delay)
+                yield from DfeHub.create_completion(model, messages, stream, **kwargs)
+            if b"content" in chunk:
+                data = json.loads(chunk.decode().split("data: ")[1])
+                yield (data["choices"][0]["delta"]["content"])
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+            ("presence_penalty", "int"),
+            ("frequency_penalty", "int"),
+            ("top_p", "int"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/EasyChat.py ADDED
@@ -0,0 +1,111 @@
+from __future__ import annotations
+
+import json
+import random
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class EasyChat(BaseProvider):
+    url: str = "https://free.easychat.work"
+    supports_stream = True
+    supports_gpt_35_turbo = True
+    working = True
+
+    @staticmethod
+    def create_completion(
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool, **kwargs: Any) -> CreateResult:
+
+        active_servers = [
+            "https://chat10.fastgpt.me",
+            "https://chat9.fastgpt.me",
+            "https://chat1.fastgpt.me",
+            "https://chat2.fastgpt.me",
+            "https://chat3.fastgpt.me",
+            "https://chat4.fastgpt.me",
+            "https://gxos1h1ddt.fastgpt.me"
+        ]
+
+        server = active_servers[kwargs.get("active_server", random.randint(0, 5))]
+        headers = {
+            "authority"         : server.replace("https://", ""),
+            "accept"            : "text/event-stream",
+            "accept-language"   : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
+            "content-type"      : "application/json",
+            "origin"            : server,
+            "referer"           : f"{server}/",
+            "plugins"           : "0",
+            "sec-ch-ua"         : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+            "sec-ch-ua-mobile"  : "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest"    : "empty",
+            "sec-fetch-mode"    : "cors",
+            "sec-fetch-site"    : "same-origin",
+            "user-agent"        : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "usesearch"         : "false",
+            "x-requested-with"  : "XMLHttpRequest"
+        }
+
+        json_data = {
+            "messages"          : messages,
+            "stream"            : stream,
+            "model"             : model,
+            "temperature"       : kwargs.get("temperature", 0.5),
+            "presence_penalty"  : kwargs.get("presence_penalty", 0),
+            "frequency_penalty" : kwargs.get("frequency_penalty", 0),
+            "top_p"             : kwargs.get("top_p", 1)
+        }
+
+        session = requests.Session()
+        # init cookies from server
+        session.get(f"{server}/")
+
+        response = session.post(f"{server}/api/openai/v1/chat/completions",
+            headers=headers, json=json_data, stream=stream)
+
+        if response.status_code == 200:
+            if not stream:
+                json_data = response.json()
+                if "choices" in json_data:
+                    yield json_data["choices"][0]["message"]["content"]
+                else:
+                    raise Exception("No response from server")
+            else:
+                for chunk in response.iter_lines():
+                    if b"content" in chunk:
+                        splitData = chunk.decode().split("data:")
+                        if len(splitData) > 1:
+                            yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
+        else:
+            raise Exception(f"Error {response.status_code} from server : {response.reason}")
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+            ("presence_penalty", "int"),
+            ("frequency_penalty", "int"),
+            ("top_p", "int"),
+            ("active_server", "int"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/Equing.py ADDED
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+import json
+from abc import ABC, abstractmethod
+
+import requests
+
+from ..typing import Any, CreateResult
+
+
+class Equing(ABC):
+    url: str = 'https://next.eqing.tech/'
+    working = True
+    needs_auth = False
+    supports_stream = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = False
+
+    @staticmethod
+    @abstractmethod
+    def create_completion(
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool, **kwargs: Any) -> CreateResult:
+
+        headers = {
+            'authority'         : 'next.eqing.tech',
+            'accept'            : 'text/event-stream',
+            'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'cache-control'     : 'no-cache',
+            'content-type'      : 'application/json',
+            'origin'            : 'https://next.eqing.tech',
+            'plugins'           : '0',
+            'pragma'            : 'no-cache',
+            'referer'           : 'https://next.eqing.tech/',
+            'sec-ch-ua'         : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
+            'sec-ch-ua-mobile'  : '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest'    : 'empty',
+            'sec-fetch-mode'    : 'cors',
+            'sec-fetch-site'    : 'same-origin',
+            'user-agent'        : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
+            'usesearch'         : 'false',
+            'x-requested-with'  : 'XMLHttpRequest'
+        }
+
+        json_data = {
+            'messages'          : messages,
+            'stream'            : stream,
+            'model'             : model,
+            'temperature'       : kwargs.get('temperature', 0.5),
+            'presence_penalty'  : kwargs.get('presence_penalty', 0),
+            'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+            'top_p'             : kwargs.get('top_p', 1),
+        }
+
+        response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
+            headers=headers, json=json_data, stream=stream)
+
+        if not stream:
+            yield response.json()["choices"][0]["message"]["content"]
+            return
+
+        for line in response.iter_content(chunk_size=1024):
+            if line:
+                if b'content' in line:
+                    line_json = json.loads(line.decode('utf-8').split('data: ')[1])
+                    token = line_json['choices'][0]['delta'].get('content')
+                    if token:
+                        yield token
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/FastGpt.py ADDED
@@ -0,0 +1,86 @@
+from __future__ import annotations
+
+import json
+import random
+from abc import ABC, abstractmethod
+
+import requests
+
+from ..typing import Any, CreateResult
+
+
+class FastGpt(ABC):
+    url: str = 'https://chat9.fastgpt.me/'
+    working = False
+    needs_auth = False
+    supports_stream = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = False
+
+    @staticmethod
+    @abstractmethod
+    def create_completion(
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool, **kwargs: Any) -> CreateResult:
+
+        headers = {
+            'authority'         : 'chat9.fastgpt.me',
+            'accept'            : 'text/event-stream',
+            'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'cache-control'     : 'no-cache',
+            'content-type'      : 'application/json',
+            'origin'            : 'https://chat9.fastgpt.me',
+            'plugins'           : '0',
+            'pragma'            : 'no-cache',
+            'referer'           : 'https://chat9.fastgpt.me/',
+            'sec-ch-ua'         : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
+            'sec-ch-ua-mobile'  : '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest'    : 'empty',
+            'sec-fetch-mode'    : 'cors',
+            'sec-fetch-site'    : 'same-origin',
+            'user-agent'        : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
+            'usesearch'         : 'false',
+            'x-requested-with'  : 'XMLHttpRequest',
+        }
+
+        json_data = {
+            'messages'          : messages,
+            'stream'            : stream,
+            'model'             : model,
+            'temperature'       : kwargs.get('temperature', 0.5),
+            'presence_penalty'  : kwargs.get('presence_penalty', 0),
+            'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+            'top_p'             : kwargs.get('top_p', 1),
+        }
+
+        subdomain = random.choice([
+            'jdaen979ew',
+            'chat9'
+        ])
+
+        response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions',
+            headers=headers, json=json_data, stream=stream)
+
+        for line in response.iter_lines():
+            if line:
+                try:
+                    if b'content' in line:
+                        line_json = json.loads(line.decode('utf-8').split('data: ')[1])
+                        token = line_json['choices'][0]['delta'].get('content')
+                        if token:
+                            yield token
+                except Exception:
+                    continue
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/Forefront.py ADDED
@@ -0,0 +1,40 @@
+from __future__ import annotations
+
+import json
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Forefront(BaseProvider):
+    url = "https://forefront.com"
+    supports_stream = True
+    supports_gpt_35_turbo = True
+
+    @staticmethod
+    def create_completion(
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool, **kwargs: Any) -> CreateResult:
+
+        json_data = {
+            "text"          : messages[-1]["content"],
+            "action"        : "noauth",
+            "id"            : "",
+            "parentId"      : "",
+            "workspaceId"   : "",
+            "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
+            "model"         : "gpt-4",
+            "messages"      : messages[:-1] if len(messages) > 1 else [],
+            "internetMode"  : "auto",
+        }
+
+        response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
+            json=json_data, stream=True)
+
+        response.raise_for_status()
+        for token in response.iter_lines():
+            if b"delta" in token:
+                yield json.loads(token.decode().split("data: ")[1])["delta"]
g4f/Provider/Freet.py ADDED
@@ -0,0 +1,109 @@
+import json
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Freet(BaseProvider):
+    url = "https://biwjo6q8.freet.to"
+    supports_stream = True
+
+    supports_gpt_35_turbo = True
+    supports_gpt_35_turbo_16k = True
+    supports_gpt_35_turbo_16k_0613 = True
+    supports_gpt_4 = True
+    supports_gpt_4_0613 = True
+    supports_gpt_4_32k = True
+    supports_gpt_4_32k_0613 = True
+
+    working = True
+
+    @staticmethod
+    def create_completion(
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool,
+        **kwargs: Any,
+    ) -> CreateResult:
+        active_servers = [
+            "https://biwjo6q8.freet.to",
+        ]
+        server = active_servers[kwargs.get("active_server", 0)]
+        headers = {
+            "authority": server.replace("https://", ""),
+            "authorization": "Bearer nk-Tmd-Ni-xiang-Gou=Sb-90807rqHgl8b3jrNkSjEvt90EiMEoCsbKJ2kggV1iHzTKEDWv1tcgazgdsuw0S4pZ1W",
+            "accept": "text/event-stream",
+            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
+            "content-type": "application/json",
+            "origin": server,
+            "referer": f"{server}/",
+            "plugins": "0",
+            "sec-ch-ua": '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "usesearch": "false",
+            "x-requested-with": "XMLHttpRequest",
+        }
+
+        json_data = {
+            "messages": messages,
+            "stream": stream,
+            "model": model,
+            "temperature": kwargs.get("temperature", 0.5),
+            "presence_penalty": kwargs.get("presence_penalty", 0),
+            "frequency_penalty": kwargs.get("frequency_penalty", 0),
+            "top_p": kwargs.get("top_p", 1),
+        }
+
+        session = requests.Session()
+        # init cookies from server
+        session.get(f"{server}/")
+
+        response = session.post(
+            f"{server}/api/openai/v1/chat/completions",
+            headers=headers,
+            json=json_data,
+            stream=stream,
+        )
+        if response.status_code == 200:
+            if not stream:
+                json_data = response.json()
+                if "choices" in json_data:
+                    yield json_data["choices"][0]["message"]["content"]
+                else:
+                    raise Exception("No response from server")
+            else:
+                for chunk in response.iter_lines():
+                    if b"content" in chunk:
+                        splitData = chunk.decode().split("data:")
+                        if len(splitData) > 1:
+                            yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
+        else:
+            raise Exception(f"Error {response.status_code} from server : {response.reason}")
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+            ("presence_penalty", "int"),
+            ("frequency_penalty", "int"),
+            ("top_p", "int"),
+            ("active_server", "int"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/GetGpt.py ADDED
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+import json
+import os
+import uuid
+
+import requests
+from Crypto.Cipher import AES
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class GetGpt(BaseProvider):
+    url = 'https://chat.getgpt.world/'
+    supports_stream = True
+    working = True
+    supports_gpt_35_turbo = True
+
+    @staticmethod
+    def create_completion(
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool, **kwargs: Any) -> CreateResult:
+
+        headers = {
+            'Content-Type' : 'application/json',
+            'Referer'      : 'https://chat.getgpt.world/',
+            'user-agent'   : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+        }
+
+        data = json.dumps(
+            {
+                'messages'          : messages,
+                'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+                'max_tokens'        : kwargs.get('max_tokens', 4000),
+                'model'             : 'gpt-3.5-turbo',
+                'presence_penalty'  : kwargs.get('presence_penalty', 0),
+                'temperature'       : kwargs.get('temperature', 1),
+                'top_p'             : kwargs.get('top_p', 1),
+                'stream'            : True,
+                'uuid'              : str(uuid.uuid4())
+            }
+        )
+
+        res = requests.post('https://chat.getgpt.world/api/chat/stream',
+            headers=headers, json={'signature': _encrypt(data)}, stream=True)
+
+        res.raise_for_status()
+        for line in res.iter_lines():
+            if b'content' in line:
+                line_json = json.loads(line.decode('utf-8').split('data: ')[1])
+                yield (line_json['choices'][0]['delta']['content'])
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ('model', 'str'),
+            ('messages', 'list[dict[str, str]]'),
+            ('stream', 'bool'),
+            ('temperature', 'float'),
+            ('presence_penalty', 'int'),
+            ('frequency_penalty', 'int'),
+            ('top_p', 'int'),
+            ('max_tokens', 'int'),
+        ]
+        param = ', '.join([': '.join(p) for p in params])
+        return f'g4f.provider.{cls.__name__} supports: ({param})'
+
+
+def _encrypt(e: str):
+    t = os.urandom(8).hex().encode('utf-8')
+    n = os.urandom(8).hex().encode('utf-8')
+    r = e.encode('utf-8')
+
+    cipher = AES.new(t, AES.MODE_CBC, n)
+    ciphertext = cipher.encrypt(_pad_data(r))
+
+    return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')
+
+
+def _pad_data(data: bytes) -> bytes:
+    block_size = AES.block_size
+    padding_size = block_size - len(data) % block_size
+    padding = bytes([padding_size] * padding_size)
+
+    return data + padding
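Note: _encrypt above is AES-128-CBC with a random 16-character hex key and IV, PKCS#7-style padding, and the key and IV appended to the ciphertext hex. For reference, the inverse operation would look like the sketch below (our own helper, not part of the commit), which also shows how _pad_data is undone:

from Crypto.Cipher import AES

def _unpad_data(data: bytes) -> bytes:
    # strip the PKCS#7 padding added by _pad_data (last byte encodes the pad length)
    return data[:-data[-1]]

def _decrypt(signature: str) -> str:
    # _encrypt emits: ciphertext hex + 16-char key + 16-char IV
    ciphertext, key, iv = signature[:-32], signature[-32:-16], signature[-16:]
    cipher = AES.new(key.encode(), AES.MODE_CBC, iv.encode())
    return _unpad_data(cipher.decrypt(bytes.fromhex(ciphertext))).decode()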
g4f/Provider/H2o.py CHANGED
@@ -5,25 +5,25 @@ import uuid
 
 from aiohttp import ClientSession
 
-from ..typing import AsyncResult, Messages
+from ..typing import AsyncGenerator
 from .base_provider import AsyncGeneratorProvider, format_prompt
 
 
 class H2o(AsyncGeneratorProvider):
     url = "https://gpt-gm.h2o.ai"
-    working = False
+    working = True
     model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"
 
     @classmethod
     async def create_async_generator(
         cls,
         model: str,
-        messages: Messages,
+        messages: list[dict[str, str]],
         proxy: str = None,
         **kwargs
-    ) -> AsyncResult:
+    ) -> AsyncGenerator:
         model = model if model else cls.model
-        headers = {"Referer": cls.url + "/"}
+        headers = {"Referer": "https://gpt-gm.h2o.ai/"}
 
         async with ClientSession(
             headers=headers
@@ -36,14 +36,14 @@ class H2o(AsyncGeneratorProvider):
             "searchEnabled": "true",
         }
         async with session.post(
-            f"{cls.url}/settings",
+            "https://gpt-gm.h2o.ai/settings",
             proxy=proxy,
             data=data
         ) as response:
             response.raise_for_status()
 
         async with session.post(
-            f"{cls.url}/conversation",
+            "https://gpt-gm.h2o.ai/conversation",
             proxy=proxy,
             json={"model": model},
         ) as response:
@@ -71,7 +71,7 @@ class H2o(AsyncGeneratorProvider):
             },
         }
         async with session.post(
-            f"{cls.url}/conversation/{conversationId}",
+            f"https://gpt-gm.h2o.ai/conversation/{conversationId}",
            proxy=proxy,
             json=data
         ) as response:
@@ -83,14 +83,6 @@ class H2o(AsyncGeneratorProvider):
                 if not line["token"]["special"]:
                     yield line["token"]["text"]
 
-        async with session.delete(
-            f"{cls.url}/conversation/{conversationId}",
-            proxy=proxy,
-            json=data
-        ) as response:
-            response.raise_for_status()
-
-
     @classmethod
     @property
     def params(cls):
g4f/Provider/HuggingChat.py ADDED
@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+import json
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+
+
+class HuggingChat(AsyncGeneratorProvider):
+    url = "https://huggingface.co/chat/"
+    needs_auth = True
+    working = True
+    model = "OpenAssistant/oasst-sft-6-llama-30b-xor"
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool = True,
+        proxy: str = None,
+        cookies: dict = None,
+        **kwargs
+    ) -> AsyncGenerator:
+        model = model if model else cls.model
+        if not cookies:
+            cookies = get_cookies(".huggingface.co")
+        if proxy and "://" not in proxy:
+            proxy = f"http://{proxy}"
+
+        headers = {
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+        }
+        async with ClientSession(
+            cookies=cookies,
+            headers=headers
+        ) as session:
+            async with session.post("https://huggingface.co/chat/conversation", proxy=proxy, json={"model": model}) as response:
+                conversation_id = (await response.json())["conversationId"]
+
+            send = {
+                "inputs": format_prompt(messages),
+                "parameters": {
+                    "temperature": 0.2,
+                    "truncate": 1000,
+                    "max_new_tokens": 1024,
+                    "stop": ["</s>"],
+                    "top_p": 0.95,
+                    "repetition_penalty": 1.2,
+                    "top_k": 50,
+                    "return_full_text": False,
+                    **kwargs
+                },
+                "stream": stream,
+                "options": {
+                    "id": "9e9b8bc4-6604-40c6-994e-8eb78fa32e37",
+                    "response_id": "04ce2602-3bea-45e8-8efc-cef00680376a",
+                    "is_retry": False,
+                    "use_cache": False,
+                    "web_search_id": ""
+                }
+            }
+            async with session.post(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy, json=send) as response:
+                if not stream:
+                    data = await response.json()
+                    if "error" in data:
+                        raise RuntimeError(data["error"])
+                    elif isinstance(data, list):
+                        yield data[0]["generated_text"].strip()
+                    else:
+                        raise RuntimeError(f"Response: {data}")
+                else:
+                    start = "data:"
+                    first = True
+                    async for line in response.content:
+                        line = line.decode("utf-8")
+                        if not line:
+                            continue
+                        if line.startswith(start):
+                            line = json.loads(line[len(start):-1])
+                            if "token" not in line:
+                                raise RuntimeError(f"Response: {line}")
+                            if not line["token"]["special"]:
+                                if first:
+                                    yield line["token"]["text"].lstrip()
+                                    first = False
+                                else:
+                                    yield line["token"]["text"]
+
+            async with session.delete(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy) as response:
+                response.raise_for_status()
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("proxy", "str"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/I207m.py ADDED
@@ -0,0 +1,73 @@
+import uuid
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class I207m(BaseProvider):
+    url = "https://gpt3.i207m.top"
+    supports_stream = True
+    needs_auth = False
+    supports_gpt_4 = True
+    supports_gpt_4_32k = True
+
+    @staticmethod
+    def create_completion(
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool,
+        **kwargs: Any,
+    ) -> CreateResult:
+        headers = {
+            "authority": "gpt3.i207m.top",
+            "content-type": "application/json",
+            "origin": "https://gpt3.i207m.top",
+            "referer": "https://gpt3.i207m.top/",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
+            "x-auth-code": str(kwargs.get("auth")),
+        }
+        models = {
+            "gpt-4": {
+                "id": "gpt-4",
+                "name": "GPT-4",
+                "maxLength": 24000,
+                "tokenLimit": 8000,
+            },
+            "gpt-4-32k": {
+                "id": "gpt-4-32k",
+                "name": "GPT-4-32k",
+                "maxLength": 96000,
+                "tokenLimit": 32000
+            },
+        }
+        json_data = {
+            "conversationId": str(uuid.uuid4()),
+            "model": models[model],
+            "messages": messages,
+            "key": "",
+            "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+        }
+
+        response = requests.post(
+            "https://gpt3.i207m.top/api/chat",
+            headers=headers,
+            json=json_data,
+            stream=True,
+        )
+        response.raise_for_status()
+        for token in response.iter_content(chunk_size=2046):
+            yield token.decode("utf-8")
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("auth", "str"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/Liaobots.py CHANGED
@@ -1,10 +1,11 @@
 from __future__ import annotations
 
+import json
 import uuid
 
 from aiohttp import ClientSession
 
-from ..typing import AsyncResult, Messages
+from ..typing import AsyncGenerator
 from .base_provider import AsyncGeneratorProvider
 
 models = {
@@ -29,7 +30,7 @@
 }
 
 class Liaobots(AsyncGeneratorProvider):
-    url = "https://liaobots.site"
+    url = "https://liaobots.com"
     working = True
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
@@ -39,12 +40,14 @@ class Liaobots(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: Messages,
+        messages: list[dict[str, str]],
         auth: str = None,
         proxy: str = None,
         **kwargs
-    ) -> AsyncResult:
+    ) -> AsyncGenerator:
         model = model if model in models else "gpt-3.5-turbo"
+        if proxy and "://" not in proxy:
+            proxy = f"http://{proxy}"
         headers = {
             "authority": "liaobots.com",
             "content-type": "application/json",
@@ -55,23 +58,11 @@ class Liaobots(AsyncGeneratorProvider):
         async with ClientSession(
             headers=headers
         ) as session:
-            cls._auth_code = auth if isinstance(auth, str) else cls._auth_code
-            if not cls._auth_code:
-                async with session.post(
-                    "https://liaobots.work/recaptcha/api/login",
-                    proxy=proxy,
-                    data={"token": "abcdefghijklmnopqrst"},
-                    verify_ssl=False
-                ) as response:
-                    response.raise_for_status()
-                async with session.post(
-                    "https://liaobots.work/api/user",
-                    proxy=proxy,
-                    json={"authcode": ""},
-                    verify_ssl=False
-                ) as response:
+            auth_code = auth if isinstance(auth, str) else cls._auth_code
+            if not auth_code:
+                async with session.post(cls.url + "/api/user", proxy=proxy, json={"authcode": ""}) as response:
                     response.raise_for_status()
-                    cls._auth_code = (await response.json(content_type=None))["authCode"]
+                    auth_code = cls._auth_code = json.loads(await response.text())["authCode"]
             data = {
                 "conversationId": str(uuid.uuid4()),
                 "model": models[model],
@@ -79,13 +70,7 @@ class Liaobots(AsyncGeneratorProvider):
                 "key": "",
                 "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
             }
-            async with session.post(
-                "https://liaobots.work/api/chat",
-                proxy=proxy,
-                json=data,
-                headers={"x-auth-code": cls._auth_code},
-                verify_ssl=False
-            ) as response:
+            async with session.post(cls.url + "/api/chat", proxy=proxy, json=data, headers={"x-auth-code": auth_code}) as response:
                 response.raise_for_status()
                 async for stream in response.content.iter_any():
                     if stream:
g4f/Provider/Lockchat.py ADDED
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import json
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Lockchat(BaseProvider):
+    url: str = "http://supertest.lockchat.app"
+    supports_stream = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+
+    @staticmethod
+    def create_completion(
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool, **kwargs: Any) -> CreateResult:
+
+        temperature = float(kwargs.get("temperature", 0.7))
+        payload = {
+            "temperature": temperature,
+            "messages"   : messages,
+            "model"      : model,
+            "stream"     : True,
+        }
+
+        headers = {
+            "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
+        }
+        response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
+            json=payload, headers=headers, stream=True)
+
+        response.raise_for_status()
+        for token in response.iter_lines():
+            if b"The model: `gpt-4` does not exist" in token:
+                print("error, retrying...")
+                # yield the retried stream instead of discarding it
+                yield from Lockchat.create_completion(
+                    model = model,
+                    messages = messages,
+                    stream = stream,
+                    temperature = temperature,
+                    **kwargs)
+
+            if b"content" in token:
+                token = json.loads(token.decode("utf-8").split("data: ")[1])
+                token = token["choices"][0]["delta"].get("content")
+                if token:
+                    yield (token)
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/Opchatgpts.py ADDED
@@ -0,0 +1,8 @@
+from __future__ import annotations
+
+from .ChatgptLogin import ChatgptLogin
+
+
+class Opchatgpts(ChatgptLogin):
+    url = "https://opchatgpts.net"
+    working = True
g4f/Provider/OpenAssistant.py ADDED
@@ -0,0 +1,102 @@
+from __future__ import annotations
+
+import json
+
+from aiohttp import ClientSession
+
+from ..typing import Any, AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+
+
+class OpenAssistant(AsyncGeneratorProvider):
+    url = "https://open-assistant.io/chat"
+    needs_auth = True
+    working = True
+    model = "OA_SFT_Llama_30B_6"
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        proxy: str = None,
+        cookies: dict = None,
+        **kwargs: Any
+    ) -> AsyncGenerator:
+        if proxy and "://" not in proxy:
+            proxy = f"http://{proxy}"
+        if not cookies:
+            cookies = get_cookies("open-assistant.io")
+
+        headers = {
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+        }
+        async with ClientSession(
+            cookies=cookies,
+            headers=headers
+        ) as session:
+            async with session.post("https://open-assistant.io/api/chat", proxy=proxy) as response:
+                chat_id = (await response.json())["id"]
+
+            data = {
+                "chat_id": chat_id,
+                "content": f"<s>[INST]\n{format_prompt(messages)}\n[/INST]",
+                "parent_id": None
+            }
+            async with session.post("https://open-assistant.io/api/chat/prompter_message", proxy=proxy, json=data) as response:
+                parent_id = (await response.json())["id"]
+
+            data = {
+                "chat_id": chat_id,
+                "parent_id": parent_id,
+                "model_config_name": model if model else cls.model,
+                "sampling_parameters": {
+                    "top_k": 50,
+                    "top_p": None,
+                    "typical_p": None,
+                    "temperature": 0.35,
+                    "repetition_penalty": 1.1111111111111112,
+                    "max_new_tokens": 1024,
+                    **kwargs
+                },
+                "plugins": []
+            }
+            async with session.post("https://open-assistant.io/api/chat/assistant_message", proxy=proxy, json=data) as response:
+                data = await response.json()
+                if "id" in data:
+                    message_id = data["id"]
+                elif "message" in data:
+                    raise RuntimeError(data["message"])
+                else:
+                    response.raise_for_status()
+
+            params = {
+                'chat_id': chat_id,
+                'message_id': message_id,
+            }
+            async with session.post("https://open-assistant.io/api/chat/events", proxy=proxy, params=params) as response:
+                start = "data: "
+                async for line in response.content:
+                    line = line.decode("utf-8")
+                    if line and line.startswith(start):
+                        line = json.loads(line[len(start):])
+                        if line["event_type"] == "token":
+                            yield line["text"]
+
+            params = {
+                'chat_id': chat_id,
+            }
+            async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
+                response.raise_for_status()
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("proxy", "str"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/OpenaiChat.py ADDED
@@ -0,0 +1,94 @@
+from __future__ import annotations
+
+from curl_cffi.requests import AsyncSession
+import uuid
+import json
+
+from .base_provider import AsyncProvider, get_cookies, format_prompt
+from ..typing import AsyncGenerator
+
+
+class OpenaiChat(AsyncProvider):
+    url = "https://chat.openai.com"
+    needs_auth = True
+    working = True
+    supports_gpt_35_turbo = True
+    _access_token = None
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        proxy: str = None,
+        access_token: str = None,
+        cookies: dict = None,
+        **kwargs: dict
+    ) -> AsyncGenerator:
+        proxies = None
+        if proxy:
+            if "://" not in proxy:
+                proxy = f"http://{proxy}"
+            proxies = {
+                "http": proxy,
+                "https": proxy
+            }
+        if not access_token:
+            access_token = await cls.get_access_token(cookies, proxies)
+        headers = {
+            "Accept": "text/event-stream",
+            "Authorization": f"Bearer {access_token}",
+        }
+        async with AsyncSession(proxies=proxies, headers=headers, impersonate="chrome107") as session:
+            messages = [
+                {
+                    "id": str(uuid.uuid4()),
+                    "author": {"role": "user"},
+                    "content": {"content_type": "text", "parts": [format_prompt(messages)]},
+                },
+            ]
+            data = {
+                "action": "next",
+                "messages": messages,
+                "conversation_id": None,
+                "parent_message_id": str(uuid.uuid4()),
+                "model": "text-davinci-002-render-sha",
+                "history_and_training_disabled": True,
+            }
+            response = await session.post("https://chat.openai.com/backend-api/conversation", json=data)
+            response.raise_for_status()
+            last_message = None
+            for line in response.content.decode().splitlines():
+                if line.startswith("data: "):
+                    line = line[6:]
+                    if line != "[DONE]":
+                        line = json.loads(line)
+                        if "message" in line:
+                            last_message = line["message"]["content"]["parts"][0]
+            return last_message
+
+    @classmethod
+    async def get_access_token(cls, cookies: dict = None, proxies: dict = None):
+        if not cls._access_token:
+            cookies = cookies if cookies else get_cookies("chat.openai.com")
+            async with AsyncSession(proxies=proxies, cookies=cookies, impersonate="chrome107") as session:
+                response = await session.get("https://chat.openai.com/api/auth/session")
+                response.raise_for_status()
+                cls._access_token = response.json()["accessToken"]
+        return cls._access_token
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("proxy", "str"),
+            ("access_token", "str"),
+            ("cookies", "dict[str, str]")
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
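Note: providers like the ones added in this commit are normally selected through the g4f front end rather than instantiated directly. A minimal usage sketch (assuming the g4f.ChatCompletion API of this repo; provider choice and streaming support vary per class):

import g4f

response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=g4f.Provider.DeepAi,   # any provider class from this commit
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
)
for token in response:
    print(token, end="", flush=True)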
g4f/Provider/Providers/Forefront.py ADDED
@@ -0,0 +1,33 @@
+import os
+import json
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://forefront.com'
+model = ['gpt-3.5-turbo']
+supports_stream = True
+needs_auth = False
+working = False
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    json_data = {
+        'text': messages[-1]['content'],
+        'action': 'noauth',
+        'id': '',
+        'parentId': '',
+        'workspaceId': '',
+        'messagePersona': '607e41fe-95be-497e-8e97-010a59b2e2c0',
+        'model': 'gpt-4',
+        'messages': messages[:-1] if len(messages) > 1 else [],
+        'internetMode': 'auto'
+    }
+    response = requests.post('https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat',
+        json=json_data, stream=True)
+    for token in response.iter_lines():
+        if b'delta' in token:
+            token = json.loads(token.decode().split('data: ')[1])['delta']
+            yield (token)
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/Qidinam.py ADDED
@@ -0,0 +1,57 @@
+import json, random, string, time
+
+from aiohttp import ClientSession
+from ..typing import Any, CreateResult
+from .base_provider import AsyncProvider, format_prompt
+
+
+class Qidinam(AsyncProvider):
+    url = "https://ai.qidianym.net/api/chat-process"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_stream = True
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs: Any,
+    ) -> CreateResult:
+
+        base = ""
+        for message in messages:
+            base += "%s: %s\n" % (message["role"], message["content"])
+        base += "assistant:"
+
+        headers = {
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+        }
+        data: dict[str, Any] = {
+            "prompt": base,
+            "options": {},
+            "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+            "temperature": kwargs.get("temperature", 0.8),
+            "top_p": kwargs.get("top_p", 1),
+        }
+        url = "https://ai.qidianym.net/api/chat-process"
+
+        # Use aiohttp for asynchronous HTTP requests
+        async with ClientSession() as session:
+            async with session.post(url, headers=headers, json=data) as response:
+                response.raise_for_status()
+                lines = (await response.text()).strip().split("\n")
+                res = json.loads(lines[-1])
+                return res["text"]
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+            ("top_p", "int"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/Raycast.py ADDED
@@ -0,0 +1,72 @@
+ from __future__ import annotations
+
+ import json
+
+ import requests
+
+ from ..typing import Any, CreateResult
+ from .base_provider import BaseProvider
+
+
+ class Raycast(BaseProvider):
+     url = "https://raycast.com"
+     supports_gpt_35_turbo = True
+     supports_gpt_4 = True
+     supports_stream = True
+     needs_auth = True
+     working = True
+
+     @staticmethod
+     def create_completion(
+         model: str,
+         messages: list[dict[str, str]],
+         stream: bool,
+         **kwargs: Any,
+     ) -> CreateResult:
+         auth = kwargs.get('auth')
+         headers = {
+             'Accept': 'application/json',
+             'Accept-Language': 'en-US,en;q=0.9',
+             'Authorization': f'Bearer {auth}',
+             'Content-Type': 'application/json',
+             'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0',
+         }
+         parsed_messages = []
+         for message in messages:
+             parsed_messages.append({
+                 'author': message['role'],
+                 'content': {'text': message['content']}
+             })
+         data = {
+             "debug": False,
+             "locale": "en-CN",
+             "messages": parsed_messages,
+             "model": model,
+             "provider": "openai",
+             "source": "ai_chat",
+             "system_instruction": "markdown",
+             "temperature": 0.5
+         }
+         response = requests.post("https://backend.raycast.com/api/v1/ai/chat_completions", headers=headers, json=data, stream=True)
+         for token in response.iter_lines():
+             if b'data: ' not in token:
+                 continue
+             completion_chunk = json.loads(token.decode().replace('data: ', ''))
+             token = completion_chunk['text']
+             if token is not None:
+                 yield token
+
+     @classmethod
+     @property
+     def params(cls):
+         params = [
+             ("model", "str"),
+             ("messages", "list[dict[str, str]]"),
+             ("stream", "bool"),
+             ("temperature", "float"),
+             ("top_p", "int"),
+             ("auth", "str"),
+         ]
+         param = ", ".join([": ".join(p) for p in params])
+         return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/Theb.py ADDED
@@ -0,0 +1,97 @@
+ from __future__ import annotations
+
+ import json
+ import random
+
+ import requests
+
+ from ..typing import Any, CreateResult
+ from .base_provider import BaseProvider
+
+
+ class Theb(BaseProvider):
+     url = "https://theb.ai"
+     working = True
+     supports_stream = True
+     supports_gpt_35_turbo = True
+     needs_auth = True
+
+     @staticmethod
+     def create_completion(
+         model: str,
+         messages: list[dict[str, str]],
+         stream: bool, **kwargs: Any) -> CreateResult:
+
+         conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
+         conversation += "\nassistant: "
+
+         auth = kwargs.get("auth", {
+             "bearer_token": "free",
+             "org_id": "theb",
+         })
+
+         bearer_token = auth["bearer_token"]
+         org_id = auth["org_id"]
+
+         headers = {
+             'authority': 'beta.theb.ai',
+             'accept': 'text/event-stream',
+             'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
+             'authorization': 'Bearer ' + bearer_token,
+             'content-type': 'application/json',
+             'origin': 'https://beta.theb.ai',
+             'referer': 'https://beta.theb.ai/home',
+             'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+             'sec-ch-ua-mobile': '?0',
+             'sec-ch-ua-platform': '"Windows"',
+             'sec-fetch-dest': 'empty',
+             'sec-fetch-mode': 'cors',
+             'sec-fetch-site': 'same-origin',
+             'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
+             'x-ai-model': 'ee8d4f29cb7047f78cbe84313ed6ace8',
+         }
+
+         req_rand = random.randint(100000000, 9999999999)
+
+         json_data: dict[str, Any] = {
+             "text": conversation,
+             "category": "04f58f64a4aa4191a957b47290fee864",
+             "model": "ee8d4f29cb7047f78cbe84313ed6ace8",
+             "model_params": {
+                 "system_prompt": "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}",
+                 "temperature": kwargs.get("temperature", 1),
+                 "top_p": kwargs.get("top_p", 1),
+                 "frequency_penalty": kwargs.get("frequency_penalty", 0),
+                 "presence_penalty": kwargs.get("presence_penalty", 0),
+                 "long_term_memory": "auto"
+             }
+         }
+
+         response = requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
+             headers=headers, json=json_data, stream=True)
+
+         response.raise_for_status()
+         content = ""
+         next_content = ""
+         for chunk in response.iter_lines():
+             if b"content" in chunk:
+                 next_content = content
+                 data = json.loads(chunk.decode().split("data: ")[1])
+                 content = data["content"]
+                 yield data["content"].replace(next_content, "")
+
+     @classmethod
+     @property
+     def params(cls):
+         params = [
+             ("model", "str"),
+             ("messages", "list[dict[str, str]]"),
+             ("auth", "list[dict[str, str]]"),
+             ("stream", "bool"),
+             ("temperature", "float"),
+             ("presence_penalty", "int"),
+             ("frequency_penalty", "int"),
+             ("top_p", "int")
+         ]
+         param = ", ".join([": ".join(p) for p in params])
+         return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/V50.py ADDED
@@ -0,0 +1,67 @@
+ from __future__ import annotations
+
+ import uuid
+
+ import requests
+
+ from ..typing import Any, CreateResult
+ from .base_provider import BaseProvider
+
+
+ class V50(BaseProvider):
+     url = 'https://p5.v50.ltd'
+     supports_gpt_35_turbo = True
+     supports_stream = False
+     needs_auth = False
+     working = False
+
+     @staticmethod
+     def create_completion(
+         model: str,
+         messages: list[dict[str, str]],
+         stream: bool, **kwargs: Any) -> CreateResult:
+
+         conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
+         conversation += "\nassistant: "
+
+         payload = {
+             "prompt": conversation,
+             "options": {},
+             "systemMessage": ".",
+             "temperature": kwargs.get("temperature", 0.4),
+             "top_p": kwargs.get("top_p", 0.4),
+             "model": model,
+             "user": str(uuid.uuid4())
+         }
+
+         headers = {
+             'authority': 'p5.v50.ltd',
+             'accept': 'application/json, text/plain, */*',
+             'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
+             'content-type': 'application/json',
+             'origin': 'https://p5.v50.ltd',
+             'referer': 'https://p5.v50.ltd/',
+             'sec-ch-ua-platform': '"Windows"',
+             'sec-fetch-dest': 'empty',
+             'sec-fetch-mode': 'cors',
+             'sec-fetch-site': 'same-origin',
+             'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
+         }
+         response = requests.post("https://p5.v50.ltd/api/chat-process",
+             json=payload, headers=headers, proxies=kwargs['proxy'] if 'proxy' in kwargs else {})
+
+         if "https://fk1.v50.ltd" not in response.text:
+             yield response.text
+
+     @classmethod
+     @property
+     def params(cls):
+         params = [
+             ("model", "str"),
+             ("messages", "list[dict[str, str]]"),
+             ("stream", "bool"),
+             ("temperature", "float"),
+             ("top_p", "int"),
+         ]
+         param = ", ".join([": ".join(p) for p in params])
+         return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/Vercel.py CHANGED
@@ -1,381 +1,358 @@
  from __future__ import annotations
 
- import json, base64, requests, execjs, random, uuid
 
- from ..typing import Messages, TypedDict, CreateResult
- from .base_provider import BaseProvider
- from abc import abstractmethod
 
 
- class Vercel(BaseProvider):
-     url = 'https://sdk.vercel.ai'
      working = True
      supports_gpt_35_turbo = True
-     supports_stream = True
 
-     @staticmethod
-     @abstractmethod
-     def create_completion(
          model: str,
-         messages: Messages,
-         stream: bool,
          proxy: str = None,
          **kwargs
-     ) -> CreateResult:
-         if not model:
-             model = "gpt-3.5-turbo"
-         elif model not in model_info:
-             raise ValueError(f"Model are not supported: {model}")
 
          headers = {
-             'authority': 'sdk.vercel.ai',
-             'accept': '*/*',
-             'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-             'cache-control': 'no-cache',
-             'content-type': 'application/json',
-             'custom-encoding': get_anti_bot_token(),
-             'origin': 'https://sdk.vercel.ai',
-             'pragma': 'no-cache',
-             'referer': 'https://sdk.vercel.ai/',
-             'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
-             'sec-ch-ua-mobile': '?0',
-             'sec-ch-ua-platform': '"macOS"',
-             'sec-fetch-dest': 'empty',
-             'sec-fetch-mode': 'cors',
-             'sec-fetch-site': 'same-origin',
-             'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % (
-                 random.randint(99, 999),
-                 random.randint(99, 999)
-             )
          }
 
-         json_data = {
-             'model': model_info[model]['id'],
-             'messages': messages,
-             'playgroundId': str(uuid.uuid4()),
-             'chatIndex': 0,
-             **model_info[model]['default_params'],
-             **kwargs
-         }
-
-         max_retries = kwargs.get('max_retries', 20)
-         for i in range(max_retries):
-             response = requests.post('https://sdk.vercel.ai/api/generate',
-                 headers=headers, json=json_data, stream=True, proxies={"https": proxy})
-             try:
-                 response.raise_for_status()
-             except Exception:
-                 continue
-             for token in response.iter_content(chunk_size=None):
-                 yield token.decode()
-             break
-
-
- def get_anti_bot_token() -> str:
-     headers = {
-         'authority': 'sdk.vercel.ai',
-         'accept': '*/*',
-         'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-         'cache-control': 'no-cache',
-         'pragma': 'no-cache',
-         'referer': 'https://sdk.vercel.ai/',
-         'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
-         'sec-ch-ua-mobile': '?0',
-         'sec-ch-ua-platform': '"macOS"',
-         'sec-fetch-dest': 'empty',
-         'sec-fetch-mode': 'cors',
-         'sec-fetch-site': 'same-origin',
-         'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % (
-             random.randint(99, 999),
-             random.randint(99, 999)
-         )
-     }
-
-     response = requests.get('https://sdk.vercel.ai/openai.jpeg',
-         headers=headers).text
 
-     raw_data = json.loads(base64.b64decode(response,
-         validate=True))
 
-     js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
-         return (%s)(%s)''' % (raw_data['c'], raw_data['a'])
 
-     raw_token = json.dumps({'r': execjs.compile(js_script).call(''), 't': raw_data['t']},
-         separators = (",", ":"))
-
-     return base64.b64encode(raw_token.encode('utf-16le')).decode()
 
  class ModelInfo(TypedDict):
      id: str
      default_params: dict[str, Any]
 
  model_info: dict[str, ModelInfo] = {
-     'claude-instant-v1': {
-         'id': 'anthropic:claude-instant-v1',
-         'default_params': {
-             'temperature': 1,
-             'maximumLength': 1024,
-             'topP': 1,
-             'topK': 1,
-             'presencePenalty': 1,
-             'frequencyPenalty': 1,
-             'stopSequences': ['\n\nHuman:'],
          },
      },
-     'claude-v1': {
-         'id': 'anthropic:claude-v1',
-         'default_params': {
-             'temperature': 1,
-             'maximumLength': 1024,
-             'topP': 1,
-             'topK': 1,
-             'presencePenalty': 1,
-             'frequencyPenalty': 1,
-             'stopSequences': ['\n\nHuman:'],
          },
      },
-     'claude-v2': {
-         'id': 'anthropic:claude-v2',
-         'default_params': {
-             'temperature': 1,
-             'maximumLength': 1024,
-             'topP': 1,
-             'topK': 1,
-             'presencePenalty': 1,
-             'frequencyPenalty': 1,
-             'stopSequences': ['\n\nHuman:'],
          },
      },
-     'a16z-infra/llama7b-v2-chat': {
-         'id': 'replicate:a16z-infra/llama7b-v2-chat',
-         'default_params': {
-             'temperature': 0.75,
-             'maximumLength': 3000,
-             'topP': 1,
-             'repetitionPenalty': 1,
          },
      },
-     'a16z-infra/llama13b-v2-chat': {
-         'id': 'replicate:a16z-infra/llama13b-v2-chat',
-         'default_params': {
-             'temperature': 0.75,
-             'maximumLength': 3000,
-             'topP': 1,
-             'repetitionPenalty': 1,
          },
      },
-     'replicate/llama-2-70b-chat': {
-         'id': 'replicate:replicate/llama-2-70b-chat',
-         'default_params': {
-             'temperature': 0.75,
-             'maximumLength': 3000,
-             'topP': 1,
-             'repetitionPenalty': 1,
          },
      },
-     'bigscience/bloom': {
-         'id': 'huggingface:bigscience/bloom',
-         'default_params': {
-             'temperature': 0.5,
-             'maximumLength': 1024,
-             'topP': 0.95,
-             'topK': 4,
-             'repetitionPenalty': 1.03,
          },
      },
-     'google/flan-t5-xxl': {
-         'id': 'huggingface:google/flan-t5-xxl',
-         'default_params': {
-             'temperature': 0.5,
-             'maximumLength': 1024,
-             'topP': 0.95,
-             'topK': 4,
-             'repetitionPenalty': 1.03,
          },
      },
-     'EleutherAI/gpt-neox-20b': {
-         'id': 'huggingface:EleutherAI/gpt-neox-20b',
-         'default_params': {
-             'temperature': 0.5,
-             'maximumLength': 1024,
-             'topP': 0.95,
-             'topK': 4,
-             'repetitionPenalty': 1.03,
-             'stopSequences': [],
          },
      },
-     'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {
-         'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
-         'default_params': {
-             'maximumLength': 1024,
-             'typicalP': 0.2,
-             'repetitionPenalty': 1,
-         },
      },
-     'OpenAssistant/oasst-sft-1-pythia-12b': {
-         'id': 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
-         'default_params': {
-             'maximumLength': 1024,
-             'typicalP': 0.2,
-             'repetitionPenalty': 1,
-         },
      },
-     'bigcode/santacoder': {
-         'id': 'huggingface:bigcode/santacoder',
-         'default_params': {
-             'temperature': 0.5,
-             'maximumLength': 1024,
-             'topP': 0.95,
-             'topK': 4,
-             'repetitionPenalty': 1.03,
          },
      },
-     'command-light-nightly': {
-         'id': 'cohere:command-light-nightly',
-         'default_params': {
-             'temperature': 0.9,
-             'maximumLength': 1024,
-             'topP': 1,
-             'topK': 0,
-             'presencePenalty': 0,
-             'frequencyPenalty': 0,
-             'stopSequences': [],
          },
      },
-     'command-nightly': {
-         'id': 'cohere:command-nightly',
-         'default_params': {
-             'temperature': 0.9,
-             'maximumLength': 1024,
-             'topP': 1,
-             'topK': 0,
-             'presencePenalty': 0,
-             'frequencyPenalty': 0,
-             'stopSequences': [],
          },
      },
-     'gpt-4': {
-         'id': 'openai:gpt-4',
-         'default_params': {
-             'temperature': 0.7,
-             'maximumLength': 8192,
-             'topP': 1,
-             'presencePenalty': 0,
-             'frequencyPenalty': 0,
-             'stopSequences': [],
          },
      },
-     'gpt-4-0613': {
-         'id': 'openai:gpt-4-0613',
-         'default_params': {
-             'temperature': 0.7,
-             'maximumLength': 8192,
-             'topP': 1,
-             'presencePenalty': 0,
-             'frequencyPenalty': 0,
-             'stopSequences': [],
          },
      },
-     'code-davinci-002': {
-         'id': 'openai:code-davinci-002',
-         'default_params': {
-             'temperature': 0.5,
-             'maximumLength': 1024,
-             'topP': 1,
-             'presencePenalty': 0,
-             'frequencyPenalty': 0,
-             'stopSequences': [],
          },
      },
-     'gpt-3.5-turbo': {
-         'id': 'openai:gpt-3.5-turbo',
-         'default_params': {
-             'temperature': 0.7,
-             'maximumLength': 4096,
-             'topP': 1,
-             'topK': 1,
-             'presencePenalty': 1,
-             'frequencyPenalty': 1,
-             'stopSequences': [],
          },
      },
-     'gpt-3.5-turbo-16k': {
-         'id': 'openai:gpt-3.5-turbo-16k',
-         'default_params': {
-             'temperature': 0.7,
-             'maximumLength': 16280,
-             'topP': 1,
-             'topK': 1,
-             'presencePenalty': 1,
-             'frequencyPenalty': 1,
-             'stopSequences': [],
          },
      },
-     'gpt-3.5-turbo-16k-0613': {
-         'id': 'openai:gpt-3.5-turbo-16k-0613',
-         'default_params': {
-             'temperature': 0.7,
-             'maximumLength': 16280,
-             'topP': 1,
-             'topK': 1,
-             'presencePenalty': 1,
-             'frequencyPenalty': 1,
-             'stopSequences': [],
          },
      },
-     'text-ada-001': {
-         'id': 'openai:text-ada-001',
-         'default_params': {
-             'temperature': 0.5,
-             'maximumLength': 1024,
-             'topP': 1,
-             'presencePenalty': 0,
-             'frequencyPenalty': 0,
-             'stopSequences': [],
          },
      },
-     'text-babbage-001': {
-         'id': 'openai:text-babbage-001',
-         'default_params': {
-             'temperature': 0.5,
-             'maximumLength': 1024,
-             'topP': 1,
-             'presencePenalty': 0,
-             'frequencyPenalty': 0,
-             'stopSequences': [],
          },
      },
-     'text-curie-001': {
-         'id': 'openai:text-curie-001',
-         'default_params': {
-             'temperature': 0.5,
-             'maximumLength': 1024,
-             'topP': 1,
-             'presencePenalty': 0,
-             'frequencyPenalty': 0,
-             'stopSequences': [],
          },
      },
-     'text-davinci-002': {
-         'id': 'openai:text-davinci-002',
-         'default_params': {
-             'temperature': 0.5,
-             'maximumLength': 1024,
-             'topP': 1,
-             'presencePenalty': 0,
-             'frequencyPenalty': 0,
-             'stopSequences': [],
          },
      },
-     'text-davinci-003': {
-         'id': 'openai:text-davinci-003',
-         'default_params': {
-             'temperature': 0.5,
-             'maximumLength': 4097,
-             'topP': 1,
-             'presencePenalty': 0,
-             'frequencyPenalty': 0,
-             'stopSequences': [],
          },
      },
- }

  from __future__ import annotations
+ import os
+ import asyncio
 
+ import base64, json, uuid, quickjs, random
+ from curl_cffi.requests import AsyncSession
 
+ from ..typing import Any, TypedDict
+ from .base_provider import AsyncProvider
 
 
+ class Vercel(AsyncProvider):
+     url = "https://sdk.vercel.ai"
      working = True
      supports_gpt_35_turbo = True
+     model = "replicate:replicate/llama-2-70b-chat"
 
+     @classmethod
+     async def create_async(
+         cls,
          model: str,
+         messages: list[dict[str, str]],
          proxy: str = None,
          **kwargs
+     ) -> str:
+         if model in ["gpt-3.5-turbo", "gpt-4"]:
+             model = "openai:" + model
+         model = model if model else cls.model
+         proxies = None
+         if proxy:
+             if "://" not in proxy:
+                 proxy = "http://" + proxy
+             proxies = {"http": proxy, "https": proxy}
+         if os.name == 'nt':
+             asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
 
          headers = {
+             "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.{rand1}.{rand2} Safari/537.36".format(
+                 rand1=random.randint(0, 9999),
+                 rand2=random.randint(0, 9999)
+             ),
+             "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
+             "Accept-Encoding": "gzip, deflate, br",
+             "Accept-Language": "en-US,en;q=0.5",
+             "TE": "trailers",
          }
+         async with AsyncSession(headers=headers, proxies=proxies, impersonate="chrome107") as session:
+             response = await session.get(cls.url + "/openai.jpeg")
+             response.raise_for_status()
+             custom_encoding = _get_custom_encoding(response.text)
+             headers = {
+                 "Content-Type": "application/json",
+                 "Custom-Encoding": custom_encoding,
+             }
+             data = _create_payload(model, messages)
+             response = await session.post(cls.url + "/api/generate", json=data, headers=headers)
+             response.raise_for_status()
+             return response.text
 
 
+ def _create_payload(model: str, messages: list[dict[str, str]]) -> dict[str, Any]:
+     if model not in model_info:
+         raise ValueError(f'Model are not supported: {model}')
+     default_params = model_info[model]["default_params"]
+     return {
+         "messages": messages,
+         "playgroundId": str(uuid.uuid4()),
+         "chatIndex": 0,
+         "model": model
+     } | default_params
 
+ # based on https://github.com/ading2210/vercel-llm-api
+ def _get_custom_encoding(text: str) -> str:
+     data = json.loads(base64.b64decode(text, validate=True))
+     script = """
+     String.prototype.fontcolor = function() {{
+         return `<font>${{this}}</font>`
+     }}
+     var globalThis = {{marker: "mark"}};
+     ({script})({key})
+     """.format(
+         script=data["c"], key=data["a"]
+     )
+     context = quickjs.Context()  # type: ignore
+     token_data = json.loads(context.eval(script).json())  # type: ignore
+     token_data[2] = "mark"
+     token = {"r": token_data, "t": data["t"]}
+     token_str = json.dumps(token, separators=(",", ":")).encode("utf-16le")
+     return base64.b64encode(token_str).decode()
 
 
  class ModelInfo(TypedDict):
      id: str
      default_params: dict[str, Any]
 
+
  model_info: dict[str, ModelInfo] = {
+     "anthropic:claude-instant-v1": {
+         "id": "anthropic:claude-instant-v1",
+         "default_params": {
+             "temperature": 1,
+             "maxTokens": 200,
+             "topP": 1,
+             "topK": 1,
+             "presencePenalty": 1,
+             "frequencyPenalty": 1,
+             "stopSequences": ["\n\nHuman:"],
          },
      },
+     "anthropic:claude-v1": {
+         "id": "anthropic:claude-v1",
+         "default_params": {
+             "temperature": 1,
+             "maxTokens": 200,
+             "topP": 1,
+             "topK": 1,
+             "presencePenalty": 1,
+             "frequencyPenalty": 1,
+             "stopSequences": ["\n\nHuman:"],
          },
      },
+     "anthropic:claude-v2": {
+         "id": "anthropic:claude-v2",
+         "default_params": {
+             "temperature": 1,
+             "maxTokens": 200,
+             "topP": 1,
+             "topK": 1,
+             "presencePenalty": 1,
+             "frequencyPenalty": 1,
+             "stopSequences": ["\n\nHuman:"],
          },
      },
+     "replicate:a16z-infra/llama7b-v2-chat": {
+         "id": "replicate:a16z-infra/llama7b-v2-chat",
+         "default_params": {
+             "temperature": 0.75,
+             "maxTokens": 500,
+             "topP": 1,
+             "repetitionPenalty": 1,
          },
      },
+     "replicate:a16z-infra/llama13b-v2-chat": {
+         "id": "replicate:a16z-infra/llama13b-v2-chat",
+         "default_params": {
+             "temperature": 0.75,
+             "maxTokens": 500,
+             "topP": 1,
+             "repetitionPenalty": 1,
          },
      },
+     "replicate:replicate/llama-2-70b-chat": {
+         "id": "replicate:replicate/llama-2-70b-chat",
+         "default_params": {
+             "temperature": 0.75,
+             "maxTokens": 1000,
+             "topP": 1,
+             "repetitionPenalty": 1,
          },
      },
+     "huggingface:bigscience/bloom": {
+         "id": "huggingface:bigscience/bloom",
+         "default_params": {
+             "temperature": 0.5,
+             "maxTokens": 200,
+             "topP": 0.95,
+             "topK": 4,
+             "repetitionPenalty": 1.03,
          },
      },
+     "huggingface:google/flan-t5-xxl": {
+         "id": "huggingface:google/flan-t5-xxl",
+         "default_params": {
+             "temperature": 0.5,
+             "maxTokens": 200,
+             "topP": 0.95,
+             "topK": 4,
+             "repetitionPenalty": 1.03,
          },
      },
+     "huggingface:EleutherAI/gpt-neox-20b": {
+         "id": "huggingface:EleutherAI/gpt-neox-20b",
+         "default_params": {
+             "temperature": 0.5,
+             "maxTokens": 200,
+             "topP": 0.95,
+             "topK": 4,
+             "repetitionPenalty": 1.03,
+             "stopSequences": [],
          },
      },
+     "huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5": {
+         "id": "huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
+         "default_params": {"maxTokens": 200, "typicalP": 0.2, "repetitionPenalty": 1},
      },
+     "huggingface:OpenAssistant/oasst-sft-1-pythia-12b": {
+         "id": "huggingface:OpenAssistant/oasst-sft-1-pythia-12b",
+         "default_params": {"maxTokens": 200, "typicalP": 0.2, "repetitionPenalty": 1},
      },
+     "huggingface:bigcode/santacoder": {
+         "id": "huggingface:bigcode/santacoder",
+         "default_params": {
+             "temperature": 0.5,
+             "maxTokens": 200,
+             "topP": 0.95,
+             "topK": 4,
+             "repetitionPenalty": 1.03,
          },
      },
+     "cohere:command-light-nightly": {
+         "id": "cohere:command-light-nightly",
+         "default_params": {
+             "temperature": 0.9,
+             "maxTokens": 200,
+             "topP": 1,
+             "topK": 0,
+             "presencePenalty": 0,
+             "frequencyPenalty": 0,
+             "stopSequences": [],
          },
      },
+     "cohere:command-nightly": {
+         "id": "cohere:command-nightly",
+         "default_params": {
+             "temperature": 0.9,
+             "maxTokens": 200,
+             "topP": 1,
+             "topK": 0,
+             "presencePenalty": 0,
+             "frequencyPenalty": 0,
+             "stopSequences": [],
          },
      },
+     "openai:gpt-4": {
+         "id": "openai:gpt-4",
+         "default_params": {
+             "temperature": 0.7,
+             "maxTokens": 500,
+             "topP": 1,
+             "presencePenalty": 0,
+             "frequencyPenalty": 0,
+             "stopSequences": [],
          },
      },
+     "openai:gpt-4-0613": {
+         "id": "openai:gpt-4-0613",
+         "default_params": {
+             "temperature": 0.7,
+             "maxTokens": 500,
+             "topP": 1,
+             "presencePenalty": 0,
+             "frequencyPenalty": 0,
+             "stopSequences": [],
          },
      },
+     "openai:code-davinci-002": {
+         "id": "openai:code-davinci-002",
+         "default_params": {
+             "temperature": 0.5,
+             "maxTokens": 200,
+             "topP": 1,
+             "presencePenalty": 0,
+             "frequencyPenalty": 0,
+             "stopSequences": [],
          },
      },
+     "openai:gpt-3.5-turbo": {
+         "id": "openai:gpt-3.5-turbo",
+         "default_params": {
+             "temperature": 0.7,
+             "maxTokens": 500,
+             "topP": 1,
+             "topK": 1,
+             "presencePenalty": 1,
+             "frequencyPenalty": 1,
+             "stopSequences": [],
          },
      },
+     "openai:gpt-3.5-turbo-16k": {
+         "id": "openai:gpt-3.5-turbo-16k",
+         "default_params": {
+             "temperature": 0.7,
+             "maxTokens": 500,
+             "topP": 1,
+             "topK": 1,
+             "presencePenalty": 1,
+             "frequencyPenalty": 1,
+             "stopSequences": [],
          },
      },
+     "openai:gpt-3.5-turbo-16k-0613": {
+         "id": "openai:gpt-3.5-turbo-16k-0613",
+         "default_params": {
+             "temperature": 0.7,
+             "maxTokens": 500,
+             "topP": 1,
+             "topK": 1,
+             "presencePenalty": 1,
+             "frequencyPenalty": 1,
+             "stopSequences": [],
          },
      },
+     "openai:text-ada-001": {
+         "id": "openai:text-ada-001",
+         "default_params": {
+             "temperature": 0.5,
+             "maxTokens": 200,
+             "topP": 1,
+             "presencePenalty": 0,
+             "frequencyPenalty": 0,
+             "stopSequences": [],
          },
      },
+     "openai:text-babbage-001": {
+         "id": "openai:text-babbage-001",
+         "default_params": {
+             "temperature": 0.5,
+             "maxTokens": 200,
+             "topP": 1,
+             "presencePenalty": 0,
+             "frequencyPenalty": 0,
+             "stopSequences": [],
          },
      },
+     "openai:text-curie-001": {
+         "id": "openai:text-curie-001",
+         "default_params": {
+             "temperature": 0.5,
+             "maxTokens": 200,
+             "topP": 1,
+             "presencePenalty": 0,
+             "frequencyPenalty": 0,
+             "stopSequences": [],
          },
      },
+     "openai:text-davinci-002": {
+         "id": "openai:text-davinci-002",
+         "default_params": {
+             "temperature": 0.5,
+             "maxTokens": 200,
+             "topP": 1,
+             "presencePenalty": 0,
+             "frequencyPenalty": 0,
+             "stopSequences": [],
          },
      },
+     "openai:text-davinci-003": {
+         "id": "openai:text-davinci-003",
+         "default_params": {
+             "temperature": 0.5,
+             "maxTokens": 200,
+             "topP": 1,
+             "presencePenalty": 0,
+             "frequencyPenalty": 0,
+             "stopSequences": [],
          },
      },
+ }
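In short, the rewritten anti-bot flow: GET `/openai.jpeg` returns base64-encoded JSON holding a JS challenge (`c`) and its argument (`a`); `_get_custom_encoding` evaluates the challenge under quickjs (replacing the old execjs dependency), patches the marker slot, and sends the UTF-16LE/base64 result back in the `Custom-Encoding` header of the `/api/generate` POST. A hedged usage sketch of the new async entry point:

    import asyncio
    from g4f.Provider import Vercel

    text = asyncio.run(Vercel.create_async(
        "gpt-3.5-turbo", [{"role": "user", "content": "Hello"}]))
    print(text)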
g4f/Provider/Vitalentum.py CHANGED
@@ -4,7 +4,7 @@ import json
  from aiohttp import ClientSession
 
  from .base_provider import AsyncGeneratorProvider
- from ..typing import AsyncResult, Messages
 
  class Vitalentum(AsyncGeneratorProvider):
      url = "https://app.vitalentum.io"
@@ -16,10 +16,10 @@ class Vitalentum(AsyncGeneratorProvider):
      async def create_async_generator(
          cls,
          model: str,
-         messages: Messages,
          proxy: str = None,
          **kwargs
-     ) -> AsyncResult:
          headers = {
              "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
              "Accept": "text/event-stream",
@@ -42,13 +42,11 @@ class Vitalentum(AsyncGeneratorProvider):
          async with ClientSession(
              headers=headers
          ) as session:
-             async with session.post(f"{cls.url}/api/converse-edge", json=data, proxy=proxy) as response:
                  response.raise_for_status()
                  async for line in response.content:
                      line = line.decode()
-                     if line.startswith("data: "):
-                         if line.startswith("data: [DONE]"):
-                             break
                      line = json.loads(line[6:-1])
                      content = line["choices"][0]["delta"].get("content")
                      if content:
@@ -62,7 +60,6 @@
              ("model", "str"),
              ("messages", "list[dict[str, str]]"),
              ("stream", "bool"),
-             ("proxy", "str"),
              ("temperature", "float"),
          ]
          param = ", ".join([": ".join(p) for p in params])

  from aiohttp import ClientSession
 
  from .base_provider import AsyncGeneratorProvider
+ from ..typing import AsyncGenerator
 
  class Vitalentum(AsyncGeneratorProvider):
      url = "https://app.vitalentum.io"
 
      async def create_async_generator(
          cls,
          model: str,
+         messages: list[dict[str, str]],
          proxy: str = None,
          **kwargs
+     ) -> AsyncGenerator:
          headers = {
              "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
              "Accept": "text/event-stream",
 
          async with ClientSession(
              headers=headers
          ) as session:
+             async with session.post(cls.url + "/api/converse-edge", json=data, proxy=proxy) as response:
                  response.raise_for_status()
                  async for line in response.content:
                      line = line.decode()
+                     if line.startswith("data: ") and not line.startswith("data: [DONE]"):
                          line = json.loads(line[6:-1])
                          content = line["choices"][0]["delta"].get("content")
                          if content:
 
              ("model", "str"),
              ("messages", "list[dict[str, str]]"),
              ("stream", "bool"),
              ("temperature", "float"),
          ]
          param = ", ".join([": ".join(p) for p in params])
g4f/Provider/Wewordle.py ADDED
@@ -0,0 +1,65 @@
+ from __future__ import annotations
+
+ import random, string, time
+ from aiohttp import ClientSession
+
+ from .base_provider import AsyncProvider
+
+
+ class Wewordle(AsyncProvider):
+     url = "https://wewordle.org"
+     working = True
+     supports_gpt_35_turbo = True
+
+     @classmethod
+     async def create_async(
+         cls,
+         model: str,
+         messages: list[dict[str, str]],
+         proxy: str = None,
+         **kwargs
+     ) -> str:
+
+         headers = {
+             "accept": "*/*",
+             "pragma": "no-cache",
+             "Content-Type": "application/json",
+             "Connection": "keep-alive"
+         }
+
+         _user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))
+         _app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))
+         _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
+         data = {
+             "user": _user_id,
+             "messages": messages,
+             "subscriber": {
+                 "originalPurchaseDate": None,
+                 "originalApplicationVersion": None,
+                 "allPurchaseDatesMillis": {},
+                 "entitlements": {"active": {}, "all": {}},
+                 "allPurchaseDates": {},
+                 "allExpirationDatesMillis": {},
+                 "allExpirationDates": {},
+                 "originalAppUserId": f"$RCAnonymousID:{_app_id}",
+                 "latestExpirationDate": None,
+                 "requestDate": _request_date,
+                 "latestExpirationDateMillis": None,
+                 "nonSubscriptionTransactions": [],
+                 "originalPurchaseDateMillis": None,
+                 "managementURL": None,
+                 "allPurchasedProductIdentifiers": [],
+                 "firstSeen": _request_date,
+                 "activeSubscriptions": [],
+             }
+         }
+
+
+         async with ClientSession(
+             headers=headers
+         ) as session:
+             async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=data) as response:
+                 response.raise_for_status()
+                 content = (await response.json())["message"]["content"]
+                 if content:
+                     return content
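Wewordle fabricates a RevenueCat-style anonymous subscriber blob (random `user` and `$RCAnonymousID` identifiers plus an ISO-8601 `requestDate`) to satisfy the Android endpoint; a hedged usage sketch:

    import asyncio
    from g4f.Provider import Wewordle

    print(asyncio.run(Wewordle.create_async(
        "gpt-3.5-turbo", [{"role": "user", "content": "Hello"}])))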
g4f/Provider/Wuguokai.py ADDED
@@ -0,0 +1,68 @@
+ from __future__ import annotations
+
+ import random
+
+ import requests
+
+ from ..typing import Any, CreateResult
+ from .base_provider import BaseProvider
+
+
+ class Wuguokai(BaseProvider):
+     url = 'https://chat.wuguokai.xyz'
+     supports_gpt_35_turbo = True
+     working = True
+
+     @staticmethod
+     def create_completion(
+         model: str,
+         messages: list[dict[str, str]],
+         stream: bool,
+         **kwargs: Any,
+     ) -> CreateResult:
+         base = ''
+         for message in messages:
+             base += '%s: %s\n' % (message['role'], message['content'])
+         base += 'assistant:'
+
+         headers = {
+             'authority': 'ai-api.wuguokai.xyz',
+             'accept': 'application/json, text/plain, */*',
+             'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
+             'content-type': 'application/json',
+             'origin': 'https://chat.wuguokai.xyz',
+             'referer': 'https://chat.wuguokai.xyz/',
+             'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+             'sec-ch-ua-mobile': '?0',
+             'sec-ch-ua-platform': '"Windows"',
+             'sec-fetch-dest': 'empty',
+             'sec-fetch-mode': 'cors',
+             'sec-fetch-site': 'same-site',
+             'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
+         }
+         data = {
+             "prompt": base,
+             "options": {},
+             "userId": f"#/chat/{random.randint(1, 99999999)}",
+             "usingContext": True
+         }
+         response = requests.post("https://ai-api20.wuguokai.xyz/api/chat-process", headers=headers, timeout=3, json=data, proxies=kwargs['proxy'] if 'proxy' in kwargs else {})
+         _split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")
+         if response.status_code == 200:
+             if len(_split) > 1:
+                 yield _split[1].strip()
+             else:
+                 yield _split[0].strip()
+         else:
+             raise Exception(f"Error: {response.status_code} {response.reason}")
+
+     @classmethod
+     @property
+     def params(cls):
+         params = [
+             ("model", "str"),
+             ("messages", "list[dict[str, str]]"),
+             ("stream", "bool")
+         ]
+         param = ", ".join([": ".join(p) for p in params])
+         return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/Ylokh.py CHANGED
@@ -1,10 +1,10 @@
  from __future__ import annotations
 
  import json
 
- from ..requests import StreamSession
  from .base_provider import AsyncGeneratorProvider
- from ..typing import AsyncResult, Messages
 
  class Ylokh(AsyncGeneratorProvider):
      url = "https://chat.ylokh.xyz"
@@ -16,16 +16,21 @@ class Ylokh(AsyncGeneratorProvider):
      async def create_async_generator(
          cls,
          model: str,
-         messages: Messages,
          stream: bool = True,
          proxy: str = None,
-         timeout: int = 120,
          **kwargs
-     ) -> AsyncResult:
          model = model if model else "gpt-3.5-turbo"
          headers = {
-             "Origin": cls.url,
-             "Referer": cls.url + "/",
          }
          data = {
              "messages": messages,
@@ -38,20 +43,16 @@ class Ylokh(AsyncGeneratorProvider):
              "stream": stream,
              **kwargs
          }
-         async with StreamSession(
-             headers=headers,
-             proxies={"https": proxy},
-             timeout=timeout
          ) as session:
-             async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
                  response.raise_for_status()
                  if stream:
-                     async for line in response.iter_lines():
                          line = line.decode()
-                         if line.startswith("data: "):
-                             if line.startswith("data: [DONE]"):
-                                 break
-                             line = json.loads(line[6:])
                          content = line["choices"][0]["delta"].get("content")
                          if content:
                              yield content
@@ -69,9 +70,7 @@ class Ylokh(AsyncGeneratorProvider):
              ("messages", "list[dict[str, str]]"),
              ("stream", "bool"),
              ("proxy", "str"),
-             ("timeout", "int"),
              ("temperature", "float"),
-             ("top_p", "float"),
          ]
          param = ", ".join([": ".join(p) for p in params])
          return f"g4f.provider.{cls.__name__} supports: ({param})"

  from __future__ import annotations
 
  import json
+ from aiohttp import ClientSession
 
  from .base_provider import AsyncGeneratorProvider
+ from ..typing import AsyncGenerator
 
  class Ylokh(AsyncGeneratorProvider):
      url = "https://chat.ylokh.xyz"
 
      async def create_async_generator(
          cls,
          model: str,
+         messages: list[dict[str, str]],
          stream: bool = True,
          proxy: str = None,
          **kwargs
+     ) -> AsyncGenerator:
          model = model if model else "gpt-3.5-turbo"
          headers = {
+             "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
+             "Accept": "*/*",
+             "Accept-language": "de,en-US;q=0.7,en;q=0.3",
+             "Origin": cls.url,
+             "Referer": cls.url + "/",
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin",
          }
          data = {
              "messages": messages,
 
              "stream": stream,
              **kwargs
          }
+         async with ClientSession(
+             headers=headers
          ) as session:
+             async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data, proxy=proxy) as response:
                  response.raise_for_status()
                  if stream:
+                     async for line in response.content:
                          line = line.decode()
+                         if line.startswith("data: ") and not line.startswith("data: [DONE]"):
+                             line = json.loads(line[6:-1])
                          content = line["choices"][0]["delta"].get("content")
                          if content:
                              yield content
 
              ("messages", "list[dict[str, str]]"),
              ("stream", "bool"),
              ("proxy", "str"),
              ("temperature", "float"),
          ]
          param = ", ".join([": ".join(p) for p in params])
          return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/You.py CHANGED
@@ -2,8 +2,9 @@ from __future__ import annotations
 
  import json
 
- from ..requests import StreamSession
- from ..typing import AsyncGenerator, Messages
  from .base_provider import AsyncGeneratorProvider, format_prompt
 
 
@@ -11,30 +12,29 @@ class You(AsyncGeneratorProvider):
      url = "https://you.com"
      working = True
      supports_gpt_35_turbo = True
 
 
      @classmethod
      async def create_async_generator(
          cls,
          model: str,
-         messages: Messages,
          proxy: str = None,
-         timeout: int = 120,
          **kwargs,
      ) -> AsyncGenerator:
-         async with StreamSession(proxies={"https": proxy}, impersonate="chrome107", timeout=timeout) as session:
              headers = {
                  "Accept": "text/event-stream",
-                 "Referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
              }
-             data = {"q": format_prompt(messages), "domain": "youchat", "chat": ""}
-             async with session.get(
-                 f"{cls.url}/api/streamingSearch",
-                 params=data,
                  headers=headers
-             ) as response:
-                 response.raise_for_status()
-                 start = b'data: {"youChatToken": '
-                 async for line in response.iter_lines():
-                     if line.startswith(start):
-                         yield json.loads(line[len(start):-1])

 
  import json
 
+ from curl_cffi.requests import AsyncSession
+
+ from ..typing import AsyncGenerator
  from .base_provider import AsyncGeneratorProvider, format_prompt
 
 
      url = "https://you.com"
      working = True
      supports_gpt_35_turbo = True
+     supports_stream = False
 
 
      @classmethod
      async def create_async_generator(
          cls,
          model: str,
+         messages: list[dict[str, str]],
          proxy: str = None,
          **kwargs,
      ) -> AsyncGenerator:
+         async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107") as session:
              headers = {
                  "Accept": "text/event-stream",
+                 "Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
              }
+             response = await session.get(
+                 "https://you.com/api/streamingSearch",
+                 params={"q": format_prompt(messages), "domain": "youchat", "chat": ""},
                  headers=headers
+             )
+             response.raise_for_status()
+             start = 'data: {"youChatToken": '
+             for line in response.text.splitlines():
+                 if line.startswith(start):
+                     yield json.loads(line[len(start): -1])
g4f/Provider/Yqcloud.py CHANGED
@@ -1,9 +1,8 @@
  from __future__ import annotations
 
- import random
  from aiohttp import ClientSession
 
- from ..typing import AsyncResult, Messages
  from .base_provider import AsyncGeneratorProvider, format_prompt
 
 
@@ -15,22 +14,19 @@ class Yqcloud(AsyncGeneratorProvider):
      @staticmethod
      async def create_async_generator(
          model: str,
-         messages: Messages,
          proxy: str = None,
          **kwargs,
-     ) -> AsyncResult:
          async with ClientSession(
              headers=_create_header()
          ) as session:
-             payload = _create_payload(messages, **kwargs)
              async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
                  response.raise_for_status()
-                 async for chunk in response.content.iter_any():
-                     if chunk:
-                         chunk = chunk.decode()
-                         if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
-                             raise RuntimeError("IP address is blocked by abuse detection.")
-                         yield chunk
 
 
  def _create_header():
@@ -41,19 +37,12 @@ def _create_header():
      }
 
 
- def _create_payload(
-     messages: Messages,
-     system_message: str = "",
-     user_id: int = None,
-     **kwargs
- ):
-     if not user_id:
-         user_id = random.randint(1690000544336, 2093025544336)
      return {
          "prompt": format_prompt(messages),
          "network": True,
-         "system": system_message,
          "withoutContext": False,
          "stream": True,
-         "userId": f"#/chat/{user_id}"
      }

  from __future__ import annotations
 
  from aiohttp import ClientSession
 
+ from ..typing import AsyncGenerator
  from .base_provider import AsyncGeneratorProvider, format_prompt
 
 
      @staticmethod
      async def create_async_generator(
          model: str,
+         messages: list[dict[str, str]],
          proxy: str = None,
          **kwargs,
+     ) -> AsyncGenerator:
          async with ClientSession(
              headers=_create_header()
          ) as session:
+             payload = _create_payload(messages)
              async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
                  response.raise_for_status()
+                 async for stream in response.content.iter_any():
+                     if stream:
+                         yield stream.decode()
 
 
  def _create_header():
      }
 
 
+ def _create_payload(messages: list[dict[str, str]]):
      return {
          "prompt": format_prompt(messages),
          "network": True,
+         "system": "",
          "withoutContext": False,
          "stream": True,
+         "userId": "#/chat/1693025544336"
      }
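Note the payload simplification in Yqcloud: the randomized `user_id` and configurable `system_message` of the old `_create_payload` are replaced by a fixed `userId` string and an empty `system` field, and the IP-ban detection branch is dropped, so callers now receive the raw stream as-is. A hedged usage sketch:

    import asyncio
    from g4f.Provider import Yqcloud

    async def main():
        async for chunk in Yqcloud.create_async_generator(
                "gpt-3.5-turbo", [{"role": "user", "content": "Hello"}]):
            print(chunk, end="")

    asyncio.run(main())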