diff --git a/.github/README.md b/.github/README.md
index 1f559ee78394ac95416fbc5d92cb787412821065..1e310a8c0f93b23d2fdb281fbb4fa213876183eb 100644
--- a/.github/README.md
+++ b/.github/README.md
@@ -1,45 +1,48 @@
-  NeuroGPT
+  NeuroGPT
-  Русский | English
+  Русский | English
-> ### The project is currently undergoing a transition to another client and reconstruction of the API. Technical work is continuing. We apologize for any inconvenience.
-
-

Free API service providing access to GPT-3.5, GPT-4, and other language models. Before using it, please make sure you check out our wiki. The project utilizes a modified version of gpt4free, as well as ChuanhuChatGPT as a web interface. We extend our gratitude to the authors.

+

NeuroGPT gives ordinary users free access to gpt-3.5, gpt-4, and other language models without a VPN or account registration. At the moment, only gpt-3.5-turbo 4097 runs in stable mode. Continuous access to the other models is not guaranteed.
+
+The project is based on modified versions of gpt4free and ChuanhuChatGPT

+We extend our gratitude to the authors.
-

-  license
+  license
-  latest
-

+  latest
+
+
+
Before using it, be sure to read the project's wiki
+
Installation instructions:
-
+  windows
-
+  linux
-
+  macos
-
-
-  portable
+

+
+  portable
-
News and feedback:
+
News and feedback:
-  telegram
+  telegram
-  telegram_chat
+  telegram_chat
-
Support the project:
+
Support the project:
  neurogen_boosty

@@ -47,16 +50,20 @@
-## Features
+## Disclaimer:
+
+Since this project works not through an official API but through access obtained by reverse engineering, API providers may go down and individual models may be cut off. Please keep this in mind. If you need high stability for your work, you should steer clear of this project. Also remember that support is provided purely out of enthusiasm.
+
+## Features:
-- Web search
-- [Model list](https://status.neuroapi.host/v1/status)
-- Dialog context
-- No-logs
-- API endpoint
-- Dialog history
-- Setting generation parameters for GPT models
-- Built-in prompt templates and jailbreaks for various tasks
+- Web search
+- [Model list](https://status.neuroapi.host/v1/status)
+- Built-in prompt templates for various tasks
+- Built-in jailbreaks for removing censorship
+- Conversation context
+- Endpoint mode for working with the API
+- Changing generation parameters for gpt models
+- Saving and loading dialogue history
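The endpoint mode listed above is the OpenAI-compatible HTTP API served by `backend/backend.py` and `endpoint.py` further down in this diff. A minimal client sketch; the host, the default port 1337 from `endpoint.py`, and the exact response shape are assumptions modeled on the OpenAI chat-completions convention the backend mimics:

```python
# Sketch of a call to NeuroGPT's endpoint mode. The URL, port 1337 (the
# default in endpoint.py below), and the OpenAI-style response shape are
# assumptions, not guaranteed by this diff.
import requests

resp = requests.post(
    "http://127.0.0.1:1337/v1/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello!"}],
        "stream": False,
    },
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```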
diff --git a/.github/README_EN.md b/.github/README_EN.md
new file mode 100644
index 0000000000000000000000000000000000000000..c1d54234879c4711bcde1eb33a962463c6a95892
--- /dev/null
+++ b/.github/README_EN.md
@@ -0,0 +1,75 @@
+
+
+  NeuroGPT
+
+
+  Русский | English
+
+

NeuroGPT allows free use of gpt-3.5, gpt-4, and other language models without a VPN or account registration. It operates through API reverse engineering. At the moment, only gpt-3.5-turbo 4097 is functioning in stable mode. Continuous access to other models is not guaranteed.
+
+The project is based on modified versions of gpt4free and ChuanhuChatGPT

+We extend our gratitude to the authors.
+
+  license
+
+
+  latest
+
+
Before using NeuroGPT, be sure to check out our project's wiki
+
Installation instructions:
+
+
+  windows
+
+
+  linux
+
+
+  macos

+
+  portable
+
+
News and feedback:
+
+
+  telegram
+
+
+  telegram_chat
+
+
Support the project:
+
+
+  neurogen_boosty
+
+
+
+## Disclaimer:
+
+Given that this project doesn't use an official API but relies on a reverse-engineered method, there's a risk of API providers failing and certain models disconnecting. Consider this if you need high stability for your work. Remember, support is provided solely out of enthusiasm.
+
+## Features:
+
+- Web search
+- [List of models](https://status.neuroapi.host/v1/status)
+- Built-in prompt templates for various tasks
+- Built-in jailbreaks for removing censorship
+- Conversation context
+- API endpoint
+- Setting generation parameters for GPT models
+- Saving and loading dialogue history
+
+
+
+***
+
+[![Star History Chart](https://api.star-history.com/svg?repos=Em1tSan/NeuroGPT&type=Date)](https://star-history.com/#Em1tSan/NeuroGPT&Date)
+
+
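The backend diff that follows drops the legacy `/engines/.../embeddings` routes and keeps only `/v1/embeddings` and `/embeddings`. A request sketch against the surviving route; the port and the OpenAI-style `data[0].embedding` response shape are assumptions based on the convention the backend mirrors:

```python
# Hypothetical embeddings call; assumes the backend listens on port 1337
# and returns an OpenAI-style embeddings payload.
import requests

resp = requests.post(
    "http://127.0.0.1:1337/v1/embeddings",
    json={"input": ["Hello, world"]},
    timeout=60,
)
resp.raise_for_status()
embedding = resp.json()["data"][0]["embedding"]
print(f"embedding dimensions: {len(embedding)}")
```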
diff --git a/backend/backend.py b/backend/backend.py
index b7f39fe680ed7ba822b798bf572b3f596f543c39..f5bf850416e2583a7a14de7299927e72ca563841 100644
--- a/backend/backend.py
+++ b/backend/backend.py
@@ -4,13 +4,12 @@
 import json
 import random
 import string
 import socket
-import nest_asyncio
 import requests
 
-from typing import Any
-from flask import Flask, request, jsonify, Response
+from flask import Flask, request, jsonify, Response, stream_with_context
 from flask_cors import CORS
+from threading import Thread
 import logging
 
 from .embedding_processing import embedding_processing
@@ -18,11 +17,8 @@ import g4f
 from g4f import ChatCompletion, Provider, BaseProvider, models
 from g4f.models import ModelUtils
 
-nest_asyncio.apply()
-
 app = Flask(__name__)
 CORS(app)
-
 LOG = logging.getLogger(__name__)
 
 embedding_proc = embedding_processing()
@@ -71,48 +67,35 @@ def chat_completions():
         },
     }
 
-    def streaming():
-        for chunk in response:
+    def stream():
+        nonlocal response
+        for token in response:
+            completion_timestamp = int(time.time())
+            completion_id = ''.join(random.choices(
+                'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
+
             completion_data = {
                 'id': f'chatcmpl-{completion_id}',
                 'object': 'chat.completion.chunk',
                 'created': completion_timestamp,
-                'model': model,
                 'choices': [
                     {
-                        'index': 0,
                         'delta': {
-                            'content': chunk,
+                            'content': token
                         },
-                        'finish_reason': None,
+                        'index': 0,
+                        'finish_reason': None
                     }
-                ],
+                ]
             }
+            #print(token)
+            #print(completion_data)
+            #print('data: %s\n\n' % json.dumps(completion_data, separators=(',', ':')))
+            yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',', ':'))
+            time.sleep(0.02)
+    print('===Start Streaming===')
+    return app.response_class(stream(), mimetype='text/event-stream')
 
-            content = json.dumps(completion_data, separators=(',', ':'))
-            yield f'data: {content}\n\n'
-            time.sleep(0.05)
-
-        end_completion_data: dict[str, Any] = {
-            'id': f'chatcmpl-{completion_id}',
-            'object': 'chat.completion.chunk',
-            'created': completion_timestamp,
-            'model': model,
-            'choices': [
-                {
-                    'index': 0,
-                    'delta': {},
-                    'finish_reason': 'stop',
-                }
-            ],
-        }
-        content = json.dumps(end_completion_data, separators=(',', ':'))
-        yield f'data: {content}\n\n'
-
-    return app.response_class(streaming(), mimetype='text/event-stream')
-
-@app.route("/engines/text-embedding-ada-002/embeddings", methods=["POST"])
-@app.route("/engines/text-similarity-davinci-001/embeddings", methods=["POST"])
 @app.route('/v1/embeddings', methods=['POST'])
 @app.route('/embeddings', methods=['POST'])
 def create_embedding():
diff --git a/backend/embedding_processing.py b/backend/embedding_processing.py
index b5bb997e22ff070eb1721b0937b9cd15804c070e..9e270a911dbfaaa367c40e0e5d83ace53a1a284b 100644
--- a/backend/embedding_processing.py
+++ b/backend/embedding_processing.py
@@ -49,7 +49,7 @@ class embedding_processing:
         response_embedding = self.transform_embedding_to_dict(embeddings_list,text_list)
         return response_embedding
 
-    def transform_embedding_to_dict(self, embedding_list, text_list, model_name="text-embedding-ada-002"):
+    def transform_embedding_to_dict(self, embedding_list, text_list, model_name="text-embedding-elmo-002"):
         prompt_tokens = sum(len(text) for text in text_list)
         total_tokens = sum(len(embedding) for embedding in embedding_list)
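With `stream` enabled, the rewritten `stream()` generator above emits one server-sent event per token, each carrying an OpenAI-style `chat.completion.chunk`; unlike the removed `streaming()` helper it sends no final `finish_reason: 'stop'` chunk, so a client simply reads until the connection closes. A consumer sketch; the URL and model name are assumptions:

```python
# Sketch of consuming the SSE stream produced by stream() in
# backend/backend.py; the chunk shape follows its completion_data dict.
import json
import requests

with requests.post(
    "http://127.0.0.1:1337/v1/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Stream a haiku."}],
        "stream": True,
    },
    stream=True,
    timeout=120,
) as resp:
    resp.raise_for_status()
    for raw in resp.iter_lines():
        if not raw.startswith(b"data: "):
            continue  # skip blank separator lines between events
        chunk = json.loads(raw[len(b"data: "):])
        print(chunk["choices"][0]["delta"].get("content", ""), end="", flush=True)
```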
diff --git a/docker-compose/docker-compose.traefik.yml b/docker-compose/docker-compose.traefik.yml
index bcb98cbc9b44b716cecdb05bc8e0930c8ecb91d3..0b3186edd367df1230f4dd7c566905f317255089 100644
--- a/docker-compose/docker-compose.traefik.yml
+++ b/docker-compose/docker-compose.traefik.yml
@@ -39,8 +39,6 @@ services:
       dockerfile: Dockerfile
     container_name: neurogengpt
     hostname: neurogengpt
-    environment:
-      - SUBF=/sub
     env_file:
       - .env
     networks:
diff --git a/docker-compose/docker-compose.yml b/docker-compose/docker-compose.yml
index 8ca796a7390a956048fa3deff390ce607d6a4d66..a1f5c4484dac43aacb453bc6b9844e03c1bd951a 100644
--- a/docker-compose/docker-compose.yml
+++ b/docker-compose/docker-compose.yml
@@ -6,11 +6,9 @@ services:
       context: ../
       dockerfile: Dockerfile
     ports:
-      - "7860:7860"
-      - "1337:1337"
+      - "7861:7860"
+      - "1338:1337"
     volumes:
       - ../config.json:/app/config.json
-    environment:
-      - SUBF=/sub
     env_file:
       - .env
diff --git a/endpoint.py b/endpoint.py
index 4d492f917061d988230a7e8227116fe51e764636..c7ac727fc520b552d42c78cd846d605343616d71 100644
--- a/endpoint.py
+++ b/endpoint.py
@@ -1,21 +1,22 @@
+import fastwsgi
 import socket
-import nest_asyncio
 
 from backend.backend import app
-from waitress import serve
+from multiprocessing import Process
 
-nest_asyncio.apply()
+def run_api_server():
+    fastwsgi.run(wsgi_app=app, host='0.0.0.0', port=1337)
 
-site_config = {
-    'host': '0.0.0.0',
-    'port': 1337,
-    'debug': False
-    }
 
 if __name__ == "__main__":
     hostname = socket.gethostname()
     ip_address = socket.gethostbyname(hostname)
-
+    site_config = {
+        'host': '0.0.0.0',
+        'port': 1337,
+        'debug': False
+    }
     print(f"Running on http://127.0.0.1:{site_config['port']}")
     print(f"Running on http://{ip_address}:{site_config['port']}")
-    serve(app, host='0.0.0.0', port=site_config['port'],threads=4)
\ No newline at end of file
+    api_process = Process(target=run_api_server)
+    api_process.start()
\ No newline at end of file
diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/AItianhu.py
index 56d9a9abdf39a0fb7f4bdd4561ad117fc14f8ed0..2e12989619ae3dd03b1b0b8e2c23784fb4aad5d2 100644
--- a/g4f/Provider/AItianhu.py
+++ b/g4f/Provider/AItianhu.py
@@ -1,10 +1,10 @@
 from __future__ import annotations
 
 import json
+from aiohttp import ClientSession, http
 
-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt
 
 
 class AItianhu(AsyncGeneratorProvider):
@@ -16,50 +16,46 @@ class AItianhu(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: Messages,
+        messages: list[dict[str, str]],
         proxy: str = None,
-        cookies: dict = None,
-        timeout: int = 120,
         **kwargs
-    ) -> AsyncResult:
-        if not cookies:
-            cookies = get_cookies("www.aitianhu.com")
-        data = {
-            "prompt": format_prompt(messages),
-            "options": {},
-            "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
-            "temperature": 0.8,
-            "top_p": 1,
-            **kwargs
-        }
+    ) -> AsyncGenerator:
         headers = {
-            "Authority": cls.url,
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
             "Accept": "application/json, text/plain, */*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Content-Type": "application/json",
             "Origin": cls.url,
-            "Referer": f"{cls.url}/"
+            "Connection": "keep-alive",
+            "Referer": cls.url + "/",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
         }
-        async with StreamSession(
+        async with ClientSession(
             headers=headers,
-            cookies=cookies,
-            timeout=timeout,
-            proxies={"https": proxy},
-            impersonate="chrome107",
-            verify=False
+            version=http.HttpVersion10
         ) as session:
-            async with session.post(f"{cls.url}/api/chat-process", json=data) as response:
+            data = {
+                "prompt": format_prompt(messages),
+                "options": {},
+                "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
+                "temperature": 0.8,
+                "top_p": 1,
+                **kwargs
+            }
+            async with session.post(
+                cls.url + "/api/chat-process",
+                proxy=proxy,
+                json=data,
+                ssl=False,
+            ) as response:
                 response.raise_for_status()
-                async for line in response.iter_lines():
-                    if line == b"