diff --git a/g4f/g4f/Provider/AItianhu.py b/g4f/g4f/Provider/AItianhu.py new file mode 100644 index 0000000000000000000000000000000000000000..0f01e536fdf105215de6e6392d4a62aa8c655093 --- /dev/null +++ b/g4f/g4f/Provider/AItianhu.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +import json +from curl_cffi.requests import AsyncSession + +from .base_provider import AsyncProvider, format_prompt + + +class AItianhu(AsyncProvider): + url = "https://www.aitianhu.com" + working = True + supports_gpt_35_turbo = True + + @classmethod + async def create_async( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs + ) -> str: + data = { + "prompt": format_prompt(messages), + "options": {}, + "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.", + "temperature": 0.8, + "top_p": 1, + **kwargs + } + async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107", verify=False) as session: + response = await session.post(cls.url + "/api/chat-process", json=data) + response.raise_for_status() + line = response.text.splitlines()[-1] + line = json.loads(line) + return line["text"] + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("proxy", "str"), + ("temperature", "float"), + ("top_p", "int"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/g4f/Provider/AItianhuSpace.py b/g4f/g4f/Provider/AItianhuSpace.py new file mode 100644 index 0000000000000000000000000000000000000000..8beb33552d94d8c9007a5a29d00ec77c745eb09d --- /dev/null +++ b/g4f/g4f/Provider/AItianhuSpace.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +import random, json + +from g4f.requests import AsyncSession, StreamRequest +from .base_provider import AsyncGeneratorProvider, format_prompt + +domains = { + 
"gpt-3.5-turbo": ".aitianhu.space", + "gpt-4": ".aitianhu.website", +} + +class AItianhuSpace(AsyncGeneratorProvider): + url = "https://chat3.aiyunos.top/" + working = True + supports_gpt_35_turbo = True + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + stream: bool = True, + **kwargs + ) -> str: + if not model: + model = "gpt-3.5-turbo" + elif not model in domains: + raise ValueError(f"Model are not supported: {model}") + + chars = 'abcdefghijklmnopqrstuvwxyz0123456789' + rand = ''.join(random.choice(chars) for _ in range(6)) + domain = domains[model] + url = f'https://{rand}{domain}/api/chat-process' + + headers = { + "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36", + } + async with AsyncSession(headers=headers, impersonate="chrome107", verify=False) as session: + data = { + "prompt": format_prompt(messages), + "options": {}, + "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. 
Follow the user's instructions carefully.", + "temperature": 0.8, + "top_p": 1, + **kwargs + } + async with StreamRequest(session, "POST", url, json=data) as response: + response.raise_for_status() + async for line in response.content: + line = json.loads(line.rstrip()) + if "detail" in line: + content = line["detail"]["choices"][0]["delta"].get("content") + if content: + yield content + elif "message" in line and "AI-4接口非常昂贵" in line["message"]: + raise RuntimeError("Rate limit for GPT 4 reached") + else: + raise RuntimeError("Response: {line}") + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ("top_p", "int"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/g4f/Provider/Acytoo.py b/g4f/g4f/Provider/Acytoo.py new file mode 100644 index 0000000000000000000000000000000000000000..d36ca6da22ddfa43690abdd0db27e6f971320f93 --- /dev/null +++ b/g4f/g4f/Provider/Acytoo.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +from aiohttp import ClientSession + +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider + + +class Acytoo(AsyncGeneratorProvider): + url = 'https://chat.acytoo.com' + working = True + supports_gpt_35_turbo = True + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs + ) -> AsyncGenerator: + + async with ClientSession( + headers=_create_header() + ) as session: + async with session.post( + cls.url + '/api/completions', + proxy=proxy, + json=_create_payload(messages, **kwargs) + ) as response: + response.raise_for_status() + async for stream in response.content.iter_any(): + if stream: + yield stream.decode() + + +def _create_header(): + return { + 'accept': '*/*', + 'content-type': 'application/json', + } + + +def 
_create_payload(messages: list[dict[str, str]], temperature: float = 0.5, **kwargs): + return { + 'key' : '', + 'model' : 'gpt-3.5-turbo', + 'messages' : messages, + 'temperature' : temperature, + 'password' : '' + } \ No newline at end of file diff --git a/g4f/g4f/Provider/AiService.py b/g4f/g4f/Provider/AiService.py new file mode 100644 index 0000000000000000000000000000000000000000..2b5a6e7de3912f7588377a881b7d5523e35d7212 --- /dev/null +++ b/g4f/g4f/Provider/AiService.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +import requests + +from ..typing import Any, CreateResult +from .base_provider import BaseProvider + + +class AiService(BaseProvider): + url = "https://aiservice.vercel.app/" + working = False + supports_gpt_35_turbo = True + + @staticmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, + **kwargs: Any, + ) -> CreateResult: + base = "\n".join(f"{message['role']}: {message['content']}" for message in messages) + base += "\nassistant: " + + headers = { + "accept": "*/*", + "content-type": "text/plain;charset=UTF-8", + "sec-fetch-dest": "empty", + "sec-fetch-mode": "cors", + "sec-fetch-site": "same-origin", + "Referer": "https://aiservice.vercel.app/chat", + } + data = {"input": base} + url = "https://aiservice.vercel.app/api/chat/answer" + response = requests.post(url, headers=headers, json=data) + response.raise_for_status() + yield response.json()["data"] diff --git a/g4f/g4f/Provider/Aibn.py b/g4f/g4f/Provider/Aibn.py new file mode 100644 index 0000000000000000000000000000000000000000..1ef928bed74c1a5808b1e539ef4ac9e5ddb49422 --- /dev/null +++ b/g4f/g4f/Provider/Aibn.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +import time +import hashlib + +from ..typing import AsyncGenerator +from g4f.requests import AsyncSession +from .base_provider import AsyncGeneratorProvider + + +class Aibn(AsyncGeneratorProvider): + url = "https://aibn.cc" + supports_gpt_35_turbo = True + working = True + + 
@classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + **kwargs + ) -> AsyncGenerator: + async with AsyncSession(impersonate="chrome107") as session: + timestamp = int(time.time()) + data = { + "messages": messages, + "pass": None, + "sign": generate_signature(timestamp, messages[-1]["content"]), + "time": timestamp + } + async with session.post(f"{cls.url}/api/generate", json=data) as response: + response.raise_for_status() + async for chunk in response.content.iter_any(): + yield chunk.decode() + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" + + +def generate_signature(timestamp: int, message: str, secret: str = "undefined"): + data = f"{timestamp}:{message}:{secret}" + return hashlib.sha256(data.encode()).hexdigest() \ No newline at end of file diff --git a/g4f/g4f/Provider/Aichat.py b/g4f/g4f/Provider/Aichat.py new file mode 100644 index 0000000000000000000000000000000000000000..8edd17e2c6938e2fdd4886e2354580f7e4108960 --- /dev/null +++ b/g4f/g4f/Provider/Aichat.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +from aiohttp import ClientSession + +from .base_provider import AsyncProvider, format_prompt + + +class Aichat(AsyncProvider): + url = "https://chat-gpt.org/chat" + working = True + supports_gpt_35_turbo = True + + @staticmethod + async def create_async( + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs + ) -> str: + headers = { + "authority": "chat-gpt.org", + "accept": "*/*", + "cache-control": "no-cache", + "content-type": "application/json", + "origin": "https://chat-gpt.org", + "pragma": "no-cache", + "referer": "https://chat-gpt.org/chat", + "sec-ch-ua-mobile": "?0", + "sec-ch-ua-platform": '"macOS"', + "sec-fetch-dest": 
"empty", + "sec-fetch-mode": "cors", + "sec-fetch-site": "same-origin", + "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36", + } + async with ClientSession( + headers=headers + ) as session: + json_data = { + "message": format_prompt(messages), + "temperature": kwargs.get('temperature', 0.5), + "presence_penalty": 0, + "top_p": kwargs.get('top_p', 1), + "frequency_penalty": 0, + } + async with session.post( + "https://chat-gpt.org/api/text", + proxy=proxy, + json=json_data + ) as response: + response.raise_for_status() + result = await response.json() + if not result['response']: + raise Exception(f"Error Response: {result}") + return result["message"] diff --git a/g4f/g4f/Provider/Ails.py b/g4f/g4f/Provider/Ails.py new file mode 100644 index 0000000000000000000000000000000000000000..d533ae247cba63b236668375786124852f5bbad5 --- /dev/null +++ b/g4f/g4f/Provider/Ails.py @@ -0,0 +1,106 @@ +from __future__ import annotations + +import hashlib +import time +import uuid +import json +from datetime import datetime +from aiohttp import ClientSession + +from ..typing import SHA256, AsyncGenerator +from .base_provider import AsyncGeneratorProvider + + +class Ails(AsyncGeneratorProvider): + url: str = "https://ai.ls" + working = True + supports_gpt_35_turbo = True + + @staticmethod + async def create_async_generator( + model: str, + messages: list[dict[str, str]], + stream: bool, + proxy: str = None, + **kwargs + ) -> AsyncGenerator: + headers = { + "authority": "api.caipacity.com", + "accept": "*/*", + "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", + "authorization": "Bearer free", + "client-id": str(uuid.uuid4()), + "client-v": "0.1.278", + "content-type": "application/json", + "origin": "https://ai.ls", + "referer": "https://ai.ls/", + "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', + "sec-ch-ua-mobile": 
"?0", + "sec-ch-ua-platform": '"Windows"', + "sec-fetch-dest": "empty", + "sec-fetch-mode": "cors", + "sec-fetch-site": "cross-site", + "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36", + "from-url": "https://ai.ls/?chat=1" + } + async with ClientSession( + headers=headers + ) as session: + timestamp = _format_timestamp(int(time.time() * 1000)) + json_data = { + "model": "gpt-3.5-turbo", + "temperature": kwargs.get("temperature", 0.6), + "stream": True, + "messages": messages, + "d": datetime.now().strftime("%Y-%m-%d"), + "t": timestamp, + "s": _hash({"t": timestamp, "m": messages[-1]["content"]}), + } + async with session.post( + "https://api.caipacity.com/v1/chat/completions", + proxy=proxy, + json=json_data + ) as response: + response.raise_for_status() + start = "data: " + async for line in response.content: + line = line.decode('utf-8') + if line.startswith(start) and line != "data: [DONE]": + line = line[len(start):-1] + line = json.loads(line) + token = line["choices"][0]["delta"].get("content") + if token: + if "ai.ls" in token or "ai.ci" in token: + raise Exception("Response Error: " + token) + yield token + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" + + +def _hash(json_data: dict[str, str]) -> SHA256: + base_string: str = "%s:%s:%s:%s" % ( + json_data["t"], + json_data["m"], + "WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf", + len(json_data["m"]), + ) + + return SHA256(hashlib.sha256(base_string.encode()).hexdigest()) + + +def _format_timestamp(timestamp: int) -> str: + e = timestamp + n = e % 10 + r = n + 1 if n % 2 == 0 else n + return str(e - n + r) \ No newline at end of file diff --git a/g4f/g4f/Provider/Aivvm.py b/g4f/g4f/Provider/Aivvm.py new 
file mode 100644 index 0000000000000000000000000000000000000000..7a3d57bd0c36866fcc9543dbf651bb14982b3c91 --- /dev/null +++ b/g4f/g4f/Provider/Aivvm.py @@ -0,0 +1,78 @@ +from __future__ import annotations +import requests + +from .base_provider import BaseProvider +from ..typing import CreateResult + +models = { + 'gpt-3.5-turbo': {'id': 'gpt-3.5-turbo', 'name': 'GPT-3.5'}, + 'gpt-3.5-turbo-0613': {'id': 'gpt-3.5-turbo-0613', 'name': 'GPT-3.5-0613'}, + 'gpt-3.5-turbo-16k': {'id': 'gpt-3.5-turbo-16k', 'name': 'GPT-3.5-16K'}, + 'gpt-3.5-turbo-16k-0613': {'id': 'gpt-3.5-turbo-16k-0613', 'name': 'GPT-3.5-16K-0613'}, + 'gpt-4': {'id': 'gpt-4', 'name': 'GPT-4'}, + 'gpt-4-0613': {'id': 'gpt-4-0613', 'name': 'GPT-4-0613'}, + 'gpt-4-32k': {'id': 'gpt-4-32k', 'name': 'GPT-4-32K'}, + 'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'}, +} + +class Aivvm(BaseProvider): + url = 'https://chat.aivvm.com' + supports_stream = True + working = True + supports_gpt_35_turbo = True + supports_gpt_4 = True + + @classmethod + def create_completion(cls, + model: str, + messages: list[dict[str, str]], + stream: bool, + **kwargs + ) -> CreateResult: + if not model: + model = "gpt-3.5-turbo" + elif model not in models: + raise ValueError(f"Model are not supported: {model}") + + headers = { + "authority" : "chat.aivvm.com", + "accept" : "*/*", + "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", + "content-type" : "application/json", + "origin" : "https://chat.aivvm.com", + "referer" : "https://chat.aivvm.com/", + "sec-ch-ua" : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"', + "sec-ch-ua-mobile" : "?0", + "sec-ch-ua-platform" : '"macOS"', + "sec-fetch-dest" : "empty", + "sec-fetch-mode" : "cors", + "sec-fetch-site" : "same-origin", + "user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36", + } + + json_data = { + "model" : 
models[model], + "messages" : messages, + "key" : "", + "prompt" : "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.", + "temperature" : kwargs.get("temperature", 0.7) + } + + response = requests.post( + "https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True) + response.raise_for_status() + + for chunk in response.iter_content(chunk_size=None): + yield chunk.decode('utf-8') + + @classmethod + @property + def params(cls): + params = [ + ('model', 'str'), + ('messages', 'list[dict[str, str]]'), + ('stream', 'bool'), + ('temperature', 'float'), + ] + param = ', '.join([': '.join(p) for p in params]) + return f'g4f.provider.{cls.__name__} supports: ({param})' \ No newline at end of file diff --git a/g4f/g4f/Provider/Bard.py b/g4f/g4f/Provider/Bard.py new file mode 100644 index 0000000000000000000000000000000000000000..4e076378c1d6ad63519aa7f37ab5b1f857b660a1 --- /dev/null +++ b/g4f/g4f/Provider/Bard.py @@ -0,0 +1,92 @@ +from __future__ import annotations + +import json +import random +import re + +from aiohttp import ClientSession + +from .base_provider import AsyncProvider, format_prompt, get_cookies + + +class Bard(AsyncProvider): + url = "https://bard.google.com" + needs_auth = True + working = True + _snlm0e = None + + @classmethod + async def create_async( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + cookies: dict = None, + **kwargs + ) -> str: + prompt = format_prompt(messages) + if proxy and "://" not in proxy: + proxy = f"http://{proxy}" + if not cookies: + cookies = get_cookies(".google.com") + + headers = { + 'authority': 'bard.google.com', + 'origin': 'https://bard.google.com', + 'referer': 'https://bard.google.com/', + 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36', + 'x-same-domain': '1', + } + + async with ClientSession( + 
cookies=cookies, + headers=headers + ) as session: + if not cls._snlm0e: + async with session.get(cls.url, proxy=proxy) as response: + text = await response.text() + + match = re.search(r'SNlM0e\":\"(.*?)\"', text) + if not match: + raise RuntimeError("No snlm0e value.") + cls._snlm0e = match.group(1) + + params = { + 'bl': 'boq_assistant-bard-web-server_20230326.21_p0', + '_reqid': random.randint(1111, 9999), + 'rt': 'c' + } + + data = { + 'at': cls._snlm0e, + 'f.req': json.dumps([None, json.dumps([[prompt]])]) + } + + intents = '.'.join([ + 'assistant', + 'lamda', + 'BardFrontendService' + ]) + + async with session.post( + f'{cls.url}/_/BardChatUi/data/{intents}/StreamGenerate', + data=data, + params=params, + proxy=proxy + ) as response: + response = await response.text() + response = json.loads(response.splitlines()[3])[0][2] + response = json.loads(response)[4][0][1][0] + return response + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("proxy", "str"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/g4f/Provider/Bing.py b/g4f/g4f/Provider/Bing.py new file mode 100644 index 0000000000000000000000000000000000000000..05be27e7285590ca063a636f8b601d92665ae832 --- /dev/null +++ b/g4f/g4f/Provider/Bing.py @@ -0,0 +1,283 @@ +from __future__ import annotations + +import random +import json +import os +from aiohttp import ClientSession, ClientTimeout +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider, get_cookies + + +class Bing(AsyncGeneratorProvider): + url = "https://bing.com/chat" + working = True + supports_gpt_4 = True + + @staticmethod + def create_async_generator( + model: str, + messages: list[dict[str, str]], + cookies: dict = None, **kwargs) -> AsyncGenerator: + + if not cookies: + cookies = get_cookies(".bing.com") + if len(messages) < 2: + 
prompt = messages[0]["content"] + context = None + else: + prompt = messages[-1]["content"] + context = create_context(messages[:-1]) + + if not cookies or "SRCHD" not in cookies: + cookies = { + 'SRCHD' : 'AF=NOFORM', + 'PPLState' : '1', + 'KievRPSSecAuth': '', + 'SUID' : '', + 'SRCHUSR' : '', + 'SRCHHPGUSR' : '', + } + return stream_generate(prompt, context, cookies) + +def create_context(messages: list[dict[str, str]]): + context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages) + + return context + +class Conversation(): + def __init__(self, conversationId: str, clientId: str, conversationSignature: str) -> None: + self.conversationId = conversationId + self.clientId = clientId + self.conversationSignature = conversationSignature + +async def create_conversation(session: ClientSession) -> Conversation: + url = 'https://www.bing.com/turing/conversation/create' + async with await session.get(url) as response: + response = await response.json() + conversationId = response.get('conversationId') + clientId = response.get('clientId') + conversationSignature = response.get('conversationSignature') + + if not conversationId or not clientId or not conversationSignature: + raise Exception('Failed to create conversation.') + + return Conversation(conversationId, clientId, conversationSignature) + +async def list_conversations(session: ClientSession) -> list: + url = "https://www.bing.com/turing/conversation/chats" + async with session.get(url) as response: + response = await response.json() + return response["chats"] + +async def delete_conversation(session: ClientSession, conversation: Conversation) -> list: + url = "https://sydney.bing.com/sydney/DeleteSingleConversation" + json = { + "conversationId": conversation.conversationId, + "conversationSignature": conversation.conversationSignature, + "participant": {"id": conversation.clientId}, + "source": "cib", + "optionsSets": ["autosave"] + } + async with session.post(url, 
json=json) as response: + response = await response.json() + return response["result"]["value"] == "Success" + +class Defaults: + delimiter = "\x1e" + ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}" + + allowedMessageTypes = [ + "Chat", + "Disengaged", + "AdsQuery", + "SemanticSerp", + "GenerateContentQuery", + "SearchQuery", + "ActionRequest", + "Context", + "Progress", + "AdsQuery", + "SemanticSerp", + ] + + sliceIds = [ + "winmuid3tf", + "osbsdusgreccf", + "ttstmout", + "crchatrev", + "winlongmsgtf", + "ctrlworkpay", + "norespwtf", + "tempcacheread", + "temptacache", + "505scss0", + "508jbcars0", + "515enbotdets0", + "5082tsports", + "515vaoprvs", + "424dagslnv1s0", + "kcimgattcf", + "427startpms0", + ] + + location = { + "locale": "en-US", + "market": "en-US", + "region": "US", + "locationHints": [ + { + "country": "United States", + "state": "California", + "city": "Los Angeles", + "timezoneoffset": 8, + "countryConfidence": 8, + "Center": {"Latitude": 34.0536909, "Longitude": -118.242766}, + "RegionType": 2, + "SourceType": 1, + } + ], + } + + headers = { + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'cache-control': 'max-age=0', + 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"', + 'sec-ch-ua-arch': '"x86"', + 'sec-ch-ua-bitness': '"64"', + 'sec-ch-ua-full-version': '"110.0.1587.69"', + 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-model': '""', + 'sec-ch-ua-platform': '"Windows"', + 'sec-ch-ua-platform-version': '"15.0.0"', + 'sec-fetch-dest': 'document', + 'sec-fetch-mode': 'navigate', + 'sec-fetch-site': 'none', + 'sec-fetch-user': '?1', + 'upgrade-insecure-requests': '1', + 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69', + 
'x-edge-shopping-flag': '1', + 'x-forwarded-for': ip_address, + } + + optionsSets = { + "optionsSets": [ + 'saharasugg', + 'enablenewsfc', + 'clgalileo', + 'gencontentv3', + "nlu_direct_response_filter", + "deepleo", + "disable_emoji_spoken_text", + "responsible_ai_policy_235", + "enablemm", + "h3precise" + "dtappid", + "cricinfo", + "cricinfov2", + "dv3sugg", + "nojbfedge" + ] + } + +def format_message(msg: dict) -> str: + return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter + +def create_message(conversation: Conversation, prompt: str, context: str=None) -> str: + struct = { + 'arguments': [ + { + **Defaults.optionsSets, + 'source': 'cib', + 'allowedMessageTypes': Defaults.allowedMessageTypes, + 'sliceIds': Defaults.sliceIds, + 'traceId': os.urandom(16).hex(), + 'isStartOfSession': True, + 'message': Defaults.location | { + 'author': 'user', + 'inputMethod': 'Keyboard', + 'text': prompt, + 'messageType': 'Chat' + }, + 'conversationSignature': conversation.conversationSignature, + 'participant': { + 'id': conversation.clientId + }, + 'conversationId': conversation.conversationId + } + ], + 'invocationId': '0', + 'target': 'chat', + 'type': 4 + } + + if context: + struct['arguments'][0]['previousMessages'] = [{ + "author": "user", + "description": context, + "contextType": "WebPage", + "messageType": "Context", + "messageId": "discover-web--page-ping-mriduna-----" + }] + return format_message(struct) + +async def stream_generate( + prompt: str, + context: str=None, + cookies: dict=None + ): + async with ClientSession( + timeout=ClientTimeout(total=900), + cookies=cookies, + headers=Defaults.headers, + ) as session: + conversation = await create_conversation(session) + try: + async with session.ws_connect( + 'wss://sydney.bing.com/sydney/ChatHub', + autoping=False, + ) as wss: + + await wss.send_str(format_message({'protocol': 'json', 'version': 1})) + msg = await wss.receive(timeout=900) + + await wss.send_str(create_message(conversation, prompt, 
context)) + + response_txt = '' + result_text = '' + returned_text = '' + final = False + + while not final: + msg = await wss.receive(timeout=900) + objects = msg.data.split(Defaults.delimiter) + for obj in objects: + if obj is None or not obj: + continue + + response = json.loads(obj) + if response.get('type') == 1 and response['arguments'][0].get('messages'): + message = response['arguments'][0]['messages'][0] + if (message['contentOrigin'] != 'Apology'): + response_txt = result_text + \ + message['adaptiveCards'][0]['body'][0].get('text', '') + + if message.get('messageType'): + inline_txt = message['adaptiveCards'][0]['body'][0]['inlines'][0].get('text') + response_txt += inline_txt + '\n' + result_text += inline_txt + '\n' + + if response_txt.startswith(returned_text): + new = response_txt[len(returned_text):] + if new != "\n": + yield new + returned_text = response_txt + elif response.get('type') == 2: + result = response['item']['result'] + if result.get('error'): + raise Exception(f"{result['value']}: {result['message']}") + final = True + break + finally: + await delete_conversation(session, conversation) \ No newline at end of file diff --git a/g4f/g4f/Provider/ChatBase.py b/g4f/g4f/Provider/ChatBase.py new file mode 100644 index 0000000000000000000000000000000000000000..b98fe56595a161bb5cfbcc7871ff94845edb3b3a --- /dev/null +++ b/g4f/g4f/Provider/ChatBase.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +from aiohttp import ClientSession + +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider + + +class ChatBase(AsyncGeneratorProvider): + url = "https://www.chatbase.co" + supports_gpt_35_turbo = True + supports_gpt_4 = True + working = True + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + **kwargs + ) -> AsyncGenerator: + if model == "gpt-4": + chat_id = "quran---tafseer-saadi-pdf-wbgknt7zn" + elif model == "gpt-3.5-turbo" or not model: + chat_id = 
"chatbase--1--pdf-p680fxvnm" + else: + raise ValueError(f"Model are not supported: {model}") + headers = { + "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36", + "Accept" : "*/*", + "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", + "Origin" : cls.url, + "Referer" : cls.url + "/", + "Sec-Fetch-Dest" : "empty", + "Sec-Fetch-Mode" : "cors", + "Sec-Fetch-Site" : "same-origin", + } + async with ClientSession( + headers=headers + ) as session: + data = { + "messages": messages, + "captchaCode": "hadsa", + "chatId": chat_id, + "conversationId": f"kcXpqEnqUie3dnJlsRi_O-{chat_id}" + } + async with session.post("https://www.chatbase.co/api/fe/chat", json=data) as response: + response.raise_for_status() + async for stream in response.content.iter_any(): + yield stream.decode() + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/g4f/g4f/Provider/ChatgptAi.py b/g4f/g4f/Provider/ChatgptAi.py new file mode 100644 index 0000000000000000000000000000000000000000..e6416cc3ce13728e137fa4c7f95f2f44daa9253f --- /dev/null +++ b/g4f/g4f/Provider/ChatgptAi.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +import re +import html +import json +from aiohttp import ClientSession + +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider + + +class ChatgptAi(AsyncGeneratorProvider): + url: str = "https://chatgpt.ai/" + working = True + supports_gpt_35_turbo = True + _system_data = None + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs + ) -> AsyncGenerator: + headers = { + "authority" : 
"chatgpt.ai", + "accept" : "*/*", + "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", + "cache-control" : "no-cache", + "origin" : "https://chatgpt.ai", + "pragma" : "no-cache", + "referer" : cls.url, + "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', + "sec-ch-ua-mobile" : "?0", + "sec-ch-ua-platform" : '"Windows"', + "sec-fetch-dest" : "empty", + "sec-fetch-mode" : "cors", + "sec-fetch-site" : "same-origin", + "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36", + } + async with ClientSession( + headers=headers + ) as session: + if not cls._system_data: + async with session.get(cls.url, proxy=proxy) as response: + response.raise_for_status() + match = re.findall(r"data-system='([^']+)'", await response.text()) + if not match: + raise RuntimeError("No system data") + cls._system_data = json.loads(html.unescape(match[0])) + + data = { + "botId": cls._system_data["botId"], + "clientId": "", + "contextId": cls._system_data["contextId"], + "id": cls._system_data["id"], + "messages": messages[:-1], + "newMessage": messages[-1]["content"], + "session": cls._system_data["sessionId"], + "stream": True + } + async with session.post( + "https://chatgpt.ai/wp-json/mwai-ui/v1/chats/submit", + proxy=proxy, + json=data + ) as response: + response.raise_for_status() + start = "data: " + async for line in response.content: + line = line.decode('utf-8') + if line.startswith(start): + line = json.loads(line[len(start):-1]) + if line["type"] == "live": + yield line["data"] \ No newline at end of file diff --git a/g4f/g4f/Provider/ChatgptDuo.py b/g4f/g4f/Provider/ChatgptDuo.py new file mode 100644 index 0000000000000000000000000000000000000000..07f4c16c067187cd6492834e1df30bd187b675a6 --- /dev/null +++ b/g4f/g4f/Provider/ChatgptDuo.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +from g4f.requests import AsyncSession 
+from .base_provider import AsyncProvider, format_prompt + + +class ChatgptDuo(AsyncProvider): + url = "https://chatgptduo.com" + supports_gpt_35_turbo = True + working = True + + @classmethod + async def create_async( + cls, + model: str, + messages: list[dict[str, str]], + **kwargs + ) -> str: + async with AsyncSession(impersonate="chrome107") as session: + prompt = format_prompt(messages), + data = { + "prompt": prompt, + "search": prompt, + "purpose": "ask", + } + async with session.post(f"{cls.url}/", data=data) as response: + response.raise_for_status() + data = await response.json() + + cls._sources = [{ + "title": source["title"], + "url": source["link"], + "snippet": source["snippet"] + } for source in data["results"]] + + return data["answer"] + + @classmethod + def get_sources(cls): + return cls._sources + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/g4f/g4f/Provider/ChatgptLogin.py b/g4f/g4f/Provider/ChatgptLogin.py new file mode 100644 index 0000000000000000000000000000000000000000..3eb55a64568c28df41f14051002ade95ca8dbcec --- /dev/null +++ b/g4f/g4f/Provider/ChatgptLogin.py @@ -0,0 +1,74 @@ +from __future__ import annotations + +import os, re +from aiohttp import ClientSession + +from .base_provider import AsyncProvider, format_prompt + + +class ChatgptLogin(AsyncProvider): + url = "https://opchatgpts.net" + supports_gpt_35_turbo = True + working = True + _nonce = None + + @classmethod + async def create_async( + cls, + model: str, + messages: list[dict[str, str]], + **kwargs + ) -> str: + headers = { + "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36", + "Accept" : "*/*", + "Accept-language" : 
class ChatgptLogin(AsyncProvider):
    """Async provider for opchatgpts.net, a WordPress AI-chat deployment."""
    url = "https://opchatgpts.net"
    supports_gpt_35_turbo = True
    working = True
    # WordPress nonce scraped from the chat page, cached per process.
    _nonce = None

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> str:
        """Send the flattened conversation and return the bot's reply text."""
        request_headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin"          : "https://opchatgpts.net",
            "Alt-Used"        : "opchatgpts.net",
            "Referer"         : "https://opchatgpts.net/chatgpt-free-use/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        async with ClientSession(headers=request_headers) as session:
            if not cls._nonce:
                # Scrape the anti-CSRF nonce from the public chat page; the
                # random "id" query param busts intermediate caches.
                async with session.get(
                    "https://opchatgpts.net/chatgpt-free-use/",
                    params={"id": os.urandom(6).hex()},
                ) as page:
                    match = re.search(r'data-nonce="(.*?)"', await page.text())
                    if match is None:
                        raise RuntimeError("No nonce value")
                    cls._nonce = match.group(1)

            form = {
                "_wpnonce": cls._nonce,
                "post_id": 28,
                "url": "https://opchatgpts.net/chatgpt-free-use",
                "action": "wpaicg_chat_shortcode_message",
                "message": format_prompt(messages),
                "bot_id": 0,
            }
            async with session.post(
                "https://opchatgpts.net/wp-admin/admin-ajax.php", data=form
            ) as response:
                response.raise_for_status()
                reply = await response.json()
                if "data" in reply:
                    return reply["data"]
                if "msg" in reply:
                    raise RuntimeError(reply["msg"])
                raise RuntimeError(f"Response: {reply}")

    @classmethod
    @property
    def params(cls):
        """Human-readable description of the supported arguments."""
        supported = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        joined = ", ".join(f"{name}: {kind}" for name, kind in supported)
        return f"g4f.provider.{cls.__name__} supports: ({joined})"
class CodeLinkAva(AsyncGeneratorProvider):
    """Streaming provider for CodeLink's "Ava" OpenAI-style chat endpoint."""
    url = "https://ava-ai-ef611.web.app"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Yield content deltas parsed from the server-sent event stream."""
        request_headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        async with ClientSession(headers=request_headers) as session:
            payload = {
                "messages": messages,
                "temperature": 0.6,
                "stream": True,
                **kwargs
            }
            async with session.post(
                "https://ava-alpha-api.codelink.io/api/chat", json=payload
            ) as response:
                response.raise_for_status()
                async for raw in response.content:
                    text = raw.decode()
                    if not text.startswith("data: "):
                        continue
                    if text.startswith("data: [DONE]"):
                        break
                    # Strip the "data: " prefix and the trailing newline.
                    event = json.loads(text[6:-1])
                    delta = event["choices"][0]["delta"].get("content")
                    if delta:
                        yield delta

    @classmethod
    @property
    def params(cls):
        """Human-readable description of the supported arguments."""
        supported = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        joined = ", ".join(f"{name}: {kind}" for name, kind in supported)
        return f"g4f.provider.{cls.__name__} supports: ({joined})"
class DeepAi(AsyncGeneratorProvider):
    """Streaming provider for deepai.org's chat endpoint.

    The endpoint authenticates with a short-lived "api-key" header that the
    site's frontend derives in JavaScript; this provider evaluates the same
    minified JS with js2py to mint a matching key.
    """
    url: str = "https://deepai.org"
    working = True
    supports_gpt_35_turbo = True

    @staticmethod
    async def create_async_generator(
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        """Yield raw decoded text chunks streamed by the server."""

        # Minified JS lifted from the deepai.org frontend. It builds a
        # "tryit-<random>-<hash>" token by repeatedly hashing the user agent
        # with what appears to be an MD5-style digest (NOTE(review): inferred
        # from the sine-table constant 4294967296*sin(i) and the shift table;
        # confirm against the live site before touching). Do not edit.
        token_js = """
var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y;
h = Math.round(1E11 * Math.random()) + "";
f = function () {
    for (var p = [], q = 0; 64 > q;) p[q] = 0 | 4294967296 * Math.sin(++q % Math.PI);

    return function (t) {
        var v, y, H, ea = [v = 1732584193, y = 4023233417, ~v, ~y],
            Z = [],
            A = unescape(encodeURI(t)) + "\u0080",
            z = A.length;
        t = --z / 4 + 2 | 15;
        for (Z[--t] = 8 * z; ~z;) Z[z >> 2] |= A.charCodeAt(z) << 8 * z--;
        for (q = A = 0; q < t; q += 16) {
            for (z = ea; 64 > A; z = [H = z[3], v + ((H = z[0] + [v & y | ~v & H, H & v | ~H & y, v ^ y ^ H, y ^ (v | ~H)][z = A >> 4] + p[A] + ~~Z[q | [A, 5 * A + 1, 3 * A + 5, 7 * A][z] & 15]) << (z = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * z + A++ % 4]) | H >>> -z), v, y]) v = z[1] | 0, y = z[2];
            for (A = 4; A;) ea[--A] += z[A]
        }
        for (t = ""; 32 > A;) t += (ea[A >> 3] >> 4 * (1 ^ A++) & 15).toString(16);
        return t.split("").reverse().join("")
    }
}();

"tryit-" + h + "-" + f(agent + f(agent + f(agent + h + "x")));
"""

        # NOTE(review): "chas_style" looks like a typo of "chat_style", but it
        # is kept verbatim — the server may expect this exact key.
        payload = {"chas_style": "chat", "chatHistory": json.dumps(messages)}
        api_key = js2py.eval_js(token_js)
        headers = {
            "api-key": api_key,
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            async with session.post("https://api.deepai.org/make_me_a_sandwich", proxy=proxy, data=payload) as response:
                response.raise_for_status()
                # Chunks are forwarded verbatim; the endpoint streams plain text.
                async for stream in response.content.iter_any():
                    if stream:
                        yield stream.decode()
class DfeHub(BaseProvider):
    """Provider for chat.dfehub.com's OpenAI-compatible streaming endpoint."""
    url = "https://chat.dfehub.com/"
    supports_stream = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:
        """Yield content deltas; on throttle ("detail") responses, sleep for
        the server-suggested delay and recursively retry the whole request."""
        request_headers = {
            "authority"         : "chat.dfehub.com",
            "accept"            : "*/*",
            "accept-language"   : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "content-type"      : "application/json",
            "origin"            : "https://chat.dfehub.com",
            "referer"           : "https://chat.dfehub.com/",
            "sec-ch-ua"         : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
            "sec-ch-ua-mobile"  : "?0",
            "sec-ch-ua-platform": '"macOS"',
            "sec-fetch-dest"    : "empty",
            "sec-fetch-mode"    : "cors",
            "sec-fetch-site"    : "same-origin",
            "user-agent"        : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
            "x-requested-with"  : "XMLHttpRequest",
        }

        payload = {
            "messages"          : messages,
            "model"             : "gpt-3.5-turbo",
            "temperature"       : kwargs.get("temperature", 0.5),
            "presence_penalty"  : kwargs.get("presence_penalty", 0),
            "frequency_penalty" : kwargs.get("frequency_penalty", 0),
            "top_p"             : kwargs.get("top_p", 1),
            "stream"            : True,
        }

        response = requests.post(
            "https://chat.dfehub.com/api/openai/v1/chat/completions",
            headers=request_headers, json=payload, timeout=3)

        for chunk in response.iter_lines():
            if b"detail" in chunk:
                # The throttle message embeds the number of seconds to wait;
                # honor the last float found in it, then retry from scratch.
                delay = float(re.findall(r"\d+\.\d+", chunk.decode())[-1])
                time.sleep(delay)
                yield from DfeHub.create_completion(model, messages, stream, **kwargs)
            if b"content" in chunk:
                event = json.loads(chunk.decode().split("data: ")[1])
                yield (event["choices"][0]["delta"]["content"])

    @classmethod
    @property
    def params(cls):
        """Human-readable description of the supported arguments."""
        supported = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("presence_penalty", "int"),
            ("frequency_penalty", "int"),
            ("top_p", "int"),
        ]
        joined = ", ".join(f"{name}: {kind}" for name, kind in supported)
        return f"g4f.provider.{cls.__name__} supports: ({joined})"
class EasyChat(BaseProvider):
    """Provider for free.easychat.work via its fastgpt.me mirror servers.

    Picks one mirror, primes cookies with a GET, then calls the
    OpenAI-compatible chat endpoint, optionally streaming SSE chunks.
    """
    url: str = "https://free.easychat.work"
    supports_stream = True
    supports_gpt_35_turbo = True
    working = False

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:
        """Yield the completion (whole message, or SSE deltas when streaming).

        Recognized kwargs: temperature, presence_penalty, frequency_penalty,
        top_p, and active_server (index into the mirror list; random default).
        Raises Exception on non-200 responses or an empty reply.
        """
        active_servers = [
            "https://chat10.fastgpt.me",
            "https://chat9.fastgpt.me",
            "https://chat1.fastgpt.me",
            "https://chat2.fastgpt.me",
            "https://chat3.fastgpt.me",
            "https://chat4.fastgpt.me",
            "https://gxos1h1ddt.fastgpt.me"
        ]

        # NOTE: randint(0, 5) never selects the last mirror; kept as-is to
        # avoid changing which servers receive traffic.
        server = active_servers[kwargs.get("active_server", random.randint(0, 5))]
        headers = {
            "authority"         : f"{server}".replace("https://", ""),
            "accept"            : "text/event-stream",
            "accept-language"   : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
            "content-type"      : "application/json",
            "origin"            : f"{server}",
            "referer"           : f"{server}/",
            "plugins"           : "0",
            "sec-ch-ua"         : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
            "sec-ch-ua-mobile"  : "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest"    : "empty",
            "sec-fetch-mode"    : "cors",
            "sec-fetch-site"    : "same-origin",
            "user-agent"        : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "usesearch"         : "false",
            # BUG FIX: this key appeared twice in the original dict literal
            # (duplicate dict keys silently overwrite); kept once.
            "x-requested-with"  : "XMLHttpRequest",
        }

        json_data = {
            "messages"          : messages,
            "stream"            : stream,
            "model"             : model,
            "temperature"       : kwargs.get("temperature", 0.5),
            "presence_penalty"  : kwargs.get("presence_penalty", 0),
            "frequency_penalty" : kwargs.get("frequency_penalty", 0),
            "top_p"             : kwargs.get("top_p", 1)
        }

        session = requests.Session()
        # init cookies from server
        session.get(f"{server}/")

        response = session.post(f"{server}/api/openai/v1/chat/completions",
            headers=headers, json=json_data, stream=stream)

        if response.status_code == 200:
            if not stream:  # IDIOM FIX: was `stream == False`
                json_data = response.json()
                if "choices" in json_data:
                    yield json_data["choices"][0]["message"]["content"]
                else:
                    raise Exception("No response from server")
            else:
                for chunk in response.iter_lines():
                    if b"content" in chunk:
                        splitData = chunk.decode().split("data:")
                        if len(splitData) > 1:
                            yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
        else:
            raise Exception(f"Error {response.status_code} from server : {response.reason}")

    @classmethod
    @property
    def params(cls):
        """Human-readable description of the supported arguments."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("presence_penalty", "int"),
            ("frequency_penalty", "int"),
            ("top_p", "int"),
            ("active_server", "int"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
class Equing(BaseProvider):
    """Provider for next.eqing.tech's OpenAI-compatible chat endpoint."""
    url: str = 'https://next.eqing.tech/'
    working = False
    supports_stream = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = False

    # BUG FIX: the original decorated this concrete, directly-invoked method
    # with @abstractmethod, which is misleading (it has a full implementation
    # and is never meant to be overridden here) and marks the class abstract.
    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:
        """Yield the completion: whole message when not streaming, otherwise
        SSE content deltas parsed from "data: " lines."""
        headers = {
            'authority'         : 'next.eqing.tech',
            'accept'            : 'text/event-stream',
            'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control'     : 'no-cache',
            'content-type'      : 'application/json',
            'origin'            : 'https://next.eqing.tech',
            'plugins'           : '0',
            'pragma'            : 'no-cache',
            'referer'           : 'https://next.eqing.tech/',
            'sec-ch-ua'         : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
            'sec-ch-ua-mobile'  : '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest'    : 'empty',
            'sec-fetch-mode'    : 'cors',
            'sec-fetch-site'    : 'same-origin',
            'user-agent'        : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
            'usesearch'         : 'false',
            'x-requested-with'  : 'XMLHttpRequest'
        }

        json_data = {
            'messages'          : messages,
            'stream'            : stream,
            'model'             : model,
            'temperature'       : kwargs.get('temperature', 0.5),
            'presence_penalty'  : kwargs.get('presence_penalty', 0),
            'frequency_penalty' : kwargs.get('frequency_penalty', 0),
            'top_p'             : kwargs.get('top_p', 1),
        }

        response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
            headers=headers, json=json_data, stream=stream)

        if not stream:
            yield response.json()["choices"][0]["message"]["content"]
            return

        for line in response.iter_content(chunk_size=1024):
            if line:
                if b'content' in line:
                    line_json = json.loads(line.decode('utf-8').split('data: ')[1])
                    token = line_json['choices'][0]['delta'].get('content')
                    if token:
                        yield token

    @classmethod
    @property
    def params(cls):
        """Human-readable description of the supported arguments."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
class FastGpt(ABC):
    """Provider for the fastgpt.me mirrors' OpenAI-compatible endpoint."""
    url: str = 'https://chat9.fastgpt.me/'
    working = False
    needs_auth = False
    supports_stream = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = False

    # BUG FIX: the original decorated this concrete, directly-invoked method
    # with @abstractmethod even though it carries the full implementation.
    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:
        """Yield SSE content deltas; malformed chunks are skipped."""
        headers = {
            'authority'         : 'chat9.fastgpt.me',
            'accept'            : 'text/event-stream',
            'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control'     : 'no-cache',
            'content-type'      : 'application/json',
            'origin'            : 'https://chat9.fastgpt.me',
            'plugins'           : '0',
            'pragma'            : 'no-cache',
            'referer'           : 'https://chat9.fastgpt.me/',
            'sec-ch-ua'         : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
            'sec-ch-ua-mobile'  : '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest'    : 'empty',
            'sec-fetch-mode'    : 'cors',
            'sec-fetch-site'    : 'same-origin',
            'user-agent'        : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
            'usesearch'         : 'false',
            'x-requested-with'  : 'XMLHttpRequest',
        }

        json_data = {
            'messages'          : messages,
            'stream'            : stream,
            'model'             : model,
            'temperature'       : kwargs.get('temperature', 0.5),
            'presence_penalty'  : kwargs.get('presence_penalty', 0),
            'frequency_penalty' : kwargs.get('frequency_penalty', 0),
            'top_p'             : kwargs.get('top_p', 1),
        }

        subdomain = random.choice([
            'jdaen979ew',
            'chat9'
        ])

        response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions',
            headers=headers, json=json_data, stream=stream)

        for line in response.iter_lines():
            if line:
                try:
                    if b'content' in line:
                        line_json = json.loads(line.decode('utf-8').split('data: ')[1])
                        token = line_json['choices'][0]['delta'].get('content')
                        if token:
                            yield token
                # ROBUSTNESS FIX: was a bare `except:` that swallowed every
                # error (including KeyboardInterrupt); only parse failures of
                # a malformed chunk are worth skipping.
                except (IndexError, KeyError, UnicodeDecodeError, json.JSONDecodeError):
                    continue

    @classmethod
    @property
    def params(cls):
        """Human-readable description of the supported arguments."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
class Forefront(BaseProvider):
    """Provider for forefront.com's free streaming chat endpoint."""
    url = "https://forefront.com"
    supports_stream = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:
        """Stream delta strings; the last message is the prompt, the rest is
        sent as conversation history."""
        history = messages[:-1] if len(messages) > 1 else []
        payload = {
            "text"          : messages[-1]["content"],
            "action"        : "noauth",
            "id"            : "",
            "parentId"      : "",
            "workspaceId"   : "",
            "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
            "model"         : "gpt-4",
            "messages"      : history,
            "internetMode"  : "auto",
        }

        response = requests.post(
            "https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
            json=payload, stream=True)
        response.raise_for_status()

        for raw in response.iter_lines():
            if b"delta" in raw:
                yield json.loads(raw.decode().split("data: ")[1])["delta"]
class GetGpt(BaseProvider):
    """Provider for chat.getgpt.world; the request body is AES-encrypted
    client-side and sent as a "signature", mirroring the site's JS."""
    url = 'https://chat.getgpt.world/'
    supports_stream = True
    working = False
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:
        """Yield streamed content deltas from the signed chat endpoint."""
        request_headers = {
            'Content-Type' : 'application/json',
            'Referer'      : 'https://chat.getgpt.world/',
            'user-agent'   : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        }

        body = json.dumps({
            'messages'          : messages,
            'frequency_penalty' : kwargs.get('frequency_penalty', 0),
            'max_tokens'        : kwargs.get('max_tokens', 4000),
            'model'             : 'gpt-3.5-turbo',
            'presence_penalty'  : kwargs.get('presence_penalty', 0),
            'temperature'       : kwargs.get('temperature', 1),
            'top_p'             : kwargs.get('top_p', 1),
            'stream'            : True,
            'uuid'              : str(uuid.uuid4()),
        })

        res = requests.post('https://chat.getgpt.world/api/chat/stream',
            headers=request_headers, json={'signature': _encrypt(body)}, stream=True)
        res.raise_for_status()

        for raw in res.iter_lines():
            if b'content' not in raw:
                continue
            event = json.loads(raw.decode('utf-8').split('data: ')[1])
            yield (event['choices'][0]['delta']['content'])

    @classmethod
    @property
    def params(cls):
        """Human-readable description of the supported arguments."""
        supported = [
            ('model', 'str'),
            ('messages', 'list[dict[str, str]]'),
            ('stream', 'bool'),
            ('temperature', 'float'),
            ('presence_penalty', 'int'),
            ('frequency_penalty', 'int'),
            ('top_p', 'int'),
            ('max_tokens', 'int'),
        ]
        joined = ', '.join(f'{name}: {kind}' for name, kind in supported)
        return f'g4f.provider.{cls.__name__} supports: ({joined})'


def _encrypt(e: str):
    """AES-128-CBC encrypt *e* with a random key/iv; return the hex
    ciphertext with the ascii-hex key and iv appended (site protocol)."""
    key = os.urandom(8).hex().encode('utf-8')  # 16 ascii-hex chars = 16-byte key
    iv = os.urandom(8).hex().encode('utf-8')

    sealed = AES.new(key, AES.MODE_CBC, iv).encrypt(_pad_data(e.encode('utf-8')))
    return sealed.hex() + key.decode('utf-8') + iv.decode('utf-8')


def _pad_data(data: bytes) -> bytes:
    """PKCS#7-pad *data* up to the AES block size."""
    pad_len = AES.block_size - len(data) % AES.block_size
    return data + bytes([pad_len] * pad_len)
class GptGo(AsyncGeneratorProvider):
    """Provider for gptgo.ai: obtains a one-shot token for the prompt, then
    streams the answer as SSE content deltas."""
    url = "https://gptgo.ai"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        """Yield content deltas for the formatted conversation."""
        request_headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        async with ClientSession(headers=request_headers) as session:
            # Step 1: trade the prompt for a short-lived token.
            async with session.get(
                "https://gptgo.ai/action_get_token.php",
                params={
                    "q": format_prompt(messages),
                    "hlgpt": "default",
                    "hl": "en"
                },
                proxy=proxy
            ) as token_response:
                token_response.raise_for_status()
                token = (await token_response.json(content_type=None))["token"]

            # Step 2: redeem the token for the streamed answer.
            async with session.get(
                "https://gptgo.ai/action_ai_gpt.php",
                params={
                    "token": token,
                },
                proxy=proxy
            ) as response:
                response.raise_for_status()
                prefix = "data: "
                async for raw in response.content:
                    text = raw.decode()
                    if not text.startswith(prefix):
                        continue
                    if text.startswith("data: [DONE]"):
                        break
                    event = json.loads(text[len(prefix):-1])
                    delta = event["choices"][0]["delta"].get("content")
                    if delta:
                        yield delta

    @classmethod
    @property
    def params(cls):
        """Human-readable description of the supported arguments."""
        supported = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        joined = ", ".join(f"{name}: {kind}" for name, kind in supported)
        return f"g4f.provider.{cls.__name__} supports: ({joined})"
class H2o(AsyncGeneratorProvider):
    """Provider for gpt-gm.h2o.ai (h2oGPT falcon-40b by default).

    Flow: accept the ethics modal via /settings, create a conversation,
    stream tokens from it, then delete the conversation.
    """
    url = "https://gpt-gm.h2o.ai"
    working = True
    # Default model used when the caller passes an empty model name.
    model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        """Yield generated text tokens; kwargs override generation parameters."""
        model = model if model else cls.model
        headers = {"Referer": cls.url + "/"}

        async with ClientSession(
            headers=headers
        ) as session:
            # Accept the ethics modal; without this the site refuses to chat.
            data = {
                "ethicsModalAccepted": "true",
                "shareConversationsWithModelAuthors": "true",
                "ethicsModalAcceptedAt": "",
                "activeModel": model,
                "searchEnabled": "true",
            }
            async with session.post(
                f"{cls.url}/settings",
                proxy=proxy,
                data=data
            ) as response:
                response.raise_for_status()

            # Create a server-side conversation and remember its id.
            async with session.post(
                f"{cls.url}/conversation",
                proxy=proxy,
                json={"model": model},
            ) as response:
                response.raise_for_status()
                conversationId = (await response.json())["conversationId"]

            data = {
                "inputs": format_prompt(messages),
                "parameters": {
                    "temperature": 0.4,
                    "truncate": 2048,
                    "max_new_tokens": 1024,
                    "do_sample": True,
                    "repetition_penalty": 1.2,
                    "return_full_text": False,
                    **kwargs
                },
                "stream": True,
                "options": {
                    "id": str(uuid.uuid4()),
                    "response_id": str(uuid.uuid4()),
                    "is_retry": False,
                    "use_cache": False,
                    "web_search_id": "",
                },
            }
            # NOTE(review): no raise_for_status() on this streaming POST —
            # an error response would simply yield nothing; confirm intended.
            async with session.post(
                f"{cls.url}/conversation/{conversationId}",
                proxy=proxy,
                json=data
            ) as response:
                start = "data:"
                async for line in response.content:
                    line = line.decode("utf-8")
                    if line and line.startswith(start):
                        # Strip the "data:" prefix and the trailing newline.
                        line = json.loads(line[len(start):-1])
                        # Skip special tokens (e.g. end-of-sequence markers).
                        if not line["token"]["special"]:
                            yield line["token"]["text"]

            # Clean up the server-side conversation.
            async with session.delete(
                f"{cls.url}/conversation/{conversationId}",
                proxy=proxy,
                json=data
            ) as response:
                response.raise_for_status()


    @classmethod
    @property
    def params(cls):
        """Human-readable description of the supported arguments."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("truncate", "int"),
            ("max_new_tokens", "int"),
            ("do_sample", "bool"),
            ("repetition_penalty", "float"),
            ("return_full_text", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
class HuggingChat(AsyncGeneratorProvider):
    """Provider for huggingface.co/chat (requires logged-in cookies).

    Creates a conversation, posts the prompt, yields tokens (streamed or
    whole), then deletes the conversation.
    """
    url = "https://huggingface.co/chat"
    needs_auth = True
    working = True
    # Default model used when the caller passes an empty model name.
    model = "OpenAssistant/oasst-sft-6-llama-30b-xor"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = True,
        proxy: str = None,
        cookies: dict = None,
        **kwargs
    ) -> AsyncGenerator:
        """Yield the reply; cookies default to the browser's .huggingface.co jar."""
        model = model if model else cls.model
        if proxy and "://" not in proxy:
            proxy = f"http://{proxy}"
        if not cookies:
            cookies = get_cookies(".huggingface.co")

        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
        }
        async with ClientSession(
            cookies=cookies,
            headers=headers
        ) as session:
            async with session.post(f"{cls.url}/conversation", proxy=proxy, json={"model": model}) as response:
                # ROBUSTNESS FIX: fail fast on an HTTP error instead of
                # raising an opaque JSON/KeyError on the error page below.
                response.raise_for_status()
                conversation_id = (await response.json())["conversationId"]

            send = {
                "inputs": format_prompt(messages),
                "parameters": {
                    "temperature": 0.2,
                    "truncate": 1000,
                    "max_new_tokens": 1024,
                    "stop": [""],
                    "top_p": 0.95,
                    "repetition_penalty": 1.2,
                    "top_k": 50,
                    "return_full_text": False,
                    **kwargs
                },
                "stream": stream,
                "options": {
                    "id": "9e9b8bc4-6604-40c6-994e-8eb78fa32e37",
                    "response_id": "04ce2602-3bea-45e8-8efc-cef00680376a",
                    "is_retry": False,
                    "use_cache": False,
                    "web_search_id": ""
                }
            }
            async with session.post(f"{cls.url}/conversation/{conversation_id}", proxy=proxy, json=send) as response:
                if not stream:
                    data = await response.json()
                    if "error" in data:
                        raise RuntimeError(data["error"])
                    elif isinstance(data, list):
                        yield data[0]["generated_text"].strip()
                    else:
                        raise RuntimeError(f"Response: {data}")
                else:
                    start = "data:"
                    first = True
                    async for line in response.content:
                        line = line.decode("utf-8")
                        if line.startswith(start):
                            # Strip the "data:" prefix and trailing newline.
                            line = json.loads(line[len(start):-1])
                            if "token" not in line:
                                raise RuntimeError(f"Response: {line}")
                            # Skip special tokens; trim leading whitespace on
                            # the very first emitted token only.
                            if not line["token"]["special"]:
                                if first:
                                    yield line["token"]["text"].lstrip()
                                    first = False
                                else:
                                    yield line["token"]["text"]

            # Clean up the server-side conversation.
            async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
                response.raise_for_status()


    @classmethod
    @property
    def params(cls):
        """Human-readable description of the supported arguments."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
class Liaobots(AsyncGeneratorProvider):
    """Provider for liaobots.com; fetches an auth code once, then streams
    raw decoded chunks from the chat endpoint."""
    url = "https://liaobots.com"
    working = False
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    # Cached auth code obtained from /api/user; reused across calls.
    _auth_code = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        auth: str = None,
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        """Yield decoded response chunks; unknown models fall back to gpt-3.5-turbo."""
        if model not in models:
            model = "gpt-3.5-turbo"
        request_headers = {
            "authority": "liaobots.com",
            "content-type": "application/json",
            "origin": cls.url,
            "referer": cls.url + "/",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
        }
        async with ClientSession(headers=request_headers) as session:
            auth_code = auth if isinstance(auth, str) else cls._auth_code
            if not auth_code:
                # No explicit auth given: request (and cache) a guest code.
                async with session.post(cls.url + "/api/user", proxy=proxy, json={"authcode": ""}) as response:
                    response.raise_for_status()
                    auth_code = cls._auth_code = json.loads(await response.text())["authCode"]

            payload = {
                "conversationId": str(uuid.uuid4()),
                "model": models[model],
                "messages": messages,
                "key": "",
                "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
            }
            async with session.post(cls.url + "/api/chat", proxy=proxy, json=payload, headers={"x-auth-code": auth_code}) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    if chunk:
                        yield chunk.decode()


    @classmethod
    @property
    def params(cls):
        """Human-readable description of the supported arguments."""
        supported = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
            ("auth", "str"),
        ]
        joined = ", ".join(f"{name}: {kind}" for name, kind in supported)
        return f"g4f.provider.{cls.__name__} supports: ({joined})"
class Lockchat(BaseProvider):
    """Provider for supertest.lockchat.app's OpenAI-style streaming endpoint."""
    url: str = "http://supertest.lockchat.app"
    supports_stream = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:
        """Yield content deltas; if the server reports that gpt-4 does not
        exist, retry the whole request once the error is seen."""
        temperature = float(kwargs.get("temperature", 0.7))
        payload = {
            "temperature": temperature,
            "messages"   : messages,
            "model"      : model,
            "stream"     : True,
        }

        headers = {
            "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
        }
        response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
            json=payload, headers=headers, stream=True)

        response.raise_for_status()
        for token in response.iter_lines():
            if b"The model: `gpt-4` does not exist" in token:
                print("error, retrying...")
                # BUG FIX: the original called create_completion() but never
                # iterated the resulting generator, so the retry silently did
                # nothing. Delegate to the retry and stop reading the broken
                # stream.
                yield from Lockchat.create_completion(
                    model = model,
                    messages = messages,
                    stream = stream,
                    temperature = temperature,
                    **kwargs)
                return

            if b"content" in token:
                token = json.loads(token.decode("utf-8").split("data: ")[1])
                token = token["choices"][0]["delta"].get("content")
                if token:
                    yield (token)

    @classmethod
    @property
    def params(cls):
        """Human-readable description of the supported arguments."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
json.loads(token.decode("utf-8").split("data: ")[1]) + token = token["choices"][0]["delta"].get("content") + if token: + yield (token) + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/g4f/Provider/Myshell.py b/g4f/g4f/Provider/Myshell.py new file mode 100644 index 0000000000000000000000000000000000000000..0ddd3029647c541cf4e454ef393185e33e326039 --- /dev/null +++ b/g4f/g4f/Provider/Myshell.py @@ -0,0 +1,172 @@ +from __future__ import annotations + +import json, uuid, hashlib, time, random + +from aiohttp import ClientSession +from aiohttp.http import WSMsgType +import asyncio + +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider, format_prompt + + +models = { + "samantha": "1e3be7fe89e94a809408b1154a2ee3e1", + "gpt-3.5-turbo": "8077335db7cd47e29f7de486612cc7fd", + "gpt-4": "01c8de4fbfc548df903712b0922a4e01", +} + + +class Myshell(AsyncGeneratorProvider): + url = "https://app.myshell.ai/chat" + working = True + supports_gpt_35_turbo = True + supports_gpt_4 = True + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + **kwargs + ) -> AsyncGenerator: + if not model: + bot_id = models["samantha"] + elif model in models: + bot_id = models[model] + else: + raise ValueError(f"Model are not supported: {model}") + + user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36' + visitor_id = generate_visitor_id(user_agent) + + async with ClientSession( + headers={'User-Agent': user_agent} + ) as session: + async with session.ws_connect( + "wss://api.myshell.ai/ws/?EIO=4&transport=websocket", + autoping=False, + timeout=90 + ) as wss: + # Send and receive hello 
message + await wss.receive_str() + message = json.dumps({"token": None, "visitorId": visitor_id}) + await wss.send_str(f"40/chat,{message}") + await wss.receive_str() + + # Fix "need_verify_captcha" issue + await asyncio.sleep(5) + + # Create chat message + text = format_prompt(messages) + chat_data = json.dumps(["text_chat",{ + "reqId": str(uuid.uuid4()), + "botUid": bot_id, + "sourceFrom": "myshellWebsite", + "text": text, + **generate_signature(text) + }]) + + # Send chat message + chat_start = "42/chat," + chat_message = f"{chat_start}{chat_data}" + await wss.send_str(chat_message) + + # Receive messages + async for message in wss: + if message.type != WSMsgType.TEXT: + continue + # Ping back + if message.data == "2": + await wss.send_str("3") + continue + # Is not chat message + if not message.data.startswith(chat_start): + continue + data_type, data = json.loads(message.data[len(chat_start):]) + if data_type == "text_stream": + if data["data"]["text"]: + yield data["data"]["text"] + elif data["data"]["isFinal"]: + break + elif data_type in ("message_replied", "need_verify_captcha"): + raise RuntimeError(f"Received unexpected message: {data_type}") + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" + + +def generate_timestamp() -> str: + return str( + int( + str(int(time.time() * 1000))[:-1] + + str( + sum( + 2 * int(digit) + if idx % 2 == 0 + else 3 * int(digit) + for idx, digit in enumerate(str(int(time.time() * 1000))[:-1]) + ) + % 10 + ) + ) + ) + +def generate_signature(text: str): + timestamp = generate_timestamp() + version = 'v1.0.0' + secret = '8@VXGK3kKHr!u2gA' + data = f"{version}#{text}#{timestamp}#{secret}" + signature = hashlib.md5(data.encode()).hexdigest() + signature = signature[::-1] + return { + "signature": signature, + "timestamp": 
timestamp, + "version": version + } + +def xor_hash(B: str): + r = [] + i = 0 + + def o(e, t): + o_val = 0 + for i in range(len(t)): + o_val |= r[i] << (8 * i) + return e ^ o_val + + for e in range(len(B)): + t = ord(B[e]) + r.insert(0, 255 & t) + + if len(r) >= 4: + i = o(i, r) + r = [] + + if len(r) > 0: + i = o(i, r) + + return hex(i)[2:] + +def performance() -> str: + t = int(time.time() * 1000) + e = 0 + while t == int(time.time() * 1000): + e += 1 + return hex(t)[2:] + hex(e)[2:] + +def generate_visitor_id(user_agent: str) -> str: + f = performance() + r = hex(int(random.random() * (16**16)))[2:-2] + d = xor_hash(user_agent) + e = hex(1080 * 1920)[2:] + return f"{f}-{r}-{d}-{e}-{f}" \ No newline at end of file diff --git a/g4f/g4f/Provider/Opchatgpts.py b/g4f/g4f/Provider/Opchatgpts.py new file mode 100644 index 0000000000000000000000000000000000000000..166323bdd329ce2a66c1ccbe76ed77086a7e19d6 --- /dev/null +++ b/g4f/g4f/Provider/Opchatgpts.py @@ -0,0 +1,8 @@ +from __future__ import annotations + +from .ChatgptLogin import ChatgptLogin + + +class Opchatgpts(ChatgptLogin): + url = "https://opchatgpts.net" + working = True \ No newline at end of file diff --git a/g4f/g4f/Provider/OpenAssistant.py b/g4f/g4f/Provider/OpenAssistant.py new file mode 100644 index 0000000000000000000000000000000000000000..1e9a0661b4fdf03ee0fa30eeb229ce155c33ce94 --- /dev/null +++ b/g4f/g4f/Provider/OpenAssistant.py @@ -0,0 +1,100 @@ +from __future__ import annotations + +import json + +from aiohttp import ClientSession + +from ..typing import Any, AsyncGenerator +from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies + + +class OpenAssistant(AsyncGeneratorProvider): + url = "https://open-assistant.io/chat" + needs_auth = True + working = True + model = "OA_SFT_Llama_30B_6" + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + cookies: dict = None, + **kwargs: Any + ) -> AsyncGenerator: 
+ if not cookies: + cookies = get_cookies("open-assistant.io") + + headers = { + 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36', + } + async with ClientSession( + cookies=cookies, + headers=headers + ) as session: + async with session.post("https://open-assistant.io/api/chat", proxy=proxy) as response: + chat_id = (await response.json())["id"] + + data = { + "chat_id": chat_id, + "content": f"[INST]\n{format_prompt(messages)}\n[/INST]", + "parent_id": None + } + async with session.post("https://open-assistant.io/api/chat/prompter_message", proxy=proxy, json=data) as response: + parent_id = (await response.json())["id"] + + data = { + "chat_id": chat_id, + "parent_id": parent_id, + "model_config_name": model if model else cls.model, + "sampling_parameters":{ + "top_k": 50, + "top_p": None, + "typical_p": None, + "temperature": 0.35, + "repetition_penalty": 1.1111111111111112, + "max_new_tokens": 1024, + **kwargs + }, + "plugins":[] + } + async with session.post("https://open-assistant.io/api/chat/assistant_message", proxy=proxy, json=data) as response: + data = await response.json() + if "id" in data: + message_id = data["id"] + elif "message" in data: + raise RuntimeError(data["message"]) + else: + response.raise_for_status() + + params = { + 'chat_id': chat_id, + 'message_id': message_id, + } + async with session.post("https://open-assistant.io/api/chat/events", proxy=proxy, params=params) as response: + start = "data: " + async for line in response.content: + line = line.decode("utf-8") + if line and line.startswith(start): + line = json.loads(line[len(start):]) + if line["event_type"] == "token": + yield line["text"] + + params = { + 'chat_id': chat_id, + } + async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response: + response.raise_for_status() + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + 
("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("proxy", "str"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/g4f/Provider/OpenaiChat.py b/g4f/g4f/Provider/OpenaiChat.py new file mode 100644 index 0000000000000000000000000000000000000000..f7dc82980094f09a440b9f61cc14b0160e933b6f --- /dev/null +++ b/g4f/g4f/Provider/OpenaiChat.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +from curl_cffi.requests import AsyncSession +import uuid +import json + +from .base_provider import AsyncProvider, get_cookies, format_prompt +from ..typing import AsyncGenerator + + +class OpenaiChat(AsyncProvider): + url = "https://chat.openai.com" + needs_auth = True + working = True + supports_gpt_35_turbo = True + _access_token = None + + @classmethod + async def create_async( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + access_token: str = None, + cookies: dict = None, + **kwargs: dict + ) -> AsyncGenerator: + proxies = {"https": proxy} + if not access_token: + access_token = await cls.get_access_token(cookies, proxies) + headers = { + "Accept": "text/event-stream", + "Authorization": f"Bearer {access_token}", + } + async with AsyncSession(proxies=proxies, headers=headers, impersonate="chrome107") as session: + messages = [ + { + "id": str(uuid.uuid4()), + "author": {"role": "user"}, + "content": {"content_type": "text", "parts": [format_prompt(messages)]}, + }, + ] + data = { + "action": "next", + "messages": messages, + "conversation_id": None, + "parent_message_id": str(uuid.uuid4()), + "model": "text-davinci-002-render-sha", + "history_and_training_disabled": True, + } + response = await session.post("https://chat.openai.com/backend-api/conversation", json=data) + response.raise_for_status() + last_message = None + for line in response.content.decode().splitlines(): + if line.startswith("data: "): + line = line[6:] + if line == "[DONE]": + 
break + line = json.loads(line) + if "message" in line: + last_message = line["message"]["content"]["parts"][0] + return last_message + + + @classmethod + async def get_access_token(cls, cookies: dict = None, proxies: dict = None) -> str: + if not cls._access_token: + cookies = cookies if cookies else get_cookies("chat.openai.com") + async with AsyncSession(proxies=proxies, cookies=cookies, impersonate="chrome107") as session: + response = await session.get("https://chat.openai.com/api/auth/session") + response.raise_for_status() + cls._access_token = response.json()["accessToken"] + return cls._access_token + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("proxy", "str"), + ("access_token", "str"), + ("cookies", "dict[str, str]") + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/g4f/g4f/Provider/PerplexityAi.py b/g4f/g4f/Provider/PerplexityAi.py new file mode 100644 index 0000000000000000000000000000000000000000..fc0fd48c573375551b9c0d08b9b7132c6a2f2178 --- /dev/null +++ b/g4f/g4f/Provider/PerplexityAi.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +import json +import time +import base64 +from curl_cffi.requests import AsyncSession + +from .base_provider import AsyncProvider, format_prompt + + +class PerplexityAi(AsyncProvider): + url = "https://www.perplexity.ai" + working = True + supports_gpt_35_turbo = True + _sources = [] + + @classmethod + async def create_async( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs + ) -> str: + url = cls.url + "/socket.io/?EIO=4&transport=polling" + async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107") as session: + url_session = "https://www.perplexity.ai/api/auth/session" + response = await session.get(url_session) + + response = await session.get(url, 
params={"t": timestamp()}) + response.raise_for_status() + sid = json.loads(response.text[1:])["sid"] + + data = '40{"jwt":"anonymous-ask-user"}' + response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data) + response.raise_for_status() + + data = "424" + json.dumps([ + "perplexity_ask", + format_prompt(messages), + { + "version":"2.1", + "source":"default", + "language":"en", + "timezone": time.tzname[0], + "search_focus":"internet", + "mode":"concise" + } + ]) + response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data) + response.raise_for_status() + + while True: + response = await session.get(url, params={"t": timestamp(), "sid": sid}) + response.raise_for_status() + for line in response.text.splitlines(): + if line.startswith("434"): + result = json.loads(json.loads(line[3:])[0]["text"]) + + cls._sources = [{ + "title": source["name"], + "url": source["url"], + "snippet": source["snippet"] + } for source in result["web_results"]] + + return result["answer"] + + @classmethod + def get_sources(cls): + return cls._sources + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("proxy", "str"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" + + +def timestamp() -> str: + return base64.urlsafe_b64encode(int(time.time()-1407782612).to_bytes(4, 'big')).decode() \ No newline at end of file diff --git a/g4f/g4f/Provider/Raycast.py b/g4f/g4f/Provider/Raycast.py new file mode 100644 index 0000000000000000000000000000000000000000..7ddc8acd70f870bab1db90f3d279c37de4f46234 --- /dev/null +++ b/g4f/g4f/Provider/Raycast.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +import json + +import requests + +from ..typing import Any, CreateResult +from .base_provider import BaseProvider + + +class Raycast(BaseProvider): + url = "https://raycast.com" + 
supports_gpt_35_turbo = True + supports_gpt_4 = True + supports_stream = True + needs_auth = True + working = True + + @staticmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, + **kwargs: Any, + ) -> CreateResult: + auth = kwargs.get('auth') + headers = { + 'Accept': 'application/json', + 'Accept-Language': 'en-US,en;q=0.9', + 'Authorization': f'Bearer {auth}', + 'Content-Type': 'application/json', + 'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0', + } + parsed_messages = [] + for message in messages: + parsed_messages.append({ + 'author': message['role'], + 'content': {'text': message['content']} + }) + data = { + "debug": False, + "locale": "en-CN", + "messages": parsed_messages, + "model": model, + "provider": "openai", + "source": "ai_chat", + "system_instruction": "markdown", + "temperature": 0.5 + } + response = requests.post("https://backend.raycast.com/api/v1/ai/chat_completions", headers=headers, json=data, stream=True) + for token in response.iter_lines(): + if b'data: ' not in token: + continue + completion_chunk = json.loads(token.decode().replace('data: ', '')) + token = completion_chunk['text'] + if token != None: + yield token + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ("top_p", "int"), + ("model", "str"), + ("auth", "str"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/g4f/g4f/Provider/Theb.py b/g4f/g4f/Provider/Theb.py new file mode 100644 index 0000000000000000000000000000000000000000..72fce3ac6f2b58fbd569153cc2025c7f03d94c12 --- /dev/null +++ b/g4f/g4f/Provider/Theb.py @@ -0,0 +1,97 @@ +from __future__ import annotations + +import json +import random + +import requests + +from ..typing import Any, CreateResult +from .base_provider import BaseProvider + + +class Theb(BaseProvider): 
+ url = "https://theb.ai" + working = True + supports_stream = True + supports_gpt_35_turbo = True + needs_auth = True + + @staticmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, **kwargs: Any) -> CreateResult: + + conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages) + conversation += "\nassistant: " + + auth = kwargs.get("auth", { + "bearer_token":"free", + "org_id":"theb", + }) + + bearer_token = auth["bearer_token"] + org_id = auth["org_id"] + + headers = { + 'authority' : 'beta.theb.ai', + 'accept' : 'text/event-stream', + 'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', + 'authorization' : 'Bearer '+bearer_token, + 'content-type' : 'application/json', + 'origin' : 'https://beta.theb.ai', + 'referer' : 'https://beta.theb.ai/home', + 'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"', + 'sec-ch-ua-mobile' : '?0', + 'sec-ch-ua-platform': '"Windows"', + 'sec-fetch-dest' : 'empty', + 'sec-fetch-mode' : 'cors', + 'sec-fetch-site' : 'same-origin', + 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36', + 'x-ai-model' : 'ee8d4f29cb7047f78cbe84313ed6ace8', + } + + req_rand = random.randint(100000000, 9999999999) + + json_data: dict[str, Any] = { + "text" : conversation, + "category" : "04f58f64a4aa4191a957b47290fee864", + "model" : "ee8d4f29cb7047f78cbe84313ed6ace8", + "model_params": { + "system_prompt" : "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}", + "temperature" : kwargs.get("temperature", 1), + "top_p" : kwargs.get("top_p", 1), + "frequency_penalty" : kwargs.get("frequency_penalty", 0), + "presence_penalty" : kwargs.get("presence_penalty", 0), + "long_term_memory" : "auto" + } + } + + response = 
requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}", + headers=headers, json=json_data, stream=True) + + response.raise_for_status() + content = "" + next_content = "" + for chunk in response.iter_lines(): + if b"content" in chunk: + next_content = content + data = json.loads(chunk.decode().split("data: ")[1]) + content = data["content"] + yield data["content"].replace(next_content, "") + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("auth", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ("presence_penalty", "int"), + ("frequency_penalty", "int"), + ("top_p", "int") + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/g4f/g4f/Provider/V50.py b/g4f/g4f/Provider/V50.py new file mode 100644 index 0000000000000000000000000000000000000000..81a95ba8db7211de946cce0711b52827145c9dca --- /dev/null +++ b/g4f/g4f/Provider/V50.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +import uuid + +import requests + +from ..typing import Any, CreateResult +from .base_provider import BaseProvider + + +class V50(BaseProvider): + url = 'https://p5.v50.ltd' + supports_gpt_35_turbo = True + supports_stream = False + needs_auth = False + working = False + + @staticmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, **kwargs: Any) -> CreateResult: + + conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages) + conversation += "\nassistant: " + + payload = { + "prompt" : conversation, + "options" : {}, + "systemMessage" : ".", + "temperature" : kwargs.get("temperature", 0.4), + "top_p" : kwargs.get("top_p", 0.4), + "model" : model, + "user" : str(uuid.uuid4()) + } + + headers = { + 'authority' : 'p5.v50.ltd', + 'accept' : 'application/json, text/plain, */*', + 
'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', + 'content-type' : 'application/json', + 'origin' : 'https://p5.v50.ltd', + 'referer' : 'https://p5.v50.ltd/', + 'sec-ch-ua-platform': '"Windows"', + 'sec-fetch-dest' : 'empty', + 'sec-fetch-mode' : 'cors', + 'sec-fetch-site' : 'same-origin', + 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36' + } + response = requests.post("https://p5.v50.ltd/api/chat-process", + json=payload, headers=headers, proxies=kwargs['proxy'] if 'proxy' in kwargs else {}) + + if "https://fk1.v50.ltd" not in response.text: + yield response.text + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ("top_p", "int"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/g4f/g4f/Provider/Vercel.py b/g4f/g4f/Provider/Vercel.py new file mode 100644 index 0000000000000000000000000000000000000000..2d20ca6a2de0b6fdb674090f5c305f5d544d9f86 --- /dev/null +++ b/g4f/g4f/Provider/Vercel.py @@ -0,0 +1,377 @@ +from __future__ import annotations + +import json, base64, requests, execjs, random, uuid + +from ..typing import Any, TypedDict, CreateResult +from .base_provider import BaseProvider +from abc import abstractmethod + + +class Vercel(BaseProvider): + url = 'https://sdk.vercel.ai' + working = True + supports_gpt_35_turbo = True + supports_stream = True + + @staticmethod + @abstractmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, + **kwargs + ) -> CreateResult: + if not model: + model = "gpt-3.5-turbo" + elif model not in model_info: + raise ValueError(f"Model are not supported: {model}") + + headers = { + 'authority' : 'sdk.vercel.ai', + 'accept' : '*/*', + 'accept-language' : 
'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', + 'cache-control' : 'no-cache', + 'content-type' : 'application/json', + 'custom-encoding' : get_anti_bot_token(), + 'origin' : 'https://sdk.vercel.ai', + 'pragma' : 'no-cache', + 'referer' : 'https://sdk.vercel.ai/', + 'sec-ch-ua' : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"', + 'sec-ch-ua-mobile' : '?0', + 'sec-ch-ua-platform': '"macOS"', + 'sec-fetch-dest' : 'empty', + 'sec-fetch-mode' : 'cors', + 'sec-fetch-site' : 'same-origin', + 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % ( + random.randint(99, 999), + random.randint(99, 999) + ) + } + + json_data = { + 'model' : model_info[model]['id'], + 'messages' : messages, + 'playgroundId': str(uuid.uuid4()), + 'chatIndex' : 0} | model_info[model]['default_params'] + + max_retries = kwargs.get('max_retries', 20) + for i in range(max_retries): + response = requests.post('https://sdk.vercel.ai/api/generate', + headers=headers, json=json_data, stream=True) + try: + response.raise_for_status() + except: + continue + for token in response.iter_content(chunk_size=None): + yield token.decode() + break + + +def get_anti_bot_token() -> str: + headers = { + 'authority' : 'sdk.vercel.ai', + 'accept' : '*/*', + 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', + 'cache-control' : 'no-cache', + 'pragma' : 'no-cache', + 'referer' : 'https://sdk.vercel.ai/', + 'sec-ch-ua' : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"', + 'sec-ch-ua-mobile' : '?0', + 'sec-ch-ua-platform': '"macOS"', + 'sec-fetch-dest' : 'empty', + 'sec-fetch-mode' : 'cors', + 'sec-fetch-site' : 'same-origin', + 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % ( + random.randint(99, 999), + random.randint(99, 999) + ) + 
} + + response = requests.get('https://sdk.vercel.ai/openai.jpeg', + headers=headers).text + + raw_data = json.loads(base64.b64decode(response, + validate=True)) + + js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `${this}`}; + return (%s)(%s)''' % (raw_data['c'], raw_data['a']) + + raw_token = json.dumps({'r': execjs.compile(js_script).call(''), 't': raw_data['t']}, + separators = (",", ":")) + + return base64.b64encode(raw_token.encode('utf-16le')).decode() + +class ModelInfo(TypedDict): + id: str + default_params: dict[str, Any] + +model_info: dict[str, ModelInfo] = { + 'claude-instant-v1': { + 'id': 'anthropic:claude-instant-v1', + 'default_params': { + 'temperature': 1, + 'maximumLength': 1024, + 'topP': 1, + 'topK': 1, + 'presencePenalty': 1, + 'frequencyPenalty': 1, + 'stopSequences': ['\n\nHuman:'], + }, + }, + 'claude-v1': { + 'id': 'anthropic:claude-v1', + 'default_params': { + 'temperature': 1, + 'maximumLength': 1024, + 'topP': 1, + 'topK': 1, + 'presencePenalty': 1, + 'frequencyPenalty': 1, + 'stopSequences': ['\n\nHuman:'], + }, + }, + 'claude-v2': { + 'id': 'anthropic:claude-v2', + 'default_params': { + 'temperature': 1, + 'maximumLength': 1024, + 'topP': 1, + 'topK': 1, + 'presencePenalty': 1, + 'frequencyPenalty': 1, + 'stopSequences': ['\n\nHuman:'], + }, + }, + 'a16z-infra/llama7b-v2-chat': { + 'id': 'replicate:a16z-infra/llama7b-v2-chat', + 'default_params': { + 'temperature': 0.75, + 'maximumLength': 3000, + 'topP': 1, + 'repetitionPenalty': 1, + }, + }, + 'a16z-infra/llama13b-v2-chat': { + 'id': 'replicate:a16z-infra/llama13b-v2-chat', + 'default_params': { + 'temperature': 0.75, + 'maximumLength': 3000, + 'topP': 1, + 'repetitionPenalty': 1, + }, + }, + 'replicate/llama-2-70b-chat': { + 'id': 'replicate:replicate/llama-2-70b-chat', + 'default_params': { + 'temperature': 0.75, + 'maximumLength': 3000, + 'topP': 1, + 'repetitionPenalty': 1, + }, + }, + 'bigscience/bloom': { + 'id': 
'huggingface:bigscience/bloom', + 'default_params': { + 'temperature': 0.5, + 'maximumLength': 1024, + 'topP': 0.95, + 'topK': 4, + 'repetitionPenalty': 1.03, + }, + }, + 'google/flan-t5-xxl': { + 'id': 'huggingface:google/flan-t5-xxl', + 'default_params': { + 'temperature': 0.5, + 'maximumLength': 1024, + 'topP': 0.95, + 'topK': 4, + 'repetitionPenalty': 1.03, + }, + }, + 'EleutherAI/gpt-neox-20b': { + 'id': 'huggingface:EleutherAI/gpt-neox-20b', + 'default_params': { + 'temperature': 0.5, + 'maximumLength': 1024, + 'topP': 0.95, + 'topK': 4, + 'repetitionPenalty': 1.03, + 'stopSequences': [], + }, + }, + 'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': { + 'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', + 'default_params': { + 'maximumLength': 1024, + 'typicalP': 0.2, + 'repetitionPenalty': 1, + }, + }, + 'OpenAssistant/oasst-sft-1-pythia-12b': { + 'id': 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b', + 'default_params': { + 'maximumLength': 1024, + 'typicalP': 0.2, + 'repetitionPenalty': 1, + }, + }, + 'bigcode/santacoder': { + 'id': 'huggingface:bigcode/santacoder', + 'default_params': { + 'temperature': 0.5, + 'maximumLength': 1024, + 'topP': 0.95, + 'topK': 4, + 'repetitionPenalty': 1.03, + }, + }, + 'command-light-nightly': { + 'id': 'cohere:command-light-nightly', + 'default_params': { + 'temperature': 0.9, + 'maximumLength': 1024, + 'topP': 1, + 'topK': 0, + 'presencePenalty': 0, + 'frequencyPenalty': 0, + 'stopSequences': [], + }, + }, + 'command-nightly': { + 'id': 'cohere:command-nightly', + 'default_params': { + 'temperature': 0.9, + 'maximumLength': 1024, + 'topP': 1, + 'topK': 0, + 'presencePenalty': 0, + 'frequencyPenalty': 0, + 'stopSequences': [], + }, + }, + 'gpt-4': { + 'id': 'openai:gpt-4', + 'default_params': { + 'temperature': 0.7, + 'maximumLength': 8192, + 'topP': 1, + 'presencePenalty': 0, + 'frequencyPenalty': 0, + 'stopSequences': [], + }, + }, + 'gpt-4-0613': { + 'id': 'openai:gpt-4-0613', + 'default_params': { 
+ 'temperature': 0.7, + 'maximumLength': 8192, + 'topP': 1, + 'presencePenalty': 0, + 'frequencyPenalty': 0, + 'stopSequences': [], + }, + }, + 'code-davinci-002': { + 'id': 'openai:code-davinci-002', + 'default_params': { + 'temperature': 0.5, + 'maximumLength': 1024, + 'topP': 1, + 'presencePenalty': 0, + 'frequencyPenalty': 0, + 'stopSequences': [], + }, + }, + 'gpt-3.5-turbo': { + 'id': 'openai:gpt-3.5-turbo', + 'default_params': { + 'temperature': 0.7, + 'maximumLength': 4096, + 'topP': 1, + 'topK': 1, + 'presencePenalty': 1, + 'frequencyPenalty': 1, + 'stopSequences': [], + }, + }, + 'gpt-3.5-turbo-16k': { + 'id': 'openai:gpt-3.5-turbo-16k', + 'default_params': { + 'temperature': 0.7, + 'maximumLength': 16280, + 'topP': 1, + 'topK': 1, + 'presencePenalty': 1, + 'frequencyPenalty': 1, + 'stopSequences': [], + }, + }, + 'gpt-3.5-turbo-16k-0613': { + 'id': 'openai:gpt-3.5-turbo-16k-0613', + 'default_params': { + 'temperature': 0.7, + 'maximumLength': 16280, + 'topP': 1, + 'topK': 1, + 'presencePenalty': 1, + 'frequencyPenalty': 1, + 'stopSequences': [], + }, + }, + 'text-ada-001': { + 'id': 'openai:text-ada-001', + 'default_params': { + 'temperature': 0.5, + 'maximumLength': 1024, + 'topP': 1, + 'presencePenalty': 0, + 'frequencyPenalty': 0, + 'stopSequences': [], + }, + }, + 'text-babbage-001': { + 'id': 'openai:text-babbage-001', + 'default_params': { + 'temperature': 0.5, + 'maximumLength': 1024, + 'topP': 1, + 'presencePenalty': 0, + 'frequencyPenalty': 0, + 'stopSequences': [], + }, + }, + 'text-curie-001': { + 'id': 'openai:text-curie-001', + 'default_params': { + 'temperature': 0.5, + 'maximumLength': 1024, + 'topP': 1, + 'presencePenalty': 0, + 'frequencyPenalty': 0, + 'stopSequences': [], + }, + }, + 'text-davinci-002': { + 'id': 'openai:text-davinci-002', + 'default_params': { + 'temperature': 0.5, + 'maximumLength': 1024, + 'topP': 1, + 'presencePenalty': 0, + 'frequencyPenalty': 0, + 'stopSequences': [], + }, + }, + 'text-davinci-003': { + 'id': 
'openai:text-davinci-003', + 'default_params': { + 'temperature': 0.5, + 'maximumLength': 4097, + 'topP': 1, + 'presencePenalty': 0, + 'frequencyPenalty': 0, + 'stopSequences': [], + }, + }, +} \ No newline at end of file diff --git a/g4f/g4f/Provider/Vitalentum.py b/g4f/g4f/Provider/Vitalentum.py new file mode 100644 index 0000000000000000000000000000000000000000..d5265428cdf2098f25c8d899e43d1959f037b042 --- /dev/null +++ b/g4f/g4f/Provider/Vitalentum.py @@ -0,0 +1,68 @@ +from __future__ import annotations + +import json +from aiohttp import ClientSession + +from .base_provider import AsyncGeneratorProvider +from ..typing import AsyncGenerator + +class Vitalentum(AsyncGeneratorProvider): + url = "https://app.vitalentum.io" + working = True + supports_gpt_35_turbo = True + + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs + ) -> AsyncGenerator: + headers = { + "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36", + "Accept" : "text/event-stream", + "Accept-language" : "de,en-US;q=0.7,en;q=0.3", + "Origin" : cls.url, + "Referer" : cls.url + "/", + "Sec-Fetch-Dest" : "empty", + "Sec-Fetch-Mode" : "cors", + "Sec-Fetch-Site" : "same-origin", + } + conversation = json.dumps({"history": [{ + "speaker": "human" if message["role"] == "user" else "bot", + "text": message["content"], + } for message in messages]}) + data = { + "conversation": conversation, + "temperature": 0.7, + **kwargs + } + async with ClientSession( + headers=headers + ) as session: + async with session.post(cls.url + "/api/converse-edge", json=data, proxy=proxy) as response: + response.raise_for_status() + async for line in response.content: + line = line.decode() + if line.startswith("data: "): + if line.startswith("data: [DONE]"): + break + line = json.loads(line[6:-1]) + content = line["choices"][0]["delta"].get("content") + if content: 
+ yield content + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("temperature", "float"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/g4f/g4f/Provider/Wewordle.py b/g4f/g4f/Provider/Wewordle.py new file mode 100644 index 0000000000000000000000000000000000000000..a7bdc722795274270750f2609121c79a311df92e --- /dev/null +++ b/g4f/g4f/Provider/Wewordle.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +import random, string, time +from aiohttp import ClientSession + +from .base_provider import AsyncProvider + + +class Wewordle(AsyncProvider): + url = "https://wewordle.org" + working = True + supports_gpt_35_turbo = True + + @classmethod + async def create_async( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs + ) -> str: + + headers = { + "accept" : "*/*", + "pragma" : "no-cache", + "Content-Type" : "application/json", + "Connection" : "keep-alive" + } + + _user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16)) + _app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31)) + _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime()) + data = { + "user" : _user_id, + "messages" : messages, + "subscriber": { + "originalPurchaseDate" : None, + "originalApplicationVersion" : None, + "allPurchaseDatesMillis" : {}, + "entitlements" : {"active": {}, "all": {}}, + "allPurchaseDates" : {}, + "allExpirationDatesMillis" : {}, + "allExpirationDates" : {}, + "originalAppUserId" : f"$RCAnonymousID:{_app_id}", + "latestExpirationDate" : None, + "requestDate" : _request_date, + "latestExpirationDateMillis" : None, + "nonSubscriptionTransactions" : [], + "originalPurchaseDateMillis" : None, + "managementURL" : None, + "allPurchasedProductIdentifiers": [], + "firstSeen" 
: _request_date, + "activeSubscriptions" : [], + } + } + + + async with ClientSession( + headers=headers + ) as session: + async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=data) as response: + response.raise_for_status() + content = (await response.json())["message"]["content"] + if content: + return content \ No newline at end of file diff --git a/g4f/g4f/Provider/Wuguokai.py b/g4f/g4f/Provider/Wuguokai.py new file mode 100644 index 0000000000000000000000000000000000000000..0a46f6ee9922aabb03f920a92e3bced7fb45870b --- /dev/null +++ b/g4f/g4f/Provider/Wuguokai.py @@ -0,0 +1,63 @@ +from __future__ import annotations + +import random + +import requests + +from ..typing import Any, CreateResult +from .base_provider import BaseProvider, format_prompt + + +class Wuguokai(BaseProvider): + url = 'https://chat.wuguokai.xyz' + supports_gpt_35_turbo = True + working = False + + @staticmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, + **kwargs: Any, + ) -> CreateResult: + headers = { + 'authority': 'ai-api.wuguokai.xyz', + 'accept': 'application/json, text/plain, */*', + 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', + 'content-type': 'application/json', + 'origin': 'https://chat.wuguokai.xyz', + 'referer': 'https://chat.wuguokai.xyz/', + 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Windows"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'same-site', + 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36' + } + data ={ + "prompt": format_prompt(messages), + "options": {}, + "userId": f"#/chat/{random.randint(1,99999999)}", + "usingContext": True + } + response = requests.post("https://ai-api20.wuguokai.xyz/api/chat-process", headers=headers, timeout=3, json=data, proxies=kwargs['proxy'] if 
'proxy' in kwargs else {}) + _split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试") + if response.status_code == 200: + if len(_split) > 1: + yield _split[1].strip() + else: + yield _split[0].strip() + else: + raise Exception(f"Error: {response.status_code} {response.reason}") + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool") + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/g4f/g4f/Provider/Ylokh.py b/g4f/g4f/Provider/Ylokh.py new file mode 100644 index 0000000000000000000000000000000000000000..c7b92089905a3da5d98c50e073d88f2f4b498e6d --- /dev/null +++ b/g4f/g4f/Provider/Ylokh.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +import json +from aiohttp import ClientSession + +from .base_provider import AsyncGeneratorProvider +from ..typing import AsyncGenerator + +class Ylokh(AsyncGeneratorProvider): + url = "https://chat.ylokh.xyz" + working = True + supports_gpt_35_turbo = True + + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + stream: bool = True, + proxy: str = None, + **kwargs + ) -> AsyncGenerator: + model = model if model else "gpt-3.5-turbo" + headers = { + "User-Agent" : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0", + "Accept" : "*/*", + "Accept-language" : "de,en-US;q=0.7,en;q=0.3", + "Origin" : cls.url, + "Referer" : cls.url + "/", + "Sec-Fetch-Dest" : "empty", + "Sec-Fetch-Mode" : "cors", + "Sec-Fetch-Site" : "same-origin", + } + data = { + "messages": messages, + "model": model, + "temperature": 1, + "presence_penalty": 0, + "top_p": 1, + "frequency_penalty": 0, + "allow_fallback": True, + "stream": stream, + **kwargs + } + async with ClientSession( + headers=headers + ) as session: + async with 
session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data, proxy=proxy) as response: + response.raise_for_status() + if stream: + async for line in response.content: + line = line.decode() + if line.startswith("data: "): + if line.startswith("data: [DONE]"): + break + line = json.loads(line[6:-1]) + content = line["choices"][0]["delta"].get("content") + if content: + yield content + else: + chat = await response.json() + yield chat["choices"][0]["message"].get("content") + + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ("proxy", "str"), + ("temperature", "float"), + ("top_p", "float"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/g4f/g4f/Provider/You.py b/g4f/g4f/Provider/You.py new file mode 100644 index 0000000000000000000000000000000000000000..4f49f15e35734cf1d6eeef1194fbb07833544beb --- /dev/null +++ b/g4f/g4f/Provider/You.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +import json + +from curl_cffi.requests import AsyncSession + +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider, format_prompt + + +class You(AsyncGeneratorProvider): + url = "https://you.com" + working = True + supports_gpt_35_turbo = True + supports_stream = False + + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs, + ) -> AsyncGenerator: + async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107") as session: + headers = { + "Accept": "text/event-stream", + "Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat", + } + response = await session.get( + "https://you.com/api/streamingSearch", + params={"q": format_prompt(messages), "domain": "youchat", "chat": ""}, + headers=headers + ) + 
response.raise_for_status() + start = 'data: {"youChatToken": ' + for line in response.text.splitlines(): + if line.startswith(start): + yield json.loads(line[len(start): -1]) \ No newline at end of file diff --git a/g4f/g4f/Provider/Yqcloud.py b/g4f/g4f/Provider/Yqcloud.py new file mode 100644 index 0000000000000000000000000000000000000000..ac93315c22f5bc5943eba60ac6da125c266c217d --- /dev/null +++ b/g4f/g4f/Provider/Yqcloud.py @@ -0,0 +1,48 @@ +from __future__ import annotations + +from aiohttp import ClientSession + +from ..typing import AsyncGenerator +from .base_provider import AsyncGeneratorProvider, format_prompt + + +class Yqcloud(AsyncGeneratorProvider): + url = "https://chat9.yqcloud.top/" + working = True + supports_gpt_35_turbo = True + + @staticmethod + async def create_async_generator( + model: str, + messages: list[dict[str, str]], + proxy: str = None, + **kwargs, + ) -> AsyncGenerator: + async with ClientSession( + headers=_create_header() + ) as session: + payload = _create_payload(messages) + async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response: + response.raise_for_status() + async for stream in response.content.iter_any(): + if stream: + yield stream.decode() + + +def _create_header(): + return { + "accept" : "application/json, text/plain, */*", + "content-type" : "application/json", + "origin" : "https://chat9.yqcloud.top", + } + + +def _create_payload(messages: list[dict[str, str]]): + return { + "prompt": format_prompt(messages), + "network": True, + "system": "", + "withoutContext": False, + "stream": True, + "userId": "#/chat/1693025544336" + } diff --git a/g4f/g4f/Provider/__init__.py b/g4f/g4f/Provider/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..59c91dd539004869bf0f9730cc66e58ebdfc520f --- /dev/null +++ b/g4f/g4f/Provider/__init__.py @@ -0,0 +1,95 @@ +from __future__ import annotations +from .Acytoo import Acytoo +from .Aibn import Aibn +from 
.Aichat import Aichat +from .Ails import Ails +from .AiService import AiService +from .AItianhu import AItianhu +from .AItianhuSpace import AItianhuSpace +from .Aivvm import Aivvm +from .Bard import Bard +from .Bing import Bing +from .ChatBase import ChatBase +from .ChatgptAi import ChatgptAi +from .ChatgptDuo import ChatgptDuo +from .ChatgptLogin import ChatgptLogin +from .CodeLinkAva import CodeLinkAva +from .DeepAi import DeepAi +from .DfeHub import DfeHub +from .EasyChat import EasyChat +from .Forefront import Forefront +from .GetGpt import GetGpt +from .GptGo import GptGo +from .H2o import H2o +from .HuggingChat import HuggingChat +from .Liaobots import Liaobots +from .Lockchat import Lockchat +from .Myshell import Myshell +from .Opchatgpts import Opchatgpts +from .OpenaiChat import OpenaiChat +from .OpenAssistant import OpenAssistant +from .PerplexityAi import PerplexityAi +from .Raycast import Raycast +from .Theb import Theb +from .Vercel import Vercel +from .Vitalentum import Vitalentum +from .Wewordle import Wewordle +from .Ylokh import Ylokh +from .You import You +from .Yqcloud import Yqcloud +from .Equing import Equing +from .FastGpt import FastGpt +from .V50 import V50 +from .Wuguokai import Wuguokai + +from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider +from .retry_provider import RetryProvider + +__all__ = [ + 'BaseProvider', + 'AsyncProvider', + 'AsyncGeneratorProvider', + 'RetryProvider', + 'Acytoo', + 'Aibn', + 'Aichat', + 'Ails', + 'AiService', + 'AItianhu', + 'AItianhuSpace', + 'Aivvm', + 'Bard', + 'Bing', + 'ChatBase', + 'ChatgptAi', + 'ChatgptDuo', + 'ChatgptLogin', + 'CodeLinkAva', + 'DeepAi', + 'DfeHub', + 'EasyChat', + 'Forefront', + 'GetGpt', + 'GptGo', + 'H2o', + 'HuggingChat', + 'Liaobots', + 'Lockchat', + 'Myshell', + 'Opchatgpts', + 'Raycast', + 'OpenaiChat', + 'OpenAssistant', + 'PerplexityAi', + 'Theb', + 'Vercel', + 'Vitalentum', + 'Wewordle', + 'Ylokh', + 'You', + 'Yqcloud', + 'Equing', + 'FastGpt', + 
'Wuguokai', + 'V50' +] diff --git a/g4f/g4f/Provider/__pycache__/AItianhu.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/AItianhu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da2dcffa689b2c88cf49de67cdf5cffd215bc0d7 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/AItianhu.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/AItianhuSpace.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/AItianhuSpace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3322dc6415eece36f746dd22399cf8506a6330f9 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/AItianhuSpace.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Acytoo.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Acytoo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4da9b26460017aa63a2e7a88ad15b961d98bc74 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Acytoo.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/AiService.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/AiService.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..baffffc024862e5b473d3201b34025f1ffad4c76 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/AiService.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Aibn.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Aibn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df215fc73a10f56b85e940a197c9c1ad7b11206b Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Aibn.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Aichat.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Aichat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f43075c7deb9ca263e13b7fb4b6e90a7343cc4d8 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Aichat.cpython-310.pyc differ diff --git 
a/g4f/g4f/Provider/__pycache__/Ails.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Ails.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9934715c5ba1cd153ea4e3e62b2e51a6c104852f Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Ails.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Aivvm.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Aivvm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24575cc358d312485fc7f74c177a720b0235d7cd Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Aivvm.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Bard.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Bard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb8ed108a099c5b4c7668d4d1afdfd098c2f29a7 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Bard.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Bing.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Bing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfa8e22fb8d50985f9cd86d051f2d8d96d2d4e54 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Bing.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/ChatBase.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/ChatBase.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff81e2d16dcd7cf85b6a9e36223e20c8ebc2d217 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/ChatBase.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/ChatgptAi.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/ChatgptAi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e370e64da650f44267500ac0d5322d06dc96fbbd Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/ChatgptAi.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/ChatgptDuo.cpython-310.pyc 
b/g4f/g4f/Provider/__pycache__/ChatgptDuo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba60a39c5b61e0710bcab2f131bda8877d22e04d Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/ChatgptDuo.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/ChatgptLogin.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/ChatgptLogin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e87ac0006e485861255a6ec8090c108a53010f85 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/ChatgptLogin.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/CodeLinkAva.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/CodeLinkAva.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82fbf1e16d966d0d252fc6c95281dbf3500878c4 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/CodeLinkAva.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/DeepAi.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/DeepAi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c96f6942fdd378a3d0b8ecd06b43a683568cdf1c Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/DeepAi.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/DfeHub.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/DfeHub.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6f9cf36e953f1e4893023d0cbfb89cb62b6d62e Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/DfeHub.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/EasyChat.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/EasyChat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62c15e12d26c91ed12f46bc0c8ae26eb8a499058 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/EasyChat.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Equing.cpython-310.pyc 
b/g4f/g4f/Provider/__pycache__/Equing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8183a4efb9e2983cfcc18836cd2a912698b1ef3f Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Equing.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/FastGpt.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/FastGpt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd739647c3425d330edc2aa063ddd05f3e056d70 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/FastGpt.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Forefront.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Forefront.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f6078f52677211855ae86d5621f4166e8104434 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Forefront.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/GetGpt.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/GetGpt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..806d5ad20787bc41deced34170cb7071cf32b06d Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/GetGpt.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/GptGo.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/GptGo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..adb019d25e127b3fd3b0ad6ce018928afd51ce7e Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/GptGo.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/H2o.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/H2o.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80af236accff6f2c16f552bcc53b0135a92defcc Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/H2o.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/HuggingChat.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/HuggingChat.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d57192bd309491a541fafe7e0c81977ec66ec7f9 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/HuggingChat.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Liaobots.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Liaobots.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39b6d396fc31690504f93016e5fabec796a6db05 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Liaobots.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Lockchat.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Lockchat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05068a089f13530b673f5a6d5e46f95a6752c772 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Lockchat.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Myshell.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Myshell.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0aa90be9ac5ce130bebc11453446a3698ce508fc Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Myshell.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Opchatgpts.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Opchatgpts.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..684dc28d6f6f7b24b8448d2be15ebc9ec3f6c9b7 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Opchatgpts.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/OpenAssistant.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/OpenAssistant.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1982b503e1c4a58d5c76ec67ae7dc38a75384fef Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/OpenAssistant.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/OpenaiChat.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/OpenaiChat.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..eac7a9b6576f1ae68ebfedbbf250d8bf41ba5599 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/OpenaiChat.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/PerplexityAi.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/PerplexityAi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea1de01b91d8c702f0ff13a5a7074ee2e3360571 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/PerplexityAi.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Raycast.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Raycast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcae0a15ced02818c883535b1e6f8b4bd54b8295 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Raycast.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Theb.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Theb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbc711e9bf36ee1b60c50c42fd60051956a12f72 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Theb.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/V50.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/V50.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0cddcef6de4e40a3ba9b5bf0712ec342a015dcf7 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/V50.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Vercel.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Vercel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5dc9808ffbf648933387db101f991446418a0bc4 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Vercel.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Vitalentum.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Vitalentum.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..ecac9f151e8cf1edb4c91a21121573a2c6840c33 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Vitalentum.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Wewordle.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Wewordle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9baf24dc926e200bee8e0188004e721e7244f51c Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Wewordle.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Wuguokai.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Wuguokai.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e93d03d3480cef067cedc2c70989cc2412ac75f1 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Wuguokai.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Ylokh.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Ylokh.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3ebd9092fc00936475a05321a6b7cdb44ad7300 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Ylokh.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/You.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/You.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93820612faa01fcebdffa3d451de97660d6e420b Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/You.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/Yqcloud.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/Yqcloud.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5dfb20b82b92c3dd5b372c09ed62a254278fa41 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/Yqcloud.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/__init__.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a137ce856d6b252fe9c35df09690e87d3746a9de 
Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/__init__.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/base_provider.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/base_provider.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de9de25b0c6b035eb67cef4f172a4d8cda845a6b Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/base_provider.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/helper.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9365edc85d416df7f0df06f0d1eb439c8974699f Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/helper.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/__pycache__/retry_provider.cpython-310.pyc b/g4f/g4f/Provider/__pycache__/retry_provider.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8a247332dbd89020a52be7ec91ce352314844c2 Binary files /dev/null and b/g4f/g4f/Provider/__pycache__/retry_provider.cpython-310.pyc differ diff --git a/g4f/g4f/Provider/base_provider.py b/g4f/g4f/Provider/base_provider.py new file mode 100644 index 0000000000000000000000000000000000000000..a21dc8712ab6b17d31ad32037a77639032892de8 --- /dev/null +++ b/g4f/g4f/Provider/base_provider.py @@ -0,0 +1,136 @@ +from __future__ import annotations + +from asyncio import AbstractEventLoop +from concurrent.futures import ThreadPoolExecutor +from abc import ABC, abstractmethod + +from .helper import get_event_loop, get_cookies, format_prompt +from ..typing import AsyncGenerator, CreateResult + + +class BaseProvider(ABC): + url: str + working = False + needs_auth = False + supports_stream = False + supports_gpt_35_turbo = False + supports_gpt_4 = False + + @staticmethod + @abstractmethod + def create_completion( + model: str, + messages: list[dict[str, str]], + stream: bool, + **kwargs + ) -> CreateResult: + raise NotImplementedError() + + @classmethod 
+ async def create_async( + cls, + model: str, + messages: list[dict[str, str]], + *, + loop: AbstractEventLoop = None, + executor: ThreadPoolExecutor = None, + **kwargs + ) -> str: + if not loop: + loop = get_event_loop() + def create_func(): + return "".join(cls.create_completion( + model, + messages, + False, + **kwargs + )) + return await loop.run_in_executor( + executor, + create_func + ) + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" + + +class AsyncProvider(BaseProvider): + @classmethod + def create_completion( + cls, + model: str, + messages: list[dict[str, str]], + stream: bool = False, + **kwargs + ) -> CreateResult: + loop = get_event_loop() + coro = cls.create_async(model, messages, **kwargs) + yield loop.run_until_complete(coro) + + @staticmethod + @abstractmethod + async def create_async( + model: str, + messages: list[dict[str, str]], + **kwargs + ) -> str: + raise NotImplementedError() + + +class AsyncGeneratorProvider(AsyncProvider): + supports_stream = True + + @classmethod + def create_completion( + cls, + model: str, + messages: list[dict[str, str]], + stream: bool = True, + **kwargs + ) -> CreateResult: + loop = get_event_loop() + generator = cls.create_async_generator( + model, + messages, + stream=stream, + **kwargs + ) + gen = generator.__aiter__() + while True: + try: + yield loop.run_until_complete(gen.__anext__()) + except StopAsyncIteration: + break + + @classmethod + async def create_async( + cls, + model: str, + messages: list[dict[str, str]], + **kwargs + ) -> str: + return "".join([ + chunk async for chunk in cls.create_async_generator( + model, + messages, + stream=False, + **kwargs + ) + ]) + + @staticmethod + @abstractmethod + def create_async_generator( + model: str, + messages: list[dict[str, str]], + **kwargs + ) -> 
AsyncGenerator: + raise NotImplementedError() \ No newline at end of file diff --git a/g4f/g4f/Provider/helper.py b/g4f/g4f/Provider/helper.py new file mode 100644 index 0000000000000000000000000000000000000000..e14ae65eca2fdc32a970c2f463eed6db0949a9ca --- /dev/null +++ b/g4f/g4f/Provider/helper.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +import asyncio +import sys +from asyncio import AbstractEventLoop + +import browser_cookie3 + +_cookies: dict[str, dict[str, str]] = {} + +# Use own event_loop_policy with a selector event loop on windows. +if sys.platform == 'win32': + _event_loop_policy = asyncio.WindowsSelectorEventLoopPolicy() +else: + _event_loop_policy = asyncio.get_event_loop_policy() + +# If event loop is already running, handle nested event loops +# If "nest_asyncio" is installed, patch the event loop. +def get_event_loop() -> AbstractEventLoop: + try: + asyncio.get_running_loop() + except RuntimeError: + return _event_loop_policy.get_event_loop() + try: + event_loop = _event_loop_policy.get_event_loop() + if not hasattr(event_loop.__class__, "_nest_patched"): + import nest_asyncio + nest_asyncio.apply(event_loop) + return event_loop + except ImportError: + raise RuntimeError( + 'Use "create_async" instead of "create" function in a running event loop. Or install the "nest_asyncio" package.') + +# Load cookies for a domain from all supported browser. 
+# Cache the results in the "_cookies" variable +def get_cookies(cookie_domain: str) -> dict: + if cookie_domain not in _cookies: + _cookies[cookie_domain] = {} + try: + for cookie in browser_cookie3.load(cookie_domain): + _cookies[cookie_domain][cookie.name] = cookie.value + except: + pass + return _cookies[cookie_domain] + + +def format_prompt(messages: list[dict[str, str]], add_special_tokens=False): + if add_special_tokens or len(messages) > 1: + formatted = "\n".join( + ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages] + ) + return f"{formatted}\nAssistant:" + else: + return messages[0]["content"] \ No newline at end of file diff --git a/g4f/g4f/Provider/retry_provider.py b/g4f/g4f/Provider/retry_provider.py new file mode 100644 index 0000000000000000000000000000000000000000..e1a9cd1f25f8fbf53718039e1f995e2a8d4651e2 --- /dev/null +++ b/g4f/g4f/Provider/retry_provider.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +import random + +from ..typing import CreateResult +from .base_provider import BaseProvider, AsyncProvider + + +class RetryProvider(AsyncProvider): + __name__ = "RetryProvider" + working = True + needs_auth = False + supports_stream = True + supports_gpt_35_turbo = False + supports_gpt_4 = False + + def __init__( + self, + providers: list[type[BaseProvider]], + shuffle: bool = True + ) -> None: + self.providers = providers + self.shuffle = shuffle + + + def create_completion( + self, + model: str, + messages: list[dict[str, str]], + stream: bool = False, + **kwargs + ) -> CreateResult: + if stream: + providers = [provider for provider in self.providers if provider.supports_stream] + else: + providers = self.providers + if self.shuffle: + random.shuffle(providers) + + self.exceptions = {} + started = False + for provider in providers: + try: + for token in provider.create_completion(model, messages, stream, **kwargs): + yield token + started = True + if started: + return + except Exception as e: + 
self.exceptions[provider.__name__] = e + if started: + break + + self.raise_exceptions() + + async def create_async( + self, + model: str, + messages: list[dict[str, str]], + **kwargs + ) -> str: + providers = [provider for provider in self.providers if issubclass(provider, AsyncProvider)] + if self.shuffle: + random.shuffle(providers) + + self.exceptions = {} + for provider in providers: + try: + return await provider.create_async(model, messages, **kwargs) + except Exception as e: + self.exceptions[provider.__name__] = e + + self.raise_exceptions() + + def raise_exceptions(self): + if self.exceptions: + raise RuntimeError("\n".join(["All providers failed:"] + [ + f"{p}: {self.exceptions[p].__class__.__name__}: {self.exceptions[p]}" for p in self.exceptions + ])) + + raise RuntimeError("No provider found") \ No newline at end of file diff --git a/g4f/g4f/__init__.py b/g4f/g4f/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..46c3292cc280cc89a2157f77b72ade3ddab16f4e --- /dev/null +++ b/g4f/g4f/__init__.py @@ -0,0 +1,93 @@ +from __future__ import annotations +from g4f import models +from .Provider import BaseProvider, AsyncProvider +from .typing import Any, CreateResult, Union + +logging = False +version = '0.1.3.7' + +def get_model_and_provider(model: Union[models.Model, str], provider: type[BaseProvider], stream: bool): + if isinstance(model, str): + if model in models.ModelUtils.convert: + model = models.ModelUtils.convert[model] + else: + raise Exception(f'The model: {model} does not exist') + + if not provider: + provider = model.best_provider + + if not provider: + raise Exception(f'No provider found for model: {model}') + + if not provider.working: + raise Exception(f'{provider.__name__} is not working') + + if not provider.supports_stream and stream: + raise Exception( + f'ValueError: {provider.__name__} does not support "stream" argument') + + if logging: + print(f'Using {provider.__name__} provider') + + return model, 
provider + +class ChatCompletion: + @staticmethod + def create( + model : Union[models.Model, str], + messages : list[dict[str, str]], + provider : Union[type[BaseProvider], None] = None, + stream : bool = False, + auth : Union[str, None] = None, + **kwargs + ) -> Union[CreateResult, str]: + + model, provider = get_model_and_provider(model, provider, stream) + + if provider.needs_auth and not auth: + raise Exception( + f'ValueError: {provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)') + + if provider.needs_auth: + kwargs['auth'] = auth + + result = provider.create_completion(model.name, messages, stream, **kwargs) + return result if stream else ''.join(result) + + @staticmethod + async def create_async( + model : Union[models.Model, str], + messages : list[dict[str, str]], + provider : Union[type[BaseProvider], None] = None, + **kwargs + ) -> str: + model, provider = get_model_and_provider(model, provider, False) + + return await provider.create_async(model.name, messages, **kwargs) + +class Completion: + @staticmethod + def create( + model : Union[models.Model, str], + prompt : str, + provider : Union[type[BaseProvider], None] = None, + stream : bool = False, **kwargs) -> Union[CreateResult, str]: + + allowed_models = [ + 'code-davinci-002', + 'text-ada-001', + 'text-babbage-001', + 'text-curie-001', + 'text-davinci-002', + 'text-davinci-003' + ] + + if model not in allowed_models: + raise Exception(f'ValueError: Can\'t use {model} with Completion.create()') + + model, provider = get_model_and_provider(model, provider, stream) + + result = provider.create_completion(model.name, + [{"role": "user", "content": prompt}], stream, **kwargs) + + return result if stream else ''.join(result) \ No newline at end of file diff --git a/g4f/g4f/__pycache__/__init__.cpython-310.pyc b/g4f/g4f/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9e094b1a95d546ae30f25fbe2fe772afcfa5274 
Binary files /dev/null and b/g4f/g4f/__pycache__/__init__.cpython-310.pyc differ diff --git a/g4f/g4f/__pycache__/models.cpython-310.pyc b/g4f/g4f/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98ab77aba4f5f6c75e479a3ffd58f24225d47e90 Binary files /dev/null and b/g4f/g4f/__pycache__/models.cpython-310.pyc differ diff --git a/g4f/g4f/__pycache__/requests.cpython-310.pyc b/g4f/g4f/__pycache__/requests.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5eb8912a03070cdee86a6e872b48118dd7a99d9 Binary files /dev/null and b/g4f/g4f/__pycache__/requests.cpython-310.pyc differ diff --git a/g4f/g4f/__pycache__/typing.cpython-310.pyc b/g4f/g4f/__pycache__/typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63d1f0124e158dec0ce0ab6fd5b71a3ab4204262 Binary files /dev/null and b/g4f/g4f/__pycache__/typing.cpython-310.pyc differ diff --git a/g4f/g4f/models.py b/g4f/g4f/models.py new file mode 100644 index 0000000000000000000000000000000000000000..7c2d6822f31f48b2a0cd897d0fcf70bc2ce7dbbb --- /dev/null +++ b/g4f/g4f/models.py @@ -0,0 +1,250 @@ +from __future__ import annotations +from dataclasses import dataclass +from .typing import Union +from .Provider import BaseProvider, RetryProvider +from .Provider import ( + ChatgptLogin, + ChatgptAi, + ChatBase, + Vercel, + DeepAi, + Aivvm, + Bard, + H2o, + GptGo, + Bing, + PerplexityAi, + Wewordle, + Yqcloud, + AItianhu, + AItianhuSpace, + Aichat, + Myshell, + Aibn, + ChatgptDuo, +) + +@dataclass(unsafe_hash=True) +class Model: + name: str + base_provider: str + best_provider: Union[type[BaseProvider], RetryProvider] = None + +# Config for HuggingChat, OpenAssistant +# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You +default = Model( + name = "", + base_provider = "", + best_provider = RetryProvider([ + Bing, # Not fully GPT 3 or 4 + PerplexityAi, # Adds references to sources + Wewordle, # Responds with 
markdown + Yqcloud, # Answers short questions in chinese + ChatBase, # Don't want to answer creatively + ChatgptDuo, # Include search results + DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn, + ]) +) + +# GPT-3.5 / GPT-4 +gpt_35_turbo = Model( + name = 'gpt-3.5-turbo', + base_provider = 'openai', + best_provider = RetryProvider([ + DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, + ]) +) + +gpt_4 = Model( + name = 'gpt-4', + base_provider = 'openai', + best_provider = RetryProvider([ + Myshell, AItianhuSpace, + ]) +) + +# Bard +palm = Model( + name = 'palm', + base_provider = 'google', + best_provider = Bard) + +# H2o +falcon_7b = Model( + name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3', + base_provider = 'huggingface', + best_provider = H2o) + +falcon_40b = Model( + name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1', + base_provider = 'huggingface', + best_provider = H2o) + +llama_13b = Model( + name = 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b', + base_provider = 'huggingface', + best_provider = H2o) + +# Vercel +claude_instant_v1 = Model( + name = 'claude-instant-v1', + base_provider = 'anthropic', + best_provider = Vercel) + +claude_v1 = Model( + name = 'claude-v1', + base_provider = 'anthropic', + best_provider = Vercel) + +claude_v2 = Model( + name = 'claude-v2', + base_provider = 'anthropic', + best_provider = Vercel) + +command_light_nightly = Model( + name = 'command-light-nightly', + base_provider = 'cohere', + best_provider = Vercel) + +command_nightly = Model( + name = 'command-nightly', + base_provider = 'cohere', + best_provider = Vercel) + +gpt_neox_20b = Model( + name = 'EleutherAI/gpt-neox-20b', + base_provider = 'huggingface', + best_provider = Vercel) + +oasst_sft_1_pythia_12b = Model( + name = 'OpenAssistant/oasst-sft-1-pythia-12b', + base_provider = 'huggingface', + best_provider = Vercel) + +oasst_sft_4_pythia_12b_epoch_35 = Model( + name = 
'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', + base_provider = 'huggingface', + best_provider = Vercel) + +santacoder = Model( + name = 'bigcode/santacoder', + base_provider = 'huggingface', + best_provider = Vercel) + +bloom = Model( + name = 'bigscience/bloom', + base_provider = 'huggingface', + best_provider = Vercel) + +flan_t5_xxl = Model( + name = 'google/flan-t5-xxl', + base_provider = 'huggingface', + best_provider = Vercel) + +code_davinci_002 = Model( + name = 'code-davinci-002', + base_provider = 'openai', + best_provider = Vercel) + +gpt_35_turbo_16k = Model( + name = 'gpt-3.5-turbo-16k', + base_provider = 'openai', + best_provider = Vercel) + +gpt_35_turbo_16k_0613 = Model( + name = 'gpt-3.5-turbo-16k-0613', + base_provider = 'openai') + +gpt_35_turbo_0613 = Model( + name = 'gpt-3.5-turbo-0613', + base_provider = 'openai', + best_provider = RetryProvider([ + Aivvm, ChatgptLogin + ]) +) + +gpt_4_0613 = Model( + name = 'gpt-4-0613', + base_provider = 'openai', + best_provider = Vercel) + +text_ada_001 = Model( + name = 'text-ada-001', + base_provider = 'openai', + best_provider = Vercel) + +text_babbage_001 = Model( + name = 'text-babbage-001', + base_provider = 'openai', + best_provider = Vercel) + +text_curie_001 = Model( + name = 'text-curie-001', + base_provider = 'openai', + best_provider = Vercel) + +text_davinci_002 = Model( + name = 'text-davinci-002', + base_provider = 'openai', + best_provider = Vercel) + +text_davinci_003 = Model( + name = 'text-davinci-003', + base_provider = 'openai', + best_provider = Vercel) + +llama13b_v2_chat = Model( + name = 'replicate:a16z-infra/llama13b-v2-chat', + base_provider = 'replicate', + best_provider = Vercel) + +llama7b_v2_chat = Model( + name = 'replicate:a16z-infra/llama7b-v2-chat', + base_provider = 'replicate', + best_provider = Vercel) + + +class ModelUtils: + convert: dict[str, Model] = { + # gpt-3.5 / gpt-4 + 'gpt-3.5-turbo' : gpt_35_turbo, + 'gpt-3.5-turbo-16k' : gpt_35_turbo_16k, + 'gpt-4' : 
gpt_4, + 'gpt-4-0613' : gpt_4_0613, + 'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613, + + # Bard + 'palm2' : palm, + 'palm' : palm, + 'google' : palm, + 'google-bard' : palm, + 'google-palm' : palm, + 'bard' : palm, + + # H2o + 'falcon-40b' : falcon_40b, + 'falcon-7b' : falcon_7b, + 'llama-13b' : llama_13b, + + # Vercel + 'claude-instant-v1' : claude_instant_v1, + 'claude-v1' : claude_v1, + 'claude-v2' : claude_v2, + 'command-nightly' : command_nightly, + 'gpt-neox-20b' : gpt_neox_20b, + 'santacoder' : santacoder, + 'bloom' : bloom, + 'flan-t5-xxl' : flan_t5_xxl, + 'code-davinci-002' : code_davinci_002, + 'text-ada-001' : text_ada_001, + 'text-babbage-001' : text_babbage_001, + 'text-curie-001' : text_curie_001, + 'text-davinci-002' : text_davinci_002, + 'text-davinci-003' : text_davinci_003, + 'llama13b-v2-chat' : llama13b_v2_chat, + 'llama7b-v2-chat' : llama7b_v2_chat, + + 'oasst-sft-1-pythia-12b' : oasst_sft_1_pythia_12b, + 'oasst-sft-4-pythia-12b-epoch-3.5' : oasst_sft_4_pythia_12b_epoch_35, + 'command-light-nightly' : command_light_nightly, + } \ No newline at end of file diff --git a/g4f/g4f/requests.py b/g4f/g4f/requests.py new file mode 100644 index 0000000000000000000000000000000000000000..51d31e1eb4e764fb85b6e704a0fa2480c9d230e1 --- /dev/null +++ b/g4f/g4f/requests.py @@ -0,0 +1,96 @@ +from __future__ import annotations + +import json, sys +from functools import partialmethod + +from aiohttp import StreamReader +from aiohttp.base_protocol import BaseProtocol + +from curl_cffi.requests import AsyncSession as BaseSession +from curl_cffi.requests.cookies import Request, Response + + +class StreamResponse: + def __init__(self, inner: Response, content: StreamReader, request: Request): + self.inner = inner + self.content = content + self.request = request + self.status_code = inner.status_code + self.reason = inner.reason + self.ok = inner.ok + self.headers = inner.headers + self.cookies = inner.cookies + + async def text(self) -> str: + content = await 
self.content.read() + return content.decode() + + def raise_for_status(self): + if not self.ok: + raise RuntimeError(f"HTTP Error {self.status_code}: {self.reason}") + + async def json(self, **kwargs): + return json.loads(await self.content.read(), **kwargs) + +class StreamRequest: + def __init__(self, session: AsyncSession, method: str, url: str, **kwargs): + self.session = session + self.loop = session.loop + self.content = StreamReader( + BaseProtocol(session.loop), + sys.maxsize, + loop=session.loop + ) + self.method = method + self.url = url + self.options = kwargs + + def on_content(self, data): + if not self.enter.done(): + self.enter.set_result(None) + self.content.feed_data(data) + + def on_done(self, task): + self.content.feed_eof() + self.curl.clean_after_perform() + self.curl.reset() + self.session.push_curl(self.curl) + + async def __aenter__(self) -> StreamResponse: + self.curl = await self.session.pop_curl() + self.enter = self.loop.create_future() + request, _, header_buffer = self.session._set_curl_options( + self.curl, + self.method, + self.url, + content_callback=self.on_content, + **self.options + ) + await self.session.acurl.add_handle(self.curl, False) + self.handle = self.session.acurl._curl2future[self.curl] + self.handle.add_done_callback(self.on_done) + await self.enter + return StreamResponse( + self.session._parse_response(self.curl, request, _, header_buffer), + self.content, + request + ) + + async def __aexit__(self, exc_type, exc, tb): + pass + +class AsyncSession(BaseSession): + def request( + self, + method: str, + url: str, + **kwargs + ) -> StreamRequest: + return StreamRequest(self, method, url, **kwargs) + + head = partialmethod(request, "HEAD") + get = partialmethod(request, "GET") + post = partialmethod(request, "POST") + put = partialmethod(request, "PUT") + patch = partialmethod(request, "PATCH") + delete = partialmethod(request, "DELETE") \ No newline at end of file diff --git a/g4f/g4f/typing.py b/g4f/g4f/typing.py new 
file mode 100644 index 0000000000000000000000000000000000000000..5f63c222d8f8a8322789d30ab8993fa882bbf639 --- /dev/null +++ b/g4f/g4f/typing.py @@ -0,0 +1,20 @@ +import sys +from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union + +if sys.version_info >= (3, 8): + from typing import TypedDict +else: + from typing_extensions import TypedDict + +SHA256 = NewType('sha_256_hash', str) +CreateResult = Generator[str, None, None] + +__all__ = [ + 'Any', + 'AsyncGenerator', + 'Generator', + 'Tuple', + 'TypedDict', + 'SHA256', + 'CreateResult', +]