import json
import re
from typing import Any, Dict, Optional
from uuid import uuid4

import requests


class OPENGPT:
    def __init__(
        self,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: Optional[str] = None,
        filepath: Optional[str] = None,
        update_file: bool = True,
        proxies: Optional[Dict[str, str]] = None,
        history_offset: int = 10250,
        act: Optional[str] = None,
    ):
| """Instantiates OPENGPT | |
| Args: | |
| max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600. | |
| timeout (int, optional): Http request timeout. Defaults to 30. | |
| intro (str, optional): Conversation introductory prompt. Defaults to None. | |
| filepath (str, optional): Path to file containing conversation history. Defaults to None. | |
| update_file (bool, optional): Add new prompts and responses to the file. Defaults to True. | |
| proxies (dict, optional): Http request proxies. Defaults to {}. | |
| history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250. | |
| act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. | |
| """ | |
        self.session = requests.Session()
        self.session.proxies = proxies or {}
        self.max_tokens_to_sample = max_tokens
        self.chat_endpoint = "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.assistant_id = "bca37014-6f97-4f2b-8928-81ea8d478d88"
        self.authority = "opengpts-example-vz4y4ooboq-uc.a.run.app"
        self.headers = {
            "authority": self.authority,
            "accept": "text/event-stream",
            "accept-language": "en-US,en;q=0.7",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "origin": "https://opengpts-example-vz4y4ooboq-uc.a.run.app",
            "pragma": "no-cache",
            "referer": "https://opengpts-example-vz4y4ooboq-uc.a.run.app/",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        }

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Chat with AI

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.

        Returns:
            dict: Response from the AI.
        """
        self.session.headers.update(self.headers)
        self.session.headers.update(
            dict(
                cookie=f"opengpts_user_id={uuid4()}",
            )
        )
        payload = {
            "input": [
                {
                    "content": prompt,
                    "additional_kwargs": {},
                    "type": "human",
                    "example": False,
                },
            ],
            "assistant_id": self.assistant_id,
            "thread_id": "",
        }

        def for_stream():
            response = self.session.post(
                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
            )
            if (
                not response.ok
                or response.headers.get("Content-Type")
                != "text/event-stream; charset=utf-8"
            ):
                raise Exception(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )
            for value in response.iter_lines(
                decode_unicode=True,
                chunk_size=self.stream_chunk_size,
            ):
                try:
                    # Strip the SSE "data:" prefix before parsing the JSON body.
                    modified_value = re.sub("data:", "", value)
                    resp = json.loads(modified_value)
                    # Single-element events carry only run metadata; skip them.
                    if len(resp) == 1:
                        continue
                    self.last_response.update(resp[1])
                    yield value if raw else resp[1]
                except json.decoder.JSONDecodeError:
                    # Ignore non-JSON keep-alive and metadata lines.
                    pass

        def for_non_stream():
            # Drain the stream so last_response holds the final message.
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: Optional[str] = None,
    ) -> str:
        """Generate response `str`

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.

        Returns:
            str: Response generated
        """

        def for_stream():
            for response in self.ask(prompt, True, optimizer=optimizer):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(self.ask(prompt, False, optimizer=optimizer))

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: Dict[str, Any]) -> str:
        """Retrieves the message from a response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["content"]
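

# A minimal usage sketch (illustrative only): it assumes the public demo
# endpoint hard-coded above is still reachable and still speaking the same
# server-sent-events protocol.
if __name__ == "__main__":
    bot = OPENGPT()
    # Non-streaming: blocks until the full reply is available.
    print(bot.chat("Hello there"))
    # Streaming: each yielded value appears to be the message accumulated so
    # far, so keep the final snapshot rather than concatenating chunks.
    last = ""
    for snapshot in bot.chat("Write a haiku about the sea", stream=True):
        last = snapshot
    print(last)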