Abhaykoul committed on
Commit
d641223
1 Parent(s): a681a39

Create AI.py

Files changed (1)
  1. AI.py +240 -0
AI.py ADDED
@@ -0,0 +1,240 @@
+ import requests
+ from requests import get
+ from uuid import uuid4
+ from re import findall
+
+ import io
+ import re
+ import json
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from typing import Any
+ import logging
+ class OPENGPT:
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+     ):
+         """Instantiates OPENGPT
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): HTTP request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): HTTP request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index (used as intro). Defaults to None.
+         """
+         self.session = requests.Session()
+         self.max_tokens_to_sample = max_tokens
+         self.is_conversation = is_conversation
+         self.chat_endpoint = (
+             "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream"
+         )
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.assistant_id = "bca37014-6f97-4f2b-8928-81ea8d478d88"
+         self.authority = "opengpts-example-vz4y4ooboq-uc.a.run.app"
+
+         self.headers = {
+             "authority": self.authority,
+             "accept": "text/event-stream",
+             "accept-language": "en-US,en;q=0.7",
+             "cache-control": "no-cache",
+             "content-type": "application/json",
+             "origin": "https://opengpts-example-vz4y4ooboq-uc.a.run.app",
+             "pragma": "no-cache",
+             "referer": "https://opengpts-example-vz4y4ooboq-uc.a.run.app/",
+             "sec-fetch-site": "same-origin",
+             "sec-gpc": "1",
+             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+         }
+
+         # Materialize as a tuple so membership checks work on every call;
+         # a bare generator would be exhausted after the first `in` test.
+         self.__available_optimizers = tuple(
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict: Response payload, e.g.
+             ```json
+             {
+                 "messages": [
+                     {
+                         "content": "Hello there",
+                         "additional_kwargs": {},
+                         "type": "human",
+                         "example": false
+                     },
+                     {
+                         "content": "Hello! How can I assist you today?",
+                         "additional_kwargs": {
+                             "agent": {
+                                 "return_values": {
+                                     "output": "Hello! How can I assist you today?"
+                                 },
+                                 "log": "Hello! How can I assist you today?",
+                                 "type": "AgentFinish"
+                             }
+                         },
+                         "type": "ai",
+                         "example": false
+                     }
+                 ]
+             }
+             ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         self.session.headers.update(self.headers)
+         # Each run is made under a fresh anonymous user id, passed via cookie.
+         self.session.headers.update(
+             dict(
+                 cookie=f"opengpts_user_id={uuid4()}",
+             )
+         )
+         payload = {
+             "input": [
+                 {
+                     "content": conversation_prompt,
+                     "additional_kwargs": {},
+                     "type": "human",
+                     "example": False,
+                 },
+             ],
+             "assistant_id": self.assistant_id,
+             "thread_id": "",
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+             )
+             if (
+                 not response.ok
+                 or response.headers.get("Content-Type")
+                 != "text/event-stream; charset=utf-8"
+             ):
+                 raise Exception(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             for value in response.iter_lines(
+                 decode_unicode=True,
+                 chunk_size=self.stream_chunk_size,
+             ):
+                 try:
+                     # Server-sent events arrive as `data: <json>` lines;
+                     # strip the prefix and parse the JSON body.
+                     modified_value = re.sub("data:", "", value)
+                     resp = json.loads(modified_value)
+                     if len(resp) == 1:
+                         continue
+                     self.last_response.update(resp[1])
+                     yield value if raw else resp[1]
+                 except json.decoder.JSONDecodeError:
+                     pass
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             # Drain the stream and return the final accumulated response.
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["content"]
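
Usage note: a minimal sketch of how this provider can be driven once `AI.py` is importable. The import path and the prompts are illustrative assumptions, not part of this commit.

```python
# Minimal usage sketch, assuming AI.py is on the import path.
from AI import OPENGPT  # illustrative import; adjust to your package layout

bot = OPENGPT(is_conversation=True, max_tokens=600, timeout=30)

# Non-streaming: returns the full generated string.
print(bot.chat("Hello there"))

# Streaming: each yielded item is the message text extracted
# from one incoming server-sent event.
for text in bot.chat("Explain HTTP briefly", stream=True):
    print(text)
```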