Update AI.py
AI.py CHANGED
```diff
@@ -1,27 +1,22 @@
 import requests
-from requests import get
 from uuid import uuid4
-import io
-import re
 import json
-from typing import Any
-
+from typing import Any, Dict, Optional
+
 class OPENGPT:
     def __init__(
         self,
         max_tokens: int = 600,
         timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
+        intro: Optional[str] = None,
+        filepath: Optional[str] = None,
         update_file: bool = True,
-        proxies:
+        proxies: Dict[str, str] = {},
         history_offset: int = 10250,
-        act: str = None,
+        act: Optional[str] = None,
     ):
         """Instantiates OPENGPT
-
         Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
             timeout (int, optional): Http request timeout. Defaults to 30.
             intro (str, optional): Conversation introductory prompt. Defaults to None.
```
```diff
@@ -33,9 +28,7 @@ class OPENGPT:
         """
         self.session = requests.Session()
         self.max_tokens_to_sample = max_tokens
-        self.chat_endpoint =
-            "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream"
-        )
+        self.chat_endpoint = "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream"
         self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
```
```diff
@@ -56,51 +49,22 @@
             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
         }
 
-
     def ask(
         self,
         prompt: str,
         stream: bool = False,
         raw: bool = False,
-        optimizer: str = None,
-    ) ->
+        optimizer: Optional[str] = None,
+    ) -> Dict[str, Any]:
         """Chat with AI
-
         Args:
             prompt (str): Prompt to be send.
             stream (bool, optional): Flag for streaming response. Defaults to False.
             raw (bool, optional): Stream back raw response as received. Defaults to False.
             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
         Returns:
-            dict
-            ```json
-            {
-                "messages": [
-                    {
-                        "content": "Hello there",
-                        "additional_kwargs": {},
-                        "type": "human",
-                        "example": false
-                    },
-                    {
-                        "content": "Hello! How can I assist you today?",
-                        "additional_kwargs": {
-                            "agent": {
-                                "return_values": {
-                                    "output": "Hello! How can I assist you today?"
-                                },
-                                "log": "Hello! How can I assist you today?",
-                                "type": "AgentFinish"
-                            }
-                        },
-                        "type": "ai",
-                        "example": false
-                    }]
-            }
-            ```
+            dict: Response from the AI.
         """
-
         self.session.headers.update(self.headers)
         self.session.headers.update(
             dict(
```
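The JSON sample deleted from the docstring still documents the per-message shape that `ask` yields, and `get_message` only reads the `content` key. A sketch against that recorded structure (the dict literal is copied from the removed docstring example):

```python
# One AI message, as documented in the JSON example removed above.
message = {
    "content": "Hello! How can I assist you today?",
    "additional_kwargs": {
        "agent": {
            "return_values": {"output": "Hello! How can I assist you today?"},
            "log": "Hello! How can I assist you today?",
            "type": "AgentFinish",
        }
    },
    "type": "ai",
    "example": False,
}

# get_message() asserts the dict type and indexes "content".
assert OPENGPT().get_message(message) == "Hello! How can I assist you today?"
```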
```diff
@@ -145,7 +109,6 @@ class OPENGPT:
                     yield value if raw else resp[1]
                 except json.decoder.JSONDecodeError:
                     pass
-        )
 
         def for_non_stream():
             for _ in for_stream():
```
```diff
@@ -158,43 +121,31 @@
         self,
         prompt: str,
         stream: bool = False,
-        optimizer: str = None,
+        optimizer: Optional[str] = None,
     ) -> str:
         """Generate response `str`
         Args:
             prompt (str): Prompt to be send.
             stream (bool, optional): Flag for streaming response. Defaults to False.
             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
         Returns:
             str: Response generated
         """
-
         def for_stream():
-            for response in self.ask(
-                prompt, True
-            ):
+            for response in self.ask(prompt, True):
                 yield self.get_message(response)
 
         def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                )
-            )
+            return self.get_message(self.ask(prompt, False, optimizer=optimizer))
 
         return for_stream() if stream else for_non_stream()
 
-    def get_message(self, response:
+    def get_message(self, response: Dict[str, Any]) -> str:
         """Retrieves message only from response
-
         Args:
             response (dict): Response generated by `self.ask`
-
         Returns:
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["content"]
+        return response["content"]
```