Spaces:
Sleeping
Sleeping
dhumra_chatbot_assistant
#1
by
DhumraAI
- opened
- app.py +130 -0
- chat.py +66 -0
- requirements.txt +3 -0
app.py
ADDED
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import uuid
|
3 |
+
from pathlib import Path
|
4 |
+
from typing import Dict, List, Tuple
|
5 |
+
|
6 |
+
import gradio as gr
|
7 |
+
import requests
|
8 |
+
|
9 |
+
from chat import ChatGpt
|
10 |
+
|
11 |
+
# Environment Variables


def _env_flag(name: str) -> bool:
    """Parse a boolean environment flag.

    BUG FIX: ``bool(os.getenv(name, False))`` is True for ANY non-empty
    string — including "0" and "False" — because env vars are strings.
    Compare against common truthy spellings instead.
    """
    return os.getenv(name, "").strip().lower() in ("1", "true", "yes", "on")


DEBUG = _env_flag("DEBUG")  # skip real translation calls when set
VERBOSE = _env_flag("V")  # extra console logging in bot()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
BING_TRANSLATE_API_KEY = os.getenv("BING_TRANSLATE_API_KEY")

# Type Definitions
ROLE_USER = "user"
ROLE_ASSISTANT = "assistant"
CHATGPT_MSG = Dict[str, str]  # {"role": "user|assistant", "content": "text"}
CHATGPT_HISTORY = List[CHATGPT_MSG]
# Backward-compatible alias for the original (typo'd) public name.
CHATGPT_HISTROY = CHATGPT_HISTORY
CHATBOT_MSG = Tuple[str, str]  # (user_message, bot_response)
CHATBOT_HISTORY = List[CHATBOT_MSG]

# Constants
LANG_BO = "bo"  # Tibetan
LANG_MEDIUM = "en"  # pivot language used when talking to OpenAI

# Single module-level ChatGPT session.
# NOTE(review): shared by all browser sessions — confirm this is intended.
chatbot = ChatGpt(OPENAI_API_KEY)
32 |
+
def bing_translate(text: str, from_lang: str, to_lang: str) -> str:
    """Translate *text* via the Bing (Azure Cognitive Services) Translator API.

    Args:
        text: Text to translate.
        from_lang: Source language code (e.g. "bo").
        to_lang: Target language code (e.g. "en").

    Returns:
        The translated text.

    Raises:
        RuntimeError: If the API responds with a non-200 status.
    """
    if DEBUG:
        # Canned responses so the UI can be exercised without API keys.
        if from_lang != "bo":
            return "ཀཀཀཀཀཀ"
        return "aaaaa"
    headers = {
        "Ocp-Apim-Subscription-Key": BING_TRANSLATE_API_KEY,
        "Content-Type": "application/json",
        "Ocp-Apim-Subscription-Region": "australiaeast",
        "X-ClientTraceId": str(uuid.uuid4()),
    }
    resp = requests.post(
        url="https://api.cognitive.microsofttranslator.com/translate",
        params={"api-version": "3.0", "from": from_lang, "to": to_lang},
        json=[{"text": text}],
        headers=headers,
        # BUG FIX: requests has no default timeout; without one a stuck
        # request hangs the gradio callback forever.
        timeout=30,
    )
    result = resp.json()
    if resp.status_code == 200:
        return result[0]["translations"][0]["text"]
    # BUG FIX: Exception("msg", result) produced a two-element args tuple,
    # not a readable message; format the payload into the message instead.
    # RuntimeError subclasses Exception, so existing handlers still match.
    raise RuntimeError(f"Error in translation API: {result}")
def user(input_bo: str, history_bo: list):
    """Append the user's Tibetan message to the gradio chat history.

    The reply slot of the new turn is left as None; ``bot`` fills it in
    during the chained ``.then`` callback. Returns an empty string (which
    clears the input textbox) together with the updated history.
    """
    pending_turn = [input_bo, None]
    history_bo.append(pending_turn)
    return "", history_bo
61 |
+
def bot(history_bo: list, chat_id: str):
    """Translate the latest user message to English, query OpenAI, and
    translate the reply back to Tibetan.

    Args:
        history_bo: Gradio chatbot history in Tibetan; its last entry is
            ``[user_message, None]`` as appended by ``user``.
        chat_id: Session identifier (currently unused; reserved for
            per-session chat state).

    Returns:
        The updated Tibetan history with the bot reply filled in.
    """
    input_bo = history_bo[-1][0]
    input_en = bing_translate(input_bo, LANG_BO, LANG_MEDIUM)
    response_en = chatbot.generate_response(input_en)
    response_bo = bing_translate(response_en, LANG_MEDIUM, LANG_BO)
    history_bo[-1][1] = response_bo
    if VERBOSE:
        print("------------------------")
        print(history_bo)
        # BUG FIX: the original also printed ``history_en`` here, a name
        # that does not exist in this scope — enabling VERBOSE raised
        # NameError on every message.
        print("------------------------")

    return history_bo
85 |
+
|
86 |
+
|
87 |
+
def get_chat_id():
    """Start a fresh session: wipe the shared ChatGPT history, then hand
    back a new random session identifier as a string."""
    chatbot.clear_history()
    fresh_id = uuid.uuid4()
    return str(fresh_id)
|
90 |
+
|
91 |
+
|
92 |
+
# Custom stylesheet must ship next to this file; fail fast if it is missing.
css_fn = Path(__file__).resolve().parent / "static" / "app.css"
assert css_fn.exists() and css_fn.is_file(), f"CSS file not found: {css_fn}"

with gr.Blocks(css=str(css_fn), theme=gr.themes.Soft()) as demo:
    # Per-session id; evaluating get_chat_id also clears the shared ChatGpt
    # history. NOTE(review): there is a single module-level ChatGpt instance,
    # so concurrent sessions appear to share one history — confirm intent.
    chat_id = gr.State(value=get_chat_id)
    # English-side history state. NOTE(review): no callback below reads or
    # writes this — looks vestigial; confirm before removing.
    history_en = gr.State(value=[])
    history_bo = gr.Chatbot(label="Tibetan Chatbot", elem_id="maiChatHistory")

    input_bo = gr.Textbox(
        show_label=False,
        placeholder="Type here...",
        elem_id="maiChatInput",
    )
    input_submit_btn = gr.Button("Submit")
    # Enter key: first append the user's turn (unqueued, so the textbox
    # clears immediately), then run the translate -> ChatGPT -> translate
    # pipeline in `bot` to fill in the reply.
    input_bo.submit(
        fn=user,
        inputs=[input_bo, history_bo],
        outputs=[input_bo, history_bo],
        queue=False,
    ).then(
        fn=bot,
        inputs=[history_bo, chat_id],
        outputs=[history_bo],
    )
    # Submit button: identical two-step wiring as the Enter-key handler.
    input_submit_btn.click(
        fn=user,
        inputs=[input_bo, history_bo],
        outputs=[input_bo, history_bo],
        queue=False,
    ).then(
        fn=bot,
        inputs=[history_bo, chat_id],
        outputs=[history_bo],
    )

    clear = gr.Button("Clear")
    # Clears only the visible Tibetan history. NOTE(review): the shared
    # ChatGpt instance's internal message history is NOT cleared here, so
    # the model still "remembers" earlier turns — confirm intent.
    clear.click(lambda: [], None, history_bo, queue=False)

demo.launch()
chat.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from typing import Tuple
|
3 |
+
|
4 |
+
import openai
|
5 |
+
|
6 |
+
|
7 |
+
class ChatGpt:
    """Minimal stateful wrapper around the pre-1.0 OpenAI chat-completion API.

    Keeps a running message history plus a running token total so the
    prompt can be truncated once it grows past ``max_tokens``.
    """

    def __init__(self, api_key, max_tokens=4096):
        self.api_key = api_key
        self.max_tokens = max_tokens  # token budget for the conversation
        self.message_history = []  # list of {"role": ..., "content": ...}
        self.total_tokens = 0  # cumulative usage reported by the API

        # Set up the OpenAI API client (module-level key, pre-1.0 SDK style)
        openai.api_key = self.api_key

    def clear_history(self):
        """Reset the conversation and the token counter."""
        self.message_history = []
        self.total_tokens = 0

    def add_message(self, role: str, content: str):
        """Append a message, then trim the history if over budget."""
        self.message_history.append({"role": role, "content": content})
        self._truncate_history()

    def add_system_message(self, content: str):
        """Append a system prompt (system messages survive truncation)."""
        self.add_message("system", content)

    def generate_response(self, user_input: str) -> str:
        """Send *user_input* with the accumulated history; return the reply text."""
        self.add_message("user", user_input)
        response = self._call_openai_api(self.message_history)
        self.add_message("assistant", response)

        return response

    def _truncate_history(self):
        """Drop the oldest non-system messages while over the token budget.

        BUG FIX: the original never reduced ``total_tokens`` while popping,
        so once over budget it discarded EVERY leading non-system message
        and raised IndexError on an empty list. We now stop when the list
        is empty and decrement a rough ~4-characters-per-token estimate per
        dropped message (the API only reports conversation totals, not
        per-message counts).
        """
        while self.total_tokens > self.max_tokens and self.message_history:
            if self.message_history[0]["role"] == "system":
                break
            dropped = self.message_history.pop(0)
            self.total_tokens -= max(1, len(dropped["content"]) // 4)

    def _call_openai_api(self, messages) -> str:
        """Call the chat-completion endpoint and accumulate reported token usage."""
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo", messages=messages
        )
        self.total_tokens += response["usage"]["total_tokens"]
        return response["choices"][0]["message"]["content"].strip()
|
48 |
+
|
49 |
+
|
50 |
+
if __name__ == "__main__":
    # Smoke test: requires OPENAI_API_KEY and network access.
    chat = ChatGpt(os.getenv("OPENAI_API_KEY"))

    chat.add_system_message("The assistant can answer questions and tell jokes.")

    user_input = "Tell me a joke."
    # BUG FIX: generate_response returns a single string; the original
    # unpacked it into (user_msg, bot_response), which raises ValueError
    # for any reply that is not exactly two characters long.
    bot_response = chat.generate_response(user_input)
    print("User:", user_input)
    print("Assistant:", bot_response)
    print("Total Tokens:", chat.total_tokens)

    user_input = "another one"
    bot_response = chat.generate_response(user_input)
    print("User:", user_input)
    print("Assistant:", bot_response)
    print("Total Tokens:", chat.total_tokens)
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
gradio==3.26.0
|
2 |
+
requests==2.28.2
|
3 |
+
openai==0.27.4
|