Spaces:
Sleeping
Sleeping
Commit
·
5bd8642
1
Parent(s):
feaa332
Upload 5 files
Browse files- .env +1 -0
- .gitignore +3 -0
- gradio_ui.py +13 -0
- requirements.txt +72 -0
- utils.py +51 -0
.env
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
OPENAI_API_KEY='<your-openai-api-key>'
|
.gitignore
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
venv/
|
2 |
+
.env
|
3 |
+
__pycache__/
|
gradio_ui.py
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr

from utils import generate_response

# Minimal chat UI: a history pane, a free-text input box, and a reset button.
with gr.Blocks() as demo:

    chatbot = gr.Chatbot(label='Network Design Chatbot', height=600)
    msg = gr.Textbox()
    clear = gr.ClearButton([msg, chatbot])

    # On Enter, pass (text, history) to the backend callback, which returns
    # ('', updated_history): the textbox is cleared and the chat pane updated.
    msg.submit(generate_response, [msg, chatbot], [msg, chatbot])

demo.launch()
|
requirements.txt
ADDED
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
aiofiles==23.1.0
|
2 |
+
aiohttp==3.8.4
|
3 |
+
aiosignal==1.3.1
|
4 |
+
altair==5.0.1
|
5 |
+
annotated-types==0.5.0
|
6 |
+
anyio==3.7.1
|
7 |
+
async-timeout==4.0.2
|
8 |
+
attrs==23.1.0
|
9 |
+
certifi==2023.5.7
|
10 |
+
charset-normalizer==3.2.0
|
11 |
+
click==8.1.4
|
12 |
+
contourpy==1.1.0
|
13 |
+
cycler==0.11.0
|
14 |
+
exceptiongroup==1.1.2
|
15 |
+
fastapi==0.100.0
|
16 |
+
ffmpy==0.3.0
|
17 |
+
filelock==3.12.2
|
18 |
+
fonttools==4.40.0
|
19 |
+
frozenlist==1.3.3
|
20 |
+
fsspec==2023.6.0
|
21 |
+
gradio==3.36.1
|
22 |
+
gradio_client==0.2.8
|
23 |
+
h11==0.14.0
|
24 |
+
httpcore==0.17.3
|
25 |
+
httpx==0.24.1
|
26 |
+
huggingface-hub==0.16.4
|
27 |
+
idna==3.4
|
28 |
+
importlib-resources==6.0.0
|
29 |
+
Jinja2==3.1.2
|
30 |
+
jsonschema==4.18.0
|
31 |
+
jsonschema-specifications==2023.6.1
|
32 |
+
kiwisolver==1.4.4
|
33 |
+
linkify-it-py==2.0.2
|
34 |
+
markdown-it-py==2.2.0
|
35 |
+
MarkupSafe==2.1.3
|
36 |
+
matplotlib==3.7.2
|
37 |
+
mdit-py-plugins==0.3.3
|
38 |
+
mdurl==0.1.2
|
39 |
+
multidict==6.0.4
|
40 |
+
numpy==1.25.1
|
41 |
+
openai==0.27.8
|
42 |
+
orjson==3.9.2
|
43 |
+
packaging==23.1
|
44 |
+
pandas==2.0.3
|
45 |
+
Pillow==10.0.0
|
46 |
+
pydantic==2.0.2
|
47 |
+
pydantic_core==2.1.2
|
48 |
+
pydub==0.25.1
|
49 |
+
Pygments==2.15.1
|
50 |
+
pyparsing==3.0.9
|
51 |
+
python-dateutil==2.8.2
|
52 |
+
python-dotenv==1.0.0
|
53 |
+
python-multipart==0.0.6
|
54 |
+
pytz==2023.3
|
55 |
+
PyYAML==6.0
|
56 |
+
referencing==0.29.1
|
57 |
+
requests==2.31.0
|
58 |
+
rpds-py==0.8.10
|
59 |
+
semantic-version==2.10.0
|
60 |
+
six==1.16.0
|
61 |
+
sniffio==1.3.0
|
62 |
+
starlette==0.27.0
|
63 |
+
toolz==0.12.0
|
64 |
+
tqdm==4.65.0
|
65 |
+
typing_extensions==4.7.1
|
66 |
+
tzdata==2023.3
|
67 |
+
uc-micro-py==1.0.2
|
68 |
+
urllib3==2.0.3
|
69 |
+
uvicorn==0.22.0
|
70 |
+
websockets==11.0.3
|
71 |
+
yarl==1.9.2
|
72 |
+
zipp==3.16.0
|
utils.py
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
import random
import time

import openai
from dotenv import load_dotenv, find_dotenv

# Load variables from the nearest .env file into the process environment.
load_dotenv(find_dotenv())

# The openai client authenticates with the key taken from the environment;
# None here (missing .env / unset var) surfaces later as an auth error.
openai.api_key = os.getenv('OPENAI_API_KEY')
|
11 |
+
|
12 |
+
def chat_completion(messages: list) -> str:
    """Send *messages* to the OpenAI chat API and return the reply text.

    Parameters
    ----------
    messages:
        List of ``{'role': ..., 'content': ...}`` dicts in OpenAI chat format.

    Returns
    -------
    str
        The assistant's reply, or a generic fallback message when the API
        call (or response parsing) fails.
    """
    try:
        completion = openai.ChatCompletion.create(
            model='gpt-3.5-turbo',
            messages=messages
        )
        return completion['choices'][0]['message']['content']
    except Exception:
        # Was a bare `except:`, which also swallows KeyboardInterrupt and
        # SystemExit; `Exception` still covers API/network/parse errors
        # while letting process-control exceptions propagate.
        return 'We are facing a technical issue at this moment.'
|
21 |
+
|
22 |
+
def generate_messages(messages: list, query: str) -> list:
    """Build an OpenAI chat payload from Gradio history plus the new query.

    *messages* is the Gradio chat history: a list of (user_text, bot_text)
    pairs. The result starts with the fixed system prompt, replays the
    history as alternating user/assistant turns, and ends with *query* as
    the newest user turn.
    """
    payload = [{
        'role': 'system',
        'content': 'You are a helpful Network Design assistant.'
    }]
    for turn in messages:
        payload.append({'role': 'user', 'content': turn[0]})
        payload.append({'role': 'assistant', 'content': turn[1]})
    payload.append({'role': 'user', 'content': query})
    return payload
|
45 |
+
|
46 |
+
def generate_response(query: str, chat_history: list) -> tuple:
    """Gradio submit callback: answer *query* and extend the chat history.

    Parameters
    ----------
    query:
        The user's newly typed message.
    chat_history:
        Gradio history as (user_text, bot_text) pairs; mutated in place.

    Returns
    -------
    tuple
        ``('', chat_history)`` — empty string clears the textbox and the
        updated history refreshes the chat pane.
    """
    messages = generate_messages(chat_history, query)
    bot_message = chat_completion(messages)
    chat_history.append((query, bot_message))
    # Removed `time.sleep(random.randint(0, 5))`: residue from the Gradio
    # chatbot tutorial's mocked response, it only added up to 5 seconds of
    # artificial latency on top of the real API round-trip.
    return '', chat_history
|