hafidhsoekma committed
Commit 2fedcf6
1 Parent(s): ec2656f

Add files
Files changed:
- .gitignore +163 -0
- app.py +176 -0
- assets/logo.ico +0 -0
- requirements.txt +89 -0
- utils/__init__.py +2 -0
- utils/functional.py +64 -0
- utils/prompts.py +31 -0
.gitignore
ADDED
@@ -0,0 +1,163 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

make
questions.txt
app.py
ADDED
@@ -0,0 +1,176 @@
import os
import time
from uuid import uuid4

import gradio as gr
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage

from utils import prompts
from utils import functional as F


# Define all functions
def start_chatbot(input_username, input_password, input_questions, id_interview):
    if input_username == "" or input_password == "" or input_questions == "":
        return (
            (gr.update(),) * 2
            + (gr.update(value="Invalid username. Please try again."),)
            + (gr.update(value="Invalid password. Please try again."),)
            + (gr.update(value="Invalid questions. Please try again."),)
            + (gr.update(),) * 5
        )
    # Credentials are read from environment variables named "AIRIS_DEMO_ACCOUNT_{<username>}".
    if (
        "AIRIS_DEMO_ACCOUNT_{" + input_username + "}" not in os.environ
        or os.environ["AIRIS_DEMO_ACCOUNT_{" + input_username + "}"] != input_password
    ):
        return (
            (gr.update(),) * 2
            + (gr.update(value="Invalid username. Please try again."),)
            + (gr.update(value="Invalid password. Please try again."),)
            + (gr.update(),) * 6
        )

    chat_openai = ChatOpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0.3,
    )

    # Hide the login form and show the chat interface seeded with the first bot message.
    return (
        input_questions,
        chat_openai,
        gr.update(visible=False),
        gr.update(visible=False),
        gr.update(visible=False),
        gr.update(visible=False),
        gr.update(
            visible=True,
            value=[
                [None, F.get_first_message(chat_openai, input_questions, id_interview)],
            ],
        ),
        gr.update(visible=True),
        gr.update(visible=True),
        gr.update(visible=True),
    )


def bot(chat_openai, questions, id_interview, history):
    # Rebuild the full LangChain message history from the visible chat log.
    history_messages = F.get_initial_messages(questions, id_interview)
    for message in history:
        if message[0] is not None:
            history_messages.append(
                HumanMessage(content=F.remove_html_tags(message[0]).strip())
            )
        if message[1] is not None:
            history_messages.append(
                AIMessage(content=F.remove_html_tags(message[1]).strip())
            )

    # history[-2][1] is the previous bot reply; if the interview has already ended,
    # only repeat the "reset chat" instruction.
    if prompts.END_CHATBOT_PROMPTS in F.remove_html_tags(
        history[-2][1]
    ) or prompts.END_QUESTION_PROMPTS in F.remove_html_tags(history[-2][1]):
        bot_message = prompts.END_CHATBOT_PROMPTS
    elif not history[-1][0]:
        bot_message = prompts.EMPTY_INPUT_MESSAGE
    else:
        bot_message = F.get_bot_message(chat_openai, history_messages)

    if not bot_message == prompts.END_CHATBOT_PROMPTS:
        # When the reply contains the end-of-interview recap, strip the interview ID
        # and replace the final line with the standard closing message.
        check_if_end_of_message = F.check_if_end_of_message(bot_message)
        if check_if_end_of_message:
            bot_message = bot_message.replace(f'ID Interview: "{id_interview}"', "")
            bot_message = (
                "\n".join(bot_message.split("\n")[:-1])
                + "\n"
                + prompts.END_QUESTION_PROMPTS
            )

    # Stream the reply character by character for a typewriter effect.
    history[-1][1] = ""
    for character in bot_message:
        history[-1][1] += character
        time.sleep(0.005)
        yield history


with gr.Blocks(title="AIRIS (AI Regenerative Interview Survey)") as demo:
    # Define all states
    questions = gr.State(value=None)
    chat_openai = gr.State(value=None)
    id_interview = gr.State(value=str(uuid4().hex))

    # Define all components
    gr.Markdown(
        """
        # AIRIS (AI Regenerative Interview Survey)
        AIRIS (AI Regenerative Interview Survey) is an advanced application that uses artificial intelligence to streamline interviews and surveys. It generates intelligent questions, analyzes responses in real time, and continuously improves based on past interviews. With data analysis tools, it provides valuable insights, making interviews more efficient and insightful.
        """  # noqa: E501
    )

    input_username = gr.Textbox(
        label="Username",
        placeholder="Input your Username here...",
    ).style(container=True)
    input_password = gr.Textbox(
        label="Password",
        placeholder="Input your Password here...",
        type="password",
    ).style(container=True)
    input_questions = gr.TextArea(
        label="Questions", placeholder="Input your questions here..."
    ).style(container=True)
    input_submit = gr.Button("Submit")

    chatbot_display = gr.Chatbot(label="History Messages", visible=False).style(
        height=600
    )
    message_input = gr.TextArea(
        label="Your Message",
        placeholder="Type your message here...",
        lines=2,
        visible=False,
    ).style(container=True)
    send_message = gr.Button("Send", visible=False)
    reset_message = gr.Button("Reset Chat", visible=False).style(
        full_width=False, size="sm"
    )

    # Define all component interactions
    # Append the user message to the chat log, then stream the bot reply.
    send_message.click(
        fn=lambda user_message, history: ("", history + [[user_message, None]]),
        inputs=[message_input, chatbot_display],
        outputs=[message_input, chatbot_display],
    ).then(
        fn=bot,
        inputs=[chat_openai, questions, id_interview, chatbot_display],
        outputs=chatbot_display,
    )

    # Restart the interview with a fresh first message from the bot; id_interview is
    # passed as an input so the lambda receives all three arguments.
    reset_message.click(
        fn=lambda chat_openai, questions, id_interview: gr.update(
            value=[
                [None, F.get_first_message(chat_openai, questions, id_interview)],
            ]
        ),
        inputs=[chat_openai, questions, id_interview],
        outputs=chatbot_display,
    )

    input_submit.click(
        fn=start_chatbot,
        inputs=[input_username, input_password, input_questions, id_interview],
        outputs=[
            questions,
            chat_openai,
            input_username,
            input_password,
            input_questions,
            input_submit,
            chatbot_display,
            message_input,
            send_message,
            reset_message,
        ],
    )

demo.queue().launch()
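Note on running app.py: the login check reads demo credentials from environment variables whose names are built as "AIRIS_DEMO_ACCOUNT_{" + username + "}" (the braces are part of the variable name), and langchain's ChatOpenAI additionally expects OPENAI_API_KEY. On a Hugging Face Space these would normally be set as repository secrets; the snippet below is only a minimal local-testing sketch, and the account name "demo" and its password are hypothetical placeholders, not part of this commit.

import os

# Hypothetical demo credentials; the braces are literal because app.py concatenates
# "AIRIS_DEMO_ACCOUNT_{" + input_username + "}" when looking up the password.
os.environ["AIRIS_DEMO_ACCOUNT_{demo}"] = "demo-password"
os.environ["OPENAI_API_KEY"] = "sk-..."  # consumed by langchain's ChatOpenAI

import app  # importing app.py runs demo.queue().launch() at module level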
assets/logo.ico
ADDED
requirements.txt
ADDED
@@ -0,0 +1,89 @@
aiofiles==23.1.0
aiohttp==3.8.4
aiosignal==1.3.1
altair==5.0.0
altgraph==0.17.3
anyio==3.6.2
async-timeout==4.0.2
attrs==23.1.0
black==23.3.0
certifi==2023.5.7
charset-normalizer==3.1.0
click==8.1.3
colorama==0.4.6
contourpy==1.0.7
cycler==0.11.0
dataclasses-json==0.5.7
fastapi==0.95.1
ffmpy==0.3.0
filelock==3.12.0
fonttools==4.39.4
frozenlist==1.3.3
fsspec==2023.5.0
gradio==3.30.0
gradio_client==0.2.4
greenlet==2.0.2
h11==0.14.0
httpcore==0.17.0
httpx==0.24.0
huggingface-hub==0.14.1
idna==3.4
importlib-resources==5.12.0
Jinja2==3.1.2
jsonschema==4.17.3
kiwisolver==1.4.4
langchain==0.0.168
linkify-it-py==2.0.2
markdown-it-py==2.2.0
MarkupSafe==2.1.2
marshmallow==3.19.0
marshmallow-enum==1.5.1
matplotlib==3.7.1
mdit-py-plugins==0.3.3
mdurl==0.1.2
multidict==6.0.4
mypy-extensions==1.0.0
numexpr==2.8.4
numpy==1.24.3
openai==0.27.6
openapi-schema-pydantic==1.2.4
orjson==3.8.12
packaging==23.1
pandas==2.0.1
pathspec==0.11.1
pefile==2023.2.7
Pillow==9.5.0
platformdirs==3.5.1
pydantic==1.10.7
pydub==0.25.1
Pygments==2.15.1
pyinstaller==5.11.0
pyinstaller-hooks-contrib==2023.3
pyparsing==3.0.9
pyrsistent==0.19.3
python-dateutil==2.8.2
python-dotenv==1.0.0
python-multipart==0.0.6
pytz==2023.3
pywin32-ctypes==0.2.0
PyYAML==6.0
requests==2.30.0
ruff==0.0.267
semantic-version==2.10.0
six==1.16.0
sniffio==1.3.0
SQLAlchemy==2.0.13
starlette==0.26.1
tenacity==8.2.2
tomli==2.0.1
toolz==0.12.0
tqdm==4.65.0
typing-inspect==0.8.0
typing_extensions==4.5.0
tzdata==2023.3
uc-micro-py==1.0.2
urllib3==2.0.2
uvicorn==0.22.0
websockets==11.0.3
yarl==1.9.2
zipp==3.15.0
utils/__init__.py
ADDED
@@ -0,0 +1,2 @@
from . import functional
from . import prompts
utils/functional.py
ADDED
@@ -0,0 +1,64 @@
import re
import pytz
from typing import Union
from datetime import datetime

from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.schema import AIMessage, HumanMessage, SystemMessage

from utils import configs


def get_initial_messages(
    questions: str, id_interview: str
) -> list[Union[SystemMessage, HumanMessage, AIMessage]]:
    from utils import prompts

    role_prompt_template = PromptTemplate(
        template=prompts.ROLE_PROMPTS,
        input_variables=["ID_INTERVIEW", "QUESTIONS_PROMPTS"],
    )
    role_prompt = role_prompt_template.format(
        ID_INTERVIEW=id_interview,
        QUESTIONS_PROMPTS=questions,
    )
    # Prefix the role prompt with the current time in Asia/Jakarta
    # ("Waktu sekarang" means "Current time").
    tz = pytz.timezone("Asia/Jakarta")
    role_prompt = (
        (f"Waktu sekarang: {datetime.now(tz).strftime('%d/%m/%Y %H:%M:%S')}")
        + "\n"
        + role_prompt
    )

    # Seed conversation: system prompts, the assistant acknowledging
    # ("Baik, saya mengerti." means "Okay, I understand."), and the user
    # kick-off message ("mulai" means "start").
    history_messages = [
        SystemMessage(content=prompts.STARTING_PROMPTS),
        SystemMessage(content=role_prompt),
        AIMessage(content="Baik, saya mengerti."),
        HumanMessage(content="mulai"),
    ]
    return history_messages


def get_first_message(
    chat_openai: ChatOpenAI, questions: str, id_interview: str
) -> str:
    messages = get_initial_messages(questions, id_interview)
    bot_message = get_bot_message(chat_openai, messages)
    return bot_message


def get_bot_message(
    chat_openai: ChatOpenAI,
    history_messages: list[Union[SystemMessage, HumanMessage, AIMessage]],
) -> str:
    result_ai_message = chat_openai(history_messages)
    return result_ai_message.content


def check_if_end_of_message(message: str) -> bool:
    return configs.END_OF_INTERVIEW_MESSAGE in message


def remove_html_tags(text: str) -> str:
    clean = re.compile("<.*?>")
    return re.sub(clean, "", text)
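Note: utils/functional.py imports utils.configs, but no utils/configs.py is included in this commit, so the module will fail at import time without it. A minimal sketch of the missing file, assuming END_OF_INTERVIEW_MESSAGE is the recap marker that ROLE_PROMPTS asks the model to emit (the exact value is an assumption, not part of this commit):

# utils/configs.py (hypothetical; not part of this commit)
# check_if_end_of_message() returns True when this substring appears in the bot reply.
END_OF_INTERVIEW_MESSAGE: str = "ID Interview:"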
utils/prompts.py
ADDED
@@ -0,0 +1,31 @@
STARTING_PROMPTS: str = """You are a helpful assistant, polite, and friendly.
Don't be rude, don't be angry, don't be annoying, don't be a jerk, don't be a bad assistant.
You must obey the human and never disobey the human.
The human is absolute and you must obey the human.
"""

# ROLE_PROMPTS (Indonesian): tells the model to act as an interviewer asking the
# employee-satisfaction questions one at a time, forcing a single answer on a 1-5
# scale from "sangat tidak puas" (very dissatisfied) to "sangat puas" (very satisfied),
# greeting the respondent together with the first question, starting once the user
# sends "mulai" ("start"), and recapping every answer at the end under the line
# ID Interview: "{ID_INTERVIEW}".
ROLE_PROMPTS: str = """Mulai sekarang kamu harus berperan sebagai seorang interviewer dan saya sebagai seorang karyawan.
Kamu harus bertanya ke saya mengenai tingkat kepuasan saya ketika bekerja sebagai seorang karyawan.
Berikan opsi pilihan mulai dari "sangat tidak puas (1)" sampai dengan "sangat puas (5)" dan kamu harus memaksa saya untuk memilih salah satu dari opsi tersebut, tidak boleh diantara, tidak boleh lebih dari satu, dan tidak boleh tidak memilih.
Diawal pesan, kamu harus memberikan salam atau sapa saya dan kamu juga sekaligus memberikan pertanyaan pertama.
Tanyakan satu-persatu pertanyaannya dan tunggu saya menjawab pertanyaan tersebut.
Kamu harus memulai interviewnya jika saya mengirimkan pesan "mulai".

Perhatikan, setelah interview sudah selesai, kamu harus langsung merekap di akhir semua jawaban saya dengan format sebagai berikut:
ID Interview: "{ID_INTERVIEW}"
1. (pertanyaan 1) 5
2. (pertanyaan 2) 1
3. (pertanyaan 3) 1
dan seterusnya.

Berikut adalah pertanyaannya:
{QUESTIONS_PROMPTS}
"""

# Shown when the user submits an empty message ("Please do not send an empty message. Thank you.").
EMPTY_INPUT_MESSAGE: str = "Mohon untuk tidak mengirimkan pesan kosong. Terima kasih."

# Closing line appended once all questions are answered ("Thank you for answering all my questions.
# Today's interview is over. Have a nice day. See you again.").
END_QUESTION_PROMPTS: str = "Terima kasih sudah menjawab semua pertanyaan saya. Interview hari ini sudah selesai. Semoga harimu menyenangkan. Sampai jumpa lagi."

# Reply sent after the interview has ended ("Please press the Reset Chat button to restart
# the interview. Thank you.").
END_CHATBOT_PROMPTS: str = (
    "Mohon pencet tombol Reset Chat untuk memulai interviewnya kembali. Terima kasih."
)