Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -108,17 +108,48 @@ def get_top_chunks(query, chunk_embeddings, text_chunks):
|
|
108 |
|
109 |
client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
|
110 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
111 |
def respond(message, history, mom_type):
|
112 |
top_cool_results = get_top_chunks(message, cool_chunk_embeddings, cleaned_cool_chunks) # Complete this line
|
|
|
|
|
113 |
#str_chunks = "\n".join(best_chunks)
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
122 |
if history:
|
123 |
messages.extend(history)
|
124 |
|
@@ -132,59 +163,50 @@ def respond(message, history, mom_type):
|
|
132 |
|
133 |
chatbot = gr.ChatInterface(respond, type="messages")
|
134 |
|
135 |
-
def respond_tutor(message, history, mom_type):
    """Answer `message` in the persona of a studious, tutor-like mom.

    Retrieves the most relevant tutor-corpus chunks for the query and
    grounds the LLM reply in them.

    Args:
        message: The user's current chat message.
        history: Prior chat turns as {"role": ..., "content": ...} dicts
            (may be empty or None).
        mom_type: Persona selector from the UI (not used for retrieval).

    Returns:
        The assistant's reply text, stripped of surrounding whitespace.
    """
    # Retrieve context chunks relevant to the question.
    top_tutor_results = get_top_chunks(message, tutor_chunk_embeddings, cleaned_tutor_chunks)

    # BUG FIX: the system prompt previously interpolated `mom_type` (a UI
    # label) where the retrieved context belongs; ground on the chunks,
    # matching how respond_strict uses its retrieval results.
    messages = [
        {"role": "system",
         "content": (
             "You are chatbot that plays the role of the user's extremely "
             "studious, tutor-like mom. Respond in full sentences, don't cut "
             f"yourself off. Base your response on the provided context: {top_tutor_results}"
         )},
        {"role": "user",
         "content": (
             f"Context:\n{top_tutor_results}\n\n"
             f"Question: {message}"  # BUG FIX: was "Question{message}" (no separator)
         )},
    ]

    if history:
        messages.extend(history)

    # Repeat the question as the final user turn so it follows the history.
    messages.append({"role": "user", "content": message})

    response = client.chat_completion(
        messages,
        temperature=0.2,  # low temperature for focused, tutor-like answers
    )
    return response['choices'][0]['message']['content'].strip()
|
156 |
|
157 |
-
def respond_strict(message, history):
    """Answer `message` in the persona of an extremely strict mom.

    Retrieves the most relevant strict-corpus chunks for the query and
    grounds the LLM reply in them.

    Args:
        message: The user's current chat message.
        history: Prior chat turns as {"role": ..., "content": ...} dicts
            (may be empty or None).

    Returns:
        The assistant's reply text, stripped of surrounding whitespace.
    """
    # Retrieve context chunks relevant to the question.
    top_strict_results = get_top_chunks(message, strict_chunk_embeddings, cleaned_strict_chunks)

    messages = [
        {"role": "system",
         "content": (
             "You are chatbot that plays the role of the user's extremely "
             "strict mom. Respond in full sentences, don't cut yourself off. "
             f"Base your response on the provided context: {top_strict_results}"
         )},
        {"role": "user",
         "content": (
             f"Context:\n{top_strict_results}\n\n"
             f"Question: {message}"  # BUG FIX: was "Question{message}" (no separator)
         )},
    ]

    # NOTE(review): the history/append lines below were lost in the diff
    # rendering; reconstructed from the identical pattern in respond_tutor.
    if history:
        messages.extend(history)

    messages.append({"role": "user", "content": message})

    response = client.chat_completion(
        messages,
        temperature=0.2,  # low temperature for focused, consistent answers
    )
    return response['choices'][0]['message']['content'].strip()
|
178 |
|
179 |
-
# App UI: a persona picker plus the chat interface.
#
# BUG FIX: gr.CheckboxGroup returns a *list* of selections, but `respond`
# compares `mom_type` against single strings ("Cool Mom", ...), so an
# equality check against a list never matches. gr.Radio yields exactly one
# string, which is what `respond` expects.
with gr.Blocks() as chatbot:
    with gr.Row():
        mom_type = gr.Radio(
            ['Cool Mom', 'Tutor Mom', 'Strict Mom'],
            label='Choose Your Mom',
            value='Cool Mom',  # default so mom_type is never None
        )

    # The selected persona is forwarded to `respond` as its third argument.
    gr.ChatInterface(
        fn=respond,
        additional_inputs=[mom_type],
        title="StudyMama",
    )

chatbot.launch()
|
|
|
108 |
|
109 |
client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
|
110 |
|
111 |
+
# Persona-selection UI: three buttons choose which "mom" persona the chatbot
# uses; the chat interface forwards that choice to `respond`.
#
# BUG FIX: the previous version used invalid lambda syntax (`fn=[]: ...`),
# referenced an undefined `handle_click`, and *called* `respond(message,
# history, ...)` at build time with undefined names in `outputs=`. Gradio
# event handlers must be callables wired through inputs/outputs; the persona
# is held in a gr.State that each button click overwrites.
with gr.Blocks() as chatbot:
    # Holds the currently selected persona; defaults to the cool mom.
    mom_type = gr.State("Cool Mom")

    with gr.Row():
        cool_button = gr.Button("Cool Mom")
        tutor_button = gr.Button("Tutor Mom")
        strict_button = gr.Button("Strict Mom")

    # Each click stores its persona string into the state component.
    cool_button.click(fn=lambda: "Cool Mom", inputs=[], outputs=mom_type)
    tutor_button.click(fn=lambda: "Tutor Mom", inputs=[], outputs=mom_type)
    strict_button.click(fn=lambda: "Strict Mom", inputs=[], outputs=mom_type)

    # The chat interface passes the stored persona to `respond` as its
    # third argument on every message.
    gr.ChatInterface(
        fn=respond,
        additional_inputs=[mom_type],
        title="StudyMama",
    )
|
126 |
+
|
127 |
def respond(message, history, mom_type):
    """Generate a chat reply in the selected "mom" persona, grounded in
    context chunks retrieved from that persona's corpus.

    Args:
        message: The user's current chat message.
        history: Prior chat turns as {"role": ..., "content": ...} dicts
            (may be empty or None).
        mom_type: One of "Cool Mom", "Tutor Mom", or "Strict Mom".

    Returns:
        The assistant's reply text, stripped of surrounding whitespace.

    Raises:
        ValueError: If `mom_type` is not a recognized persona.
    """
    # Only retrieve from the corpus we actually use (previously all three
    # corpora were searched on every call).
    if mom_type == "Cool Mom":
        top_results = get_top_chunks(message, cool_chunk_embeddings, cleaned_cool_chunks)
        system_content = (
            "You are chatbot that plays the role of the user's cool and super "
            "chill mom. Respond in full sentences, don't cut yourself off. Use "
            f"responses from this text file: {top_results} and respond very "
            "kindly. Do not be mean or strict at all"
        )
    elif mom_type == "Tutor Mom":
        # BUG FIX: this branch previously interpolated `mom_type` (the UI
        # label) as the grounding context; use the retrieved chunks instead.
        top_results = get_top_chunks(message, tutor_chunk_embeddings, cleaned_tutor_chunks)
        system_content = (
            "You are chatbot that plays the role of the user's extremely "
            "studious, tutor-like mom. Respond in full sentences, don't cut "
            f"yourself off. Base your response on the provided context: {top_results}"
        )
    elif mom_type == "Strict Mom":
        top_results = get_top_chunks(message, strict_chunk_embeddings, cleaned_strict_chunks)
        system_content = (
            "You are chatbot that plays the role of the user's extremely "
            "strict mom. Respond in full sentences, don't cut yourself off. "
            f"Base your response on the provided context: {top_results}"
        )
    else:
        # BUG FIX: an unrecognized persona previously left `messages`
        # undefined and crashed with NameError further down; fail loudly.
        raise ValueError(f"Unknown mom_type: {mom_type!r}")

    messages = [
        {"role": "system", "content": system_content},
        {"role": "user",
         "content": (
             f"Context:\n{top_results}\n\n"
             f"Question: {message}"  # BUG FIX: was "Question{message}" (no separator)
         )},
    ]

    if history:
        messages.extend(history)

    # NOTE(review): the tail below (final user turn, completion call, return)
    # was elided in the diff rendering; reconstructed from the identical
    # pattern in respond_tutor / respond_strict.
    messages.append({"role": "user", "content": message})

    response = client.chat_completion(
        messages,
        temperature=0.2,  # low temperature for focused, consistent answers
    )
    return response['choices'][0]['message']['content'].strip()
|
155 |
|
|
|
163 |
|
164 |
chatbot = gr.ChatInterface(respond, type="messages")
|
165 |
|
166 |
+
#def respond_tutor(message, history, mom_type):
|
167 |
+
# top_tutor_results = get_top_chunks(message, tutor_chunk_embeddings, cleaned_tutor_chunks)
|
168 |
+
# #str_chunks = "\n".join(best_chunks)
|
169 |
|
170 |
+
# messages = [{"role": "system", "content": f"You are chatbot that plays the role of the user's extremely studious, tutor-like mom. Respond in full sentences, don't cut yourself off. Base your response on the provided context: {mom_type}"},
|
171 |
+
# {"role": "user",
|
172 |
+
# "content": (
|
173 |
+
# f"Context:\n{top_tutor_results}\n\n"
|
174 |
+
# f"Question{message}"
|
175 |
+
# )}]
|
176 |
|
177 |
+
# if history:
|
178 |
+
# messages.extend(history)
|
179 |
|
180 |
+
# messages.append({"role": "user", "content": message})
|
181 |
|
182 |
+
# response = client.chat_completion(
|
183 |
+
# messages,
|
184 |
+
# temperature = 0.2
|
185 |
)
|
186 |
+
# return response['choices'][0]['message']['content'].strip()
|
187 |
|
188 |
+
#def respond_strict(message, history):
|
189 |
+
# top_strict_results = get_top_chunks(message, strict_chunk_embeddings, cleaned_strict_chunks)
|
190 |
#str_chunks = "\n".join(best_chunks)
|
191 |
|
192 |
+
# messages = [{"role": "system", "content": f"You are chatbot that plays the role of the user's extremely strict mom. Respond in full sentences, don't cut yourself off. Base your response on the provided context: {top_strict_results}"},
|
193 |
+
# {"role": "user",
|
194 |
+
# "content": (
|
195 |
+
# f"Context:\n{top_strict_results}\n\n"
|
196 |
+
# f"Question{message}"
|
197 |
+
# )}]
|
198 |
|
199 |
+
# if history:
|
200 |
+
# messages.extend(history)
|
201 |
+
#
|
202 |
+
# messages.append({"role": "user", "content": message})
|
203 |
|
204 |
+
# response = client.chat_completion(
|
205 |
+
# messages,
|
206 |
+
# temperature = 0.2
|
207 |
+
# )
|
208 |
+
# return response['choices'][0]['message']['content'].strip()
|
209 |
|
|
|
|
|
|
|
|
|
|
|
210 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
211 |
|
212 |
chatbot.launch()
|