Update app.py
app.py CHANGED
@@ -1,33 +1,25 @@
 import gradio as gr
-import time
-import re
 
 MODELS = ["Mixtral-8x7B-Instruct-v0.1"]
 
 def chat_with_ai(message, chat_history, system_prompt):
-    """Formats the chat history for
-
-
-
-
-
-
-
-
-    """Simulate API call and get the response. Replace with actual API call."""
-    # Simulate a delay
-    time.sleep(1)
-    # Dummy response
+    """Formats the chat history for processing."""
+    history = [{"role": "system", "content": system_prompt}]
+    history.extend(chat_history)
+    history.append({"role": "user", "content": message})
+    return history
+
+def generate(message, chat_history, model, system_prompt, thinking_budget):
+    """Simulates response generation."""
+    # Dummy logic for simulating a response
     response = f"Simulated response for: {message}"
-
+
+    # Update chat history
+    chat_history.append({"role": "user", "content": message})
+    chat_history.append({"role": "assistant", "content": response})
 
-
-    """Generates the chatbot response."""
-    response, thinking_time = respond(message, history, model, system_prompt, thinking_budget)
-    history.append({"user": message, "assistant": response})
-    return history, ""
+    return chat_history, ""
 
-# Define the default system prompt
 DEFAULT_SYSTEM_PROMPT = """
 You are a helpful assistant in normal conversation.
 When given a problem to solve, you are an expert problem-solving assistant.
@@ -43,16 +35,14 @@ with gr.Blocks() as demo:
     thinking_budget = gr.Slider(minimum=1, maximum=100, value=10, step=1, label="Thinking Budget")
 
     system_prompt = gr.Textbox(value=DEFAULT_SYSTEM_PROMPT, lines=15, label="System Prompt")
-    chatbot = gr.Chatbot(label="Chat"
+    chatbot = gr.Chatbot(label="Chat")
     msg = gr.Textbox(label="Type your message here...", placeholder="Enter your message...")
 
-    # Clear chat
     def clear_chat():
         return [], ""
 
     gr.Button("Clear Chat").click(clear_chat, inputs=None, outputs=[chatbot, msg])
 
-    # Generate response on message submission
     msg.submit(generate, inputs=[msg, chatbot, model, system_prompt, thinking_budget], outputs=[chatbot, msg])
 
 demo.launch()
|