Update app.py
app.py CHANGED
@@ -1,10 +1,10 @@
 import os
 import gradio as gr
 from openai import OpenAI
-from prompts.main_prompt import MAIN_PROMPT
-from prompts.initial_prompt import INITIAL_PROMPT
+from prompts.main_prompt import MAIN_PROMPT
+from prompts.initial_prompt import INITIAL_PROMPT
 
-# ✅
+# ✅ Load API Key
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
 
 if not OPENAI_API_KEY:
@@ -12,22 +12,24 @@ if not OPENAI_API_KEY:
 
 client = OpenAI(api_key=OPENAI_API_KEY)
 
+# ✅ Ensure Conversation Happens Step by Step
 def respond(user_message, history):
     if not user_message:
         return "", history
 
+    # ✅ AI only sends ONE response at a time (step-by-step)
     try:
         assistant_reply = client.chat.completions.create(
             model="gpt-4o",
             messages=[
-                {"role": "system", "content": MAIN_PROMPT},
+                {"role": "system", "content": MAIN_PROMPT},
                 *[
                     {"role": "user", "content": u} if i % 2 == 0 else {"role": "assistant", "content": a}
                     for i, (u, a) in enumerate(history)
                 ],
                 {"role": "user", "content": user_message}
             ],
-            max_tokens=
+            max_tokens=256,  # ✅ Limits response size (so AI doesn't dump everything)
             temperature=0.7,
         ).choices[0].message.content
     except Exception as e:
@@ -37,17 +39,16 @@ def respond(user_message, history):
 
     return "", history
 
-# ✅ Gradio UI
+# ✅ Fix Gradio UI to Start Properly
 with gr.Blocks() as demo:
     gr.Markdown("# **AI-Guided Math PD Chatbot**")
 
-    # ✅ Start Chatbot with MAIN_PROMPT Instead of Initial Reflection
     chatbot = gr.Chatbot(
-        value=[("",
+        value=[("", INITIAL_PROMPT)],  # ✅ Starts with an introduction message
         height=500
     )
 
-    state_history = gr.State([("",
+    state_history = gr.State([("", INITIAL_PROMPT)])  # ✅ Ensures step-by-step history
 
     user_input = gr.Textbox(placeholder="Type your message here...", label="Your Input")
 
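The diff ends before the event wiring, so the snippet below is only a sketch of how `respond` is presumably hooked up further down in app.py. The choice of `inputs`/`outputs` and the `demo.launch()` call are assumptions, not part of this change; the actual file may route history through `state_history` instead of `chatbot`.

# Hypothetical wiring (not shown in this diff): each textbox submission sends the
# message plus the visible tuple history to respond(), which returns ("" , history)
# to clear the box and refresh the chat.
user_input.submit(
    respond,                        # defined above in app.py
    inputs=[user_input, chatbot],   # current message + (user, assistant) tuples
    outputs=[user_input, chatbot],  # clear the textbox, update the chat window
)

demo.launch()  # assumed to appear at the end of app.py, as in typical Gradio Spaces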