PaulinaLamberg committed on
Commit
6d7f7c8
·
verified ·
1 Parent(s): b7bfb2d

functionality and styles added

Browse files
Files changed (1) hide show
  1. learnloop.py +172 -107
learnloop.py CHANGED
@@ -1,107 +1,172 @@
1
- import gradio as gr
2
- import sympy as sp
3
- import torch
4
- from transformers import AutoTokenizer, AutoModelForCausalLM
5
-
6
- MODEL_ID = "Qwen/Qwen2.5-0.5B-Instruct"
7
- SYSTEM_PROMPT = "You are a helpful tutor. Match the user's level."
8
-
9
- tok = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
10
- model = AutoModelForCausalLM.from_pretrained(
11
- MODEL_ID,
12
- torch_dtype=torch.float32, # CPU
13
- device_map=None
14
- )
15
- model.eval()
16
-
17
- def verify_math(expr_str: str) -> str:
18
- try:
19
- expr = sp.sympify(expr_str)
20
- simplified = sp.simplify(expr)
21
- return f"Simplified: ${sp.latex(simplified)}$"
22
- except Exception as e:
23
- return f"Could not verify with SymPy: {e}"
24
-
25
- def generate(question: str, level: str, step_by_step: bool) -> str:
26
- if not question.strip():
27
- return "Please enter a question."
28
- style = f"Level: {level}. {'Explain step-by-step.' if step_by_step else 'Be concise.'}"
29
- prompt = f"System: {SYSTEM_PROMPT}\n{style}\nUser: {question}\nAssistant:"
30
- inputs = tok(prompt, return_tensors="pt")
31
- with torch.no_grad():
32
- out = model.generate(
33
- **inputs,
34
- max_new_tokens=192,
35
- do_sample=True,
36
- temperature=0.7,
37
- top_p=0.95,
38
- pad_token_id=tok.eos_token_id
39
- )
40
- text = tok.decode(out[0], skip_special_tokens=True)
41
- if "Assistant:" in text:
42
- text = text.split("Assistant:", 1)[1].strip()
43
- is_math = any(ch in question for ch in "+-*/=^") or question.lower().startswith(("simplify","derive","integrate"))
44
- sympy_note = verify_math(question) if is_math else "No math verification needed."
45
- return f"{text}\n\n---\n**SymPy check:** {sympy_note}\n_Status: Transformers CPU_"
46
-
47
- def build_app():
48
- with gr.Blocks(title="LearnLoop CPU Space") as demo:
49
-
50
- # CSS styles and adding colours
51
- gr.HTML("""
52
- <style>
53
- /* Button colours */
54
- #explain-btn {
55
- background-color: #5499C7; /* blue Explain */
56
- color: white;
57
- border-radius: 8px;
58
- }
59
- #reset-btn {
60
- background-color: #EC7063; /* red Reset */
61
- color: white;
62
- border-radius: 8px;
63
- }
64
- /* Hover-effect */
65
- #explain-btn:hover, #reset-btn:hover {
66
- opacity: 0.85;
67
- }
68
- </style>
69
- """)
70
-
71
- # prints using instructions
72
- gr.Markdown("""
73
- # **LearnL**<span style="font-size:1.2em; color: #21618C">∞</span>**p AI Tutor**
74
- This app uses the [Qwen 2.5 model](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct)
75
- to explain questions at different skill levels. It can also verify
76
- mathematical expressions using the SymPy library.
77
-
78
- **How to use:**
79
- 1️⃣ Type your question or a mathematical expression
80
- 2️⃣ Select your level (Beginner, Intermediate, Advanced)
81
- 3️⃣ Choose whether you want a step-by-step explanation
82
- 4️⃣ Press **"Explain"**
83
-
84
- 💬 You can ask your question in **Finnish or English** —
85
- LearnLoop will reply in the same language you use.
86
- """)
87
-
88
- q = gr.Textbox(label="Your question", placeholder="e.g., simplify (x^2 - 1)/(x - 1)")
89
- level = gr.Dropdown(choices=["Beginner","Intermediate","Advanced"], value="Beginner", label="Level")
90
- step = gr.Checkbox(value=True, label="Step-by-step")
91
-
92
- # Markdown for results
93
- out = gr.Markdown()
94
-
95
- # buttons next to each other
96
- with gr.Row():
97
- btn = gr.Button("Explain", elem_id="explain-btn")
98
- reset_btn = gr.ClearButton([q, out], value="Reset", elem_id="reset-btn")
99
-
100
- # connect button to generate function
101
- btn.click(generate, [q, level, step], out)
102
-
103
-
104
- return demo
105
-
106
- if __name__ == "__main__":
107
- build_app().launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import sympy as sp
3
+ import torch
4
+ from transformers import AutoTokenizer, AutoModelForCausalLM
5
+
6
+
7
# Small instruct model that fits in CPU memory; system prompt steers the tutor tone.
MODEL_ID = "Qwen/Qwen2.5-0.5B-Instruct"
SYSTEM_PROMPT = "You are a helpful tutor. Match the user's level."

# Load tokenizer and model once at import time (downloads on first run).
tok = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    device_map=None,
    dtype=torch.float32,  # CPU
)
model.eval()  # inference only — disable dropout etc.
17
+
18
+
19
def verify_math(expr_str: str) -> str:
    """Try to simplify *expr_str* with SymPy and return a human-readable note.

    Returns either "Simplified: <expr>" on success or an explanatory
    failure message — it never raises.
    """
    # NOTE(review): sympify evaluates fairly arbitrary expressions; fine for a
    # demo, but do not expose directly to untrusted input without sanitization.
    try:
        simplified = sp.simplify(sp.sympify(expr_str))
    except Exception as exc:  # SympifyError and friends — report, don't crash
        return f"Could not verify with SymPy: {exc}"
    return f"Simplified: {simplified}"
27
+
28
+
29
# Command words that mark a question as a math task (English + Finnish).
_MATH_COMMANDS = ("simplify", "derive", "integrate",
                  "laske", "sievennä", "derivoi", "integroi")


def generate(question: str, level: str, step_by_step: bool) -> str:
    """Answer *question* with the LLM, tuned to *level*, plus a SymPy check.

    Args:
        question: the user's question or math expression.
        level: "Beginner", "Intermediate" or "Advanced" — controls both the
            prompt style and the answer-length budget.
        step_by_step: ask for a step-by-step explanation when True.

    Returns:
        Markdown: the model's answer, a SymPy verification note, and a status line.
    """
    if not question.strip():
        return "Please enter a question."

    # Style instruction appended to the system prompt.
    style = f"Level: {level}. {'Explain step-by-step.' if step_by_step else 'Be concise.'}"

    # Longer answers for more advanced levels (default covers "Advanced").
    max_tokens = {"Beginner": 128, "Intermediate": 192}.get(level, 256)

    # Final prompt in the simple "System / User / Assistant:" layout.
    prompt = (
        f"{SYSTEM_PROMPT}\n"
        f"{style}\n"
        f"User question: {question}\n"
        f"Assistant:"
    )

    inputs = tok(prompt, return_tensors="pt")
    with torch.no_grad():
        out = model.generate(
            **inputs,
            max_new_tokens=max_tokens,
            do_sample=False,  # greedy decoding — deterministic output
            pad_token_id=tok.eos_token_id,
        )

    # Decode and keep only the part after the "Assistant:" marker.
    text = tok.decode(out[0], skip_special_tokens=True)
    if "Assistant:" in text:
        text = text.split("Assistant:", 1)[1].strip()

    # Heuristic: operators or a leading command word mark a math task.
    q_lower = question.lower()
    is_math = any(ch in question for ch in "+-*/=^") or q_lower.startswith(_MATH_COMMANDS)

    if is_math:
        # BUG FIX: previously the raw question (e.g. "simplify (x^2 - 1)/(x - 1)")
        # was passed to SymPy, which cannot parse the command word and always
        # failed. Strip a leading command word so SymPy sees only the expression.
        expr = question
        for word in _MATH_COMMANDS:
            if q_lower.startswith(word):
                expr = question[len(word):].strip()
                break
        sympy_note = verify_math(expr)
    else:
        sympy_note = "No math verification needed."

    return f"{text}\n\n---\n**SymPy check:** {sympy_note}\n\n_Status: Transformers CPU_"
69
+
70
+
71
# Building app and UI
def build_app():
    """Construct the LearnLoop Gradio Blocks app and return it (call .launch() to serve)."""
    with gr.Blocks(title="LearnLoop CPU Space") as demo:

        # CSS styles and adding colours
        gr.HTML("""
        <style>

        .gradio-container {
            background-color: #EDF6FA !important; /* haalea sininen */
            padding: 24px;
            border-radius: 12px;
            box-shadow: 0 4px 12px rgba(0,0,0,0.05);
        }

        /* buttons */
        button {
            border-radius: 8px;
            transition: all 0.2s ease-in-out;
            font-weight: 500;
            letter-spacing: 0.5px;
        }
        button:hover {
            opacity: 0.9;
            transform: translateY(-1px);
        }
        button:active {
            filter: brightness(85%);
            transform: scale(0.98);
        }

        /* Explain ja Reset buttons */
        #explain-btn {
            background-color: #5499C7;
            color: white;
            border: 2px solid #2E86C1;
        }
        #reset-btn {
            background-color: #EC7063;
            color: white;
            border: 2px solid #CB4335;
        }
        #explain-btn:hover, #reset-btn:hover {
            opacity: 0.85;
        }
        #explain-btn:active, #reset-btn:active {
            filter: brightness(85%);
            transform: scale(0.98);
        }
        </style>
        """)

        # Usage instructions shown at the top of the page.
        gr.Markdown("""
        # **LearnL**<span style="font-size:1.2em; color: #21618C">∞</span>**p — AI Tutor**
        This app uses the [Qwen 2.5 model](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct)
        to explain questions at different skill levels. It can also verify
        mathematical expressions using the SymPy library.

        **How to use:**
        1️⃣ Type your question or a mathematical expression.
        2️⃣ Select your level (Beginner, Intermediate, Advanced).
        3️⃣ Choose whether you want a step-by-step explanation.
        4️⃣ Press **"Explain"** or **Enter** on your keyboard.
        5️⃣ If you want to enter a new question, you can press **"Reset"** or simply **type a new question**.

        💬 You can ask your question in **English**.
        """)

        # User inputs.
        q = gr.Textbox(label="Your question", placeholder="e.g., simplify (x^2 - 1)/(x - 1)", elem_id="question-box")
        level = gr.Dropdown(choices=["Beginner", "Intermediate", "Advanced"], value="Beginner", label="Level")
        step = gr.Checkbox(value=True, label="Step-by-step")

        # Results area. BUG FIX: the status "spinner" was created with
        # visible=False and an unused `loading_text` local, so it could never
        # appear. It is now always present (empty when idle) and is filled by
        # the first yield of the generator below while the model runs.
        loading = gr.Markdown("")
        out = gr.Markdown()

        # Buttons next to each other.
        with gr.Row():
            btn = gr.Button("Explain", elem_id="explain-btn")
            reset_btn = gr.ClearButton([q, out, loading], value="Reset", elem_id="reset-btn")

        # Generator wrapper: Gradio streams each yield to the outputs, so the
        # first yield paints the spinner immediately and the second replaces
        # it with the finished answer.
        def wrapped_generate(q_val, level_val, step_val):
            yield "⏳ Generating explanation...", ""
            yield "", generate(q_val, level_val, step_val)

        btn.click(fn=wrapped_generate, inputs=[q, level, step], outputs=[loading, out])
        q.submit(fn=wrapped_generate, inputs=[q, level, step], outputs=[loading, out])

    return demo
168
+
169
# Script entry point: build the UI and serve it.
if __name__ == "__main__":
    demo = build_app()
    demo.launch()
172
+