ibraheem007 committed
Commit 12deee2 · verified · 1 Parent(s): dee5908

Update components/ui_components.py

Files changed (1): components/ui_components.py +313 -313
components/ui_components.py CHANGED
@@ -1,313 +1,313 @@
- import streamlit as st
- from db.helpers import get_research_stats
- from generator import model_manager
- from export_training_data_from_db import export_training_data_from_db
-
- def render_header():
-     st.title("🧠 TailorED - AI-Powered Educational Content Generator")
-
- def render_sidebar():
-     with st.sidebar:
-         # === RESEARCH MODEL SELECTION ===
-         st.subheader("🔬 Research Model Selection")
-
-         # Initialize model choice if not exists
-         if "selected_model" not in st.session_state:
-             st.session_state.selected_model = "groq"
-
-         # Check if we have existing content and need to show regeneration prompt
-         if (st.session_state.get("generated_output") and
-             st.session_state.get("current_page") == "generator" and
-             not st.session_state.get("showing_regeneration_prompt", False)):
-
-             # Store current model before potential change
-             previous_model = st.session_state.selected_model
-
-             # Model selection with regeneration logic
-             model_choice = st.radio(
-                 "Select AI Model:",
-                 options=[
-                     "🧪 Phi-3 (Research Model)",
-                     "📊 Groq (Training Data)"
-                 ],
-                 index=0 if st.session_state.selected_model == "phi3" else 1,
-                 key="research_model_selection"
-             )
-
-             # Determine new model selection
-             new_model = "phi3" if model_choice == "🧪 Phi-3 (Research Model)" else "groq"
-
-             # If model changed and we have content, show regeneration prompt
-             if new_model != previous_model:
-                 st.session_state.pending_model_switch = new_model
-                 st.session_state.previous_model = previous_model
-                 st.session_state.showing_regeneration_prompt = True
-                 st.rerun()
-
-         else:
-             # Normal model selection (no content or prompt already shown)
-             model_choice = st.radio(
-                 "Select AI Model:",
-                 options=[
-                     "🧪 Phi-3 (Research Model)",
-                     "📊 Groq (Training Data)"
-                 ],
-                 index=0 if st.session_state.selected_model == "phi3" else 1,
-                 key="research_model_selection"
-             )
-
-             # Update model selection
-             new_model = "phi3" if model_choice == "🧪 Phi-3 (Research Model)" else "groq"
-             if new_model != st.session_state.selected_model:
-                 st.session_state.selected_model = new_model
-                 if not st.session_state.get("generated_output"):
-                     st.success(f"✅ Switched to {new_model.upper()} model")
-
-         # Show current model status
-         current_model = st.session_state.selected_model
-         if current_model == "phi3":
-             st.info("🧪 **Testing Phi-3** - Research model being evaluated")
-         else:
-             st.success("📊 **Generating Training Data** - Groq outputs will train Phi-3")
-
-         # Render regeneration prompt if needed
-         if st.session_state.get("showing_regeneration_prompt", False):
-             render_regeneration_prompt()
-
-         # Research context
-         st.markdown("---")
-         st.markdown("### 🎯 Research Mission")
-         st.markdown("""
-         We're **fine-tuning Phi-3 Mini** using Groq's high-quality outputs.
-
-         **Your Role:** Compare both models to help improve Phi-3!
-         - Use **Groq** to create training examples
-         - Use **Phi-3** to test research progress
-         - Switch models to compare outputs on the same content
-         """)
-
-         st.markdown("---")
-
-         st.header("🎓 Research Progress")
-         st.write("**Your feedback trains better educational AI**")
-
-         try:
-             stats = get_research_stats()
-             render_progress_metrics(stats)
-             render_quality_indicators(stats)
-             render_research_status(stats)
-             render_service_status()
-         except Exception as e:
-             st.error(f"Sidebar failed: {e}")
-             render_default_sidebar()
-
- def render_regeneration_prompt():
-     """Show prompt to regenerate content with new model"""
-     st.markdown("---")
-     st.warning("🔄 **Model Changed!**")
-
-     previous_model = st.session_state.previous_model
-     new_model = st.session_state.pending_model_switch
-
-     st.write(f"You switched from **{previous_model.upper()}** to **{new_model.upper()}**.")
-     st.write("Would you like to regenerate the same content with the new model?")
-
-     col1, col2, col3 = st.columns([1, 1, 1])
-
-     with col1:
-         if st.button("✅ Yes, Regenerate", use_container_width=True, key="confirm_regenerate"):
-             # Trigger regeneration with new model
-             st.session_state.selected_model = new_model
-             st.session_state.regenerate_with_new_model = True
-             st.session_state.showing_regeneration_prompt = False
-             st.session_state.pending_model_switch = None
-             st.session_state.previous_model = None
-             st.rerun()
-
-     with col2:
-         if st.button("❌ No, Keep Current", use_container_width=True, key="keep_current"):
-             # Revert to previous model and keep current content
-             st.session_state.selected_model = st.session_state.previous_model
-             st.session_state.showing_regeneration_prompt = False
-             st.session_state.pending_model_switch = None
-             st.session_state.previous_model = None
-             st.rerun()
-
-     with col3:
-         if st.button("🏠 Go to Home", use_container_width=True, key="go_home"):
-             # Clear content and go to home
-             from components.session_manager import clear_session
-             clear_session()
-             st.session_state.showing_regeneration_prompt = False
-             st.session_state.pending_model_switch = None
-             st.session_state.previous_model = None
-             st.rerun()
-
- def render_progress_metrics(stats):
-     col1, col2 = st.columns(2)
-
-     with col1:
-         st.metric("Total Feedback", stats.get("total_feedback", 0))
-
-     with col2:
-         st.metric("Content Generated", stats.get("total_content", 0))
-
-     if stats.get("total_feedback", 0) > 0:
-         render_progress_bar(stats)
-
- def render_progress_bar(stats):
-     """Render progress bar towards research goal"""
-     st.subheader("📈 Our Progress")
-     target_feedback = 100
-     total_feedback = stats.get("total_feedback", 0)
-     progress_percent = min((total_feedback / target_feedback) * 100, 100)
-     st.progress(progress_percent / 100)
-     st.caption(f"Goal: 100 feedback points • {total_feedback}/100")
-
-     if total_feedback >= target_feedback:
-         st.balloons()
-         st.success("🎉 Amazing! We've reached our research goal!")
-
- def render_quality_indicators(stats):
-     st.subheader("✨ Model Quality Comparison")
-
-     # Safely get model scores with fallbacks
-     groq_scores = stats.get("groq_scores", {})
-     phi3_scores = stats.get("phi3_scores", {})
-
-     groq_clarity = groq_scores.get("clarity", 0)
-     groq_depth = groq_scores.get("depth", 0)
-     phi3_clarity = phi3_scores.get("clarity", 0)
-     phi3_depth = phi3_scores.get("depth", 0)
-
-     # Groq metrics
-     st.markdown("**📊 Groq (Training Data)**")
-     col1, col2 = st.columns(2)
-     with col1:
-         # Show delta if we have both scores
-         delta_clarity = None
-         if groq_clarity > 0 and phi3_clarity > 0:
-             delta_clarity = f"+{groq_clarity - phi3_clarity:.1f}"
-         st.metric("Avg Clarity", f"{groq_clarity}/5", delta=delta_clarity)
-     with col2:
-         delta_depth = None
-         if groq_depth > 0 and phi3_depth > 0:
-             delta_depth = f"+{groq_depth - phi3_depth:.1f}"
-         st.metric("Avg Depth", f"{groq_depth}/5", delta=delta_depth)
-
-     # Phi-3 metrics
-     st.markdown("**🧪 Phi-3 (Research Model)**")
-     col3, col4 = st.columns(2)
-     with col3:
-         delta_clarity_phi3 = None
-         if phi3_clarity > 0 and groq_clarity > 0:
-             delta_clarity_phi3 = f"{phi3_clarity - groq_clarity:.1f}"
-         st.metric("Avg Clarity", f"{phi3_clarity}/5", delta=delta_clarity_phi3)
-     with col4:
-         delta_depth_phi3 = None
-         if phi3_depth > 0 and groq_depth > 0:
-             delta_depth_phi3 = f"{phi3_depth - groq_depth:.1f}"
-         st.metric("Avg Depth", f"{phi3_depth}/5", delta=delta_depth_phi3)
-
-     # Show quality gap analysis
-     if groq_clarity > 0 and phi3_clarity > 0:
-         clarity_gap = groq_clarity - phi3_clarity
-         depth_gap = groq_depth - phi3_depth
-
-         if clarity_gap > 0 or depth_gap > 0:
-             st.caption(f"🔍 Quality gap: Clarity +{clarity_gap:.1f}, Depth +{depth_gap:.1f}")
-         elif clarity_gap < 0 or depth_gap < 0:
-             st.caption(f"🎉 Phi-3 leads: Clarity {abs(clarity_gap):.1f}, Depth {abs(depth_gap):.1f}")
-         else:
-             st.caption("⚖️ Models performing equally")
-
- def render_research_status(stats):
-     st.subheader("🔬 Research Progress")
-
-     col1, col2, col3 = st.columns(3)
-
-     with col1:
-         st.metric("Groq Data", stats.get("groq_feedback_count", 0))
-         st.caption("For fine-tuning")
-
-     with col2:
-         st.metric("High-Quality Groq", stats.get("high_quality_groq", 0))
-         st.caption("Fine-tuning ready")
-
-     with col3:
-         st.metric("Phi-3 Data", stats.get("phi3_feedback_count", 0))
-         st.caption("For comparison")
-
-     # Fine-tuning readiness
-     target_examples = 50
-     high_quality_groq = stats.get("high_quality_groq", 0)
-
-     if high_quality_groq >= target_examples:
-         st.success("🎉 Ready to fine-tune Phi-3 with Groq data!")
-         if st.button("🚀 Export Fine-tuning Data", use_container_width=True, type="primary"):
-             from export_training_data_from_db import export_training_data_from_db
-             if export_training_data_from_db():
-                 st.success("✅ Groq data exported for Phi-3 fine-tuning!")
-             else:
-                 st.error("Export failed")
-     else:
-         needed = target_examples - high_quality_groq
-         st.info(f"📊 Need {needed} more high-quality Groq examples")
-         progress = high_quality_groq / target_examples if target_examples > 0 else 0
-         st.progress(progress)
-         st.caption(f"Progress: {high_quality_groq}/{target_examples} examples")
-
- def render_service_status():
-     st.markdown("---")
-     st.subheader("🛜 Platform Status")
-
-     try:
-         status = model_manager.get_service_status()
-
-         # Create status columns
-         col1, col2 = st.columns(2)
-
-         with col1:
-             # Phi-3 Status
-             phi3_status = status["phi3"]
-             if phi3_status["server_healthy"] and phi3_status["model_available"]:
-                 st.success("🧪 Phi-3 Mini")
-                 st.caption("Research Model • Ready")
-             elif phi3_status["server_healthy"]:
-                 st.warning("🧪 Phi-3 Mini")
-                 st.caption("Research Model • Needs Setup")
-             else:
-                 st.error("🧪 Phi-3 Mini")
-                 st.caption("Research Model • Offline")
-
-         with col2:
-             # Groq Status
-             groq_status = status["groq"]
-             healthy_count = groq_status['healthy_providers']
-             total_providers = groq_status['total_providers']
-
-             if healthy_count == total_providers:
-                 st.success("📊 Groq API")
-                 st.caption("Training Data • Fully Operational")
-             elif healthy_count > 0:
-                 st.warning("📊 Groq API")
-                 st.caption(f"Training Data • {healthy_count}/{total_providers} providers")
-             else:
-                 st.error("📊 Groq API")
-                 st.caption("Training Data • Offline")
-
-         # Quick health indicator
-         if status["phi3"]["server_healthy"] and groq_status['healthy_providers'] > 0:
-             st.caption("💡 All systems operational - research ready!")
-         else:
-             st.caption("⚠️ Some services limited - research may be affected")
-
-     except Exception as e:
-         st.error("❌ Status check failed")
-         st.caption("Research platform may have issues")
-
- def render_default_sidebar():
-     st.info("🌟 Start generating content to contribute to our research!")
-     st.caption("Your feedback on Groq content will train Phi-3 to become a better educational AI")
-     if st.button("🔄 Refresh Progress", use_container_width=True, key="refresh_progress"):
-         st.rerun()
 
+ import streamlit as st
+ from db.helpers import get_research_stats
+ from generator import model_manager
+ from export_training_data_from_db import export_training_data_from_db
+
+ def render_header():
+     st.title("🧠 TailorED - AI-Powered Educational Content Generator")
+
+ def render_sidebar():
+     with st.sidebar:
+         # === RESEARCH MODEL SELECTION ===
+         st.subheader("🔬 Research Model Selection")
+
+         # Initialize model choice if not exists
+         if "selected_model" not in st.session_state:
+             st.session_state.selected_model = "groq"
+
+         # Check if we have existing content and need to show regeneration prompt
+         if (st.session_state.get("generated_output") and
+             st.session_state.get("current_page") == "generator" and
+             not st.session_state.get("showing_regeneration_prompt", False)):
+
+             # Store current model before potential change
+             previous_model = st.session_state.selected_model
+
+             # Model selection with regeneration logic
+             model_choice = st.radio(
+                 "Select AI Model:",
+                 options=[
+                     "🧪 Phi-3 (Research Model)",
+                     "📊 Groq (Training Data)"
+                 ],
+                 index=0 if st.session_state.selected_model == "phi3" else 1,
+                 key="research_model_selection"
+             )
+
+             # Determine new model selection
+             new_model = "phi3" if model_choice == "🧪 Phi-3 (Research Model)" else "groq"
+
+             # If model changed and we have content, show regeneration prompt
+             if new_model != previous_model:
+                 st.session_state.pending_model_switch = new_model
+                 st.session_state.previous_model = previous_model
+                 st.session_state.showing_regeneration_prompt = True
+                 st.rerun()
+
+         else:
+             # Normal model selection (no content or prompt already shown)
+             model_choice = st.radio(
+                 "Select AI Model:",
+                 options=[
+                     "🧪 Phi-3 (Research Model)",
+                     "📊 Groq (Training Data)"
+                 ],
+                 index=0 if st.session_state.selected_model == "phi3" else 1,
+                 key="research_model_selection"
+             )
+
+             # Update model selection
+             new_model = "phi3" if model_choice == "🧪 Phi-3 (Research Model)" else "groq"
+             if new_model != st.session_state.selected_model:
+                 st.session_state.selected_model = new_model
+                 if not st.session_state.get("generated_output"):
+                     st.success(f"✅ Switched to {new_model.upper()} model")
+
+         # Show current model status
+         current_model = st.session_state.selected_model
+         if current_model == "phi3":
+             st.info("🧪 **Testing Phi-3** - Research model being evaluated")
+         else:
+             st.success("📊 **Generating Training Data** - Groq outputs will train Phi-3")
+
+         # Render regeneration prompt if needed
+         if st.session_state.get("showing_regeneration_prompt", False):
+             render_regeneration_prompt()
+
+         # Research context
+         st.markdown("---")
+         st.markdown("### 🎯 Research Mission")
+         st.markdown("""
+         We're **fine-tuning Phi-3 Mini** using Groq's high-quality outputs.
+
+         **Your Role:** Compare both models to help improve Phi-3!
+         - Use **Groq** to create training examples
+         - Use **Phi-3** to test research progress
+         - Switch models to compare outputs on the same content
+         """)
+
+         st.markdown("---")
+
+         st.header("🎓 Research Progress")
+         st.write("**Your feedback trains better educational AI**")
+
+         try:
+             stats = get_research_stats()
+             render_progress_metrics(stats)
+             render_quality_indicators(stats)
+             render_research_status(stats)
+             render_service_status()
+         except Exception as e:
+             st.error(f"Sidebar failed: {e}")
+             render_default_sidebar()
+
+ def render_regeneration_prompt():
+     """Show prompt to regenerate content with new model"""
+     st.markdown("---")
+     st.warning("🔄 **Model Changed!**")
+
+     previous_model = st.session_state.previous_model
+     new_model = st.session_state.pending_model_switch
+
+     st.write(f"You switched from **{previous_model.upper()}** to **{new_model.upper()}**.")
+     st.write("Would you like to regenerate the same content with the new model?")
+
+     col1, col2, col3 = st.columns([1, 1, 1])
+
+     with col1:
+         if st.button("✅ Yes, Regenerate", use_container_width=True, key="confirm_regenerate"):
+             # Trigger regeneration with new model
+             st.session_state.selected_model = new_model
+             st.session_state.regenerate_with_new_model = True
+             st.session_state.showing_regeneration_prompt = False
+             st.session_state.pending_model_switch = None
+             st.session_state.previous_model = None
+             st.rerun()
+
+     with col2:
+         if st.button("❌ No, Keep Current", use_container_width=True, key="keep_current"):
+             # Revert to previous model and keep current content
+             st.session_state.selected_model = st.session_state.previous_model
+             st.session_state.showing_regeneration_prompt = False
+             st.session_state.pending_model_switch = None
+             st.session_state.previous_model = None
+             st.rerun()
+
+     with col3:
+         if st.button("🏠 Go to Home", use_container_width=True, key="go_home"):
+             # Clear content and go to home
+             from components.session_manager import clear_session
+             clear_session()
+             st.session_state.showing_regeneration_prompt = False
+             st.session_state.pending_model_switch = None
+             st.session_state.previous_model = None
+             st.rerun()
+
+ def render_progress_metrics(stats):
+     col1, col2 = st.columns(2)
+
+     with col1:
+         st.metric("Total Feedback", stats.get("total_feedback", 0))
+
+     with col2:
+         st.metric("Content Generated", stats.get("total_content", 0))
+
+     if stats.get("total_feedback", 0) > 0:
+         render_progress_bar(stats)
+
+ def render_progress_bar(stats):
+     """Render progress bar towards research goal"""
+     st.subheader("📈 Our Progress")
+     target_feedback = 1000
+     total_feedback = stats.get("total_feedback", 0)
+     progress_percent = min((total_feedback / target_feedback) * 100, 100)
+     st.progress(progress_percent / 100)
+     st.caption(f"Goal: {target_feedback} feedback points • {total_feedback}/{target_feedback}")
+
+     if total_feedback >= target_feedback:
+         st.balloons()
+         st.success("🎉 Amazing! We've reached our research goal!")
+
+ def render_quality_indicators(stats):
+     st.subheader("✨ Model Quality Comparison")
+
+     # Safely get model scores with fallbacks
+     groq_scores = stats.get("groq_scores", {})
+     phi3_scores = stats.get("phi3_scores", {})
+
+     groq_clarity = groq_scores.get("clarity", 0)
+     groq_depth = groq_scores.get("depth", 0)
+     phi3_clarity = phi3_scores.get("clarity", 0)
+     phi3_depth = phi3_scores.get("depth", 0)
+
+     # Groq metrics
+     st.markdown("**📊 Groq (Training Data)**")
+     col1, col2 = st.columns(2)
+     with col1:
+         # Show delta if we have both scores
+         delta_clarity = None
+         if groq_clarity > 0 and phi3_clarity > 0:
+             delta_clarity = f"+{groq_clarity - phi3_clarity:.1f}"
+         st.metric("Avg Clarity", f"{groq_clarity}/5", delta=delta_clarity)
+     with col2:
+         delta_depth = None
+         if groq_depth > 0 and phi3_depth > 0:
+             delta_depth = f"+{groq_depth - phi3_depth:.1f}"
+         st.metric("Avg Depth", f"{groq_depth}/5", delta=delta_depth)
+
+     # Phi-3 metrics
+     st.markdown("**🧪 Phi-3 (Research Model)**")
+     col3, col4 = st.columns(2)
+     with col3:
+         delta_clarity_phi3 = None
+         if phi3_clarity > 0 and groq_clarity > 0:
+             delta_clarity_phi3 = f"{phi3_clarity - groq_clarity:.1f}"
+         st.metric("Avg Clarity", f"{phi3_clarity}/5", delta=delta_clarity_phi3)
+     with col4:
+         delta_depth_phi3 = None
+         if phi3_depth > 0 and groq_depth > 0:
+             delta_depth_phi3 = f"{phi3_depth - groq_depth:.1f}"
+         st.metric("Avg Depth", f"{phi3_depth}/5", delta=delta_depth_phi3)
+
+     # Show quality gap analysis
+     if groq_clarity > 0 and phi3_clarity > 0:
+         clarity_gap = groq_clarity - phi3_clarity
+         depth_gap = groq_depth - phi3_depth
+
+         if clarity_gap > 0 or depth_gap > 0:
+             st.caption(f"🔍 Quality gap: Clarity +{clarity_gap:.1f}, Depth +{depth_gap:.1f}")
+         elif clarity_gap < 0 or depth_gap < 0:
+             st.caption(f"🎉 Phi-3 leads: Clarity {abs(clarity_gap):.1f}, Depth {abs(depth_gap):.1f}")
+         else:
+             st.caption("⚖️ Models performing equally")
+
+ def render_research_status(stats):
+     st.subheader("🔬 Research Progress")
+
+     col1, col2, col3 = st.columns(3)
+
+     with col1:
+         st.metric("Groq Data", stats.get("groq_feedback_count", 0))
+         st.caption("For fine-tuning")
+
+     with col2:
+         st.metric("High-Quality Groq", stats.get("high_quality_groq", 0))
+         st.caption("Fine-tuning ready")
+
+     with col3:
+         st.metric("Phi-3 Data", stats.get("phi3_feedback_count", 0))
+         st.caption("For comparison")
+
+     # Fine-tuning readiness
+     target_examples = 50
+     high_quality_groq = stats.get("high_quality_groq", 0)
+
+     if high_quality_groq >= target_examples:
+         st.success("🎉 Ready to fine-tune Phi-3 with Groq data!")
+         if st.button("🚀 Export Fine-tuning Data", use_container_width=True, type="primary"):
+             from export_training_data_from_db import export_training_data_from_db
+             if export_training_data_from_db():
+                 st.success("✅ Groq data exported for Phi-3 fine-tuning!")
+             else:
+                 st.error("Export failed")
+     else:
+         needed = target_examples - high_quality_groq
+         st.info(f"📊 Need {needed} more high-quality Groq examples")
+         progress = high_quality_groq / target_examples if target_examples > 0 else 0
+         st.progress(progress)
+         st.caption(f"Progress: {high_quality_groq}/{target_examples} examples")
+
+ def render_service_status():
+     st.markdown("---")
+     st.subheader("🛜 Platform Status")
+
+     try:
+         status = model_manager.get_service_status()
+
+         # Create status columns
+         col1, col2 = st.columns(2)
+
+         with col1:
+             # Phi-3 Status
+             phi3_status = status["phi3"]
+             if phi3_status["server_healthy"] and phi3_status["model_available"]:
+                 st.success("🧪 Phi-3 Mini")
+                 st.caption("Research Model • Ready")
+             elif phi3_status["server_healthy"]:
+                 st.warning("🧪 Phi-3 Mini")
+                 st.caption("Research Model • Needs Setup")
+             else:
+                 st.error("🧪 Phi-3 Mini")
+                 st.caption("Research Model • Offline")
+
+         with col2:
+             # Groq Status
+             groq_status = status["groq"]
+             healthy_count = groq_status['healthy_providers']
+             total_providers = groq_status['total_providers']
+
+             if healthy_count == total_providers:
+                 st.success("📊 Groq API")
+                 st.caption("Training Data • Fully Operational")
+             elif healthy_count > 0:
+                 st.warning("📊 Groq API")
+                 st.caption(f"Training Data • {healthy_count}/{total_providers} providers")
+             else:
+                 st.error("📊 Groq API")
+                 st.caption("Training Data • Offline")
+
+         # Quick health indicator
+         if status["phi3"]["server_healthy"] and groq_status['healthy_providers'] > 0:
+             st.caption("💡 All systems operational - research ready!")
+         else:
+             st.caption("⚠️ Some services limited - research may be affected")
+
+     except Exception as e:
+         st.error("❌ Status check failed")
+         st.caption("Research platform may have issues")
+
+ def render_default_sidebar():
+     st.info("🌟 Start generating content to contribute to our research!")
+     st.caption("Your feedback on Groq content will train Phi-3 to become a better educational AI")
+     if st.button("🔄 Refresh Progress", use_container_width=True, key="refresh_progress"):
+         st.rerun()
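
The net change in this commit is the research-goal threshold in render_progress_bar (target_feedback: 100 to 1000). For context, a minimal sketch of how these components are presumably wired into the app's entry point; the app.py filename and the st.set_page_config values are assumptions, since only render_header and render_sidebar are defined in this file:

# app.py (hypothetical entry point; wiring assumed, not part of this commit)
import streamlit as st

from components.ui_components import render_header, render_sidebar

# Page config values are assumptions; this commit does not set them.
st.set_page_config(page_title="TailorED", page_icon="🧠", layout="wide")

render_header()   # renders the app title
render_sidebar()  # model selection, regeneration prompt, research stats, platform status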