jblast94 committed
Commit d908ebb · verified · 1 Parent(s): 34f8426

Update app.py

Files changed (1)
  1. app.py +86 -45
app.py CHANGED
@@ -8,8 +8,9 @@ from character_learner import CharacterLearner
 from audio_handler import AudioHandler
 
 class ConversationalAgent:
-    def __init__(self):
-        self.llm_handler = LLMHandler()
+    def __init__(self, model_name: str | None = None):
+        # Allow dynamic model override from UI; fall back to env / default.
+        self.llm_handler = LLMHandler(model_override=model_name)
         self.memory_manager = MemoryManager()
         self.tool_executor = ToolExecutor()
         self.character_learner = CharacterLearner(self.memory_manager)
@@ -123,12 +124,13 @@ class ConversationalAgent:
         stats.append(f"\n**Total Conversations:** {self.memory_manager.get_conversation_count(self.user_id)}")
         return "\n".join(stats)
 
-# --- REPLACE THIS ENTIRE FUNCTION ---
 def create_interface():
-    """Create and configure the Gradio interface."""
-    agent = ConversationalAgent()
-
+    """Create and configure the Gradio interface with LLM selection."""
+    # Top-level stateful agent; will be re-created when model changes.
     with gr.Blocks(title="Personal AI Assistant", theme=gr.themes.Soft()) as demo:
+        # Global state: selected model and agent instance
+        model_state = gr.State(os.getenv("PREFERRED_MODEL", "google/gemini-2.0-flash-exp"))
+        agent_state = gr.State(ConversationalAgent(model_state.value))
         gr.Markdown(
             """
             # 🤖 Personal AI Assistant
@@ -182,71 +184,110 @@ def create_interface():
                 autoplay=True,
                 type="numpy"
             )
-
-        with gr.Row():
+
+        # Settings / model selection + memory stats
+        with gr.Tab("Settings"):
+            gr.Markdown("### 🔧 LLM Settings")
+            with gr.Row():
+                llm_model = gr.Dropdown(
+                    label="Select LLM model (via OpenRouter)",
+                    choices=[
+                        "google/gemini-2.0-flash-exp",
+                        "anthropic/claude-3.5-sonnet",
+                        "anthropic/claude-3.5-haiku",
+                        "openai/gpt-4.1-mini",
+                    ],
+                    value=os.getenv("PREFERRED_MODEL", "google/gemini-2.0-flash-exp"),
+                )
+            apply_model_btn = gr.Button("Apply Model")
+
+        with gr.Tab("Memory Stats"):
             gr.Markdown("### 📊 Memory Stats")
             stats_display = gr.Markdown("Click 'Refresh Stats' to view")
             refresh_btn = gr.Button("Refresh Stats 🔄")
-
-        # --- Define the update_stats function here ---
-        def update_stats():
-            return agent.get_memory_stats()
-
-        # --- Define all event handlers ---
-        def respond(message, history):
-            return agent.process_message(message, history, use_voice=False)
-
-        def respond_with_voice(message, history):
-            return agent.process_message(message, history, use_voice=True)
-
-        def process_audio(audio, history):
-            return agent.process_voice_input(audio, history)
-
+
+        # --- Helper to (re)build agent when model changes ---
+        def build_agent(model_name):
+            try:
+                return ConversationalAgent(model_name)
+            except Exception as e:
+                # Surface configuration errors (e.g., missing OPENROUTER_API_KEY)
+                print(f"Error initializing ConversationalAgent with model '{model_name}': {e}")
+                # Fall back to default without crashing UI
+                return ConversationalAgent()
+
+        # --- Stats updater uses current agent instance ---
+        def update_stats(agent_obj):
+            return agent_obj.get_memory_stats()
+
+        # --- Core chat handlers using agent_state ---
+        def respond(message, history, agent_obj):
+            return agent_obj.process_message(message, history, use_voice=False)
+
+        def respond_with_voice(message, history, agent_obj):
+            return agent_obj.process_message(message, history, use_voice=True)
+
+        def process_audio(audio, history, agent_obj):
+            return agent_obj.process_voice_input(audio, history)
+
         def clear_history():
             return [], ""
 
-        # --- Correctly wire up events ---
+        # --- Wire chat events to use current agent_state ---
         msg_input.submit(
             respond,
-            inputs=[msg_input, chatbot],
-            outputs=[chatbot, msg_input, audio_output]
+            inputs=[msg_input, chatbot, agent_state],
+            outputs=[chatbot, msg_input, audio_output],
         )
-
+
         submit_btn.click(
             respond,
-            inputs=[msg_input, chatbot],
-            outputs=[chatbot, msg_input, audio_output]
+            inputs=[msg_input, chatbot, agent_state],
+            outputs=[chatbot, msg_input, audio_output],
         )
-
+
         voice_btn.click(
             respond_with_voice,
-            inputs=[msg_input, chatbot],
-            outputs=[chatbot, msg_input, audio_output]
+            inputs=[msg_input, chatbot, agent_state],
+            outputs=[chatbot, msg_input, audio_output],
        )
-
+
         audio_input.change(
             process_audio,
-            inputs=[audio_input, chatbot],
-            outputs=[chatbot, msg_input]
+            inputs=[audio_input, chatbot, agent_state],
+            outputs=[chatbot, msg_input],
         )
-
+
         clear_btn.click(
             clear_history,
-            outputs=[chatbot, msg_input]
+            outputs=[chatbot, msg_input],
         )
-
-        # Correctly wire up the stats button
+
+        # Model apply: update model_state + agent_state
+        def apply_model(selected_model, _old_agent):
+            new_agent = build_agent(selected_model)
+            return selected_model, new_agent
+
+        apply_model_btn.click(
+            apply_model,
+            inputs=[llm_model, agent_state],
+            outputs=[model_state, agent_state],
+        )
+
+        # Stats button uses current agent_state
         refresh_btn.click(
-            update_stats, # Use the function
-            outputs=[stats_display] # To the correct output component
+            update_stats,
+            inputs=[agent_state],
+            outputs=[stats_display],
         )
-
+
         # Load stats on startup
         demo.load(
-            update_stats, # Use the function
-            outputs=[stats_display] # To the correct output component
+            update_stats,
+            inputs=[agent_state],
+            outputs=[stats_display],
         )
-
+
     return demo
 
 # ... (rest of your file is unchanged)
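
Note: llm_handler.py is not part of this commit, so the actual LLMHandler constructor is not visible here. The sketch below is only a hypothetical illustration of the behavior the new call LLMHandler(model_override=model_name) and the PREFERRED_MODEL / OPENROUTER_API_KEY references in the diff seem to assume (an explicit override wins, otherwise fall back to the environment variable, then a default); it is not the repository's implementation.

import os


class LLMHandler:
    """Hypothetical sketch of an LLM handler that honors a model override."""

    # Assumed default; matches the first choice in the new Settings dropdown.
    DEFAULT_MODEL = "google/gemini-2.0-flash-exp"

    def __init__(self, model_override: str | None = None):
        # Explicit override from the UI wins; otherwise fall back to env / default.
        self.model = model_override or os.getenv("PREFERRED_MODEL", self.DEFAULT_MODEL)
        # Assumed: the OpenRouter key comes from the environment, as suggested by
        # the diff's comment about a missing OPENROUTER_API_KEY.
        self.api_key = os.getenv("OPENROUTER_API_KEY")
        if not self.api_key:
            # build_agent() in the diff catches this and keeps the UI alive
            # by falling back to a default ConversationalAgent.
            raise ValueError("OPENROUTER_API_KEY is not set")

With a constructor shaped like this, the try/except in build_agent() would surface a missing-key error in the logs while the interface continues to run with the default agent.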