rdune71 committed
Commit dac104e · 1 Parent(s): 0b2b7e6

Targeted chat response fix: enhanced debugging, improved session handling, and better error reporting

Files changed (3)
  1. app.py +95 -170
  2. core/providers/ollama.py +58 -53
  3. core/session.py +21 -28
app.py CHANGED
@@ -190,6 +190,28 @@ with st.sidebar:
             st.markdown("**Redis:** Healthy")
         else:
             st.markdown("**Redis:** Unhealthy")
+
+    # Add debug tools
+    st.divider()
+    st.subheader("🐛 Debug Tools")
+
+    if st.button("🔍 Test Ollama Direct"):
+        try:
+            with st.spinner("Testing..."):
+                ollama_provider = OllamaProvider(st.session_state.selected_model)
+                test_history = [{"role": "user", "content": "Hello, what day is it?"}]
+                response = ollama_provider.generate("Hello, what day is it?", test_history)
+            st.success(f"Success! Response: {response[:200] if response else 'Empty'}")
+        except Exception as e:
+            st.error(f"Error: {str(e)}")
+
+    if st.button("📋 Show Session Data"):
+        try:
+            user_session = session_manager.get_session("default_user")
+            st.write("Session data:")
+            st.json(user_session)
+        except Exception as e:
+            st.error(f"Error: {str(e)}")

 # Main interface
 st.title("🐱 CosmicCat AI Assistant")

@@ -247,208 +269,111 @@ def validate_user_input(text):

     return True, text.strip()

-# Chat input - FIXED VERSION
+# Chat input - COMPLETELY REWRITTEN VERSION
 user_input = st.chat_input("Type your message here...", disabled=st.session_state.is_processing)

 # Process message when received
 if user_input and not st.session_state.is_processing:
+    st.session_state.is_processing = True
+
     # Validate input
-    is_valid, validated_input = validate_user_input(user_input)
-    if not is_valid:
-        st.error(validated_input)
+    clean_input = user_input.strip()
+    if not clean_input:
         st.session_state.is_processing = False
-        st.experimental_rerun()  # Fixed: use experimental_rerun
-    else:
-        st.session_state.is_processing = True
-
-        # Display user message immediately
-        with st.chat_message("user"):
-            st.markdown(validated_input)
-
-        # Add to message history - ensure proper format
-        st.session_state.messages.append({
-            "role": "user",
-            "content": validated_input,
-            "timestamp": datetime.now().strftime("%H:%M:%S")
-        })
+        st.experimental_rerun()
+
+    # Display user message immediately
+    with st.chat_message("user"):
+        st.markdown(clean_input)

-        # Process AI response
-        response_container = st.empty()
+    # Add to message history
+    timestamp = datetime.now().strftime("%H:%M:%S")
+    st.session_state.messages.append({
+        "role": "user",
+        "content": clean_input,
+        "timestamp": timestamp
+    })
+
+    # Process AI response
+    with st.chat_message("assistant"):
         status_placeholder = st.empty()
         response_placeholder = st.empty()

         try:
+            status_placeholder.info("🔄 Processing your request...")
+
             # Get conversation history from session
             user_session = session_manager.get_session("default_user")
             conversation_history = user_session.get("conversation", []).copy()

+            # Log conversation history for debugging
+            logger.info(f"Conversation history length: {len(conversation_history)}")
+
             # Add the current user message to history for context
-            conversation_history.append({"role": "user", "content": validated_input})
+            conversation_history.append({"role": "user", "content": clean_input})

-            # Check if cosmic mode is enabled
-            if st.session_state.cosmic_mode:
-                # Process cosmic cascade response
-                status_placeholder.info("🐱 Cosmic Kitten Responding...")
-
-                try:
-                    # Get conversation history
-                    user_session = session_manager.get_session("default_user")
-                    conversation_history = user_session.get("conversation", []).copy()
-                    conversation_history.append({"role": "user", "content": validated_input})
-
-                    # Stage 1: Local Ollama Response
-                    ollama_provider = OllamaProvider(st.session_state.selected_model)
-                    local_response = ollama_provider.generate(validated_input, conversation_history)
-
-                    if local_response:
-                        # Display response (no nested st.chat_message)
-                        st.markdown(f"### 🐱 Cosmic Kitten Says:\n{local_response}")
-                        st.session_state.messages.append({
-                            "role": "assistant",
-                            "content": local_response,
-                            "source": "local_kitty",
-                            "timestamp": datetime.now().strftime("%H:%M:%S")
-                        })
-
-                        # Stage 2: HF Endpoint Analysis
-                        status_placeholder.info("🛰️ Beaming Query to Orbital Station...")
-                        if config.hf_token:
-                            # Check HF status first
-                            hf_status = hf_monitor.check_endpoint_status()
-                            if not hf_status['available']:
-                                status_placeholder.info(personality.get_initializing_message())
-
-                            hf_provider = HuggingFaceProvider("meta-llama/Llama-2-7b-chat-hf")
-                            hf_response = hf_provider.generate(validated_input, conversation_history)
-
-                            if hf_response:
-                                # Display response (no nested st.chat_message)
-                                st.markdown(f"### 🛰️ Orbital Station Reports:\n{hf_response}")
-                                st.session_state.messages.append({
-                                    "role": "assistant",
-                                    "content": hf_response,
-                                    "source": "orbital_station",
-                                    "timestamp": datetime.now().strftime("%H:%M:%S")
-                                })
-
-                                # Stage 3: Local Synthesis
-                                status_placeholder.info("🐱 Cosmic Kitten Synthesizing Wisdom...")
-
-                                # Update history with both responses
-                                synthesis_history = conversation_history.copy()
-                                synthesis_history.extend([
-                                    {"role": "assistant", "content": local_response},
-                                    {"role": "assistant", "content": hf_response, "source": "cloud"}
-                                ])
-
-                                synthesis = ollama_provider.generate(
-                                    f"Synthesize these two perspectives:\n1. Local: {local_response}\n2. Cloud: {hf_response}",
-                                    synthesis_history
-                                )
-
-                                if synthesis:
-                                    # Display response (no nested st.chat_message)
-                                    st.markdown(f"### 🌟 Final Cosmic Summary:\n{synthesis}")
-                                    st.session_state.messages.append({
-                                        "role": "assistant",
-                                        "content": synthesis,
-                                        "source": "cosmic_summary",
-                                        "timestamp": datetime.now().strftime("%H:%M:%S")
-                                    })
-
-                    status_placeholder.success("✨ Cosmic Cascade Complete!")
-
-                except Exception as e:
-                    error_msg = f"🌌 Cosmic disturbance: {str(e)}"
-                    st.error(error_msg)
-                    st.session_state.messages.append({
-                        "role": "assistant",
-                        "content": error_msg,
-                        "source": "error",
-                        "timestamp": datetime.now().strftime("%H:%M:%S")
-                    })
-            else:
-                # Traditional processing
-                # Try Ollama first
-                status_placeholder.info("🦙 Contacting Ollama...")
-                ai_response = None
+            # Try Ollama first
+            status_placeholder.info("🦙 Contacting Ollama...")
+            logger.info("Attempting Ollama connection...")
+
+            try:
+                # Use the OllamaProvider directly with proper configuration
+                ollama_provider = OllamaProvider(st.session_state.selected_model)
+                logger.info(f"Ollama provider created with model: {st.session_state.selected_model}")
+                logger.info(f"Ollama host: {ollama_provider.host}")

-                try:
-                    # Use the OllamaProvider directly with proper configuration
-                    ollama_provider = OllamaProvider(st.session_state.selected_model)
-                    ai_response = ollama_provider.generate(validated_input, conversation_history)
-
-                    if ai_response:
-                        st.markdown(ai_response)  # Use st.markdown instead of response_placeholder
-                        status_placeholder.success("✅ Response received!")
-                    else:
-                        status_placeholder.warning("⚠️ Empty response from Ollama")
-
-                except Exception as ollama_error:
-                    error_message = str(ollama_error)
-                    status_placeholder.error(f"❌ Ollama error: {error_message[:100]}...")
-                    logger.error(f"Ollama error: {error_message}")
+                ai_response = ollama_provider.generate(clean_input, conversation_history)
+                logger.info(f"Ollama response received: {type(ai_response)} - {str(ai_response)[:100] if ai_response else 'None'}")

-                # Fallback to HF if available
-                if config.hf_token and not ai_response:
-                    status_placeholder.info(" Initializing HF Endpoint (2–4 minutes)...")
-
-                    try:
-                        # Check HF status first
-                        hf_status = hf_monitor.check_endpoint_status()
-                        if not hf_status['available']:
-                            status_placeholder.info(personality.get_initializing_message())
-
-                        # Use the HuggingFaceProvider directly
-                        hf_provider = HuggingFaceProvider("meta-llama/Llama-2-7b-chat-hf")
-                        ai_response = hf_provider.generate(validated_input, conversation_history)
-
-                        if ai_response:
-                            st.markdown(ai_response)  # Use st.markdown instead of response_placeholder
-                            status_placeholder.success("✅ HF response received!")
-                        else:
-                            status_placeholder.error("❌ No response from HF")
-
-                    except Exception as hf_error:
-                        error_message = str(hf_error)
-                        status_placeholder.error(f"❌ HF also failed: {error_message[:100]}...")
-                        logger.error(f"HF error: {error_message}")
-
-                # Save response if successful
-                if ai_response:
-                    # Update conversation history in session
+                if ai_response and ai_response.strip():
+                    response_placeholder.markdown(ai_response)
+                    status_placeholder.success("✅ Response received!")
+                else:
+                    status_placeholder.warning("⚠️ Empty response from Ollama")
+                    ai_response = "I received your message but couldn't generate a proper response. Please try again."
+                    response_placeholder.markdown(ai_response)
+
+            except Exception as ollama_error:
+                error_message = str(ollama_error)
+                logger.error(f"Ollama error: {error_message}")
+                status_placeholder.error(f"❌ Ollama error: {error_message[:100]}...")
+                ai_response = f"Error contacting Ollama: {error_message[:100]}..."
+                response_placeholder.error(ai_response)
+
+            # Save response to session and message history
+            if ai_response:
+                # Update conversation history in session
+                try:
                     conversation = user_session.get("conversation", []).copy()
-                    conversation.append({"role": "user", "content": validated_input})
-                    conversation.append({"role": "assistant", "content": ai_response})
-                    session_manager.update_session("default_user", {"conversation": conversation})
+                    conversation.append({"role": "user", "content": clean_input})
+                    conversation.append({"role": "assistant", "content": str(ai_response)})

-                    # Add to message history
-                    st.session_state.messages.append({
-                        "role": "assistant",
-                        "content": ai_response,
-                        "timestamp": datetime.now().strftime("%H:%M:%S")
-                    })
-                else:
-                    error_msg = "Sorry, I couldn't process your request. Please try again."
-                    st.session_state.messages.append({
-                        "role": "assistant",
-                        "content": error_msg,
-                        "timestamp": datetime.now().strftime("%H:%M:%S")
-                    })
-                    st.markdown(error_msg)
+                    # Update session with new conversation
+                    update_result = session_manager.update_session("default_user", {"conversation": conversation})
+                    logger.info(f"Session update result: {update_result}")

+                except Exception as session_error:
+                    logger.error(f"Session update error: {session_error}")
+
+                # Add to message history
+                st.session_state.messages.append({
+                    "role": "assistant",
+                    "content": str(ai_response),
+                    "timestamp": timestamp
+                })
+
         except Exception as e:
             error_msg = f"System error: {str(e)}"
             logger.error(f"Chat processing error: {error_msg}")
-            st.error(error_msg)
+            response_placeholder.error(error_msg)
             st.session_state.messages.append({
                 "role": "assistant",
                 "content": error_msg,
-                "timestamp": datetime.now().strftime("%H:%M:%S")
+                "timestamp": timestamp
            })
         finally:
             st.session_state.is_processing = False
-            st.experimental_rerun()  # Fixed: use experimental_rerun
+            st.experimental_rerun()

 # Add evaluation dashboard tab (separate from chat interface) - ONLY ABOUT TAB NOW
 st.divider()
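
Both the validation early exit and the `finally` block still call `st.experimental_rerun()`, which works as an early return because Streamlit implements reruns by raising an internal exception that aborts the current script run. Newer Streamlit releases deprecate that call in favor of `st.rerun()` (added in 1.27). A minimal compatibility sketch, assuming the app may run on either version; the `safe_rerun` helper name is hypothetical and not part of this commit:

import streamlit as st

def safe_rerun() -> None:
    """Restart the script run on both old and new Streamlit versions."""
    if hasattr(st, "rerun"):      # available from Streamlit 1.27 onward
        st.rerun()
    else:                         # older releases
        st.experimental_rerun()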
core/providers/ollama.py CHANGED
@@ -21,7 +21,7 @@ class OllamaProvider(LLMProvider):
         "ngrok-skip-browser-warning": "true",
         "User-Agent": "CosmicCat-AI-Assistant"
     }
-    
+
     def _sanitize_host(self, host: str) -> str:
         """Sanitize host URL by removing whitespace and control characters"""
         if not host:
@@ -34,7 +34,7 @@ class OllamaProvider(LLMProvider):
         if not host.startswith(('http://', 'https://')):
             host = 'http://' + host
         return host
-    
+
     def generate(self, prompt: str, conversation_history: List[Dict]) -> Optional[str]:
         """Generate a response synchronously"""
         try:
@@ -42,7 +42,7 @@ class OllamaProvider(LLMProvider):
         except Exception as e:
             logger.error(f"Ollama generation failed: {e}")
             return None
-    
+
     def stream_generate(self, prompt: str, conversation_history: List[Dict]) -> Optional[Union[str, List[str]]]:
         """Generate a response with streaming support"""
         try:
@@ -50,7 +50,7 @@ class OllamaProvider(LLMProvider):
         except Exception as e:
             logger.error(f"Ollama stream generation failed: {e}")
             return None
-    
+
     def validate_model(self) -> bool:
         """Validate if the model is available"""
         try:
@@ -75,65 +75,81 @@ class OllamaProvider(LLMProvider):
         except Exception as e:
             logger.error(f"Model validation failed: {e}")
             return False
-    
+
     def _generate_impl(self, prompt: str, conversation_history: List[Dict]) -> str:
-        """Implementation of synchronous generation with context injection"""
+        """Implementation of synchronous generation with enhanced debugging"""
         url = f"{self.host}/api/chat"
-        messages = conversation_history.copy()

-        # Inject context message with current time/date/weather
-        current_time = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
-        weather_summary = self._get_weather_summary()
-        context_msg = {
-            "role": "system",
-            "content": f"[Current Context: {current_time} | Weather: {weather_summary}]"
-        }
-        enhanced_messages = [context_msg] + messages
+        # Prepare messages - ensure proper format
+        messages = []
+        for msg in conversation_history:
+            if isinstance(msg, dict) and "role" in msg and "content" in msg:
+                messages.append({
+                    "role": msg["role"],
+                    "content": str(msg["content"])
+                })

         # Add the current prompt if not already in history
         if not messages or messages[-1].get("content") != prompt:
-            enhanced_messages.append({"role": "user", "content": prompt})
-
+            messages.append({"role": "user", "content": prompt})
+
         payload = {
             "model": self.model_name,
-            "messages": enhanced_messages,
+            "messages": messages,
             "stream": False
         }

-        response = requests.post(
-            url,
-            json=payload,
-            headers=self.headers,
-            timeout=self.timeout
-        )
-        response.raise_for_status()
-        result = response.json()
-        return result["message"]["content"]
+        logger.info(f"Ollama request URL: {url}")
+        logger.info(f"Ollama request payload: {payload}")
+        logger.info(f"Ollama headers: {self.headers}")

+        try:
+            response = requests.post(
+                url,
+                json=payload,
+                headers=self.headers,
+                timeout=self.timeout
+            )
+            logger.info(f"Ollama response status: {response.status_code}")
+            logger.info(f"Ollama response headers: {dict(response.headers)}")
+
+            response.raise_for_status()
+            result = response.json()
+            logger.info(f"Ollama response body: {result}")
+
+            # Extract content properly
+            if "message" in result and "content" in result["message"]:
+                content = result["message"]["content"]
+                logger.info(f"Extracted content: {content[:100] if content else 'None'}")
+                return content
+            elif "response" in result:
+                content = result["response"]
+                logger.info(f"Extracted response: {content[:100] if content else 'None'}")
+                return content
+            else:
+                content = str(result)
+                logger.info(f"Raw result as string: {content[:100]}")
+                return content
+
+        except requests.exceptions.RequestException as e:
+            logger.error(f"Ollama API request error: {str(e)}")
+            raise Exception(f"Ollama API error: {str(e)}")
+        except Exception as e:
+            logger.error(f"Failed to parse Ollama response: {str(e)}")
+            raise Exception(f"Failed to parse Ollama response: {str(e)}")
+
     def _stream_generate_impl(self, prompt: str, conversation_history: List[Dict]) -> List[str]:
-        """Implementation of streaming generation with context injection"""
+        """Implementation of streaming generation"""
         url = f"{self.host}/api/chat"
         messages = conversation_history.copy()
-
-        # Inject context message with current time/date/weather
-        current_time = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
-        weather_summary = self._get_weather_summary()
-        context_msg = {
-            "role": "system",
-            "content": f"[Current Context: {current_time} | Weather: {weather_summary}]"
-        }
-        enhanced_messages = [context_msg] + messages
-
         # Add the current prompt if not already in history
         if not messages or messages[-1].get("content") != prompt:
-            enhanced_messages.append({"role": "user", "content": prompt})
-
+            messages.append({"role": "user", "content": prompt})
         payload = {
             "model": self.model_name,
-            "messages": enhanced_messages,
+            "messages": messages,
             "stream": True
         }
-
         response = requests.post(
             url,
             json=payload,
@@ -154,14 +170,3 @@ class OllamaProvider(LLMProvider):
         except:
             continue
         return chunks
-
-    def _get_weather_summary(self) -> str:
-        """Get formatted weather summary"""
-        try:
-            weather = weather_service.get_current_weather("New York")
-            if weather:
-                return f"{weather.get('temperature', 'N/A')}°C, {weather.get('description', 'Clear skies')}"
-            else:
-                return "Clear skies"
-        except:
-            return "Clear skies"
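
The rewritten `_generate_impl` tolerates two response shapes: `message.content`, which Ollama's non-streaming `/api/chat` endpoint returns, and the top-level `response` key used by `/api/generate`. A standalone probe of the chat shape, assuming a local Ollama on the default port 11434; the model name is only an example and must already be pulled:

import requests

resp = requests.post(
    "http://localhost:11434/api/chat",
    json={
        "model": "llama3",  # example model; use whatever is pulled locally
        "messages": [{"role": "user", "content": "Say hi"}],
        "stream": False,
    },
    timeout=60,
)
resp.raise_for_status()
body = resp.json()
# Non-streaming /api/chat responses carry the text under message.content;
# /api/generate would put it under "response", the fallback key checked above.
print(body["message"]["content"])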
core/session.py CHANGED
@@ -55,40 +55,36 @@ class SessionManager:
             return self._create_new_session()

     def update_session(self, user_id: str, data: Dict[str, Any]) -> bool:
-        """Update user session data using Redis pipelining for efficiency
-        Args:
-            user_id: Unique identifier for the user
-            data: Data to update in the session
-        Returns:
-            Boolean indicating success
-        """
+        """Update user session data"""
         try:
-            from core.redis_client import redis_client
-            client = redis_client.get_client()
-            if not client:
-                logger.error("Redis client not available")
-                return False
-
+            # Get existing session or create new one
             session = self.get_session(user_id)
+
+            # Update with new data
             session.update(data)
             session['last_activity'] = time.time()

-            # Use pipeline for batch operations
-            pipe = client.pipeline()
-
-            # Smart serialization - only serialize complex data types
+            # Serialize complex data types for Redis
             redis_data = {}
             for key, value in session.items():
                 if isinstance(value, (list, dict)):
                     redis_data[key] = json.dumps(value, default=str)
-                else:
+                elif isinstance(value, (int, float, str, bool)):
                     redis_data[key] = value
-
-            pipe.hset(f"user:{user_id}", mapping=redis_data)
-            pipe.execute()
+                else:
+                    redis_data[key] = str(value)
+
+            # Save to Redis using the memory module function
+            from core.memory import save_user_state
+            result = save_user_state(user_id, redis_data)
+
+            if result:
+                logger.debug(f"Successfully updated session for user {user_id}")
+            else:
+                logger.warning(f"Failed to save session for user {user_id}")
+
+            return result

-            logger.debug(f"Successfully updated session for user {user_id}")
-            return True
         except Exception as e:
             logger.error(f"Error updating session for user {user_id}: {e}")
             return False
@@ -233,17 +229,14 @@ class SessionManager:
             return False

     def _create_new_session(self) -> Dict[str, Any]:
-        """Create a new session with default values
-        Returns:
-            Dictionary containing new session data
-        """
+        """Create a new session with default values"""
         session = {
             'conversation': [],
             'preferences': {},
             'last_activity': time.time(),
             'created_at': time.time()
         }
-        logger.debug("Created new session")
+        logger.info("Created new session")
         return session

 # Global session manager instance
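
`update_session` now persists through `save_user_state` from `core.memory`, a module not shown in this commit. A minimal sketch of what that function is assumed to do, reusing the `redis_client.get_client()` and per-user `hset` calls from the code this commit deletes; the body is hypothetical:

import logging
from typing import Any, Dict

logger = logging.getLogger(__name__)

def save_user_state(user_id: str, data: Dict[str, Any]) -> bool:
    """Hypothetical sketch: write flattened session fields to a Redis hash."""
    try:
        from core.redis_client import redis_client  # same import the old code used
        client = redis_client.get_client()
        if not client:
            logger.error("Redis client not available")
            return False
        client.hset(f"user:{user_id}", mapping=data)  # key layout from the old code
        return True
    except Exception as e:
        logger.error(f"Failed to save state for user {user_id}: {e}")
        return False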