OSOCONSULT committed on
Commit
6e570e2
·
verified ·
1 Parent(s): 9f1687e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -20,10 +20,12 @@ logger = logging.getLogger("GradioApp")
20
  COMPANY_NAME = "OSO Consulting"
21
 
22
  def process_input(audio_file, text_input, model_choice, tone, state):
 
 
 
23
  audio_filename = f"input_{uuid.uuid4().hex}.wav"
24
  user_input = text_input.strip() if text_input else ""
25
 
26
- # Check audio file only if it exists and has data
27
  if audio_file is not None and hasattr(audio_file, "read"):
28
  audio_bytes = audio_file.read()
29
  if len(audio_bytes) > 0:
@@ -37,13 +39,11 @@ def process_input(audio_file, text_input, model_choice, tone, state):
37
 
38
  if not user_input:
39
  error_message = "⚠️ No valid input detected. Please upload audio or type a message."
40
- # Return None for audio and video to avoid Gradio errors
41
- return error_message, None, None, state.get("lead_stage", "initial"), state
42
 
43
  user_id = state.get("user_id", f"user_{uuid.uuid4().hex[:8]}")
44
- if "conversation" not in state:
45
- state["conversation"] = []
46
- state["lead_stage"] = "initial"
47
 
48
  try:
49
  emotion = emotion_detector.get_primary_emotion(user_input)
 
20
  COMPANY_NAME = "OSO Consulting"
21
 
22
  def process_input(audio_file, text_input, model_choice, tone, state):
23
+ if state is None or not isinstance(state, dict):
24
+ state = {"conversation": [], "lead_stage": "initial"}
25
+
26
  audio_filename = f"input_{uuid.uuid4().hex}.wav"
27
  user_input = text_input.strip() if text_input else ""
28
 
 
29
  if audio_file is not None and hasattr(audio_file, "read"):
30
  audio_bytes = audio_file.read()
31
  if len(audio_bytes) > 0:
 
39
 
40
  if not user_input:
41
  error_message = "⚠️ No valid input detected. Please upload audio or type a message."
42
+ return error_message, None, None, "initial", state
 
43
 
44
  user_id = state.get("user_id", f"user_{uuid.uuid4().hex[:8]}")
45
+ state.setdefault("conversation", [])
46
+ state.setdefault("lead_stage", "initial")
 
47
 
48
  try:
49
  emotion = emotion_detector.get_primary_emotion(user_input)