Tonic committed
Commit 6de3447
1 Parent(s): 47df781

Update app.py

Files changed (1)
  1. app.py +7 -4
app.py CHANGED
@@ -298,6 +298,9 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
     image_description = ""
     markdown_output = "" # Initialize markdown_output
 
+    # Debugging print statement
+    print(f"Image Input Type: {type(image_input)}, Audio Input Type: {type(audio_input)}")
+
     # Process image input
     if image_input is not None and not (isinstance(image_input, np.ndarray) and image_input.size == 0):
         image_text = process_image(image_input)
@@ -309,17 +312,17 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
         combined_text += "\n\n**Audio Input:**\n" + audio_text
 
     # Process text input
-    if text_input is not None:
-        combined_text = "The user asks the following to his health adviser: " + text_input
+    if text_input is not None and text_input.strip():
+        combined_text += "The user asks the following to his health adviser: " + text_input
 
     # Check if combined text is empty
     if not combined_text.strip():
         return "Error: Please provide some input (text, audio, or image)."
 
     # Append the original image description in Markdown
-    if image_text: # Changed to image_text
+    if image_text:
         markdown_output += "\n### Original Image Description\n"
-        markdown_output += image_text + "\n" # Changed to image_text
+        markdown_output += image_text + "\n"
 
     # Use the text to query Vectara
     vectara_response_json = query_vectara(combined_text)
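
Note on the change: the substantive fix is in the text-input branch. Before this commit, assignment to combined_text discarded any image or audio text already gathered, and an empty string still passed the "is not None" check. Below is a minimal sketch of that difference in isolation; merge_text_old, merge_text_new, and the sample value are illustrative names only, not code from app.py.

def merge_text_old(combined_text, text_input):
    # Pre-commit behaviour: assignment overwrites prior image/audio text,
    # and an empty string still passes the "is not None" check.
    if text_input is not None:
        combined_text = "The user asks the following to his health adviser: " + text_input
    return combined_text

def merge_text_new(combined_text, text_input):
    # Post-commit behaviour: blank input is skipped and the question is
    # appended, so earlier image/audio text survives.
    if text_input is not None and text_input.strip():
        combined_text += "The user asks the following to his health adviser: " + text_input
    return combined_text

prior = "\n\n**Audio Input:**\n(transcribed audio)"
print(repr(merge_text_old(prior, "")))   # audio text lost, only the prompt prefix remains
print(repr(merge_text_new(prior, "")))   # audio text preserved, blank input ignored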