m7mdal7aj committed
Commit
8cf7678
1 Parent(s): 43dee29

Update app.py

Files changed (1)
  1. app.py  +14 -21
app.py CHANGED
@@ -8,16 +8,14 @@ import torch.nn as nn
 from my_model.object_detection import detect_and_draw_objects
 from my_model.captioner.image_captioning import get_caption
 from my_model.utilities import free_gpu_resources
+from my_model.KBVQA import KBVQA, prepare_kbvqa_model



-# Placeholder for undefined functions
-def load_caption_model():
-    st.write("Placeholder for load_caption_model function")
-    return None, None
+def answer_question(image, question, model):

-def answer_question(image, question):
-    return "Placeholder answer for the question"
+    answer = model.generate_answer(question, image)
+    return answer

 def get_caption(image):
     return "Generated caption for the image"
@@ -32,18 +30,18 @@ sample_images = ["Files/sample1.jpg", "Files/sample2.jpg", "Files/sample3.jpg",

 def run_inference():
     st.title("Run Inference")
-    image_qa_and_object_detection()

-def image_qa_and_object_detection():
-    # Image-based Q&A functionality
-    st.subheader("Talk to your image")
-    image_qa_app()
+    # Button to load KBVQA models
+    if st.button('Load KBVQA Models'):
+        # Call the function to load models and show progress
+        kbvqa = prepare_kbvqa_model(your_detection_model)  # Replace with your actual detection model

-    # Object Detection functionality
-    st.subheader("Object Detection")
-    object_detection_app()
+        if kbvqa:
+            st.write("Model is ready for inference.")
+            image_qa_app(kbvqa)

-def image_qa_app():
+
+def image_qa_app(kbvqa):
     # Initialize session state for storing the current image and its Q&A history
     if 'current_image' not in st.session_state:
         st.session_state['current_image'] = None
@@ -76,7 +74,7 @@ def image_qa_app():
     # Get Answer button
     if st.button('Get Answer'):
         # Process the question
-        answer = answer_question(st.session_state['current_image'], question)
+        answer = answer_question(st.session_state['current_image'], question, model=kbvqa)
         st.session_state['qa_history'].append((question, answer))

     # Display all Q&A
@@ -84,11 +82,6 @@ def image_qa_app():
         st.text(f"Q: {q}\nA: {a}\n")


-# Object Detection App
-def object_detection_app():
-    # ... Implement your code for object detection ...
-    pass
-
 # Main function
 def main():
     st.sidebar.title("Navigation")