m7mdal7aj committed on
Commit 40e0ea9
1 Parent(s): 766fe20

Update app.py

Files changed (1)
app.py +24 -8
app.py CHANGED
@@ -5,12 +5,27 @@ import accelerate
 import scipy
 from PIL import Image
 import torch.nn as nn
-from transformers import Blip2Processor, Blip2ForConditionalGeneration, InstructBlipProcessor, InstructBlipForConditionalGeneration
 from my_model.object_detection import detect_and_draw_objects
 from my_model.captioner.image_captioning import get_caption
 from my_model.utilities import free_gpu_resources
 
 
+def perform_object_detection(image, model_name, threshold=0.2):
+    """
+    Perform object detection on the given image using the specified model and threshold.
+    Args:
+        image (PIL.Image): The image on which to perform object detection.
+        model_name (str): The name of the object detection model to use.
+        threshold (float): The threshold for object detection.
+    Returns:
+        PIL.Image, str: The image with drawn bounding boxes and a string of detected objects.
+    """
+
+    processed_image, detected_objects = detect_and_draw_objects(image, model_name, threshold)
+
+    return processed_image, detected_objects
+
+
 # Placeholder for undefined functions
 def load_caption_model():
     st.write("Placeholder for load_caption_model function")
@@ -20,7 +35,7 @@ def answer_question(image, question, model, processor):
     return "Placeholder answer for the question"
 
 def detect_and_draw_objects(image, model_name, threshold):
-    return image, "Detected objects"
+    perform_object_detection()
 
 def get_caption(image):
     return "Generated caption for the image"
@@ -94,12 +109,6 @@ def image_qa_app():
     st.session_state['images_qa_history'] = []
     st.experimental_rerun()
 
-    # Display sample images
-    st.write("Or choose from sample images:")
-    for idx, sample_image_path in enumerate(sample_images):
-        if st.button(f"Use Sample Image {idx+1}", key=f"sample_{idx}"):
-            uploaded_image = Image.open(sample_image_path)
-            process_uploaded_image(uploaded_image)
 
     # Image uploader
     uploaded_image = st.file_uploader("Upload an Image", type=["png", "jpg", "jpeg"])
@@ -107,6 +116,13 @@ def image_qa_app():
     image = Image.open(uploaded_image)
     process_uploaded_image(image)
 
+    # Display sample images
+    st.write("Or choose from sample images:")
+    for idx, sample_image_path in enumerate(sample_images):
+        if st.button(f"Use Sample Image {idx+1}", key=f"sample_{idx}"):
+            uploaded_image = Image.open(sample_image_path)
+            process_uploaded_image(uploaded_image)
+
 def process_uploaded_image(image):
     current_image_key = image.filename  # Use image filename as a unique key
     # ... rest of the image processing code ...
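
As committed, the placeholder detect_and_draw_objects calls perform_object_detection() with no arguments, which raises a TypeError because image and model_name are required, and it discards the return value. The placeholder also shadows the imported detect_and_draw_objects, so perform_object_detection would call the placeholder back rather than the real detector. Below is a minimal sketch of how the delegation presumably was intended to work, assuming the imported my_model.object_detection.detect_and_draw_objects returns a (PIL.Image, str) pair as the new docstring states; the file path and model name are hypothetical placeholders, not values from the commit.

from PIL import Image

# Real detector from the repo; assumed to return
# (annotated_image, detected_objects_string) per the docstring above.
# The alias avoids being shadowed by the local placeholder of the same name.
from my_model.object_detection import detect_and_draw_objects as run_detector


def perform_object_detection(image, model_name, threshold=0.2):
    # Forward the arguments to the imported detector instead of
    # calling it with none, and pass its results through.
    processed_image, detected_objects = run_detector(image, model_name, threshold)
    return processed_image, detected_objects


# Hypothetical usage; "sample.jpg" and "my-detector" are placeholder values.
image = Image.open("sample.jpg")
annotated, objects_str = perform_object_detection(image, "my-detector", threshold=0.2)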