apailang committed on
Commit
6db2b96
β€’
1 Parent(s): 2ec2513

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +55 -10
app.py CHANGED
@@ -30,8 +30,8 @@ def load_image_into_numpy_array(path):
30
  image = Image.open(BytesIO(image_data))
31
  return pil_image_as_numpy_array(image)
32
 
33
- def load_model():
34
- download_dir = snapshot_download(REPO_ID)
35
  saved_model_dir = os.path.join(download_dir, "saved_model")
36
  detection_model = tf.saved_model.load(saved_model_dir)
37
  return detection_model
@@ -64,9 +64,40 @@ def predict2(image_np):
64
  agnostic_mode=False,
65
  line_thickness=2)
66
 
67
- result_pil_img = tf.keras.utils.array_to_img(image_np_with_detections[0])
68
 
69
- return result_pil_img
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
 
71
  def detect_video(video):
72
  # Create a video capture object
@@ -110,7 +141,10 @@ def detect_video(video):
110
 
111
  label_id_offset = 0
112
  REPO_ID = "apailang/mytfodmodel"
113
- detection_model = load_model()
 
 
 
114
  samples_folder = 'data'
115
  # pil_image = Image.open(image_path)
116
  # image_arr = pil_image_as_numpy_array(pil_image)
@@ -131,21 +165,32 @@ test10 = os.path.join(os.path.dirname(__file__), "data/test10.jpeg")
131
  test11 = os.path.join(os.path.dirname(__file__), "data/test11.jpeg")
132
  test12 = os.path.join(os.path.dirname(__file__), "data/test12.jpeg")
133
 
134
- tts_demo = gr.Interface(
135
  fn=predict,
136
  inputs=gr.Image(type="pil"),
137
  outputs=gr.Image(type="pil"),
138
- title="Image Prediction Interface",
139
- description="Upload a Image for prediction",
140
  examples=[[test1],[test2],[test3],[test4],[test5],[test6],[test7],[test8],[test9],[test10],[test11],[test12],],
141
  cache_examples=True
142
  )#.launch(share=True)
143
 
 
 
 
 
 
 
 
 
 
 
 
 
144
  a = os.path.join(os.path.dirname(__file__), "data/a.mp4") # Video
145
  b = os.path.join(os.path.dirname(__file__), "data/b.mp4") # Video
146
  c = os.path.join(os.path.dirname(__file__), "data/c.mp4") # Video
147
 
148
-
149
  video_out_file = os.path.join(samples_folder,'detected' + '.mp4')
150
 
151
  stt_demo = gr.Interface(
@@ -160,7 +205,7 @@ stt_demo = gr.Interface(
160
  cache_examples=False
161
  )
162
 
163
- demo = gr.TabbedInterface([tts_demo, stt_demo], ["Image", "Video"])
164
 
165
  if __name__ == "__main__":
166
  demo.launch()
 
30
  image = Image.open(BytesIO(image_data))
31
  return pil_image_as_numpy_array(image)
32
 
33
+ def load_model(model_repo_id):
34
+ download_dir = snapshot_download(model_repo_id)
35
  saved_model_dir = os.path.join(download_dir, "saved_model")
36
  detection_model = tf.saved_model.load(saved_model_dir)
37
  return detection_model
 
64
  agnostic_mode=False,
65
  line_thickness=2)
66
 
67
+ result_pil_img2 = tf.keras.utils.array_to_img(image_np_with_detections[0])
68
 
69
+ return result_pil_img2
70
+
71
+ def predict3(pilimg):
72
+
73
+ image_np = pil_image_as_numpy_array(pilimg)
74
+ return predict4(image_np)
75
+
76
+ def predict4(image_np):
77
+
78
+ results = detection_model2(image_np)
79
+
80
+ # different object detection models have additional results
81
+ result = {key:value.numpy() for key,value in results.items()}
82
+
83
+ label_id_offset = 0
84
+ image_np_with_detections = image_np.copy()
85
+
86
+ viz_utils.visualize_boxes_and_labels_on_image_array(
87
+ image_np_with_detections[0],
88
+ result['detection_boxes'][0],
89
+ (result['detection_classes'][0] + label_id_offset).astype(int),
90
+ result['detection_scores'][0],
91
+ category_index,
92
+ use_normalized_coordinates=True,
93
+ max_boxes_to_draw=200,
94
+ min_score_thresh=.60,
95
+ agnostic_mode=False,
96
+ line_thickness=2)
97
+
98
+ result_pil_img4 = tf.keras.utils.array_to_img(image_np_with_detections[0])
99
+
100
+ return result_pil_img4
101
 
102
  def detect_video(video):
103
  # Create a video capture object
 
141
 
142
  label_id_offset = 0
143
  REPO_ID = "apailang/mytfodmodel"
144
+ detection_model = load_model(REPO_ID)
145
+ REPO_ID2 = "apailang/mytfodmodeltuned"
146
+ detection_model2 = load_model(REPO_ID2)
147
+
148
  samples_folder = 'data'
149
  # pil_image = Image.open(image_path)
150
  # image_arr = pil_image_as_numpy_array(pil_image)
 
165
  test11 = os.path.join(os.path.dirname(__file__), "data/test11.jpeg")
166
  test12 = os.path.join(os.path.dirname(__file__), "data/test12.jpeg")
167
 
168
+ base_image = gr.Interface(
169
  fn=predict,
170
  inputs=gr.Image(type="pil"),
171
  outputs=gr.Image(type="pil"),
172
+ title="Luffy and Chopper face detection (Base mobile net model)",
173
+ description="Upload a Image for prediction or click on below examples",
174
  examples=[[test1],[test2],[test3],[test4],[test5],[test6],[test7],[test8],[test9],[test10],[test11],[test12],],
175
  cache_examples=True
176
  )#.launch(share=True)
177
 
178
+ tuned_image = gr.Interface(
179
+ fn=predict3,
180
+ inputs=gr.Image(type="pil"),
181
+ outputs=gr.Image(type="pil"),
182
+ title="Luffy and Chopper face detection (tuned mobile net model)",
183
+ description="Upload a Image for prediction or click on below examples. Mobile net tuned with data Augmentation",
184
+ examples=[[test1],[test2],[test3],[test4],[test5],[test6],[test7],[test8],[test9],[test10],[test11],[test12],],
185
+ cache_examples=True
186
+ )#.launch(share=True)
187
+
188
+
189
+
190
  a = os.path.join(os.path.dirname(__file__), "data/a.mp4") # Video
191
  b = os.path.join(os.path.dirname(__file__), "data/b.mp4") # Video
192
  c = os.path.join(os.path.dirname(__file__), "data/c.mp4") # Video
193
 
 
194
  video_out_file = os.path.join(samples_folder,'detected' + '.mp4')
195
 
196
  stt_demo = gr.Interface(
 
205
  cache_examples=False
206
  )
207
 
208
+ demo = gr.TabbedInterface([base_image,tuned_image, stt_demo], ["Image (base model)","Image (tuned model)", "Video"])
209
 
210
  if __name__ == "__main__":
211
  demo.launch()