vchiang001 committed on
Commit 16fb395 • 1 Parent(s): 3e4d10c

change to live action

Files changed (1)
  1. app.py +8 -15
app.py CHANGED
@@ -163,19 +163,10 @@ def predict_pipeline(img_input,
     ############################################################
     ## Get DLC model and labels as strings
     if model_input_str == 'full_cat':
-        path_to_DLCmodel = "model/DLC_Cat_resnet_50_iteration-0_shuffle-0"
+        path_to_DLCmodel = "DLC_models/DLC_Cat_resnet_50_iteration-0_shuffle-0"
         pose_cfg_path = os.path.join(path_to_DLCmodel,'pose_cfg.yaml')
     elif model_input_str == 'full_dog':
-        path_to_DLCmodel = "model/DLC_Dog_resnet_50_iteration-0_shuffle-0"
-        pose_cfg_path = os.path.join(path_to_DLCmodel,'pose_cfg.yaml')
-    elif model_input_str == 'full_monkey':
-        path_to_DLCmodel = "model/DLC_monkey_resnet_50_iteration-0_shuffle-1"
-        pose_cfg_path = os.path.join(path_to_DLCmodel,'pose_cfg.yaml')
-    elif model_input_str == 'full_human':
-        path_to_DLCmodel = "model/DLC_human_dancing_resnet_101_iteration-0_shuffle-1"
-        pose_cfg_path = os.path.join(path_to_DLCmodel,'pose_cfg.yaml')
-    elif model_input_str == 'monkey_face':
-        path_to_DLCmodel = "model/DLC_FacialLandmarks_resnet_50_iteration-1_shuffle-1"
+        path_to_DLCmodel = "DLC_models/DLC_Dog_resnet_50_iteration-0_shuffle-0"
         pose_cfg_path = os.path.join(path_to_DLCmodel,'pose_cfg.yaml')
 
     # read pose cfg as dict
@@ -247,14 +238,14 @@ def predict_pipeline(img_input,
 # Get MegaDetector model
 # TODO: Allow user selectable model?
 # models = ["model_weights/md_v5a.0.0.pt","model_weights/md_v5b.0.0.pt"]
-MD_model = torch.hub.load('ultralytics/yolov5', 'custom', "megadet_model/md_v5b.0.0.pt")
+MD_model = torch.hub.load('ultralytics/yolov5', 'custom', "model_weights/md_v5a.0.0.pt")
 
 
 ####################################################
 # Create user interface and launch
 gr_image_input = gr.inputs.Image(type="pil", label="Input Image")
 gr_image_output = gr.outputs.Image(type="pil", label="Output Image")
-gr_dlc_model_input = gr.inputs.Dropdown(choices=['full_cat','full_dog', 'full monkey', 'full_human', 'monkey_face'], # choices
+gr_dlc_model_input = gr.inputs.Dropdown(choices=['full_cat','full_dog'], # choices
                                         default='full_cat', # default option
                                         type='value', # Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
                                         label='Select DLC model')
@@ -275,7 +266,7 @@ gr_description = "Detect and estimate the pose of animals in camera trap images,
 # article = "<p style='text-align: center'>This app makes predictions using a YOLOv5x6 model that was trained to detect animals, humans, and vehicles in camera trap images; find out more about the project on <a href='https://github.com/microsoft/CameraTraps'>GitHub</a>. This app was built by Henry Lydecker but really depends on code and models developed by <a href='http://ecologize.org/'>Ecologize</a> and <a href='http://aka.ms/aiforearth'>Microsoft AI for Earth</a>. Find out more about the YOLO model from the original creator, <a href='https://pjreddie.com/darknet/yolo/'>Joseph Redmon</a>. YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset and developed by Ultralytics, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. <a href='https://github.com/ultralytics/yolov5'>Source code</a> | <a href='https://pytorch.org/hub/ultralytics_yolov5'>PyTorch Hub</a></p>"
 # examples = [['data/Macropod.jpg'], ['data/koala2.jpg'],['data/cat.jpg'],['data/BrushtailPossum.jpg']]
 
-gr.Interface(predict_pipeline,
+demo = gr.Interface(predict_pipeline,
              inputs=[gr_image_input,
                      gr_dlc_model_input,
                      gr_dlc_only_checkbox,
@@ -284,8 +275,10 @@ gr.Interface(predict_pipeline,
              outputs=gr_image_output,
              title=gr_title,
              description=gr_description,
-             theme="huggingface").launch(enable_queue=True)
+             theme="huggingface",
+             live=True)
 
+demo.launch(enable_queue=True)
 
 # def dlclive_pose(model, crop_np, crop, fname, index,dlc_proc):
 # dlc_live = DLCLive(model, processor=dlc_proc)
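For reference, the MD_model line above uses YOLOv5's torch.hub 'custom' entry point, which loads arbitrary local weights (here a MegaDetector v5 checkpoint) instead of a named pretrained model. A minimal sketch of that pattern, using the checkpoint path from the new code and a hypothetical test image:

import torch

# 'custom' is the entry point in ultralytics/yolov5's hubconf for loading
# local weights; the checkpoint path matches the one used in this commit,
# and 'example.jpg' is a placeholder image, not a file shipped with the app.
MD_model = torch.hub.load('ultralytics/yolov5', 'custom', 'model_weights/md_v5a.0.0.pt')

results = MD_model('example.jpg')  # accepts a file path, PIL image, or numpy array
results.print()                    # print a summary of the detections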
 
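The commit title refers to Gradio's live mode: passing live=True makes the Interface re-run the prediction function whenever an input changes, instead of waiting for a Submit click. A minimal sketch of that behaviour, with a toy function standing in for predict_pipeline and written against the same legacy gr.inputs/launch API the file already uses:

import gradio as gr

def greet(name):
    return "Hello, " + name + "!"

# live=True re-runs greet on every input change; enable_queue=True queues
# incoming requests, as the app's own launch() call does.
demo = gr.Interface(greet,
                    inputs=gr.inputs.Textbox(label="Name"),
                    outputs="text",
                    live=True)

demo.launch(enable_queue=True)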