hlydecker committed
Commit aef358b
1 Parent(s): 21059a2

Revert Changes

Files changed (1)
  1. app.py +12 -11
app.py CHANGED
@@ -7,29 +7,30 @@ from PIL import Image
 
 # Load MegaDetector v5a model
 # TODO: Allow user selectable model?
-models = ["model_weights/md_v5a.0.0.pt","model_weights/md_v5b.0.0.pt"]
+# models = ["model_weights/md_v5a.0.0.pt","model_weights/md_v5b.0.0.pt"]
+model = torch.hub.load('ultralytics/yolov5', 'custom', "model_weights/md_v5a.0.0.pt")
 
-def yolo(image, chosen_model, size):
-    im = image
+def yolo(im, size=640):
     g = (size / max(im.size)) # gain
     im = im.resize((int(x * g) for x in im.size), Image.ANTIALIAS) # resize
 
-    model = torch.hub.load('ultralytics/yolov5', 'custom', chosen_model)
+    model = torch.hub.load('ultralytics/yolov5', 'custom', "model_weights/md_v5a.0.0.pt")
 
     results = model(im) # inference
     results.render() # updates results.imgs with boxes and labels
     return Image.fromarray(results.imgs[0])
 
-image = gr.inputs.Image(type="pil", label="Input Image")
-chosen_model = gr.inputs.Dropdown(choices = models, value = "model_weights/md_v5a.0.0.pt",type = "value", label="Model Weight")
-size = 640
+#image = gr.inputs.Image(type="pil", label="Input Image")
+#chosen_model = gr.inputs.Dropdown(choices = models, value = "model_weights/md_v5a.0.0.pt",type = "value", label="Model Weight")
+#size = 640
 
-inputs = [image, chosen_model, size]
+#inputs = [image, chosen_model, size]
+inputs = gr.inputs.Image(type="pil", label="Input Image")
 outputs = gr.outputs.Image(type="pil", label="Output Image")
 
 title = "MegaDetector v5"
-description = "Detect and identify animals, people and vehicles in camera trap images"
-article = "<p style='text-align: center'>This app makes predictions using a YOLOv5 model that was trained to detect animals, humans, and vehicles in camera trap images; find out more about the project on <a href='https://github.com/microsoft/CameraTraps'>GitHub</a>. This app was built by Henry Lydecker but really depends on code and models developed by <a href='http://ecologize.org/'>Ecologize</a> and <a href='http://aka.ms/aiforearth'>Microsoft AI for Earth</a>. Find out more about the YOLO model from the original creator, <a href='https://pjreddie.com/darknet/yolo/'>Joseph Redmon</a>. YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset and developed by Ultralytics, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. <a href='https://github.com/ultralytics/yolov5'>Source code</a> | <a href='https://pytorch.org/hub/ultralytics_yolov5'>PyTorch Hub</a></p>"
+description = "Detect and identify animals, people and vehicles in camera trap images using MegaDetector v5a"
+article = "<p style='text-align: center'>This app makes predictions using a YOLOv5x6 model that was trained to detect animals, humans, and vehicles in camera trap images; find out more about the project on <a href='https://github.com/microsoft/CameraTraps'>GitHub</a>. This app was built by Henry Lydecker but really depends on code and models developed by <a href='http://ecologize.org/'>Ecologize</a> and <a href='http://aka.ms/aiforearth'>Microsoft AI for Earth</a>. Find out more about the YOLO model from the original creator, <a href='https://pjreddie.com/darknet/yolo/'>Joseph Redmon</a>. YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset and developed by Ultralytics, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. <a href='https://github.com/ultralytics/yolov5'>Source code</a> | <a href='https://pytorch.org/hub/ultralytics_yolov5'>PyTorch Hub</a></p>"
 
 examples = [['data/Macropod.jpg'], ['data/koala2.jpg'],['data/cat.jpg'],['data/BrushtailPossum.jpg']]
-gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article, examples=examples, theme="huggingface").launch(enable_queue=True)
+gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article, examples=examples, theme="huggingface").launch(enable_queue=True)
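
For orientation, here is a rough sketch of app.py as it stands after this revert, assembled from the "+" and context lines of the diff above. The exact import block is an assumption (the hunk context only shows "from PIL import Image"), the commented-out leftovers of the multi-model dropdown are omitted, and the long article HTML string is abbreviated; the code also presumes the old gr.inputs / gr.outputs Gradio API and a Pillow release that still provides Image.ANTIALIAS.

import gradio as gr   # assumed import; not shown in the hunk
import torch          # assumed import; not shown in the hunk
from PIL import Image

# Load MegaDetector v5a model
# TODO: Allow user selectable model?
model = torch.hub.load('ultralytics/yolov5', 'custom', "model_weights/md_v5a.0.0.pt")

def yolo(im, size=640):
    g = (size / max(im.size))  # gain
    im = im.resize((int(x * g) for x in im.size), Image.ANTIALIAS)  # resize (old Pillow API)

    # The reverted code reloads the weights via torch.hub on every call,
    # shadowing the module-level model within this function.
    model = torch.hub.load('ultralytics/yolov5', 'custom', "model_weights/md_v5a.0.0.pt")

    results = model(im)   # inference
    results.render()      # updates results.imgs with boxes and labels
    return Image.fromarray(results.imgs[0])

inputs = gr.inputs.Image(type="pil", label="Input Image")
outputs = gr.outputs.Image(type="pil", label="Output Image")

title = "MegaDetector v5"
description = "Detect and identify animals, people and vehicles in camera trap images using MegaDetector v5a"
article = "<p style='text-align: center'>...</p>"  # full HTML string as in the diff above

examples = [['data/Macropod.jpg'], ['data/koala2.jpg'], ['data/cat.jpg'], ['data/BrushtailPossum.jpg']]
gr.Interface(yolo, inputs, outputs, title=title, description=description,
             article=article, examples=examples, theme="huggingface").launch(enable_queue=True)

Note that this revert keeps two loads of the same weights: once at import time and again inside yolo() on every request, so each prediction pays the torch.hub.load cost; removing the in-function load would be the obvious follow-up.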