Fatih committed on
Commit
a17b1ad
1 Parent(s): 4445444

update for latest sahi changes

Browse files
Files changed (1) hide show
  1. app.py +10 -10
app.py CHANGED
@@ -6,6 +6,8 @@ import sahi.slicing
6
  from PIL import Image
7
  import numpy
8
 
 
 
9
  # Images
10
  sahi.utils.file.download_from_url(
11
  "https://user-images.githubusercontent.com/34196005/142730935-2ace3999-a47b-49bb-83e0-2bdd509f1c90.jpg",
@@ -29,7 +31,7 @@ sahi.utils.file.download_from_url(
29
 
30
  # Model
31
  model = sahi.model.Yolov5DetectionModel(
32
- model_path="yolov5s6.pt", device="cpu", confidence_threshold=0.5
33
  )
34
 
35
 
@@ -39,8 +41,7 @@ def sahi_yolo_inference(
39
  slice_width=512,
40
  overlap_height_ratio=0.2,
41
  overlap_width_ratio=0.2,
42
- image_size=640,
43
- postprocess_type="UNIONMERGE",
44
  postprocess_match_metric="IOS",
45
  postprocess_match_threshold=0.5,
46
  postprocess_class_agnostic=False,
@@ -75,7 +76,6 @@ def sahi_yolo_inference(
75
  prediction_result_2 = sahi.predict.get_sliced_prediction(
76
  image=image,
77
  detection_model=model,
78
- image_size=image_size,
79
  slice_height=slice_height,
80
  slice_width=slice_width,
81
  overlap_height_ratio=overlap_height_ratio,
@@ -103,9 +103,9 @@ inputs = [
103
  gr.inputs.Number(default=0.2, label="overlap_width_ratio"),
104
  gr.inputs.Number(default=640, label="image_size"),
105
  gr.inputs.Dropdown(
106
- ["NMS", "UNIONMERGE"],
107
  type="value",
108
- default="UNIONMERGE",
109
  label="postprocess_type",
110
  ),
111
  gr.inputs.Dropdown(
@@ -124,10 +124,10 @@ title = "Small Object Detection with SAHI + YOLOv5"
124
  description = "SAHI + YOLOv5 demo for small object detection. Upload an image or click an example image to use."
125
  article = "<p style='text-align: center'>SAHI is a lightweight vision library for performing large scale object detection/ instance segmentation.. <a href='https://github.com/obss/sahi'>SAHI Github</a> | <a href='https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80'>SAHI Blog</a> | <a href='https://github.com/fcakyon/yolov5-pip'>YOLOv5 Github</a> </p>"
126
  examples = [
127
- ["apple_tree.jpg", 256, 256, 0.2, 0.2, 640, "UNIONMERGE", "IOS", 0.5, True],
128
- ["highway.jpg", 256, 256, 0.2, 0.2, 640, "UNIONMERGE", "IOS", 0.5, True],
129
- ["highway2.jpg", 512, 512, 0.2, 0.2, 640, "UNIONMERGE", "IOS", 0.5, True],
130
- ["highway3.jpg", 512, 512, 0.2, 0.2, 640, "UNIONMERGE", "IOS", 0.5, True],
131
  ]
132
 
133
  gr.Interface(
 
6
  from PIL import Image
7
  import numpy
8
 
9
+ IMAGE_SIZE = 640
10
+
11
  # Images
12
  sahi.utils.file.download_from_url(
13
  "https://user-images.githubusercontent.com/34196005/142730935-2ace3999-a47b-49bb-83e0-2bdd509f1c90.jpg",
 
31
 
32
  # Model
33
  model = sahi.model.Yolov5DetectionModel(
34
+ model_path="yolov5s6.pt", device="cpu", confidence_threshold=0.5, image_size=IMAGE_SIZE
35
  )
36
 
37
 
 
41
  slice_width=512,
42
  overlap_height_ratio=0.2,
43
  overlap_width_ratio=0.2,
44
+ postprocess_type="GREEDYNMM",
 
45
  postprocess_match_metric="IOS",
46
  postprocess_match_threshold=0.5,
47
  postprocess_class_agnostic=False,
 
76
  prediction_result_2 = sahi.predict.get_sliced_prediction(
77
  image=image,
78
  detection_model=model,
 
79
  slice_height=slice_height,
80
  slice_width=slice_width,
81
  overlap_height_ratio=overlap_height_ratio,
 
103
  gr.inputs.Number(default=0.2, label="overlap_width_ratio"),
104
  gr.inputs.Number(default=640, label="image_size"),
105
  gr.inputs.Dropdown(
106
+ ["NMS", "GREEDYNMM"],
107
  type="value",
108
+ default="GREEDYNMM",
109
  label="postprocess_type",
110
  ),
111
  gr.inputs.Dropdown(
 
124
  description = "SAHI + YOLOv5 demo for small object detection. Upload an image or click an example image to use."
125
  article = "<p style='text-align: center'>SAHI is a lightweight vision library for performing large scale object detection/ instance segmentation.. <a href='https://github.com/obss/sahi'>SAHI Github</a> | <a href='https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80'>SAHI Blog</a> | <a href='https://github.com/fcakyon/yolov5-pip'>YOLOv5 Github</a> </p>"
126
  examples = [
127
+ ["apple_tree.jpg", 256, 256, 0.2, 0.2, 640, "GREEDYNMM", "IOS", 0.5, True],
128
+ ["highway.jpg", 256, 256, 0.2, 0.2, 640, "GREEDYNMM", "IOS", 0.5, True],
129
+ ["highway2.jpg", 512, 512, 0.2, 0.2, 640, "GREEDYNMM", "IOS", 0.5, True],
130
+ ["highway3.jpg", 512, 512, 0.2, 0.2, 640, "GREEDYNMM", "IOS", 0.5, True],
131
  ]
132
 
133
  gr.Interface(