fcakyon committed
Commit 61fbf22
1 Parent(s): cd86d7f

Update app.py

Files changed (1): app.py +8 -8
app.py CHANGED
@@ -39,8 +39,8 @@ def sahi_yolo_inference(
     image,
     slice_height=512,
     slice_width=512,
-    overlap_height_ratio=0.1,
-    overlap_width_ratio=0.1,
+    overlap_height_ratio=0.2,
+    overlap_width_ratio=0.2,
     postprocess_type="NMS",
     postprocess_match_metric="IOU",
     postprocess_match_threshold=0.5,
@@ -100,8 +100,8 @@ inputs = [
     gr.Image(type="pil", label="Original Image"),
     gr.Number(default=512, label="slice_height"),
     gr.Number(default=512, label="slice_width"),
-    gr.Number(default=0.1, label="overlap_height_ratio"),
-    gr.Number(default=0.1, label="overlap_width_ratio"),
+    gr.Number(default=0.2, label="overlap_height_ratio"),
+    gr.Number(default=0.2, label="overlap_width_ratio"),
     gr.Dropdown(
         ["NMS", "GREEDYNMM"],
         type="value",
@@ -124,10 +124,10 @@ title = "Small Object Detection with SAHI + YOLOv5"
 description = "SAHI + YOLOv5 demo for small object detection. Upload an image or click an example image to use."
 article = "<p style='text-align: center'>SAHI is a lightweight vision library for performing large scale object detection/ instance segmentation.. <a href='https://github.com/obss/sahi'>SAHI Github</a> | <a href='https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80'>SAHI Blog</a> | <a href='https://github.com/fcakyon/yolov5-pip'>YOLOv5 Github</a> </p>"
 examples = [
-    ["apple_tree.jpg", 256, 256, 0.1, 0.1, "NMS", "IOU", 0.5, True],
-    ["highway.jpg", 256, 256, 0.1, 0.1, "NMS", "IOU", 0.5, True],
-    ["highway2.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.5, True],
-    ["highway3.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.5, True],
+    ["apple_tree.jpg", 256, 256, 0.2, 0.2, "NMS", "IOU", 0.4, True],
+    ["highway.jpg", 256, 256, 0.2, 0.2, "NMS", "IOU", 0.4, True],
+    ["highway2.jpg", 512, 512, 0.2, 0.2, "NMS", "IOU", 0.4, True],
+    ["highway3.jpg", 512, 512, 0.2, 0.2, "NMS", "IOU", 0.4, True],
 ]
 
 gr.Interface(
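
For context on what the bumped defaults mean: with a 512 px slice, raising the overlap ratio from 0.1 to 0.2 roughly doubles how much adjacent slices share, which helps objects that straddle a slice border get detected whole. A minimal sketch of the arithmetic, assuming the commonly documented stride formula slice_size * (1 - overlap_ratio); the exact rounding inside sahi may differ:

# Pixel meaning of the new overlap defaults (assumed stride formula,
# not taken from this commit).
slice_height = 512
overlap_height_ratio = 0.2                             # new default (was 0.1)
overlap_px = int(slice_height * overlap_height_ratio)  # 102 px shared between neighbors
stride_px = slice_height - overlap_px                  # 410 px step between slice origins
print(overlap_px, stride_px)                           # -> 102 410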
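
And a minimal sketch of where these arguments land, SAHI's get_sliced_prediction. The weights path, device, and confidence threshold here are placeholder assumptions (the demo's model setup is outside the visible hunks), and older SAHI releases expose model-specific classes instead of AutoDetectionModel:

from sahi import AutoDetectionModel
from sahi.predict import get_sliced_prediction

# Placeholder model setup; the commit does not show how the demo loads YOLOv5.
detection_model = AutoDetectionModel.from_pretrained(
    model_type="yolov5",
    model_path="yolov5s6.pt",          # assumed weights file
    confidence_threshold=0.5,          # assumed
    device="cpu",                      # assumed
)

result = get_sliced_prediction(
    "highway.jpg",                     # one of the demo's example images
    detection_model,
    slice_height=512,
    slice_width=512,
    overlap_height_ratio=0.2,          # new default from this commit
    overlap_width_ratio=0.2,           # new default from this commit
    postprocess_type="NMS",
    postprocess_match_metric="IOU",
    postprocess_match_threshold=0.4,   # matches the updated example rows
)
result.export_visuals(export_dir="outputs/")  # writes an annotated image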