Fatih committed on
Commit 2a3d3f0
1 Parent(s): 12d8bfa
Files changed (1)
  1. app.py +4 -5
app.py CHANGED
@@ -101,7 +101,6 @@ inputs = [
     gr.inputs.Number(default=512, label="slice_width"),
     gr.inputs.Number(default=0.2, label="overlap_height_ratio"),
     gr.inputs.Number(default=0.2, label="overlap_width_ratio"),
-    gr.inputs.Number(default=640, label="image_size"),
     gr.inputs.Dropdown(
         ["NMS", "GREEDYNMM"],
         type="value",
@@ -124,10 +123,10 @@ title = "Small Object Detection with SAHI + YOLOv5"
 description = "SAHI + YOLOv5 demo for small object detection. Upload an image or click an example image to use."
 article = "<p style='text-align: center'>SAHI is a lightweight vision library for performing large scale object detection/ instance segmentation.. <a href='https://github.com/obss/sahi'>SAHI Github</a> | <a href='https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80'>SAHI Blog</a> | <a href='https://github.com/fcakyon/yolov5-pip'>YOLOv5 Github</a> </p>"
 examples = [
-    ["apple_tree.jpg", 256, 256, 0.2, 0.2, 640, "GREEDYNMM", "IOS", 0.5, True],
-    ["highway.jpg", 256, 256, 0.2, 0.2, 640, "GREEDYNMM", "IOS", 0.5, True],
-    ["highway2.jpg", 512, 512, 0.2, 0.2, 640, "GREEDYNMM", "IOS", 0.5, True],
-    ["highway3.jpg", 512, 512, 0.2, 0.2, 640, "GREEDYNMM", "IOS", 0.5, True],
+    ["apple_tree.jpg", 256, 256, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
+    ["highway.jpg", 256, 256, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
+    ["highway2.jpg", 512, 512, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
+    ["highway3.jpg", 512, 512, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
 ]
 
 gr.Interface(
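For context: in the legacy gr.inputs API this app.py uses, each row of examples must supply one value per entry in inputs, in the same order, which is why removing the image_size Number also removes the 640 column from every example row. Below is a minimal sketch of that alignment after this commit; only the lines visible in the hunks above come from the repository, while the predict function name, the first image input, and the trailing inputs/output are assumptions added for illustration.

import gradio as gr

# Hypothetical inference wrapper; the real app.py defines its own function,
# whose name and body are not visible in this diff.
def predict(image, slice_height, slice_width, overlap_height_ratio,
            overlap_width_ratio, postprocess_type, postprocess_match_metric,
            postprocess_match_threshold, postprocess_class_agnostic):
    ...

inputs = [
    gr.inputs.Image(type="pil", label="image"),                   # assumed first input
    gr.inputs.Number(default=512, label="slice_height"),          # assumed, mirrors slice_width
    gr.inputs.Number(default=512, label="slice_width"),           # shown in the first hunk
    gr.inputs.Number(default=0.2, label="overlap_height_ratio"),
    gr.inputs.Number(default=0.2, label="overlap_width_ratio"),
    # gr.inputs.Number(default=640, label="image_size") was removed by this commit
    gr.inputs.Dropdown(["NMS", "GREEDYNMM"], type="value", label="postprocess_type"),
    gr.inputs.Dropdown(["IOU", "IOS"], type="value", label="postprocess_match_metric"),  # assumed
    gr.inputs.Number(default=0.5, label="postprocess_match_threshold"),                  # assumed
    gr.inputs.Checkbox(default=True, label="postprocess_class_agnostic"),                # assumed
]

# One value per input, in order; note there is no longer a 640 image_size column.
examples = [
    ["apple_tree.jpg", 256, 256, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
]

gr.Interface(
    fn=predict,
    inputs=inputs,
    outputs=gr.outputs.Image(type="pil", label="prediction"),     # assumed output
    examples=examples,
).launch()

Because Gradio validates example rows against the declared inputs, changing the inputs list without updating examples would leave the example rows one column too long, so the two hunks in this commit have to change together.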