update sahi version and default params

#1
by fcakyon - opened
Files changed (2)
  1. app.py +49 -46
  2. requirements.txt +2 -2
app.py CHANGED
@@ -11,14 +11,20 @@ import torch
 
 IMAGE_SIZE = 640
 
-model_path=hf_hub_download("kadirnar/deprem_model_v1", filename="last.pt",revision="main")
+model_path = hf_hub_download(
+    "deprem-ml/Binafarktespit-yolo5x-v1-xview", filename="last.pt", revision="main"
+)
 
 
-current_device='cuda' if torch.cuda.is_available() else 'cpu'
-model_types=["YOLOv5","YOLOv5 + SAHI"]
+current_device = "cuda" if torch.cuda.is_available() else "cpu"
+model_types = ["YOLOv5", "YOLOv5 + SAHI"]
 # Model
 model = AutoDetectionModel.from_pretrained(
-    model_type="yolov5", model_path=model_path, device=current_device, confidence_threshold=0.5, image_size=IMAGE_SIZE
+    model_type="yolov5",
+    model_path=model_path,
+    device=current_device,
+    confidence_threshold=0.5,
+    image_size=IMAGE_SIZE,
 )
 
 
@@ -27,15 +33,15 @@ def sahi_yolo_inference(
     image,
     slice_height=512,
     slice_width=512,
-    overlap_height_ratio=0.2,
-    overlap_width_ratio=0.2,
-    postprocess_type="GREEDYNMM",
-    postprocess_match_metric="IOS",
-    postprocess_match_threshold=0.5,
+    overlap_height_ratio=0.1,
+    overlap_width_ratio=0.1,
+    postprocess_type="NMS",
+    postprocess_match_metric="IOU",
+    postprocess_match_threshold=0.25,
     postprocess_class_agnostic=False,
 ):
 
-    #image_width, image_height = image.size
+    # image_width, image_height = image.size
     # sliced_bboxes = sahi.slicing.get_slice_bboxes(
     #     image_height,
     #     image_width,
@@ -50,22 +56,21 @@ def sahi_yolo_inference(
     #         f"{len(sliced_bboxes)} slices are too much for huggingface spaces, try smaller slice size."
     #     )
 
-
-    rect_th = None or max(round(sum(image.size) / 2 * 0.001), 1)
-    text_th = None or max(rect_th - 1, 1)
+    rect_th = None or max(round(sum(image.size) / 2 * 0.0001), 1)
+    text_th = None or max(rect_th - 2, 1)
 
     if "SAHI" in model_type:
         prediction_result_2 = sahi.predict.get_sliced_prediction(
-            image=image,
-            detection_model=model,
-            slice_height=int(slice_height),
-            slice_width=int(slice_width),
-            overlap_height_ratio=overlap_height_ratio,
-            overlap_width_ratio=overlap_width_ratio,
-            postprocess_type=postprocess_type,
-            postprocess_match_metric=postprocess_match_metric,
-            postprocess_match_threshold=postprocess_match_threshold,
-            postprocess_class_agnostic=postprocess_class_agnostic,
+            image=image,
+            detection_model=model,
+            slice_height=int(slice_height),
+            slice_width=int(slice_width),
+            overlap_height_ratio=overlap_height_ratio,
+            overlap_width_ratio=overlap_width_ratio,
+            postprocess_type=postprocess_type,
+            postprocess_match_metric=postprocess_match_metric,
+            postprocess_match_threshold=postprocess_match_threshold,
+            postprocess_class_agnostic=postprocess_class_agnostic,
         )
         visual_result_2 = sahi.utils.cv.visualize_object_predictions(
             image=numpy.array(image),
@@ -94,41 +99,39 @@ def sahi_yolo_inference(
 # sliced inference
 
 
-
-
 inputs = [
-    gr.inputs.Dropdown(choices=model_types,label="Choose Model Type",type="value",),
-    gr.inputs.Image(type="pil", label="Original Image"),
-    gr.inputs.Number(default=512, label="slice_height"),
-    gr.inputs.Number(default=512, label="slice_width"),
-    gr.inputs.Number(default=0.2, label="overlap_height_ratio"),
-    gr.inputs.Number(default=0.2, label="overlap_width_ratio"),
-    gr.inputs.Dropdown(
+    gr.Dropdown(
+        choices=model_types,
+        label="Choose Model Type",
+        type="value",
+        value=model_types[1],
+    ),
+    gr.Image(type="pil", label="Original Image"),
+    gr.Number(default=512, label="slice_height"),
+    gr.Number(default=512, label="slice_width"),
+    gr.Number(default=0.1, label="overlap_height_ratio"),
+    gr.Number(default=0.1, label="overlap_width_ratio"),
+    gr.Dropdown(
         ["NMS", "GREEDYNMM"],
         type="value",
-        default="GREEDYNMM",
+        value="NMS",
         label="postprocess_type",
     ),
-    gr.inputs.Dropdown(
-        ["IOU", "IOS"], type="value", default="IOS", label="postprocess_type"
-    ),
-    gr.inputs.Number(default=0.5, label="postprocess_match_threshold"),
-    gr.inputs.Checkbox(default=True, label="postprocess_class_agnostic"),
+    gr.Dropdown(["IOU", "IOS"], type="value", value="IOU", label="postprocess_type"),
+    gr.Number(default=0.5, label="postprocess_match_threshold"),
+    gr.Checkbox(default=True, label="postprocess_class_agnostic"),
 ]
 
-outputs = [
-    gr.outputs.Image(type="pil", label="Output")
-]
+outputs = [gr.outputs.Image(type="pil", label="Output")]
 
 title = "Small Object Detection with SAHI + YOLOv5"
 description = "SAHI + YOLOv5 demo for small object detection. Upload an image or click an example image to use."
 article = "<p style='text-align: center'>SAHI is a lightweight vision library for performing large scale object detection/ instance segmentation.. <a href='https://github.com/obss/sahi'>SAHI Github</a> | <a href='https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80'>SAHI Blog</a> | <a href='https://github.com/fcakyon/yolov5-pip'>YOLOv5 Github</a> </p>"
 examples = [
-    [model_types[0],"26.jpg", 256, 256, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
-    [model_types[0],"27.jpg", 512, 512, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
-    [model_types[0],"28.jpg", 512, 512, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
-    [model_types[0],"31.jpg", 512, 512, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
-
+    [model_types[1], "26.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
+    [model_types[1], "27.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
+    [model_types[1], "28.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
+    [model_types[1], "31.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
 ]
 gr.Interface(
     sahi_yolo_inference,
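
For reference, a minimal standalone sketch of the new defaults this PR sets (overlap ratios 0.2 → 0.1, postprocess GREEDYNMM/IOS/0.5 → NMS/IOU/0.25, and the new checkpoint repo), run outside the Gradio app. The test image path `demo.jpg` is hypothetical; the checkpoint repo and all parameter values are the ones used in the diff above.

```python
# Sketch only: reproduces the app's updated defaults in a plain script.
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from sahi import AutoDetectionModel
from sahi.predict import get_sliced_prediction

# Same checkpoint repo as in the PR
model_path = hf_hub_download(
    "deprem-ml/Binafarktespit-yolo5x-v1-xview", filename="last.pt", revision="main"
)
model = AutoDetectionModel.from_pretrained(
    model_type="yolov5",
    model_path=model_path,
    device="cuda" if torch.cuda.is_available() else "cpu",
    confidence_threshold=0.5,
    image_size=640,
)

result = get_sliced_prediction(
    image=Image.open("demo.jpg"),      # hypothetical local test image
    detection_model=model,
    slice_height=512,
    slice_width=512,
    overlap_height_ratio=0.1,          # new default (was 0.2)
    overlap_width_ratio=0.1,           # new default (was 0.2)
    postprocess_type="NMS",            # new default (was "GREEDYNMM")
    postprocess_match_metric="IOU",    # new default (was "IOS")
    postprocess_match_threshold=0.25,  # new default (was 0.5)
    postprocess_class_agnostic=False,
)
print(f"{len(result.object_prediction_list)} objects detected")
```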
requirements.txt CHANGED
@@ -1,5 +1,5 @@
 torch==1.10.2+cpu
 torchvision==0.11.3+cpu
 -f https://download.pytorch.org/whl/torch_stable.html
-yolov5==6.2.3
-sahi==0.11.1
+yolov5==7.0.8
+sahi==0.11.11
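
A quick sanity check after installing the updated requirements, as a sketch; the expected version strings are simply the ones pinned above.

```python
# Verify the pinned packages resolved as expected (assumes they are installed).
from importlib.metadata import version

assert version("sahi") == "0.11.11"
assert version("yolov5") == "7.0.8"
print("sahi", version("sahi"), "| yolov5", version("yolov5"))
```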