SerdarHelli committed
Commit ce7d026
1 Parent(s): 297a2c6

Update app.py

Files changed (1)
  1. app.py +50 -56
app.py CHANGED
@@ -5,37 +5,25 @@ import sahi.predict
 import sahi.slicing
 from PIL import Image
 import numpy
+from huggingface_hub import hf_hub_download
+import torch
+
 
 IMAGE_SIZE = 640
 
-# Images
-sahi.utils.file.download_from_url(
-    "https://user-images.githubusercontent.com/34196005/142730935-2ace3999-a47b-49bb-83e0-2bdd509f1c90.jpg",
-    "apple_tree.jpg",
-)
-sahi.utils.file.download_from_url(
-    "https://user-images.githubusercontent.com/34196005/142730936-1b397756-52e5-43be-a949-42ec0134d5d8.jpg",
-    "highway.jpg",
-)
-
-sahi.utils.file.download_from_url(
-    "https://user-images.githubusercontent.com/34196005/142742871-bf485f84-0355-43a3-be86-96b44e63c3a2.jpg",
-    "highway2.jpg",
-)
-
-sahi.utils.file.download_from_url(
-    "https://user-images.githubusercontent.com/34196005/142742872-1fefcc4d-d7e6-4c43-bbb7-6b5982f7e4ba.jpg",
-    "highway3.jpg",
-)
+model_path = hf_hub_download("kadirnar/deprem_model_v1", filename="last.pt", revision="main")
+
+current_device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Model
 model = AutoDetectionModel.from_pretrained(
-    model_type="yolov5", model_path="yolov5s6.pt", device="cpu", confidence_threshold=0.5, image_size=IMAGE_SIZE
+    model_type="yolov5", model_path=model_path, device=current_device, confidence_threshold=0.5, image_size=IMAGE_SIZE
 )
 
 
 def sahi_yolo_inference(
+    model_type,
     image,
     slice_height=512,
     slice_width=512,
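This first hunk swaps the bundled `yolov5s6.pt` checkpoint and the hard-coded sample-image downloads for weights pulled from the `kadirnar/deprem_model_v1` Hub repo, and picks the device at startup instead of pinning CPU. A minimal standalone sketch of the new loading path (repo id, filename, and revision are taken from the diff; the top-level `AutoDetectionModel` import is an assumption about the SAHI version in use):

```python
import torch
from huggingface_hub import hf_hub_download
from sahi import AutoDetectionModel  # assumes a sahi release that exposes this at top level

# Download (and cache) the checkpoint from the Hugging Face Hub.
model_path = hf_hub_download(
    "kadirnar/deprem_model_v1", filename="last.pt", revision="main"
)

# Use the GPU when one is visible, otherwise fall back to CPU.
current_device = "cuda" if torch.cuda.is_available() else "cpu"

# Wrap the YOLOv5 weights in SAHI's generic detection-model interface.
model = AutoDetectionModel.from_pretrained(
    model_type="yolov5",
    model_path=model_path,
    device=current_device,
    confidence_threshold=0.5,
    image_size=640,
)
```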
@@ -47,34 +35,26 @@ def sahi_yolo_inference(
     postprocess_class_agnostic=False,
 ):
 
-    image_width, image_height = image.size
-    sliced_bboxes = sahi.slicing.get_slice_bboxes(
-        image_height,
-        image_width,
-        slice_height,
-        slice_width,
-        False,
-        overlap_height_ratio,
-        overlap_width_ratio,
-    )
-    if len(sliced_bboxes) > 60:
-        raise ValueError(
-            f"{len(sliced_bboxes)} slices are too much for huggingface spaces, try smaller slice size."
-        )
-
-    # standard inference
-    prediction_result_1 = sahi.predict.get_prediction(
-        image=image, detection_model=model
-    )
-    print(image)
-    visual_result_1 = sahi.utils.cv.visualize_object_predictions(
-        image=numpy.array(image),
-        object_prediction_list=prediction_result_1.object_prediction_list,
-    )
-    output_1 = Image.fromarray(visual_result_1["image"])
-
-    # sliced inference
-    prediction_result_2 = sahi.predict.get_sliced_prediction(
+    # image_width, image_height = image.size
+    # sliced_bboxes = sahi.slicing.get_slice_bboxes(
+    #     image_height,
+    #     image_width,
+    #     slice_height,
+    #     slice_width,
+    #     False,
+    #     overlap_height_ratio,
+    #     overlap_width_ratio,
+    # )
+    # if len(sliced_bboxes) > 60:
+    #     raise ValueError(
+    #         f"{len(sliced_bboxes)} slices are too much for huggingface spaces, try smaller slice size."
+    #     )
+
+
+
+
+    if "SAHI" in model_type:
+        prediction_result_2 = sahi.predict.get_sliced_prediction(
         image=image,
         detection_model=model,
         slice_height=int(slice_height),
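Note that the per-request slice-count guard is only commented out here, not removed. For reference, the same check can still be computed up front; a minimal sketch, assuming the keyword names of SAHI's current `get_slice_bboxes` (the positional `False` in the old call is read as `auto_slice_resolution`, and the 60-slice ceiling is the value from the original guard):

```python
import sahi.slicing

def slice_count(image, slice_height=512, slice_width=512,
                overlap_height_ratio=0.2, overlap_width_ratio=0.2):
    """Count the tiles SAHI would cut from a PIL image, before running anything."""
    image_width, image_height = image.size
    sliced_bboxes = sahi.slicing.get_slice_bboxes(
        image_height,
        image_width,
        slice_height=slice_height,
        slice_width=slice_width,
        auto_slice_resolution=False,  # assumed meaning of the positional False above
        overlap_height_ratio=overlap_height_ratio,
        overlap_width_ratio=overlap_width_ratio,
    )
    return len(sliced_bboxes)

# e.g. refuse oversized jobs, as the old guard did:
# if slice_count(image) > 60: raise ValueError("too many slices for a CPU Space")
```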
@@ -85,18 +65,33 @@ def sahi_yolo_inference(
         postprocess_match_metric=postprocess_match_metric,
         postprocess_match_threshold=postprocess_match_threshold,
         postprocess_class_agnostic=postprocess_class_agnostic,
-    )
-    visual_result_2 = sahi.utils.cv.visualize_object_predictions(
-        image=numpy.array(image),
-        object_prediction_list=prediction_result_2.object_prediction_list,
-    )
+        )
+        visual_result_2 = sahi.utils.cv.visualize_object_predictions(
+            image=numpy.array(image),
+            object_prediction_list=prediction_result_2.object_prediction_list,
+        )
+        output = Image.fromarray(visual_result_2["image"])
+
+    else:
+        # standard inference
+        prediction_result_1 = sahi.predict.get_prediction(
+            image=image, detection_model=model
+        )
+        print(image)
+        visual_result_1 = sahi.utils.cv.visualize_object_predictions(
+            image=numpy.array(image),
+            object_prediction_list=prediction_result_1.object_prediction_list,
+        )
+        output = Image.fromarray(visual_result_1["image"])
+
+    # sliced inference
 
-    output_2 = Image.fromarray(visual_result_2["image"])
 
-    return output_1, output_2
+    return output
 
 
 inputs = [
+    gr.Dropdown(choices=["YOLOv5", "YOLOv5 + SAHI"], label="Choose Model Type"),
     gr.inputs.Image(type="pil", label="Original Image"),
     gr.inputs.Number(default=512, label="slice_height"),
     gr.inputs.Number(default=512, label="slice_width"),
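With this hunk the function returns a single annotated image, and the branch is chosen by substring match on the new `model_type` argument: any choice containing "SAHI" takes the sliced path, everything else runs plain YOLOv5. A hedged usage sketch, assuming the parameters elided between hunks keep the defaults visible in the Gradio inputs:

```python
from PIL import Image

img = Image.open("sample.jpg")  # illustrative path, not part of the Space

# "SAHI" in the string routes to sahi.predict.get_sliced_prediction ...
sliced_out = sahi_yolo_inference("YOLOv5 + SAHI", img)

# ... anything else takes the plain sahi.predict.get_prediction branch.
plain_out = sahi_yolo_inference("YOLOv5", img)

sliced_out.save("pred_sliced.jpg")
plain_out.save("pred_plain.jpg")
```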
@@ -116,8 +111,7 @@ inputs = [
 ]
 
 outputs = [
-    gr.outputs.Image(type="pil", label="YOLOv5s"),
-    gr.outputs.Image(type="pil", label="YOLOv5s + SAHI"),
+    gr.outputs.Image(type="pil", label="Output")
 ]
 
 title = "Small Object Detection with SAHI + YOLOv5"
 