fcakyon committed on
Commit 2f356cf
1 Parent(s): f096d27
Files changed (3)
  1. README.md +2 -2
  2. app.py +128 -0
  3. requirements.txt +4 -0
README.md CHANGED
@@ -1,6 +1,6 @@
  ---
- title: Sahi Yolov5
- emoji: 🔥
+ title: SAHI + YOLOv5
+ emoji: 👓
  colorFrom: indigo
  colorTo: green
  sdk: gradio
app.py ADDED
@@ -0,0 +1,128 @@
+ import gradio as gr
+ import yolov5
+ import sahi.utils
+ import sahi.model
+ import sahi.predict
+ from PIL import Image
+ import numpy
+
+ # Images
+ sahi.utils.file.download_from_url(
+     "https://user-images.githubusercontent.com/34196005/142730935-2ace3999-a47b-49bb-83e0-2bdd509f1c90.jpg",
+     "apple_tree.jpg",
+ )
+ sahi.utils.file.download_from_url(
+     "https://user-images.githubusercontent.com/34196005/142730936-1b397756-52e5-43be-a949-42ec0134d5d8.jpg",
+     "highway.jpg",
+ )
+
+ sahi.utils.file.download_from_url(
+     "https://user-images.githubusercontent.com/34196005/142742871-bf485f84-0355-43a3-be86-96b44e63c3a2.jpg",
+     "highway2.jpg",
+ )
+
+ sahi.utils.file.download_from_url(
+     "https://user-images.githubusercontent.com/34196005/142742872-1fefcc4d-d7e6-4c43-bbb7-6b5982f7e4ba.jpg",
+     "highway3.jpg",
+ )
+
+
+ # Model
+ model = sahi.model.Yolov5DetectionModel(
+     model_path="yolov5s6.pt", device="cpu", confidence_threshold=0.5
+ )
+
+
+ def sahi_yolo_inference(
+     image,
+     slice_height=512,
+     slice_width=512,
+     overlap_height_ratio=0.2,
+     overlap_width_ratio=0.2,
+     image_size=640,
+     postprocess_type="UNIONMERGE",
+     postprocess_match_metric="IOS",
+     postprocess_match_threshold=0.5,
+     postprocess_class_agnostic=False,
+ ):
+
+     # standard inference
+     prediction_result_1 = sahi.predict.get_prediction(
+         image=image, detection_model=model, image_size=image_size
+     )
+     print(image)
+     visual_result_1 = sahi.utils.cv.visualize_object_predictions(
+         image=numpy.array(image),
+         object_prediction_list=prediction_result_1.object_prediction_list,
+     )
+     output_1 = Image.fromarray(visual_result_1["image"])
+
+     # sliced inference
+     prediction_result_2 = sahi.predict.get_sliced_prediction(
+         image=image,
+         detection_model=model,
+         image_size=image_size,
+         slice_height=slice_height,
+         slice_width=slice_width,
+         overlap_height_ratio=overlap_height_ratio,
+         overlap_width_ratio=overlap_width_ratio,
+         postprocess_type=postprocess_type,
+         postprocess_match_metric=postprocess_match_metric,
+         postprocess_match_threshold=postprocess_match_threshold,
+         postprocess_class_agnostic=postprocess_class_agnostic,
+     )
+     visual_result_2 = sahi.utils.cv.visualize_object_predictions(
+         image=numpy.array(image),
+         object_prediction_list=prediction_result_2.object_prediction_list,
+     )
+
+     output_2 = Image.fromarray(visual_result_2["image"])
+
+     return output_1, output_2
+
+
+ inputs = [
+     gr.inputs.Image(type="pil", label="Original Image"),
+     gr.inputs.Number(default=512, label="slice_height"),
+     gr.inputs.Number(default=512, label="slice_width"),
+     gr.inputs.Number(default=0.2, label="overlap_height_ratio"),
+     gr.inputs.Number(default=0.2, label="overlap_width_ratio"),
+     gr.inputs.Number(default=640, label="image_size"),
+     gr.inputs.Dropdown(
+         ["NMS", "UNIONMERGE"],
+         type="value",
+         default="UNIONMERGE",
+         label="postprocess_type",
+     ),
+     gr.inputs.Dropdown(
+         ["IOU", "IOS"], type="value", default="IOS", label="postprocess_match_metric"
+     ),
+     gr.inputs.Number(default=0.5, label="postprocess_match_threshold"),
+     gr.inputs.Checkbox(default=True, label="postprocess_class_agnostic"),
+ ]
+
+ outputs = [
+     gr.outputs.Image(type="pil", label="Standard YOLOv5s Inference"),
+     gr.outputs.Image(type="pil", label="Sliced YOLOv5s Inference"),
+ ]
+
+ title = "SAHI + YOLOv5"
+ description = "SAHI + YOLOv5 Gradio demo for object detection. Upload an image or click an example image to use it."
+ article = "<p style='text-align: center'>SAHI is a lightweight vision library for performing large-scale object detection and instance segmentation. <a href='https://github.com/obss/sahi'>SAHI Github</a> | <a href='https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80'>SAHI Blog</a> | <a href='https://github.com/fcakyon/yolov5-pip'>YOLOv5 Github</a></p>"
+ examples = [
+     ["apple_tree.jpg", 256, 256, 0.2, 0.2, 640, "UNIONMERGE", "IOS", 0.5, True],
+     ["highway.jpg", 256, 256, 0.2, 0.2, 640, "UNIONMERGE", "IOS", 0.5, True],
+     ["highway2.jpg", 512, 512, 0.2, 0.2, 640, "UNIONMERGE", "IOS", 0.5, True],
+     ["highway3.jpg", 1024, 1024, 0.2, 0.2, 640, "UNIONMERGE", "IOS", 0.5, True],
+ ]
+
+ gr.Interface(
+     sahi_yolo_inference,
+     inputs,
+     outputs,
+     title=title,
+     description=description,
+     article=article,
+     examples=examples,
+     theme="default",
+ ).launch(debug=True)
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ torch==1.10.0+cpu
+ torchvision==0.11.1+cpu
+ yolov5==6.0.4
+ sahi==0.8.9
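The +cpu local-version tags pin CPU-only torch/torchvision builds, which are typically served from PyTorch's own wheel index rather than plain PyPI. If you reproduce the environment locally, a quick sanity check (a minimal sketch, not part of the Space) confirms the pins resolved as expected:

```python
# Verify the installed versions match the pins in requirements.txt.
from importlib.metadata import version

for pkg in ("torch", "torchvision", "yolov5", "sahi"):
    print(pkg, version(pkg))
```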