SerdarHelli fcakyon committed on
Commit
297a2c6
0 Parent(s):

Duplicate from fcakyon/sahi-yolov5


Co-authored-by: Fatih <fcakyon@users.noreply.huggingface.co>

Files changed (5)
  1. .gitattributes +27 -0
  2. README.md +38 -0
  3. app.py +142 -0
  4. packages.txt +3 -0
  5. requirements.txt +5 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,38 @@
+ ---
+ title: Small Object Detection with YOLOv5
+ emoji: 🔭
+ colorFrom: pink
+ colorTo: yellow
+ sdk: gradio
+ app_file: app.py
+ pinned: false
+ duplicated_from: fcakyon/sahi-yolov5
+ ---
+
+ # Configuration
+
+ `title`: _string_
+ Display title for the Space
+
+ `emoji`: _string_
+ Space emoji (emoji-only character allowed)
+
+ `colorFrom`: _string_
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+ `colorTo`: _string_
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+ `sdk`: _string_
+ Can be either `gradio` or `streamlit`
+
+ `sdk_version`: _string_
+ Only applicable for `streamlit` SDK.
+ See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
+
+ `app_file`: _string_
+ Path to your main application file (which contains either `gradio` or `streamlit` Python code).
+ Path is relative to the root of the repository.
+
+ `pinned`: _boolean_
+ Whether the Space stays on top of your list.
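For reference, these documented keys combine into a single YAML front matter block at the top of README.md, like the one this file opens with. A minimal sketch (values are illustrative, not this Space's actual config; `sdk_version` is shown only to illustrate the `streamlit` case):

```yaml
---
title: My Demo Space       # display title
emoji: 🔭                  # thumbnail emoji
colorFrom: pink            # thumbnail gradient start
colorTo: yellow            # thumbnail gradient end
sdk: streamlit             # gradio or streamlit
sdk_version: 1.10.0        # streamlit only; illustrative version
app_file: app.py           # entry point, relative to repo root
pinned: false              # whether the Space stays atop your list
---
```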
app.py ADDED
@@ -0,0 +1,142 @@
+ import gradio as gr
+ import sahi.utils.cv
+ import sahi.utils.file
+ from sahi import AutoDetectionModel
+ import sahi.predict
+ import sahi.slicing
+ from PIL import Image
+ import numpy
+
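+ # inference image size passed to the YOLOv5 model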
+ IMAGE_SIZE = 640
+
+ # Images
+ sahi.utils.file.download_from_url(
+     "https://user-images.githubusercontent.com/34196005/142730935-2ace3999-a47b-49bb-83e0-2bdd509f1c90.jpg",
+     "apple_tree.jpg",
+ )
+ sahi.utils.file.download_from_url(
+     "https://user-images.githubusercontent.com/34196005/142730936-1b397756-52e5-43be-a949-42ec0134d5d8.jpg",
+     "highway.jpg",
+ )
+ sahi.utils.file.download_from_url(
+     "https://user-images.githubusercontent.com/34196005/142742871-bf485f84-0355-43a3-be86-96b44e63c3a2.jpg",
+     "highway2.jpg",
+ )
+ sahi.utils.file.download_from_url(
+     "https://user-images.githubusercontent.com/34196005/142742872-1fefcc4d-d7e6-4c43-bbb7-6b5982f7e4ba.jpg",
+     "highway3.jpg",
+ )
+
+
+ # Model
+ model = AutoDetectionModel.from_pretrained(
+     model_type="yolov5", model_path="yolov5s6.pt", device="cpu", confidence_threshold=0.5, image_size=IMAGE_SIZE
+ )
+
+
+ def sahi_yolo_inference(
+     image,
+     slice_height=512,
+     slice_width=512,
+     overlap_height_ratio=0.2,
+     overlap_width_ratio=0.2,
+     postprocess_type="GREEDYNMM",
+     postprocess_match_metric="IOS",
+     postprocess_match_threshold=0.5,
+     postprocess_class_agnostic=False,
+ ):
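+     # compute the slice grid up front so the slice count can be bounded;
+     # each slice is a separate model pass, so too many would be slow on CPU Spaces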
+     image_width, image_height = image.size
+     sliced_bboxes = sahi.slicing.get_slice_bboxes(
+         image_height,
+         image_width,
+         slice_height,
+         slice_width,
+         False,
+         overlap_height_ratio,
+         overlap_width_ratio,
+     )
+     if len(sliced_bboxes) > 60:
+         raise ValueError(
+             f"{len(sliced_bboxes)} slices are too many for Hugging Face Spaces; try a larger slice size."
+         )
+
+     # standard inference
+     prediction_result_1 = sahi.predict.get_prediction(
+         image=image, detection_model=model
+     )
+     visual_result_1 = sahi.utils.cv.visualize_object_predictions(
+         image=numpy.array(image),
+         object_prediction_list=prediction_result_1.object_prediction_list,
+     )
+     output_1 = Image.fromarray(visual_result_1["image"])
+
+     # sliced inference
+     prediction_result_2 = sahi.predict.get_sliced_prediction(
+         image=image,
+         detection_model=model,
+         slice_height=int(slice_height),
+         slice_width=int(slice_width),
+         overlap_height_ratio=overlap_height_ratio,
+         overlap_width_ratio=overlap_width_ratio,
+         postprocess_type=postprocess_type,
+         postprocess_match_metric=postprocess_match_metric,
+         postprocess_match_threshold=postprocess_match_threshold,
+         postprocess_class_agnostic=postprocess_class_agnostic,
+     )
+     visual_result_2 = sahi.utils.cv.visualize_object_predictions(
+         image=numpy.array(image),
+         object_prediction_list=prediction_result_2.object_prediction_list,
+     )
+
+     output_2 = Image.fromarray(visual_result_2["image"])
+
+     return output_1, output_2
+
+
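+ # Gradio widgets mirror sahi's slicing/postprocess parameters. GREEDYNMM greedily
+ # merges overlapping detections across slices; IOS matches boxes by
+ # intersection-over-smaller-area, IOU by intersection-over-union.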
+ inputs = [
+     gr.inputs.Image(type="pil", label="Original Image"),
+     gr.inputs.Number(default=512, label="slice_height"),
+     gr.inputs.Number(default=512, label="slice_width"),
+     gr.inputs.Number(default=0.2, label="overlap_height_ratio"),
+     gr.inputs.Number(default=0.2, label="overlap_width_ratio"),
+     gr.inputs.Dropdown(
+         ["NMS", "GREEDYNMM"],
+         type="value",
+         default="GREEDYNMM",
+         label="postprocess_type",
+     ),
+     gr.inputs.Dropdown(
+         ["IOU", "IOS"], type="value", default="IOS", label="postprocess_match_metric"
+     ),
+     gr.inputs.Number(default=0.5, label="postprocess_match_threshold"),
+     gr.inputs.Checkbox(default=True, label="postprocess_class_agnostic"),
+ ]
+
+ outputs = [
+     gr.outputs.Image(type="pil", label="YOLOv5s"),
+     gr.outputs.Image(type="pil", label="YOLOv5s + SAHI"),
+ ]
+
+ title = "Small Object Detection with SAHI + YOLOv5"
+ description = "SAHI + YOLOv5 demo for small object detection. Upload your own image or click one of the examples to run it."
+ article = "<p style='text-align: center'>SAHI is a lightweight vision library for performing large-scale object detection and instance segmentation. <a href='https://github.com/obss/sahi'>SAHI Github</a> | <a href='https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80'>SAHI Blog</a> | <a href='https://github.com/fcakyon/yolov5-pip'>YOLOv5 Github</a></p>"
+ examples = [
+     ["apple_tree.jpg", 256, 256, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
+     ["highway.jpg", 256, 256, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
+     ["highway2.jpg", 512, 512, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
+     ["highway3.jpg", 512, 512, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
+ ]
+
+ gr.Interface(
+     sahi_yolo_inference,
+     inputs,
+     outputs,
+     title=title,
+     description=description,
+     article=article,
+     examples=examples,
+     theme="huggingface",
+ ).launch(debug=True, enable_queue=True)
packages.txt ADDED
@@ -0,0 +1,3 @@
+ ffmpeg
+ libsm6
+ libxext6
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ torch==1.10.2+cpu
+ torchvision==0.11.3+cpu
+ -f https://download.pytorch.org/whl/torch_stable.html
+ yolov5==6.2.3
+ sahi==0.11.1