kadirnar committed
Commit 9c5ce29
1 Parent(s): e271ca3

Added yolov8 model

Files changed (7)
  1. README.md +1 -1
  2. app.py +72 -82
  3. data/26.jpg +3 -0
  4. data/27.jpg +3 -0
  5. data/28.jpg +3 -0
  6. data/31.jpg +3 -0
  7. requirements.txt +4 -4
README.md CHANGED
@@ -6,7 +6,7 @@ colorTo: yellow
 sdk: gradio
 app_file: app.py
 pinned: false
-duplicated_from: fcakyon/sahi-yolov5
+duplicated_from: deprem-ml/deprem_satellite_test
 license: openrail
 ---
 
app.py CHANGED
@@ -1,34 +1,20 @@
-import gradio as gr
-import sahi.utils
-from sahi import AutoDetectionModel
-import sahi.predict
-import sahi.slicing
+from sahi import utils, predict, AutoDetectionModel
 from PIL import Image
+import gradio as gr
 import numpy
-from huggingface_hub import hf_hub_download
 import torch
 
 
-IMAGE_SIZE = 640
-
-model_id = 'deprem-ml/Binafarktespit-yolo5x-v1-xview'
-
-
+model_id_list = ['deprem-ml/Binafarktespit-yolo5x-v1-xview', 'SerdarHelli/deprem_satellite_labeled_yolov8']
 current_device = "cuda" if torch.cuda.is_available() else "cpu"
-model_types = ["YOLOv5", "YOLOv5 + SAHI"]
-# Model
-model = AutoDetectionModel.from_pretrained(
-    model_type="yolov5",
-    model_path=model_id,
-    device=current_device,
-    confidence_threshold=0.5,
-    image_size=IMAGE_SIZE,
-)
-
+model_types = ["YOLOv5", "YOLOv5 + SAHI", "YOLOv8"]
 
-def sahi_yolo_inference(
-    model_type,
+def sahi_yolov5_inference(
     image,
+    model_id,
+    model_type,
+    image_size,
     slice_height=512,
     slice_width=512,
     overlap_height_ratio=0.1,
@@ -39,26 +25,43 @@ def sahi_yolo_inference(
     postprocess_class_agnostic=False,
 ):
 
-    # image_width, image_height = image.size
-    # sliced_bboxes = sahi.slicing.get_slice_bboxes(
-    #     image_height,
-    #     image_width,
-    #     slice_height,
-    #     slice_width,
-    #     False,
-    #     overlap_height_ratio,
-    #     overlap_width_ratio,
-    # )
-    # if len(sliced_bboxes) > 60:
-    #     raise ValueError(
-    #         f"{len(sliced_bboxes)} slices are too much for huggingface spaces, try smaller slice size."
-    #     )
-
     rect_th = None or max(round(sum(image.size) / 2 * 0.0001), 1)
     text_th = None or max(rect_th - 2, 1)
+
+    if model_type == "YOLOv5":
+        # standard inference
+        model = AutoDetectionModel.from_pretrained(
+            model_type="yolov5",
+            model_path=model_id,
+            device=current_device,
+            confidence_threshold=0.5,
+            image_size=image_size,
+        )
+
+        prediction_result_1 = predict.get_prediction(
+            image=image, detection_model=model
+        )
+
+        visual_result_1 = utils.cv.visualize_object_predictions(
+            image=numpy.array(image),
+            object_prediction_list=prediction_result_1.object_prediction_list,
+            rect_th=rect_th,
+            text_th=text_th,
+        )
+
+        output = Image.fromarray(visual_result_1["image"])
+        return output
 
-    if "SAHI" in model_type:
-        prediction_result_2 = sahi.predict.get_sliced_prediction(
+    elif model_type == "YOLOv5 + SAHI":
+        model = AutoDetectionModel.from_pretrained(
+            model_type="yolov5",
+            model_path=model_id,
+            device=current_device,
+            confidence_threshold=0.5,
+            image_size=image_size,
+        )
+
+        prediction_result_2 = predict.get_sliced_prediction(
             image=image,
             detection_model=model,
             slice_height=int(slice_height),
@@ -70,54 +73,38 @@
             postprocess_match_threshold=postprocess_match_threshold,
             postprocess_class_agnostic=postprocess_class_agnostic,
         )
-        visual_result_2 = sahi.utils.cv.visualize_object_predictions(
+
+        visual_result_2 = utils.cv.visualize_object_predictions(
             image=numpy.array(image),
             object_prediction_list=prediction_result_2.object_prediction_list,
             rect_th=rect_th,
             text_th=text_th,
         )
+
         output = Image.fromarray(visual_result_2["image"])
         return output
 
-    else:
-        # standard inference
-        prediction_result_1 = sahi.predict.get_prediction(
-            image=image, detection_model=model
-        )
-        print(image)
-        visual_result_1 = sahi.utils.cv.visualize_object_predictions(
-            image=numpy.array(image),
-            object_prediction_list=prediction_result_1.object_prediction_list,
-            rect_th=rect_th,
-            text_th=text_th,
-        )
-        output = Image.fromarray(visual_result_1["image"])
-        return output
-
-    # sliced inference
+    elif model_type == "YOLOv8":
+        from ultralyticsplus import YOLO, render_result
+
+        model = YOLO('SerdarHelli/deprem_satellite_labeled_yolov8')
+        result = model.predict(image, imgsz=image_size)[0]
+        render = render_result(model=model, image=image, result=result, rect_th=rect_th, text_th=text_th)
+        return render
 
 
 inputs = [
-    gr.Dropdown(
-        choices=model_types,
-        label="Choose Model Type",
-        type="value",
-        value=model_types[1],
-    ),
     gr.Image(type="pil", label="Original Image"),
-    gr.Number(default=512, label="slice_height"),
-    gr.Number(default=512, label="slice_width"),
-    gr.Number(default=0.1, label="overlap_height_ratio"),
-    gr.Number(default=0.1, label="overlap_width_ratio"),
-    gr.Dropdown(
-        ["NMS", "GREEDYNMM"],
-        type="value",
-        value="NMS",
-        label="postprocess_type",
-    ),
-    gr.Dropdown(["IOU", "IOS"], type="value", value="IOU", label="postprocess_type"),
-    gr.Number(value=0.5, label="postprocess_match_threshold"),
-    gr.Checkbox(value=True, label="postprocess_class_agnostic"),
+    gr.Dropdown(choices=model_id_list, label="Choose Model", value=model_id_list[0]),
+    gr.Dropdown(choices=model_types, label="Choose Model Type", value=model_types[1]),
+    gr.Slider(minimum=128, maximum=2048, value=640, step=32, label="Image Size"),
+    gr.Slider(minimum=128, maximum=2048, value=512, step=32, label="Slice Height"),
+    gr.Slider(minimum=128, maximum=2048, value=512, step=32, label="Slice Width"),
+    gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.1, label="Overlap Height Ratio"),
+    gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.1, label="Overlap Width Ratio"),
+    gr.Dropdown(["NMS", "GREEDYNMM"], type="value", value="NMS", label="Postprocess Type"),
+    gr.Dropdown(["IOU", "IOS"], type="value", value="IOU", label="Postprocess Type"),
+    gr.Slider(minimum=0.0, maximum=1.0, value=0.5, step=0.1, label="Postprocess Match Threshold"),
+    gr.Checkbox(value=True, label="Postprocess Class Agnostic"),
 ]
 
 outputs = [gr.outputs.Image(type="pil", label="Output")]
@@ -126,13 +113,14 @@ title = "Building Detection from Satellite Images with SAHI + YOLOv5"
 description = "SAHI + YOLOv5 demo for building detection from satellite images. Upload an image or click an example image to use."
 article = "<p style='text-align: center'>SAHI is a lightweight vision library for performing large scale object detection/ instance segmentation.. <a href='https://github.com/obss/sahi'>SAHI Github</a> | <a href='https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80'>SAHI Blog</a> | <a href='https://github.com/fcakyon/yolov5-pip'>YOLOv5 Github</a> </p>"
 examples = [
-    [model_types[1], "26.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
-    [model_types[1], "27.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
-    [model_types[1], "28.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
-    [model_types[1], "31.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
+    ["data/26.jpg", 'deprem-ml/Binafarktespit-yolo5x-v1-xview', "YOLOv5 + SAHI", 640, 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
+    ["data/27.jpg", 'deprem-ml/Binafarktespit-yolo5x-v1-xview', "YOLOv5 + SAHI", 640, 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
+    ["data/28.jpg", 'deprem-ml/Binafarktespit-yolo5x-v1-xview', "YOLOv5 + SAHI", 640, 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
+    ["data/31.jpg", 'deprem-ml/SerdarHelli-yolov8-v1-xview', "YOLOv8", 640, 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
 ]
-gr.Interface(
-    sahi_yolo_inference,
+
+demo = gr.Interface(
+    sahi_yolov5_inference,
     inputs,
     outputs,
     title=title,
@@ -141,4 +129,6 @@ gr.Interface(
     examples=examples,
     theme="huggingface",
     cache_examples=True,
-).launch(debug=True, enable_queue=True)
+)
+
+demo.launch(debug=True, enable_queue=True)
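
For reference, the following is a minimal standalone sketch (not part of this commit) of what the new "YOLOv5 + SAHI" branch does, so it can be exercised outside Gradio. It uses only the SAHI calls that app.py itself uses, mirrors the values from the first example row, and assumes the dependency versions pinned in requirements.txt; the postprocess keyword names follow SAHI's get_sliced_prediction API.

# Sketch only: standalone equivalent of the "YOLOv5 + SAHI" branch added above.
import numpy
import torch
from PIL import Image
from sahi import utils, predict, AutoDetectionModel

device = "cuda" if torch.cuda.is_available() else "cpu"

# Same YOLOv5 weights and confidence threshold as the app.
model = AutoDetectionModel.from_pretrained(
    model_type="yolov5",
    model_path="deprem-ml/Binafarktespit-yolo5x-v1-xview",
    device=device,
    confidence_threshold=0.5,
    image_size=640,
)

image = Image.open("data/26.jpg")  # one of the example images added in this commit

# Sliced inference: SAHI tiles the large satellite image into 512x512 crops with 10% overlap,
# runs the detector on each crop, then merges the per-crop detections with NMS.
result = predict.get_sliced_prediction(
    image=image,
    detection_model=model,
    slice_height=512,
    slice_width=512,
    overlap_height_ratio=0.1,
    overlap_width_ratio=0.1,
    postprocess_type="NMS",
    postprocess_match_metric="IOU",
    postprocess_match_threshold=0.25,
    postprocess_class_agnostic=False,
)

# Draw the merged detections back onto the full image and save the result.
visual = utils.cv.visualize_object_predictions(
    image=numpy.array(image),
    object_prediction_list=result.object_prediction_list,
)
Image.fromarray(visual["image"]).save("26_prediction.jpg")
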
data/26.jpg ADDED

Git LFS Details

  • SHA256: d4c76c6cbf981506552cf5d636acec3ac8b24a73b98bba763595821890047f2f
  • Pointer size: 132 Bytes
  • Size of remote file: 5.92 MB
data/27.jpg ADDED

Git LFS Details

  • SHA256: 39d0e6cc52722f11d5150684413954689918b3b593e3fd86704fa43e54d4d446
  • Pointer size: 132 Bytes
  • Size of remote file: 4.63 MB
data/28.jpg ADDED

Git LFS Details

  • SHA256: 193aa658ffdb0ee417d47bd7e25d078e9596f6d564def6fa97ad0ca12932eaec
  • Pointer size: 132 Bytes
  • Size of remote file: 4.19 MB
data/31.jpg ADDED

Git LFS Details

  • SHA256: cfd13bde54acf2974d717853ca63f1009897ac9491166ede0ac6b21e170ae26d
  • Pointer size: 132 Bytes
  • Size of remote file: 5.53 MB
requirements.txt CHANGED
@@ -1,5 +1,5 @@
-torch==1.10.2+cpu
-torchvision==0.11.3+cpu
--f https://download.pytorch.org/whl/torch_stable.html
+torch==1.10.2
+torchvision==0.11.3
 yolov5==7.0.8
-sahi==0.11.11
+sahi==0.11.11
+pip install git+https://github.com/fcakyon/ultralyticsplus.git
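
One caveat on the new requirements.txt: pip parses each line of a requirements file as a requirement specifier, so the literal line "pip install git+https://github.com/fcakyon/ultralyticsplus.git" will be rejected when the Space builds its environment. A form pip does accept in a requirements file (a suggested fix, not what this commit ships) is the bare VCS URL:

torch==1.10.2
torchvision==0.11.3
yolov5==7.0.8
sahi==0.11.11
git+https://github.com/fcakyon/ultralyticsplus.git
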