SerdarHelli committed
Commit 718b31c
Parent: af96e45

Update app.py

Files changed (1): app.py (+17, -10)
app.py CHANGED
@@ -15,7 +15,7 @@ model_path=hf_hub_download("kadirnar/deprem_model_v1", filename="last.pt",revisi
 
 
 current_device='cuda' if torch.cuda.is_available() else 'cpu'
-
+model_types=["YOLOv5","YOLOv5 + SAHI"]
 # Model
 model = AutoDetectionModel.from_pretrained(
     model_type="yolov5", model_path=model_path, device=current_device, confidence_threshold=0.5, image_size=IMAGE_SIZE
@@ -50,8 +50,9 @@ def sahi_yolo_inference(
     # f"{len(sliced_bboxes)} slices are too much for huggingface spaces, try smaller slice size."
     # )
 
-
 
+    rect_th = None or max(round(sum(image.size) / 2 * 0.001), 1)
+    text_th = None or max(rect_th - 1, 1)
 
     if "SAHI" in model_type:
         prediction_result_2 = sahi.predict.get_sliced_prediction(
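
The two added lines derive drawing thicknesses from the input image size: roughly 0.1% of the average of width and height for the box outline, never below 1 px, with label text one step thinner. (The leading "None or" is a no-op, since None or x evaluates to x.) A standalone sketch of the heuristic, with a hypothetical thickness_for helper; note that PIL's Image.size is (width, height):

from PIL import Image

def thickness_for(image: Image.Image) -> tuple[int, int]:
    # ~0.1% of the mean of width and height, clamped to >= 1 px
    rect_th = max(round(sum(image.size) / 2 * 0.001), 1)
    # label text one pixel thinner than the box outline, also >= 1 px
    text_th = max(rect_th - 1, 1)
    return rect_th, text_th

# e.g. a 1920x1080 image: (1920 + 1080) / 2 * 0.001 = 1.5 -> rect_th = 2, text_th = 1
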
@@ -69,8 +70,11 @@ def sahi_yolo_inference(
         visual_result_2 = sahi.utils.cv.visualize_object_predictions(
             image=numpy.array(image),
             object_prediction_list=prediction_result_2.object_prediction_list,
+            rect_th=rect_th,
+            text_th=text_th,
         )
         output = Image.fromarray(visual_result_2["image"])
+        return output
 
     else:
         # standard inference
@@ -81,17 +85,19 @@
         visual_result_1 = sahi.utils.cv.visualize_object_predictions(
             image=numpy.array(image),
             object_prediction_list=prediction_result_1.object_prediction_list,
+            rect_th=rect_th,
+            text_th=text_th,
         )
         output = Image.fromarray(visual_result_1["image"])
+        return output
 
         # sliced inference
 
 
-    return output
 
 
 inputs = [
-    gr.Dropdown(choices=["YOLOv5","YOLOv5 + SAHI"],label="Choose Model Type"),
+    gr.inputs.Dropdown(choices=model_types,label="Choose Model Type",type="value",),
     gr.inputs.Image(type="pil", label="Original Image"),
     gr.inputs.Number(default=512, label="slice_height"),
     gr.inputs.Number(default=512, label="slice_width"),
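
With the last two hunks, both branches end identically: pass the shared thicknesses to SAHI's visualizer and return a PIL image right away, instead of falling through to the single return output that used to sit at the bottom of the function. A sketch of that shared tail, assuming a SAHI prediction result named result from get_prediction or get_sliced_prediction (the render helper is hypothetical):

import numpy
from PIL import Image
from sahi.utils.cv import visualize_object_predictions

def render(image: Image.Image, result, rect_th: int, text_th: int) -> Image.Image:
    # visualize_object_predictions draws boxes/labels onto the array and
    # returns a dict whose "image" entry is the annotated numpy array
    visual = visualize_object_predictions(
        image=numpy.array(image),
        object_prediction_list=result.object_prediction_list,
        rect_th=rect_th,
        text_th=text_th,
    )
    return Image.fromarray(visual["image"])
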
@@ -118,12 +124,13 @@ title = "Small Object Detection with SAHI + YOLOv5"
 description = "SAHI + YOLOv5 demo for small object detection. Upload an image or click an example image to use."
 article = "<p style='text-align: center'>SAHI is a lightweight vision library for performing large scale object detection/ instance segmentation.. <a href='https://github.com/obss/sahi'>SAHI Github</a> | <a href='https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80'>SAHI Blog</a> | <a href='https://github.com/fcakyon/yolov5-pip'>YOLOv5 Github</a> </p>"
 examples = [
-    ["apple_tree.jpg", 256, 256, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
-    ["highway.jpg", 256, 256, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
-    ["highway2.jpg", 512, 512, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
-    ["highway3.jpg", 512, 512, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
-    ]
+    [model_types[1],"satellite_original.tif", 256, 256, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
+    [model_types[0],"26.jpg", 256, 256, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
+    [model_types[0],"27.jpg", 512, 512, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
+    [model_types[0],"28.jpg", 512, 512, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
+    [model_types[0],"31.jpg", 512, 512, 0.2, 0.2, "GREEDYNMM", "IOS", 0.5, True],
 
+    ]
 gr.Interface(
     sahi_yolo_inference,
     inputs,
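
Because the Dropdown is now the first entry in inputs, every examples row has to lead with a model_types value; Gradio maps example values to the input components positionally. A trimmed illustration using the legacy gr.inputs API the app already uses ("some_image.jpg" is a placeholder path):

import gradio as gr

model_types = ["YOLOv5", "YOLOv5 + SAHI"]

inputs = [
    gr.inputs.Dropdown(choices=model_types, type="value", label="Choose Model Type"),
    gr.inputs.Image(type="pil", label="Original Image"),
    gr.inputs.Number(default=512, label="slice_height"),
]

# one value per input component, in the same order as `inputs`
examples = [
    [model_types[1], "some_image.jpg", 256],
]
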
@@ -133,4 +140,4 @@ gr.Interface(
     article=article,
     examples=examples,
     theme="huggingface",
-).launch(debug=True, enable_queue=True)
+).launch(debug=True, enable_queue=True,live=False)