Anh-Chan committed
Commit 70045c6
1 Parent(s): 3f90471

Update app.py

Files changed (1):
  1. app.py  +100 -54
app.py CHANGED
@@ -1,58 +1,104 @@
  import gradio as gr
- import torch
- from sahi.prediction import ObjectPrediction
- from sahi.utils.cv import visualize_object_predictions, read_image
- from ultralyticsplus import YOLO, render_result
-
-
- def yolov8_inference(
-     image,
-     model_path,
-     image_size,
-     conf_threshold,
-     iou_threshold,
- ):
-     """
-     YOLOv8 inference function
-     Args:
-         image: Input image
-         model_path: Path to the model
-         image_size: Image size
-         conf_threshold: Confidence threshold
-         iou_threshold: IOU threshold
-     Returns:
-         Rendered image
-     """
-     model = YOLO('yolov8l')
-     # set model parameters
-     model.overrides['conf'] = conf_threshold  # NMS confidence threshold
-     model.overrides['iou'] = iou_threshold  # NMS IoU threshold
-     model.overrides['agnostic_nms'] = False  # NMS class-agnostic
-     model.overrides['max_det'] = 1000  # maximum number of detections per image
-     results = model.predict(image, imgsz=image_size)
-     render = render_result(model=model, image=image, result=results[0])
-     return render
-
-
- inputs = [
-     gr.Image(type="filepath", label="Input Image"),
-     gr.Dropdown(["yolov8n", "yolov8m", "yolov8l", "yolov8x"],
-                 value="yolov8m", label="Model"),
-     gr.Slider(minimum=320, maximum=1280, value=640, step=320, label="Image Size"),
-     gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"),
-     gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold"),
  ]

- outputs = gr.Image(type="filepath", label="Output Image")
- title = "YOLOv8 Models for Object detection"
-
- # examples = [['demo_01.jpg', 'yolov8n', 640, 0.25, 0.45], ['demo_02.jpg', 'yolov8l', 640, 0.25, 0.45], ['demo_03.jpg', 'yolov8x', 1280, 0.25, 0.45]]
- demo_app = gr.Interface(
-     fn=yolov8_inference,
-     inputs=inputs,
-     outputs=outputs,
-     title=title,
-     # examples=examples,
-     cache_examples=True,
  )
- demo_app.launch(debug=True)
  import gradio as gr
+ import cv2
+ import requests
+ import os
+
+ from ultralytics import YOLO
+
+ file_urls = [
+     'https://www.dropbox.com/s/b5g97xo901zb3ds/pothole_example.jpg?dl=1',
+     'https://www.dropbox.com/s/86uxlxxlm1iaexa/pothole_screenshot.png?dl=1',
+     'https://www.dropbox.com/s/7sjfwncffg8xej2/video_7.mp4?dl=1'
  ]

+ def download_file(url, save_name):
+     url = url
+     if not os.path.exists(save_name):
+         file = requests.get(url)
+         open(save_name, 'wb').write(file.content)
+
+ for i, url in enumerate(file_urls):
+     if 'mp4' in file_urls[i]:
+         download_file(
+             file_urls[i],
+             f"video.mp4"
+         )
+     else:
+         download_file(
+             file_urls[i],
+             f"image_{i}.jpg"
+         )
+
+ model = YOLO('best.pt')
+ path = [['image_0.jpg'], ['image_1.jpg']]
+ video_path = [['video.mp4']]
+
+ def show_preds_image(image_path):
+     image = cv2.imread(image_path)
+     outputs = model.predict(source=image_path)
+     results = outputs[0].cpu().numpy()
+     for i, det in enumerate(results.boxes.xyxy):
+         cv2.rectangle(
+             image,
+             (int(det[0]), int(det[1])),
+             (int(det[2]), int(det[3])),
+             color=(0, 0, 255),
+             thickness=2,
+             lineType=cv2.LINE_AA
+         )
+     return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+ inputs_image = [
+     gr.components.Image(type="filepath", label="Input Image"),
+ ]
+ outputs_image = [
+     gr.components.Image(type="numpy", label="Output Image"),
+ ]
+ interface_image = gr.Interface(
+     fn=show_preds_image,
+     inputs=inputs_image,
+     outputs=outputs_image,
+     title="Pothole detector app",
+     examples=path,
+     cache_examples=False,
  )
+
+ def show_preds_video(video_path):
+     cap = cv2.VideoCapture(video_path)
+     while(cap.isOpened()):
+         ret, frame = cap.read()
+         if ret:
+             frame_copy = frame.copy()
+             outputs = model.predict(source=frame)
+             results = outputs[0].cpu().numpy()
+             for i, det in enumerate(results.boxes.xyxy):
+                 cv2.rectangle(
+                     frame_copy,
+                     (int(det[0]), int(det[1])),
+                     (int(det[2]), int(det[3])),
+                     color=(0, 0, 255),
+                     thickness=2,
+                     lineType=cv2.LINE_AA
+                 )
+             yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
+
+ inputs_video = [
+     gr.components.Video(type="filepath", label="Input Video"),
+
+ ]
+ outputs_video = [
+     gr.components.Image(type="numpy", label="Output Image"),
+ ]
+ interface_video = gr.Interface(
+     fn=show_preds_video,
+     inputs=inputs_video,
+     outputs=outputs_video,
+     title="Pothole detector",
+     examples=video_path,
+     cache_examples=False,
+ )
+
+ gr.TabbedInterface(
+     [interface_image, interface_video],
+     tab_names=['Image inference', 'Video inference']
+ ).queue().launch()
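
As a quick sanity check of the new image path outside the Space, a minimal sketch is shown below. It is not part of the commit: it assumes `best.pt` and `image_0.jpg` already exist in the working directory (for example after the download loop above has run) and that `ultralytics` and `opencv-python` are installed; the output file name `annotated.jpg` is illustrative only.

# Minimal local sketch (not part of the commit): run the same checkpoint and
# drawing logic as show_preds_image on one downloaded example image.
import cv2
from ultralytics import YOLO

model = YOLO('best.pt')                        # same weights the Space loads
result = model.predict(source='image_0.jpg')[0]

image = cv2.imread('image_0.jpg')
for box in result.boxes.xyxy.cpu().numpy():    # one (x1, y1, x2, y2) row per detection
    x1, y1, x2, y2 = box
    cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2)

cv2.imwrite('annotated.jpg', image)            # write the boxes out for inspection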