anand-luffy committed
Commit 655dd8f
1 Parent(s): eb4de61

initial commit

Files changed (3)
  1. .gitingore +7 -0
  2. app.py +101 -0
  3. requirements.txt +47 -0
.gitingore ADDED
@@ -0,0 +1,7 @@
+ flagged/
+ *.pt
+ *.png
+ *.jpg
+ *.mp4
+ *.mkv
+ gradio_cached_examples/
app.py ADDED
@@ -0,0 +1,101 @@
+ import gradio as gr
+ import cv2
+ import requests
+ import os
+
+ from ultralytics import YOLO
+
+ file_urls = [
+     'https://www.dropbox.com/s/b5g97xo901zb3ds/pothole_example.jpg?dl=1',
+     'https://www.dropbox.com/s/86uxlxxlm1iaexa/pothole_screenshot.png?dl=1',
+     'https://www.dropbox.com/s/7sjfwncffg8xej2/video_7.mp4?dl=1'
+ ]
+
+ def download_file(url, save_name):
+     url = url
+     if not os.path.exists(save_name):
+         file = requests.get(url)
+         open(save_name, 'wb').write(file.content)
+
+ for i, url in enumerate(file_urls):
+     if 'mp4' in file_urls[i]:
+         download_file(
+             file_urls[i],
+             f"video.mp4"
+         )
+     else:
+         download_file(
+             file_urls[i],
+             f"image_{i}.jpg"
+         )
+ model = YOLO('best.pt')
+ path = [['image_0.jpg'], ['image_1.jpg']]
+ video_path = [['video.mp4']]
+
+ def show_preds_image(image_path):
+     image = cv2.imread(image_path)
+     outputs = model.predict(source=image_path)
+     results = outputs[0].cpu().numpy()
+     for i, det in enumerate(results.boxes.xyxy):
+         cv2.rectangle(
+             image,
+             (int(det[0]), int(det[1])),
+             (int(det[2]), int(det[3])),
+             color=(0, 0, 255),
+             thickness=2,
+             lineType=cv2.LINE_AA
+         )
+     return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+ inputs_image = [
+     gr.components.Image(type="filepath", label="Input Image"),
+ ]
+ outputs_image = [
+     gr.components.Image(type="numpy", label="Output Image"),
+ ]
+ interface_image = gr.Interface(
+     fn=show_preds_image,
+     inputs=inputs_image,
+     outputs=outputs_image,
+     title="Pothole detector",
+     examples=path,
+     cache_examples=False,
+ )
+ def show_preds_video(video_path):
+     cap = cv2.VideoCapture(video_path)
+     while(cap.isOpened()):
+         ret, frame = cap.read()
+         if ret:
+             frame_copy = frame.copy()
+             outputs = model.predict(source=frame)
+             results = outputs[0].cpu().numpy()
+             for i, det in enumerate(results.boxes.xyxy):
+                 cv2.rectangle(
+                     frame_copy,
+                     (int(det[0]), int(det[1])),
+                     (int(det[2]), int(det[3])),
+                     color=(0, 0, 255),
+                     thickness=2,
+                     lineType=cv2.LINE_AA
+                 )
+             yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
+
+ inputs_video = [
+     gr.components.Video(type="filepath", label="Input Video"),
+
+ ]
+ outputs_video = [
+     gr.components.Image(type="numpy", label="Output Image"),
+ ]
+ interface_video = gr.Interface(
+     fn=show_preds_video,
+     inputs=inputs_video,
+     outputs=outputs_video,
+     title="Pothole detector",
+     examples=video_path,
+     cache_examples=False,
+ )
+ gr.TabbedInterface(
+     [interface_image, interface_video],
+     tab_names=['Image inference', 'Video inference']
+ ).queue().launch()
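
Note on app.py (not part of the committed file): the read loop in show_preds_video has no exit path. Once the uploaded clip is exhausted, cap.read() keeps returning False while cap.isOpened() stays true, so the generator spins without yielding further frames. The image and video handlers also repeat the same box-drawing loop. Below is a minimal sketch of one way to address both, reusing only the Ultralytics and OpenCV calls already present in app.py; the draw_boxes helper is a hypothetical name, not something the commit defines.

import cv2
from ultralytics import YOLO

model = YOLO('best.pt')  # same weights file app.py loads

def draw_boxes(frame, results, color=(0, 0, 255)):
    # results.boxes.xyxy is the (N, 4) array of pixel-space box corners
    # that both handlers in app.py already iterate over.
    for det in results.boxes.xyxy:
        cv2.rectangle(
            frame,
            (int(det[0]), int(det[1])),
            (int(det[2]), int(det[3])),
            color=color,
            thickness=2,
            lineType=cv2.LINE_AA,
        )
    return frame

def show_preds_video(video_path):
    cap = cv2.VideoCapture(video_path)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break  # stop once the clip ends instead of looping forever
        results = model.predict(source=frame)[0].cpu().numpy()
        yield cv2.cvtColor(draw_boxes(frame.copy(), results), cv2.COLOR_BGR2RGB)
    cap.release()
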
requirements.txt ADDED
@@ -0,0 +1,47 @@
+ # Ultralytics requirements
+ # Usage: pip install -r requirements.txt
+
+ # Base ----------------------------------------
+ hydra-core>=1.2.0
+ matplotlib>=3.2.2
+ numpy>=1.18.5
+ opencv-python>=4.1.1
+ Pillow>=7.1.2
+ PyYAML>=5.3.1
+ requests>=2.23.0
+ scipy>=1.4.1
+ torch>=1.7.0
+ torchvision>=0.8.1
+ tqdm>=4.64.0
+ ultralytics
+
+ # Logging -------------------------------------
+ tensorboard>=2.4.1
+ # clearml
+ # comet
+
+ # Plotting ------------------------------------
+ pandas>=1.1.4
+ seaborn>=0.11.0
+
+ # Export --------------------------------------
+ # coremltools>=6.0  # CoreML export
+ # onnx>=1.12.0  # ONNX export
+ # onnx-simplifier>=0.4.1  # ONNX simplifier
+ # nvidia-pyindex  # TensorRT export
+ # nvidia-tensorrt  # TensorRT export
+ # scikit-learn==0.19.2  # CoreML quantization
+ # tensorflow>=2.4.1  # TF exports (-cpu, -aarch64, -macos)
+ # tensorflowjs>=3.9.0  # TF.js export
+ # openvino-dev  # OpenVINO export
+
+ # Extras --------------------------------------
+ ipython  # interactive notebook
+ psutil  # system utilization
+ thop>=0.1.1  # FLOPs computation
+ # albumentations>=1.0.3
+ # pycocotools>=2.0.6  # COCO mAP
+ # roboflow
+
+ # HUB -----------------------------------------
+ GitPython>=3.1.24