Spaces · Runtime error

Bhaskar Saranga committed · Commit 8d6560f · 1 Parent(s): 0436107

Added yolo v8 model

Files changed:
- app.py (+36 -11)
- requirements.txt (+2 -1)
- weights/yolov8m.pt (+3 -0)
app.py CHANGED

@@ -10,8 +10,7 @@ from utils.general import check_img_size, non_max_suppression, \
 from utils.plots import plot_one_box
 from utils.torch_utils import time_synchronized
 import time
-
-
+from ultralytics import YOLO
 
 def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleup=True, stride=32):
     # Resize and pad image while meeting stride-multiple constraints
@@ -71,7 +70,7 @@ colors = {
 }
 
 
-def detect(img,model,device,iou_threshold=0.45,confidence_threshold=0.25):
+def detectv7(img,model,device,iou_threshold=0.45,confidence_threshold=0.25):
     imgsz = 640
     img = np.array(img)
     stride = int(model.stride.max())  # model stride
@@ -117,13 +116,33 @@ def detect(img,model,device,iou_threshold=0.45,confidence_threshold=0.25):
 
     return imgs,fps_inference
 
+def detectv8(img,model,device,iou_threshold=0.45,confidence_threshold=0.25):
+
+    # Inference
+    t1 = time_synchronized()
+    start = time.time()
+    results= model.predict(img,conf=confidence_threshold, iou=iou_threshold)
+    fps_inference = 1/(time.time()-start)
+
+    boxes=results[0].boxes.numpy()
+    for bbox in boxes:
+        #print(f'{colors[names[int(bbox.cls[0])]]}')
+        label = f'{names[int(bbox.cls[0])]} {bbox.conf[0]:.2f}'
+        plot_one_box(bbox.xyxy[0],img,colors[names[int(bbox.cls[0])]],label, line_thickness=1)
+
+    return img,fps_inference
+
 def inference(img,model_link,iou_threshold,confidence_threshold):
     print(model_link)
     device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
     # Load model
     model_path = 'weights/'+str(model_link)+'.pt'
-    model = attempt_load(model_path, map_location=device)
-    return detect(img,model,device,iou_threshold,confidence_threshold)
+    if model_link== 'yolov8m':
+        model = YOLO(model_path)
+        return detectv8(img,model,device,iou_threshold,confidence_threshold)
+    else:
+        model = attempt_load(model_path, map_location=device)
+        return detectv7(img,model,device,iou_threshold,confidence_threshold)
 
 
 def inference2(video,model_link,iou_threshold,confidence_threshold):
@@ -131,7 +150,10 @@ def inference2(video,model_link,iou_threshold,confidence_threshold):
     device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
     # Load model
     model_path = 'weights/'+str(model_link)+'.pt'
-    model = attempt_load(model_path, map_location=device)
+    if model_link== 'yolov8m':
+        model = YOLO(model_path)
+    else:
+        model = attempt_load(model_path, map_location=device)
     frames = cv2.VideoCapture(video)
     fps = frames.get(cv2.CAP_PROP_FPS)
     image_size = (int(frames.get(cv2.CAP_PROP_FRAME_WIDTH)),int(frames.get(cv2.CAP_PROP_FRAME_HEIGHT)))
@@ -141,7 +163,10 @@ def inference2(video,model_link,iou_threshold,confidence_threshold):
         ret,frame = frames.read()
         if not ret:
             break
-        frame,fps = detect(frame,model,device,iou_threshold,confidence_threshold)
+        if model_link== 'yolov8m':
+            frame,fps = detectv8(frame,model,device,iou_threshold,confidence_threshold)
+        else:
+            frame,fps = detectv7(frame,model,device,iou_threshold,confidence_threshold)
         fps_video.append(fps)
         finalVideo.write(frame)
     frames.release()
@@ -156,12 +181,12 @@ examples_images = ['data/images/1.jpg',
                    'data/images/3.jpg']
 examples_videos = ['data/video/1.mp4','data/video/2.mp4']
 
-models = ['yolov7','yolov7t']
+models = ['yolov8m','yolov7','yolov7t']
 
 with gr.Blocks() as demo:
-    gr.Markdown("## …
+    gr.Markdown("## IDD Inference on Yolo V7 and V8 ")
     with gr.Tab("Image"):
-        gr.Markdown("## …
+        gr.Markdown("## Yolo V7 and V8 Inference on Image")
         with gr.Row():
             image_input = gr.Image(type='pil', label="Input Image", source="upload")
             image_output = gr.Image(type='pil', label="Output Image", source="upload")
@@ -172,7 +197,7 @@ with gr.Blocks() as demo:
         gr.Examples(examples=examples_images,inputs=image_input,outputs=image_output)
         text_button = gr.Button("Detect")
     with gr.Tab("Video"):
-        gr.Markdown("## …
+        gr.Markdown("## Yolo V7 and V8 Inference on Video")
         with gr.Row():
             video_input = gr.Video(type='pil', label="Input Video", source="upload")
             video_output = gr.Video(type="pil", label="Output Video",format="mp4")
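The new detectv8 path relies on the public ultralytics API: YOLO(model_path) loads a .pt checkpoint and model.predict(img, conf=..., iou=...) returns a list of Results whose boxes attribute carries xyxy coordinates, confidences, and class ids. Below is a minimal standalone sketch of that flow, not part of the commit: it assumes the yolov8m.pt weight added here and one of the Space's example images, and uses the same default thresholds as app.py.

# Sketch only (not part of the commit): exercise the YOLOv8 branch end to end.
# Assumes ultralytics >= 8.0.34 is installed and weights/yolov8m.pt is present.
import numpy as np
from PIL import Image
from ultralytics import YOLO

model = YOLO('weights/yolov8m.pt')               # same checkpoint the Space loads for 'yolov8m'
img = np.array(Image.open('data/images/1.jpg'))  # one of the Space's example images

# conf / iou correspond to confidence_threshold / iou_threshold in app.py (defaults 0.25 / 0.45)
results = model.predict(img, conf=0.25, iou=0.45)

boxes = results[0].boxes.cpu().numpy()           # xyxy, conf, cls as numpy arrays
for bbox in boxes:
    x1, y1, x2, y2 = bbox.xyxy[0]
    print(int(bbox.cls[0]), float(bbox.conf[0]), (x1, y1, x2, y2))

In app.py each box is instead handed to the repo's plot_one_box helper together with the names/colors tables; the print above just stands in for that drawing step.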
requirements.txt CHANGED

@@ -7,4 +7,5 @@ tqdm>=4.64.0
 seaborn>=0.11.0
 scipy>=1.4.1
 Pillow>=7.1.2
-huggingface-hub >= 0.11.0
+huggingface-hub >= 0.11.0
+ultralytics >=8.0.34
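The only new pin is ultralytics >=8.0.34, required for the "from ultralytics import YOLO" import added to app.py; huggingface-hub was already listed. A quick, hedged way to confirm the runtime meets both minimums (both packages expose __version__):

# Sketch only: confirm the environment satisfies the requirements.txt minimums.
import huggingface_hub
import ultralytics

print("ultralytics", ultralytics.__version__)           # requirements.txt asks for >= 8.0.34
print("huggingface_hub", huggingface_hub.__version__)   # requirements.txt asks for >= 0.11.0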
weights/yolov8m.pt ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54583fba52c0a7551abcd102f8f9652d196ca75e951b7b8a648f31eef7c8a7a9
+size 52018656
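The three added lines are a Git LFS pointer, not the model binary itself: the repository stores only the spec version, the SHA-256 of the real file (oid), and its size (52,018,656 bytes, roughly 52 MB), while the weight is pulled from LFS storage at clone or checkout time. A small, hedged sketch that checks a locally materialized weights/yolov8m.pt against the pointer recorded in this commit:

# Sketch only: verify a downloaded weights/yolov8m.pt against the LFS pointer above.
import hashlib
from pathlib import Path

EXPECTED_OID = "54583fba52c0a7551abcd102f8f9652d196ca75e951b7b8a648f31eef7c8a7a9"
EXPECTED_SIZE = 52018656  # bytes, from the pointer file

data = Path("weights/yolov8m.pt").read_bytes()
assert len(data) == EXPECTED_SIZE, f"size mismatch: {len(data)}"
assert hashlib.sha256(data).hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("yolov8m.pt matches the LFS pointer")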