Future-Tense committed on
Commit c57adfe
1 Parent(s): 2b463fa

Update app.py

Files changed (1)
  1. app.py +24 -1
app.py CHANGED
@@ -1,6 +1,8 @@
  import cv2 # opencv2 package for python.
  import torch
  from pytube import YouTube
+ from ultralyticsplus import YOLO, render_result
+
  #from torch import hub # Hub contains other models like FasterRCNN

  URL = "https://www.youtube.com/watch?v=dQw4w9WgXcQ" #URL to parse
@@ -17,7 +19,27 @@ def load():
  stream = cv2.VideoCapture(vid)
  return vid_cap

- model = torch.hub.load('ultralytics/yolov5','yolov5s',pretrained=True)
+
+ # load model
+ model = YOLO('ultralyticsplus/yolov8s')
+
+ # set model parameters
+ model.overrides['conf'] = 0.25 # NMS confidence threshold
+ model.overrides['iou'] = 0.45 # NMS IoU threshold
+ model.overrides['agnostic_nms'] = False # NMS class-agnostic
+ model.overrides['max_det'] = 1000 # maximum number of detections per image
+
+ # set image
+ image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg'
+
+ # perform inference
+ results = model.predict(image)
+
+ # observe results
+ print(results[0].boxes)
+ render = render_result(model=model, image=image, result=results[0])
+ render.show()
+ '''

  """
  The function below identifies the device which is available to make the prediction and uses it to load and infer the frame. Once it has results it will extract the labels and coordinates (along with scores) for each object detected in the frame.
@@ -84,3 +106,4 @@ def __call__(self):
  ret, frame = player.read() # Read next frame.


+ '''
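
For context, here is a minimal sketch of the frame-scoring step that the docstring above describes, assuming the torch.hub YOLOv5 model used before this commit. The function name score_frame is hypothetical; the actual function sits outside the hunks shown in this diff.

import cv2
import torch

# Assumed setup, mirroring the pre-commit code: a YOLOv5s model loaded via torch.hub.
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
device = 'cuda' if torch.cuda.is_available() else 'cpu'  # use whichever device is available
model.to(device)

def score_frame(frame):
    """Hypothetical helper: run the model on one BGR frame and return labels plus
    normalized box coordinates with confidence scores."""
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV frames are BGR; the hub model expects RGB
    results = model([rgb])
    detections = results.xyxyn[0]                 # tensor of shape (N, 6): x1, y1, x2, y2, score, class
    labels = detections[:, -1].int()
    coords = detections[:, :-1]                   # box corners plus the confidence score
    return labels, coords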
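And a hedged sketch of how the ultralyticsplus model added by this commit might be applied to frames read from cv2.VideoCapture instead of the remote test image. The loop below is illustrative only and not part of the commit; the video path is a placeholder for the stream the app resolves via pytube.

import cv2
from ultralyticsplus import YOLO

model = YOLO('ultralyticsplus/yolov8s')
model.overrides['conf'] = 0.25  # same NMS confidence threshold the commit sets

player = cv2.VideoCapture('video.mp4')  # placeholder path; the app would pass the resolved stream URL
while True:
    ret, frame = player.read()  # read next frame
    if not ret:
        break
    results = model.predict(frame, verbose=False)  # ultralytics accepts numpy BGR frames directly
    boxes = results[0].boxes                       # xyxy coordinates, confidences, class ids
    print(len(boxes), 'objects detected')
player.release()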