Future-Tense committed on
Commit 074d43b
0 Parent(s):

Duplicate from Future-Tense/test-space

Files changed (4):
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +173 -0
  4. requirements.txt +4 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Test Space
+ emoji: 📊
+ colorFrom: blue
+ colorTo: gray
+ sdk: gradio
+ sdk_version: 3.18.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: Future-Tense/test-space
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,173 @@
+ import gradio as gr
+ import time
+ import cv2  # opencv-python package.
+ import numpy as np
+ import torch
+ from pytube import YouTube
+ from ultralyticsplus import YOLO, render_result
+
+ model = YOLO('ultralyticsplus/yolov8s')
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+ URL = "https://www.youtube.com/watch?v=dQw4w9WgXcQ"  # Default URL to parse.
+
+ # Set model parameters.
+ model.overrides['conf'] = 0.25  # NMS confidence threshold
+ model.overrides['iou'] = 0.45  # NMS IoU threshold
+ model.overrides['agnostic_nms'] = False  # NMS class-agnostic
+ model.overrides['max_det'] = 1000  # maximum number of detections per image
+ model.to(device)
+
+ # Other possible sources instead of a YouTube download: a local camera
+ # (cv2.VideoCapture(0)) or an RTSP stream ("rtsp://username:password@IP/port").
+
+ """
+ The function below runs the model on a single frame and extracts the labels
+ and normalized box coordinates (along with confidence scores) for each object
+ detected in the frame.
+ """
+ def score_frame(frame):
+     result = model(frame)
+     boxes = result[0].boxes  # YOLOv8 Boxes object for the first image.
+     labels = boxes.cls.cpu().numpy()
+     # Each row: normalized [x1, y1, x2, y2] followed by the confidence score.
+     cord = torch.cat((boxes.xyxyn, boxes.conf.unsqueeze(1)), dim=1).cpu().numpy()
+     return labels, cord
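As a quick sanity check, `score_frame` can be exercised on a single frame grabbed with OpenCV. A minimal sketch (not part of the commit), assuming the definitions above are in scope; `sample.mp4` is a placeholder path, not a file in this Space:

```python
# Hypothetical smoke test for score_frame; "sample.mp4" is a placeholder path.
import cv2

cap = cv2.VideoCapture("sample.mp4")
ok, frame = cap.read()
cap.release()
if ok:
    labels, cord = score_frame(frame)
    # Each row of cord holds normalized [x1, y1, x2, y2] plus a confidence score.
    print(f"{len(labels)} detections, first row: {cord[:1]}")
```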
+ """
+ The function below takes the results and the frame as input and plots boxes
+ over all the objects which have a score higher than our threshold.
+ """
+ def plot_boxes(results, frame):
+     labels, cord = results
+     n = len(labels)
+     x_shape, y_shape = frame.shape[1], frame.shape[0]
+     for i in range(n):
+         row = cord[i]
+         # If the score is less than 0.2 we avoid making a prediction.
+         if row[4] < 0.2:
+             continue
+         # Scale the normalized coordinates back to pixels.
+         x1 = int(row[0] * x_shape)
+         y1 = int(row[1] * y_shape)
+         x2 = int(row[2] * x_shape)
+         y2 = int(row[3] * y_shape)
+         bgr = (0, 255, 0)  # Color of the box.
+         classes = model.names  # Map from label index to class name.
+         label_font = cv2.FONT_HERSHEY_SIMPLEX  # Font for the label.
+         cv2.rectangle(frame, (x1, y1), (x2, y2), bgr, 2)  # Plot the box.
+         cv2.putText(frame, classes[int(labels[i])], (x1, y1),
+                     label_font, 0.9, bgr, 2)  # Put a label over the box.
+     return frame
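Together, `score_frame` and `plot_boxes` form a one-frame pipeline. A minimal sketch (again not part of the commit), assuming placeholder files `frame.jpg` and `annotated.jpg`:

```python
# Hypothetical single-image run; both file names are placeholders.
import cv2

frame = cv2.imread("frame.jpg")
results = score_frame(frame)            # (labels, cord) as defined above.
annotated = plot_boxes(results, frame)  # Draws boxes in place, returns the frame.
cv2.imwrite("annotated.jpg", annotated)
```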
+ """
+ The function below orchestrates the entire operation and performs the
+ real-time parsing of the video stream.
+ """
+ def vid_play(vid_cap):
+     player = cv2.VideoCapture(vid_cap)  # Get your video stream.
+     assert player.isOpened()  # Make sure that there is a stream.
+     # The code below creates a new video writer object to write our
+     # output stream.
+     out_vid = "vid_tmp.avi"
+     x_shape = int(player.get(cv2.CAP_PROP_FRAME_WIDTH))
+     y_shape = int(player.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     four_cc = cv2.VideoWriter_fourcc(*"MJPG")  # Using the MJPEG codec.
+     out = cv2.VideoWriter(out_vid, four_cc, 20, (x_shape, y_shape))
+     ret, frame = player.read()  # Read the first frame.
+     while ret:  # Run until the stream is out of frames.
+         start_time = time.time()  # We would like to measure the FPS.
+         results = score_frame(frame)  # Score the frame.
+         frame = plot_boxes(results, frame)  # Plot the boxes.
+         end_time = time.time()
+         fps = 1 / np.round(end_time - start_time, 3)  # Measure the FPS.
+         print(f"Frames Per Second : {fps}")
+         out.write(frame)  # Write the frame onto the output.
+         ret, frame = player.read()  # Read the next frame.
+     player.release()
+     out.release()  # Flush the annotated video to disk.
+     return out_vid
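`vid_play` is not wired into the UI below, but it can be driven directly once a clip is on disk. A sketch under that assumption; `tmp.mp4` is a placeholder for a downloaded clip:

```python
# Hypothetical offline run; "tmp.mp4" is a placeholder path.
out_path = vid_play("tmp.mp4")
print(f"annotated video written to {out_path}")
```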
+ with gr.Blocks() as app:
+     stream = gr.State()
+
+     def load(URL):
+         yt = YouTube(URL)
+         # .desc().last() selects the lowest-resolution progressive mp4 stream.
+         vid_cap = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().last().download(filename="tmp.mp4")
+         process = cv2.VideoCapture(vid_cap)
+         frame_num = int(process.get(cv2.CAP_PROP_POS_FRAMES))  # Starting position (0).
+         frame_count = int(process.get(cv2.CAP_PROP_FRAME_COUNT))
+         process.release()
+         return vid_cap, frame_num, frame_count
+
+     def vid_play2(cap, frame_num):
+         player = cv2.VideoCapture(cap)
+         assert player.isOpened()  # Make sure that there is a stream.
+         # Seek to the requested frame, then read it; read() alone cannot seek.
+         player.set(cv2.CAP_PROP_POS_FRAMES, int(frame_num))
+         ret, frame = player.read()
+         player.release()
+         results = model.predict(frame)
+         render = render_result(model=model, image=frame, result=results[0])
+         return render
+
+     youtube_url = gr.Textbox(label="YouTube URL", value=f"{URL}")
+     load_button = gr.Button("Load Video")
+     with gr.Row():
+         total_frames = gr.Number(label="Total Frames", interactive=False)
+         run_button = gr.Button()
+         cur_frame = gr.Number(label="Current Frame")
+     with gr.Row():
+         output_win = gr.Video()
+         det_win = gr.Image()  # Output window for the rendered detection frame.
+     load_button.click(load, youtube_url, [output_win, cur_frame, total_frames])
+     run_button.click(vid_play2, [output_win, cur_frame], det_win)
+
+ app.launch(enable_queue=False)
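The `vid_play2` handler depends on OpenCV's ability to seek before reading. That pattern in isolation, with a placeholder path, looks like this:

```python
# Seek-then-read pattern used by vid_play2; "tmp.mp4" is a placeholder path.
import cv2

cap = cv2.VideoCapture("tmp.mp4")
cap.set(cv2.CAP_PROP_POS_FRAMES, 100)  # Jump to frame 100 before reading.
ok, frame = cap.read()
cap.release()
print("got frame" if ok else "seek went past the end of the clip")
```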
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ torch
+ opencv-python
+ pytube
+ ultralyticsplus==0.0.14
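A quick way to confirm the pinned stack imports cleanly in a fresh environment (a sanity check, not part of the Space):

```python
# Sanity check: the four pinned packages should import without error.
import torch, cv2, pytube, ultralyticsplus
print(torch.__version__, cv2.__version__, pytube.__version__)
```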