AI-Naga committed
Commit
6602745
1 Parent(s): 1940974

Upload 3 files

Files changed (4):
  1. .gitattributes +1 -0
  2. TresPass_Detection_1.mp4 +3 -0
  3. app.py +113 -0
  4. requirements.txt +8 -0
.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ TresPass_Detection_1.mp4 filter=lfs diff=lfs merge=lfs -text
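
This new rule routes the uploaded video through Git LFS, which is why the file appears below as a small text pointer rather than the ~12 MB binary itself.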
TresPass_Detection_1.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b75053df957baa86ccd6c729059415f524df9e2741765ffdfd709be7145f9e0e
+ size 12501868
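
The three pointer lines above follow the standard Git LFS pointer format: a spec version, the object's SHA-256, and its byte size. A minimal sketch (not part of the commit) that checks a pulled checkout against these fields, assuming the video sits in the working directory after `git lfs pull`:

import hashlib
from pathlib import Path

# Pointer fields copied from the diff above
EXPECTED_OID = "b75053df957baa86ccd6c729059415f524df9e2741765ffdfd709be7145f9e0e"
EXPECTED_SIZE = 12501868

data = Path("TresPass_Detection_1.mp4").read_bytes()
assert len(data) == EXPECTED_SIZE, f"size mismatch: got {len(data)} bytes"
assert hashlib.sha256(data).hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("LFS object matches its pointer")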
app.py ADDED
@@ -0,0 +1,113 @@
+ import gradio as gr
+ import cv2
+ import torch
+ import numpy as np
+
+ # Pretrained YOLOv5x loaded via torch.hub
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5x', pretrained=True)
+
+ # Example inputs for the Gradio interfaces
+ path = [['image_0.jpg'], ['image_1.jpg']]
+ video_path = [['TresPass_Detection_1.mp4']]
+
+ # Restricted-area polygon, in coordinates of the 1020x600 resized frame
+ # area = [(215, 180), (120, 75), (370, 55), (520, 140), (215, 180)]
+ area = [(215, 180), (110, 75), (370, 55), (520, 140), (215, 180)]
+
+ def show_preds_video():
+     cap = cv2.VideoCapture('TresPass_Detection_1.mp4')
+     count = 0
+     while cap.isOpened():
+         ret, frame = cap.read()
+         if not ret:
+             break
+         count += 1
+         # Run inference on every 10th frame only
+         if count % 10 != 0:
+             continue
+         frame = cv2.resize(frame, (1020, 600))
+         frame_copy = frame.copy()
+
+         results = model(frame)
+         for index, row in results.pandas().xyxy[0].iterrows():
+             x1 = int(row['xmin'])
+             y1 = int(row['ymin'])
+             x2 = int(row['xmax'])
+             y2 = int(row['ymax'])
+             d = row['name']
+
+             # Centroid of the detection box
+             cx = (x1 + x2) // 2
+             cy = (y1 + y2) // 2
+
+             if 'person' in d:
+                 # pointPolygonTest: +1 inside, 0 on the edge, -1 outside
+                 inside = cv2.pointPolygonTest(np.array(area, np.int32), (cx, cy), False)
+                 if inside > 0:
+                     cv2.rectangle(frame_copy, (x1, y1), (x2, y2), (0, 0, 255), 2)
+                     cv2.putText(frame_copy, str(d), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)
+                     cv2.putText(frame_copy, "Alert !!! Trespasser detected !!!", (50, 400), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 3)
+
+         cv2.polylines(frame_copy, [np.array(area, np.int32)], True, (0, 255, 0), 2)
+
+         yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
+
+ inputs_video = [
+     # gr.components.Video(type="filepath", label="Input Video", visible=False),
+ ]
+ outputs_video = [
+     gr.components.Image(type="numpy", label="Output Image"),
+ ]
+
+ interface_video = gr.Interface(
+     fn=show_preds_video,
+     inputs=inputs_video,
+     outputs=outputs_video,
+     title="Intrusion Detection",
+     examples=video_path,
+     cache_examples=False,
+ )
+
+ gr.TabbedInterface(
+     [interface_video],
+     # [interface_image, interface_video],
+     tab_names=['Video inference']
+ ).queue().launch(width=200, height=200)
+
+ # def show_preds_image(image_path):
+ #     frame = cv2.imread(image_path)
+ #     frame = cv2.resize(frame, (1020, 600))
+ #     results = model(frame)
+ #     for index, row in results.pandas().xyxy[0].iterrows():
+ #         x1 = int(row['xmin'])
+ #         y1 = int(row['ymin'])
+ #         x2 = int(row['xmax'])
+ #         y2 = int(row['ymax'])
+ #         d = row['name']
+ #
+ #         cx = (x1 + x2) // 2
+ #         cy = (y1 + y2) // 2
+ #
+ #         if 'person' in d:
+ #             inside = cv2.pointPolygonTest(np.array(area, np.int32), (cx, cy), False)
+ #             if inside > 0:
+ #                 cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
+ #                 cv2.putText(frame, str(d), (x1, y1), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0), 2)
+ #
+ #     cv2.polylines(frame, [np.array(area, np.int32)], True, (0, 255, 0), 2)
+ #     return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+ # inputs_image = [
+ #     gr.components.Image(type="filepath", label="Input Image"),
+ # ]
+ # outputs_image = [
+ #     gr.components.Image(type="numpy", label="Output Image"),
+ # ]
+ # interface_image = gr.Interface(
+ #     fn=show_preds_image,
+ #     inputs=inputs_image,
+ #     outputs=outputs_image,
+ #     title="Parking space counter",
+ #     examples=path,
+ #     cache_examples=False,
+ # )
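
The trespass decision in show_preds_video reduces to a centroid-in-polygon test via cv2.pointPolygonTest. A minimal sketch (not part of the commit) isolating that check with the same area polygon; the example coordinates assume the 1020x600 resized frame:

import cv2
import numpy as np

# Same restricted-area polygon as app.py (1020x600 frame coordinates)
area = np.array([(215, 180), (110, 75), (370, 55), (520, 140), (215, 180)], np.int32)

def is_trespass(x1, y1, x2, y2):
    """True when a detection box's centroid lies strictly inside the polygon."""
    cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
    # pointPolygonTest returns +1 inside, 0 on the edge, -1 outside
    return cv2.pointPolygonTest(area, (float(cx), float(cy)), False) > 0

print(is_trespass(280, 80, 320, 140))   # centroid (300, 110): inside  -> True
print(is_trespass(800, 400, 900, 500))  # centroid (850, 450): outside -> False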
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ torch
+ gradio==3.4.0
+ opencv-python
+ numpy<1.24
+ ultralytics
+ yolov5
+
+
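
With these dependencies installed (`pip install -r requirements.txt`), the demo launches with `python app.py`. The `numpy<1.24` pin is presumably there because NumPy 1.24 removed deprecated aliases such as `np.int` that older YOLOv5 code paths still reference, and `gradio==3.4.0` matches the generator-based streaming output used in app.py.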