Aalaa committed
Commit 76433a2
1 Parent(s): fe4d49d

Update app.py

Files changed (1): app.py (+7, -83)
app.py CHANGED
@@ -1,5 +1,4 @@
 import gradio as gr
-#import torch
 import yolov7
 import subprocess
 import tempfile
@@ -10,12 +9,6 @@ import cv2
 import gradio as gr
 
 
-
-# # Images
-# #torch.hub.download_url_t
-# o_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
-# #torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg')
-
 
 def image_fn(
     image: gr.inputs.Image = None,
@@ -44,51 +37,6 @@ def image_fn(
 
 
 
-def video_fn(model_path, video_file, conf_thres, iou_thres, start_sec, duration):
-    model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
-    start_timestamp = time.strftime("%H:%M:%S", time.gmtime(start_sec))
-    end_timestamp = time.strftime("%H:%M:%S", time.gmtime(start_sec + duration))
-
-    suffix = Path(video_file).suffix
-
-    clip_temp_file = tempfile.NamedTemporaryFile(suffix=suffix)
-    subprocess.call(
-        f"ffmpeg -y -ss {start_timestamp} -i {video_file} -to {end_timestamp} -c copy {clip_temp_file.name}".split()
-    )
-
-    # Reader of clip file
-    cap = cv2.VideoCapture(clip_temp_file.name)
-
-    # This is an intermediary temp file where we'll write the video to
-    # Unfortunately, gradio doesn't play too nice with videos rn so we have to do some hackiness
-    # with ffmpeg at the end of the function here.
-    with tempfile.NamedTemporaryFile(suffix=".mp4") as temp_file:
-        out = cv2.VideoWriter(temp_file.name, cv2.VideoWriter_fourcc(*"MP4V"), 30, (1280, 720))
-
-        num_frames = 0
-        max_frames = duration * 30
-        while cap.isOpened():
-            try:
-                ret, frame = cap.read()
-                if not ret:
-                    break
-            except Exception as e:
-                print(e)
-                continue
-            print("FRAME DTYPE", type(frame))
-            out.write(model([frame], conf_thres, iou_thres))
-            num_frames += 1
-            print("Processed {} frames".format(num_frames))
-            if num_frames == max_frames:
-                break
-
-        out.release()
-
-        # Aforementioned hackiness
-        out_file = tempfile.NamedTemporaryFile(suffix="out.mp4", delete=False)
-        subprocess.run(f"ffmpeg -y -loglevel quiet -stats -i {temp_file.name} -c:v libx264 {out_file.name}".split())
-
-    return out_file.name
 
 image_interface = gr.Interface(
     fn=image_fn,
@@ -96,10 +44,9 @@ image_interface = gr.Interface(
         gr.inputs.Image(type="pil", label="Input Image"),
         gr.inputs.Dropdown(
             choices=[
-                "alshimaa/SEE_model_yolo7",
-                #"kadirnar/yolov7-v0.1",
+                "Aalaa/Yolov7_Visual_Pollution_Detection",
             ],
-            default="alshimaa/SEE_model_yolo7",
+            default="Aalaa/Yolov7_Visual_Pollution_Detection",
             label="Model",
         )
         #gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size")
@@ -107,39 +54,16 @@ image_interface = gr.Interface(
         #gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold")
     ],
     outputs=gr.outputs.Image(type="filepath", label="Output Image"),
-    title="Smart Environmental Eye (SEE)",
-    examples=[['image1.jpg', 'alshimaa/SEE_model_yolo7', 640, 0.25, 0.45]],
+
+    examples=[['image1.jpg', 'Aalaa/Yolov7_Visual_Pollution_Detection', 640, 0.25, 0.45]],
     cache_examples=True,
     theme='huggingface',
 )
 
 
-video_interface = gr.Interface(
-    fn=video_fn,
-    inputs=[
-        gr.inputs.Video(source = "upload", type = "mp4", label = "Input Video"),
-        gr.inputs.Dropdown(
-            choices=[
-                "alshimaa/SEE_model_yolo7",
-                #"kadirnar/yolov7-v0.1",
-            ],
-            default="alshimaa/SEE_model_yolo7",
-            label="Model",
-        ),
-    ],
-    outputs=gr.outputs.Video(type = "mp4", label = "Output Video"),
-    # examples=[
-    #     ["video.mp4", 0.25, 0.45, 0, 2],
-
-    # ],
-    title="Smart Environmental Eye (SEE)",
-    cache_examples=True,
-    theme='huggingface',
-
-)
 
 if __name__ == "__main__":
     gr.TabbedInterface(
-        [image_interface, video_interface],
-        ["Run on Images", "Run on Videos"],
-    ).launch()
+        [image_interface],
+        ["Run on Images"],
+    ).launch()
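For readers who want to try the renamed checkpoint outside this Space, here is a minimal single-image inference sketch. The yolov7.load(..., device="cpu", hf_model=True, trace=False) call is copied verbatim from the removed video_fn, and the threshold and size values come from the examples row above; the model.conf/model.iou attributes, the size keyword, and results.show() are assumptions based on the yolov7 pip package's YOLOv5-style interface, not anything shown in this diff.

import yolov7

# Hub-style load, mirroring the call in the removed video_fn.
model = yolov7.load(
    "Aalaa/Yolov7_Visual_Pollution_Detection",
    device="cpu",
    hf_model=True,
    trace=False,
)

# Assumed YOLOv5-style threshold attributes; the values match the
# examples row in the diff (conf 0.25, IoU 0.45).
model.conf = 0.25  # confidence threshold
model.iou = 0.45   # IoU threshold

# Run on the example image referenced in the diff; size 640 matches the
# commented-out "Image Size" slider default.
results = model("image1.jpg", size=640)
results.show()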
 
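With the video tab removed, the launch block keeps gr.TabbedInterface around a single interface. A self-contained sketch of that pattern, using the same legacy gr.inputs/gr.outputs API this app targets; the flip function is a hypothetical stand-in for image_fn, whose body this diff does not show.

import gradio as gr

# Hypothetical stand-in for image_fn (its body is not part of this diff).
def flip(image):
    return image.rotate(180)

demo = gr.Interface(
    fn=flip,
    inputs=gr.inputs.Image(type="pil", label="Input Image"),
    outputs=gr.outputs.Image(type="pil", label="Output Image"),
)

if __name__ == "__main__":
    # TabbedInterface accepts a one-element list and renders a single tab,
    # so the post-commit block works as written; demo.launch() alone would
    # be an equivalent simplification.
    gr.TabbedInterface([demo], ["Run on Images"]).launch()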