AlshimaaGamalAlsaied committed on
Commit
7b1080a
1 Parent(s): 8a41b66
Files changed (1)
  1. app.py +0 -249
app.py CHANGED
@@ -1,249 +0,0 @@
- import gradio as gr
- #import torch
- import yolov7
- import subprocess
- import tempfile
- import time
- from pathlib import Path
-
- import cv2
-
-
- # Images
- #torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
- #torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg')
-
- def image_fn(
-     image: gr.inputs.Image = None,
-     model_path: gr.inputs.Dropdown = None,
-     image_size: gr.inputs.Slider = 640,
-     conf_threshold: gr.inputs.Slider = 0.25,
-     iou_threshold: gr.inputs.Slider = 0.45,
- ):
-     """
-     YOLOv7 inference function
-     Args:
-         image: Input image
-         model_path: Path to the model
-         image_size: Image size
-         conf_threshold: Confidence threshold
-         iou_threshold: IOU threshold
-     Returns:
-         Rendered image
-     """
-
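-     # NOTE: `yolov7` here is the pip-installable yolov7 package; hf_model=True
-     # is assumed to fetch the checkpoint from the Hugging Face Hub model repo.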
-     model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
-     model.conf = conf_threshold
-     model.iou = iou_threshold
-     results = model([image], size=image_size)
-     return results.render()[0]
-
-
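- # Video pipeline: cut the requested segment with ffmpeg, run the detector
- # frame by frame via OpenCV, then re-encode with libx264 so the result plays
- # in the browser.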
- def video_fn(model_path, video_file, conf_thres, iou_thres, start_sec, duration):
-     model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
-     model.conf = conf_thres
-     model.iou = iou_thres
-     start_timestamp = time.strftime("%H:%M:%S", time.gmtime(start_sec))
-
-     suffix = Path(video_file).suffix
-
-     clip_temp_file = tempfile.NamedTemporaryFile(suffix=suffix)
-     # With -ss placed before -i, ffmpeg resets the seeked input's timestamps,
-     # so the clip length is passed as a duration (-t) rather than as an
-     # absolute end position (-to).
-     subprocess.call(
-         f"ffmpeg -y -ss {start_timestamp} -i {video_file} -t {duration} -c copy {clip_temp_file.name}".split()
-     )
-
-     # Reader of clip file
-     cap = cv2.VideoCapture(clip_temp_file.name)
-     # Derive writer settings from the clip itself; cv2.VideoWriter silently
-     # drops frames whose size does not match the one it was opened with.
-     fps = cap.get(cv2.CAP_PROP_FPS) or 30
-     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-     height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
-     # This is an intermediary temp file where we'll write the video to.
-     # Unfortunately, gradio doesn't play too nicely with videos right now, so
-     # we have to do some hackiness with ffmpeg at the end of the function here.
-     with tempfile.NamedTemporaryFile(suffix=".mp4") as temp_file:
-         out = cv2.VideoWriter(temp_file.name, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
-
-         num_frames = 0
-         max_frames = duration * fps
-         while cap.isOpened():
-             try:
-                 ret, frame = cap.read()
-                 if not ret:
-                     break
-             except Exception as e:
-                 print(e)
-                 continue
-             # Render detections onto the frame, mirroring image_fn above.
-             results = model(frame)
-             out.write(results.render()[0])
-             num_frames += 1
-             print("Processed {} frames".format(num_frames))
-             if num_frames >= max_frames:
-                 break
-
-         out.release()
-
-         # Aforementioned hackiness: re-encode with libx264 so browsers can play it.
-         out_file = tempfile.NamedTemporaryFile(suffix="out.mp4", delete=False)
-         subprocess.run(f"ffmpeg -y -loglevel quiet -stats -i {temp_file.name} -c:v libx264 {out_file.name}".split())
-
-     return out_file.name
-
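- # NOTE: the gr.inputs / gr.outputs namespaces below are the legacy Gradio
- # 2.x/3.x API; newer Gradio releases expose these directly on the top-level
- # module (gr.Image, gr.Slider, ...).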
- image_interface = gr.Interface(
-     fn=image_fn,
-     inputs=[
-         gr.inputs.Image(type="pil", label="Input Image"),
-         gr.inputs.Dropdown(
-             choices=[
-                 "alshimaa/model_baseline",
-                 "alshimaa/model_yolo7",
-                 #"kadirnar/yolov7-v0.1",
-             ],
-             default="alshimaa/model_baseline",
-             label="Model",
-         ),
-         # Size and threshold sliders; image_fn takes five arguments, and the
-         # slider defaults line up with the five-value example rows below.
-         gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
-         gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
-         gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
-     ],
-     outputs=gr.outputs.Image(type="filepath", label="Output Image"),
-     title="Smart Environmental Eye (SEE)",
-     examples=[['image1.jpg', 'alshimaa/model_yolo7', 640, 0.25, 0.45], ['image2.jpg', 'alshimaa/model_yolo7', 640, 0.25, 0.45], ['image3.jpg', 'alshimaa/model_yolo7', 640, 0.25, 0.45]],
-     cache_examples=True,
-     theme='huggingface',
- )
-
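- # cache_examples=True runs image_fn on the example rows at startup and caches
- # the rendered outputs, so image1.jpg - image3.jpg must ship alongside app.py.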
-
- video_interface = gr.Interface(
-     fn=video_fn,
-     # Inputs are ordered to match video_fn's signature:
-     # (model_path, video_file, conf_thres, iou_thres, start_sec, duration).
-     inputs=[
-         gr.inputs.Dropdown(
-             choices=[
-                 "alshimaa/model_baseline",
-                 "alshimaa/model_yolo7",
-                 #"kadirnar/yolov7-v0.1",
-             ],
-             default="alshimaa/model_baseline",
-             label="Model",
-         ),
-         gr.Video(type="file"),
-         gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
-         gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
-         gr.inputs.Slider(minimum=0, maximum=10, default=0, step=1, label="Start Second"),
-         gr.inputs.Slider(minimum=1, maximum=10, default=4, step=1, label="Duration"),
-     ],
-     outputs=gr.Video(type="file", format="mp4"),
-     # examples=[
-     #     ["video.mp4", 0.25, 0.45, 0, 2],
-     # ],
-     title="Smart Environmental Eye (SEE)",
-     allow_flagging=False,
-     allow_screenshot=False,
- )
-
- if __name__ == "__main__":
-     gr.TabbedInterface(
-         [image_interface, video_interface],
-         ["Run on Images", "Run on Videos"],
-     ).launch()
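-     # launch() serves the app locally; passing share=True would additionally
-     # create a temporary public gradio.live link.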
-
- # import subprocess
- # import tempfile
- # import time
- # from pathlib import Path
-
- # import cv2
- # import gradio as gr
-
- # from inferer import Inferer
-
- # pipeline = Inferer("alshimaa/model_yolo7", device='cuda')
-
-
- # def fn_image(image, conf_thres, iou_thres):
- #     return pipeline(image, conf_thres, iou_thres)
-
-
- # def fn_video(video_file, conf_thres, iou_thres, start_sec, duration):
- #     start_timestamp = time.strftime("%H:%M:%S", time.gmtime(start_sec))
- #     end_timestamp = time.strftime("%H:%M:%S", time.gmtime(start_sec + duration))
-
- #     suffix = Path(video_file).suffix
-
- #     clip_temp_file = tempfile.NamedTemporaryFile(suffix=suffix)
- #     subprocess.call(
- #         f"ffmpeg -y -ss {start_timestamp} -i {video_file} -to {end_timestamp} -c copy {clip_temp_file.name}".split()
- #     )
-
- #     # Reader of clip file
- #     cap = cv2.VideoCapture(clip_temp_file.name)
-
- #     # This is an intermediary temp file where we'll write the video to
- #     # Unfortunately, gradio doesn't play too nice with videos rn so we have to do some hackiness
- #     # with ffmpeg at the end of the function here.
- #     with tempfile.NamedTemporaryFile(suffix=".mp4") as temp_file:
- #         out = cv2.VideoWriter(temp_file.name, cv2.VideoWriter_fourcc(*"MP4V"), 30, (1280, 720))
-
- #         num_frames = 0
- #         max_frames = duration * 30
- #         while cap.isOpened():
- #             try:
- #                 ret, frame = cap.read()
- #                 if not ret:
- #                     break
- #             except Exception as e:
- #                 print(e)
- #                 continue
- #             print("FRAME DTYPE", type(frame))
- #             out.write(pipeline(frame, conf_thres, iou_thres))
- #             num_frames += 1
- #             print("Processed {} frames".format(num_frames))
- #             if num_frames == max_frames:
- #                 break
-
- #         out.release()
-
- #         # Aforementioned hackiness
- #         out_file = tempfile.NamedTemporaryFile(suffix="out.mp4", delete=False)
- #         subprocess.run(f"ffmpeg -y -loglevel quiet -stats -i {temp_file.name} -c:v libx264 {out_file.name}".split())
-
- #     return out_file.name
-
-
- # image_interface = gr.Interface(
- #     fn=fn_image,
- #     inputs=[
- #         "image",
- #         gr.Slider(0, 1, value=0.5, label="Confidence Threshold"),
- #         gr.Slider(0, 1, value=0.5, label="IOU Threshold"),
- #     ],
- #     outputs=gr.Image(type="file"),
- #     examples=[["image1.jpg", 0.5, 0.5], ["image2.jpg", 0.25, 0.45], ["image3.jpg", 0.25, 0.45]],
- #     title="Smart Environmental Eye (SEE)",
- #     allow_flagging=False,
- #     allow_screenshot=False,
- # )
-
- # video_interface = gr.Interface(
- #     fn=fn_video,
- #     inputs=[
- #         gr.Video(type="file"),
- #         gr.Slider(0, 1, value=0.25, label="Confidence Threshold"),
- #         gr.Slider(0, 1, value=0.45, label="IOU Threshold"),
- #         gr.Slider(0, 10, value=0, label="Start Second", step=1),
- #         gr.Slider(0, 10 if pipeline.device.type != 'cpu' else 3, value=4, label="Duration", step=1),
- #     ],
- #     outputs=gr.Video(type="file", format="mp4"),
- #     # examples=[
- #     #     ["video.mp4", 0.25, 0.45, 0, 2],
- #     # ],
- #     title="Smart Environmental Eye (SEE)",
- #     allow_flagging=False,
- #     allow_screenshot=False,
- # )
-
-
- # if __name__ == "__main__":
- #     gr.TabbedInterface(
- #         [image_interface, video_interface],
- #         ["Run on Images", "Run on Videos"],
- #     ).launch()
-