whitphx committed
Commit 0ac8362
1 Parent(s): f554f7a

Update app.py and add Heroku files

Files changed (5)
  1. Aptfile +1 -0
  2. Procfile +1 -0
  3. app.py +397 -0
  4. requirements.txt +7 -0
  5. runtime.txt +1 -0
Aptfile ADDED
@@ -0,0 +1 @@
+ libgl1
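The apt buildpack installs each package listed here at build time; libgl1 provides libGL.so.1, which opencv_python requires at import time but Heroku's stack image does not include.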
Procfile ADDED
@@ -0,0 +1 @@
+ web: streamlit run --server.port $PORT app.py
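Heroku assigns the listening port at runtime through the $PORT environment variable, so the web process starts Streamlit bound to that port rather than its default of 8501; locally, a plain "streamlit run app.py" is the equivalent.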
app.py ADDED
@@ -0,0 +1,397 @@
+ import logging
+ import logging.handlers
+ import queue
+ import urllib.request
+ from pathlib import Path
+ from typing import Literal
+
+ import av
+ import cv2
+ import numpy as np
+ import PIL.Image  # the bare "import PIL" does not expose PIL.Image
+ import streamlit as st
+ from aiortc.contrib.media import MediaPlayer
+
+ from streamlit_webrtc import (
+     ClientSettings,
+     VideoTransformerBase,
+     WebRtcMode,
+     webrtc_streamer,
+ )
+
+ HERE = Path(__file__).parent
+
+ logger = logging.getLogger(__name__)
+
+
+ # This code is based on https://github.com/streamlit/demo-self-driving/blob/230245391f2dda0cb464008195a470751c01770b/streamlit_app.py#L48  # noqa: E501
+ def download_file(url, download_to: Path, expected_size=None):
+     # Don't download the file twice.
+     # (If possible, verify the download using the file length.)
+     if download_to.exists():
+         if expected_size:
+             if download_to.stat().st_size == expected_size:
+                 return
+         else:
+             st.info(f"{url} is already downloaded.")
+             if not st.button("Download again?"):
+                 return
+
+     download_to.parent.mkdir(parents=True, exist_ok=True)
+
+     # These are handles to two visual elements to animate.
+     weights_warning, progress_bar = None, None
+     try:
+         weights_warning = st.warning("Downloading %s..." % url)
+         progress_bar = st.progress(0)
+         with open(download_to, "wb") as output_file:
+             with urllib.request.urlopen(url) as response:
+                 length = int(response.info()["Content-Length"])
+                 counter = 0.0
+                 MEGABYTES = 2.0 ** 20.0
+                 while True:
+                     data = response.read(8192)
+                     if not data:
+                         break
+                     counter += len(data)
+                     output_file.write(data)
+
+                     # We perform animation by overwriting the elements.
+                     weights_warning.warning(
+                         "Downloading %s... (%6.2f/%6.2f MB)"
+                         % (url, counter / MEGABYTES, length / MEGABYTES)
+                     )
+                     progress_bar.progress(min(counter / length, 1.0))
+     # Finally, we remove these visual elements by calling .empty().
+     finally:
+         if weights_warning is not None:
+             weights_warning.empty()
+         if progress_bar is not None:
+             progress_bar.empty()
+
+
+ def main():
+     st.header("WebRTC demo")
+
+     object_detection_page = "Real time object detection (sendrecv)"
+     video_filters_page = (
+         "Real time video transform with simple OpenCV filters (sendrecv)"
+     )
+     streaming_page = (
+         "Consuming media files on server-side and streaming it to browser (recvonly)"
+     )
+     sendonly_page = "WebRTC is sendonly and images are shown via st.image() (sendonly)"
+     loopback_page = "Simple video loopback (sendrecv)"
+     app_mode = st.sidebar.selectbox(
+         "Choose the app mode",
+         [
+             object_detection_page,
+             video_filters_page,
+             streaming_page,
+             sendonly_page,
+             loopback_page,
+         ],
+     )
+     st.subheader(app_mode)
+
+     if app_mode == video_filters_page:
+         app_video_filters()
+     elif app_mode == object_detection_page:
+         app_object_detection()
+     elif app_mode == streaming_page:
+         app_streaming()
+     elif app_mode == sendonly_page:
+         app_sendonly()
+     elif app_mode == loopback_page:
+         app_loopback()
+
+
+ def app_loopback():
+     """Simple video loopback."""
+     webrtc_streamer(
+         key="loopback",
+         mode=WebRtcMode.SENDRECV,
+         client_settings=WEBRTC_CLIENT_SETTINGS,
+         video_transformer_class=None,  # NoOp
+     )
+
+
+ def app_video_filters():
+     """Video transforms with OpenCV."""
+
+     class OpenCVVideoTransformer(VideoTransformerBase):
+         type: Literal["noop", "cartoon", "edges", "rotate"]
+
+         def __init__(self) -> None:
+             self.type = "noop"
+
+         def transform(self, frame: av.VideoFrame) -> np.ndarray:
+             img = frame.to_ndarray(format="bgr24")
+
+             if self.type == "noop":
+                 pass
+             elif self.type == "cartoon":
+                 # prepare color
+                 img_color = cv2.pyrDown(cv2.pyrDown(img))
+                 for _ in range(6):
+                     img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
+                 img_color = cv2.pyrUp(cv2.pyrUp(img_color))
+
+                 # prepare edges (the source frame is BGR, not RGB)
+                 img_edges = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+                 img_edges = cv2.adaptiveThreshold(
+                     cv2.medianBlur(img_edges, 7),
+                     255,
+                     cv2.ADAPTIVE_THRESH_MEAN_C,
+                     cv2.THRESH_BINARY,
+                     9,
+                     2,
+                 )
+                 img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2BGR)
+
+                 # combine color and edges
+                 img = cv2.bitwise_and(img_color, img_edges)
+             elif self.type == "edges":
+                 # perform edge detection
+                 img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR)
+             elif self.type == "rotate":
+                 # rotate image
+                 rows, cols, _ = img.shape
+                 M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1)
+                 img = cv2.warpAffine(img, M, (cols, rows))
+
+             return img
+
+     webrtc_ctx = webrtc_streamer(
+         key="opencv-filter",
+         mode=WebRtcMode.SENDRECV,
+         client_settings=WEBRTC_CLIENT_SETTINGS,
+         video_transformer_class=OpenCVVideoTransformer,
+         async_transform=True,
+     )
+
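+     # st.radio below runs in the Streamlit script thread, while transform()
+     # runs in a separate worker thread; assigning the selected value to an
+     # attribute on the transformer instance is how the UI setting crosses that
+     # thread boundary on each rerun.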
+     transform_type = st.radio(
+         "Select transform type", ("noop", "cartoon", "edges", "rotate")
+     )
+     if webrtc_ctx.video_transformer:
+         webrtc_ctx.video_transformer.type = transform_type
+
+     st.markdown(
+         "This demo is based on "
+         "https://github.com/aiortc/aiortc/blob/2362e6d1f0c730a0f8c387bbea76546775ad2fe8/examples/server/server.py#L34. "  # noqa: E501
+         "Many thanks to the project."
+     )
+
+
+ def app_object_detection():
+     """Object detection demo with MobileNet SSD.
+     This model and code are based on
+     https://github.com/robmarkcole/object-detection-app
+     """
+     MODEL_URL = "https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.caffemodel"  # noqa: E501
+     MODEL_LOCAL_PATH = HERE / "models/MobileNetSSD_deploy.caffemodel"
+     PROTOTXT_URL = "https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.prototxt.txt"  # noqa: E501
+     PROTOTXT_LOCAL_PATH = HERE / "models/MobileNetSSD_deploy.prototxt.txt"
+
+     CLASSES = [
+         "background",
+         "aeroplane",
+         "bicycle",
+         "bird",
+         "boat",
+         "bottle",
+         "bus",
+         "car",
+         "cat",
+         "chair",
+         "cow",
+         "diningtable",
+         "dog",
+         "horse",
+         "motorbike",
+         "person",
+         "pottedplant",
+         "sheep",
+         "sofa",
+         "train",
+         "tvmonitor",
+     ]
+     COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
+
+     download_file(MODEL_URL, MODEL_LOCAL_PATH, expected_size=23147564)
+     download_file(PROTOTXT_URL, PROTOTXT_LOCAL_PATH, expected_size=29353)
+
+     DEFAULT_CONFIDENCE_THRESHOLD = 0.5
+
+     class NNVideoTransformer(VideoTransformerBase):
+         confidence_threshold: float
+
+         def __init__(self) -> None:
+             self._net = cv2.dnn.readNetFromCaffe(
+                 str(PROTOTXT_LOCAL_PATH), str(MODEL_LOCAL_PATH)
+             )
+             self.confidence_threshold = DEFAULT_CONFIDENCE_THRESHOLD
+
+         def _annotate_image(self, image, detections):
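+             # `detections` has shape (1, 1, N, 7); each row holds
+             # [image_id, class_id, confidence, x1, y1, x2, y2], with the box
+             # coordinates normalized to [0, 1] (hence the scaling by w and h).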
+             # loop over the detections
+             (h, w) = image.shape[:2]
+             labels = []
+             for i in np.arange(0, detections.shape[2]):
+                 confidence = detections[0, 0, i, 2]
+
+                 if confidence > self.confidence_threshold:
+                     # extract the index of the class label from the `detections`,
+                     # then compute the (x, y)-coordinates of the bounding box for
+                     # the object
+                     idx = int(detections[0, 0, i, 1])
+                     box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
+                     (startX, startY, endX, endY) = box.astype("int")
+
+                     # display the prediction
+                     label = f"{CLASSES[idx]}: {round(confidence * 100, 2)}%"
+                     labels.append(label)
+                     cv2.rectangle(image, (startX, startY), (endX, endY), COLORS[idx], 2)
+                     y = startY - 15 if startY - 15 > 15 else startY + 15
+                     cv2.putText(
+                         image,
+                         label,
+                         (startX, y),
+                         cv2.FONT_HERSHEY_SIMPLEX,
+                         0.5,
+                         COLORS[idx],
+                         2,
+                     )
+             return image, labels
+
+         def transform(self, frame: av.VideoFrame) -> np.ndarray:
+             image = frame.to_ndarray(format="bgr24")
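+             # The scale factor 0.007843 is 1/127.5; combined with the mean value
+             # 127.5 it maps 8-bit pixels into roughly [-1, 1], the input range
+             # this Caffe MobileNet SSD was trained with. 300x300 is the network's
+             # fixed input size.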
+             blob = cv2.dnn.blobFromImage(
+                 cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5
+             )
+             self._net.setInput(blob)
+             detections = self._net.forward()
+             annotated_image, labels = self._annotate_image(image, detections)
+             # TODO: Show labels
+
+             return annotated_image
+
+     webrtc_ctx = webrtc_streamer(
+         key="object-detection",
+         mode=WebRtcMode.SENDRECV,
+         client_settings=WEBRTC_CLIENT_SETTINGS,
+         video_transformer_class=NNVideoTransformer,
+         async_transform=True,
+     )
+
+     confidence_threshold = st.slider(
+         "Confidence threshold", 0.0, 1.0, DEFAULT_CONFIDENCE_THRESHOLD, 0.05
+     )
+     if webrtc_ctx.video_transformer:
+         webrtc_ctx.video_transformer.confidence_threshold = confidence_threshold
+
+     st.markdown(
+         "This demo uses a model and code from "
+         "https://github.com/robmarkcole/object-detection-app. "
+         "Many thanks to the project."
+     )
+
+
+ def app_streaming():
+     """Media streaming."""
+     MEDIAFILES = {
+         "big_buck_bunny_720p_2mb.mp4": {
+             "url": "https://sample-videos.com/video123/mp4/720/big_buck_bunny_720p_2mb.mp4",  # noqa: E501
+             "local_file_path": HERE / "data/big_buck_bunny_720p_2mb.mp4",
+             "type": "video",
+         },
+         "big_buck_bunny_720p_10mb.mp4": {
+             "url": "https://sample-videos.com/video123/mp4/720/big_buck_bunny_720p_10mb.mp4",  # noqa: E501
+             "local_file_path": HERE / "data/big_buck_bunny_720p_10mb.mp4",
+             "type": "video",
+         },
+         "file_example_MP3_700KB.mp3": {
+             "url": "https://file-examples-com.github.io/uploads/2017/11/file_example_MP3_700KB.mp3",  # noqa: E501
+             "local_file_path": HERE / "data/file_example_MP3_700KB.mp3",
+             "type": "audio",
+         },
+         "file_example_MP3_5MG.mp3": {
+             "url": "https://file-examples-com.github.io/uploads/2017/11/file_example_MP3_5MG.mp3",  # noqa: E501
+             "local_file_path": HERE / "data/file_example_MP3_5MG.mp3",
+             "type": "audio",
+         },
+     }
+     media_file_label = st.radio(
+         "Select a media file to stream", tuple(MEDIAFILES.keys())
+     )
+     media_file_info = MEDIAFILES[media_file_label]
+     download_file(media_file_info["url"], media_file_info["local_file_path"])
+
+     def create_player():
+         return MediaPlayer(str(media_file_info["local_file_path"]))
+
+         # NOTE: To stream the video from a webcam, use the code below instead.
+         # return MediaPlayer(
+         #     "1:none",
+         #     format="avfoundation",
+         #     options={"framerate": "30", "video_size": "1280x720"},
+         # )
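+         #
+         # "avfoundation" is FFmpeg's macOS capture backend; a hypothetical Linux
+         # equivalent (not part of the original code) would use format="v4l2"
+         # with a device path such as "/dev/video0".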
+
+     WEBRTC_CLIENT_SETTINGS.update(
+         {
+             "media_stream_constraints": {
+                 "video": media_file_info["type"] == "video",
+                 "audio": media_file_info["type"] == "audio",
+             }
+         }
+     )
+
+     webrtc_streamer(
+         key=f"media-streaming-{media_file_label}",
+         mode=WebRtcMode.RECVONLY,
+         client_settings=WEBRTC_CLIENT_SETTINGS,
+         player_factory=create_player,
+     )
+
+
+ def app_sendonly():
+     """A sample app that uses WebRTC in sendonly mode to transfer frames
+     from the browser to the server and renders them via `st.image`."""
+     webrtc_ctx = webrtc_streamer(
+         key="sendonly",
+         mode=WebRtcMode.SENDONLY,
+         client_settings=WEBRTC_CLIENT_SETTINGS,
+     )
+
+     if webrtc_ctx.video_receiver:
+         image_loc = st.empty()
+         while True:
+             try:
+                 frame = webrtc_ctx.video_receiver.frames_queue.get(timeout=1)
+             except queue.Empty:
+                 print("Queue is empty. Stopping the loop.")
+                 webrtc_ctx.video_receiver.stop()
+                 break
+
+             img = frame.to_ndarray(format="bgr24")
+             img = PIL.Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
+             image_loc.image(img)
+
+
+ WEBRTC_CLIENT_SETTINGS = ClientSettings(
+     rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
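+     # A public STUN server lets browsers behind NAT discover their external
+     # address; without an ICE server configured here, connections typically
+     # fail outside the local network.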
+     media_stream_constraints={"video": True, "audio": True},
+ )
+
+ if __name__ == "__main__":
+     logging.basicConfig(
+         format="[%(asctime)s] %(levelname)7s from %(name)s in %(filename)s:%(lineno)d: "
+         "%(message)s",
+         force=True,
+     )
+
+     logger.setLevel(level=logging.DEBUG)
+
+     st_webrtc_logger = logging.getLogger("streamlit_webrtc")
+     st_webrtc_logger.setLevel(logging.DEBUG)
+
+     main()
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ av==8.0.2
+ streamlit==0.74.1
+ opencv_python==4.5.1.48
+ numpy==1.19.5
+ aiortc==1.0.0
+ Pillow==8.1.0
+ streamlit_webrtc==0.2.0
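These pins matter: app.py targets the streamlit_webrtc 0.2.0 API (ClientSettings, VideoTransformerBase, async_transform), which has changed in later releases.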
runtime.txt ADDED
@@ -0,0 +1 @@
+ python-3.8.7
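Python 3.8 is also the floor for the code itself: typing.Literal and the force argument to logging.basicConfig both first appeared in 3.8.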