Multipage

- home.py +33 -0
- pages/.isort.cfg +3 -0
- pages/10_sendonly_audio.py +85 -0
- pages/11_programatic_control_playing.py +13 -0
- pages/12_media_constraints_configs.py +20 -0
- pages/13_ui_texts_customization.py +15 -0
- pages/1_object_detection.py +172 -0
- pages/2_opencv_filters.py +62 -0
- pages/3_audio_filter.py +38 -0
- pages/4_delayed_echo.py +41 -0
- pages/5_fork_multi_outputs.py +105 -0
- pages/6_mix_multi_inputs.py +182 -0
- pages/7_record.py +65 -0
- pages/8_media_files_streaming.py +128 -0
- pages/9_sendonly_video.py +34 -0
- sample_utils/__init__.py +0 -0
- sample_utils/download.py +50 -0
home.py
ADDED
@@ -0,0 +1,33 @@
import logging

import streamlit as st

logger = logging.getLogger()

st.title("streamlit-webrtc demo!")
st.info(
    """👈 Select the demo
"""
)


if __name__ == "__main__":
    import os

    DEBUG = os.environ.get("DEBUG", "false").lower() not in ["false", "no", "0"]

    logging.basicConfig(
        format="[%(asctime)s] %(levelname)7s from %(name)s in %(pathname)s:%(lineno)d: "
        "%(message)s",
        level=logging.DEBUG if DEBUG else logging.INFO,
        force=True,
    )

    fsevents_logger = logging.getLogger("fsevents")
    fsevents_logger.setLevel(logging.WARNING)

    aiortc_logger = logging.getLogger("aiortc")
    aiortc_logger.setLevel(logging.INFO)

    aioice_logger = logging.getLogger("aioice")
    aioice_logger.setLevel(logging.INFO)
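A quick sanity check of the DEBUG flag parsing above (an editor's sketch, not part of the commit): any value other than "false", "no", or "0", case-insensitively, enables debug logging.

import os

# Not part of the commit: the DEBUG parsing from home.py in isolation.
for value in ["false", "No", "0", "true", "1", "YES"]:
    os.environ["DEBUG"] = value
    enabled = os.environ.get("DEBUG", "false").lower() not in ["false", "no", "0"]
    print(f"DEBUG={value!r} -> {enabled}")
# false/No/0 -> False; anything else -> True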
pages/.isort.cfg
ADDED
@@ -0,0 +1,3 @@
[settings]
profile=black
known_third_party=streamlit_webrtc
pages/10_sendonly_audio.py
ADDED
@@ -0,0 +1,85 @@
"""A sample to use WebRTC in sendonly mode to transfer audio frames
from the browser to the server and visualize them with matplotlib
and `st.pyplot`."""

import logging
import queue

import matplotlib.pyplot as plt
import numpy as np
import pydub
import streamlit as st
from streamlit_webrtc import WebRtcMode, webrtc_streamer

logger = logging.getLogger(__name__)


webrtc_ctx = webrtc_streamer(
    key="sendonly-audio",
    mode=WebRtcMode.SENDONLY,
    audio_receiver_size=256,
    rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
    media_stream_constraints={"audio": True},
)

fig_place = st.empty()

fig, [ax_time, ax_freq] = plt.subplots(2, 1, gridspec_kw={"top": 1.5, "bottom": 0.2})

sound_window_len = 5000  # 5s
sound_window_buffer = None
while True:
    if webrtc_ctx.audio_receiver:
        try:
            audio_frames = webrtc_ctx.audio_receiver.get_frames(timeout=1)
        except queue.Empty:
            logger.warning("Queue is empty. Abort.")
            break

        sound_chunk = pydub.AudioSegment.empty()
        for audio_frame in audio_frames:
            sound = pydub.AudioSegment(
                data=audio_frame.to_ndarray().tobytes(),
                sample_width=audio_frame.format.bytes,
                frame_rate=audio_frame.sample_rate,
                channels=len(audio_frame.layout.channels),
            )
            sound_chunk += sound

        if len(sound_chunk) > 0:
            if sound_window_buffer is None:
                sound_window_buffer = pydub.AudioSegment.silent(
                    duration=sound_window_len
                )

            sound_window_buffer += sound_chunk
            if len(sound_window_buffer) > sound_window_len:
                sound_window_buffer = sound_window_buffer[-sound_window_len:]

        if sound_window_buffer:
            # Ref: https://own-search-and-study.xyz/2017/10/27/python%E3%82%92%E4%BD%BF%E3%81%A3%E3%81%A6%E9%9F%B3%E5%A3%B0%E3%83%87%E3%83%BC%E3%82%BF%E3%81%8B%E3%82%89%E3%82%B9%E3%83%9A%E3%82%AF%E3%83%88%E3%83%AD%E3%82%B0%E3%83%A9%E3%83%A0%E3%82%92%E4%BD%9C/  # noqa
            sound_window_buffer = sound_window_buffer.set_channels(1)  # Stereo to mono
            sample = np.array(sound_window_buffer.get_array_of_samples())

            ax_time.cla()
            times = (np.arange(-len(sample), 0)) / sound_window_buffer.frame_rate
            ax_time.plot(times, sample)
            ax_time.set_xlabel("Time")
            ax_time.set_ylabel("Magnitude")

            spec = np.fft.fft(sample)
            freq = np.fft.fftfreq(sample.shape[0], 1.0 / sound_chunk.frame_rate)
            freq = freq[: int(freq.shape[0] / 2)]
            spec = spec[: int(spec.shape[0] / 2)]
            spec[0] = spec[0] / 2

            ax_freq.cla()
            ax_freq.plot(freq, np.abs(spec))
            ax_freq.set_xlabel("Frequency")
            ax_freq.set_yscale("log")
            ax_freq.set_ylabel("Magnitude")

            fig_place.pyplot(fig)
    else:
        logger.warning("AudioReceiver is not set. Abort.")
        break
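The spectrum math above can be checked in isolation (an editor's sketch, not part of the commit): with a synthetic 440 Hz tone in place of microphone input, the half-spectrum peak lands at 440 Hz.

import numpy as np

# Not part of the commit: the FFT/half-spectrum logic above on a known signal.
sample_rate = 48000
t = np.arange(sample_rate) / sample_rate  # 1 second of samples
sample = np.sin(2 * np.pi * 440 * t)

spec = np.fft.fft(sample)
freq = np.fft.fftfreq(sample.shape[0], 1.0 / sample_rate)
freq = freq[: freq.shape[0] // 2]
spec = spec[: spec.shape[0] // 2]

peak_hz = freq[np.argmax(np.abs(spec))]
print(peak_hz)  # ~440.0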
pages/11_programatic_control_playing.py
ADDED
@@ -0,0 +1,13 @@
"""A sample of controlling the playing state from Python."""

import streamlit as st
from streamlit_webrtc import WebRtcMode, webrtc_streamer

playing = st.checkbox("Playing", value=True)

webrtc_streamer(
    key="programatic_control",
    desired_playing_state=playing,
    mode=WebRtcMode.SENDRECV,
    rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
)
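A variant sketch (an editor's addition, not in the commit): the same desired_playing_state parameter can be driven by any Python value, for example a button-backed toggle kept in session_state instead of a checkbox.

import streamlit as st
from streamlit_webrtc import webrtc_streamer

# Not part of the commit: toggle playing state with a button.
if "playing" not in st.session_state:
    st.session_state["playing"] = False
if st.button("Toggle playing"):
    st.session_state["playing"] = not st.session_state["playing"]

webrtc_streamer(key="toggle-demo", desired_playing_state=st.session_state["playing"])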
pages/12_media_constraints_configs.py
ADDED
@@ -0,0 +1,20 @@
"""A sample to configure MediaStreamConstraints object"""

import streamlit as st
from streamlit_webrtc import WebRtcMode, webrtc_streamer

frame_rate = 5
webrtc_streamer(
    key="media-constraints",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
    media_stream_constraints={
        "video": {"frameRate": {"ideal": frame_rate}},
    },
    video_html_attrs={
        "style": {"width": "50%", "margin": "0 auto", "border": "5px yellow solid"},
        "controls": False,
        "autoPlay": True,
    },
)
st.write(f"The frame rate is set as {frame_rate}. Video style is changed.")
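For reference, the browser's getUserMedia accepts other constraint shapes in the same dict format (an editor's sketch, not in the commit; exact support varies by browser and device):

# Not part of the commit: other standard MediaStreamConstraints shapes.
constraints_720p = {
    "video": {"width": {"ideal": 1280}, "height": {"ideal": 720}},
}
constraints_front_camera = {
    "audio": True,
    "video": {"facingMode": "user"},
}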
pages/13_ui_texts_customization.py
ADDED
@@ -0,0 +1,15 @@
from streamlit_webrtc import webrtc_streamer

webrtc_streamer(
    key="custom_ui_texts",
    rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
    translations={
        "start": "開始",
        "stop": "停止",
        "select_device": "デバイス選択",
        "media_api_not_available": "Media APIが利用できない環境です",
        "device_ask_permission": "メディアデバイスへのアクセスを許可してください",
        "device_not_available": "メディアデバイスを利用できません",
        "device_access_denied": "メディアデバイスへのアクセスが拒否されました",
    },
)
pages/1_object_detection.py
ADDED
@@ -0,0 +1,172 @@
"""Object detection demo with MobileNet SSD.
This model and code are based on
https://github.com/robmarkcole/object-detection-app
"""

import logging
import queue
from pathlib import Path
from typing import List, NamedTuple

import av
import cv2
import numpy as np
import streamlit as st
from streamlit_webrtc import WebRtcMode, webrtc_streamer

from sample_utils.download import download_file

HERE = Path(__file__).parent
ROOT = HERE.parent

logger = logging.getLogger(__name__)


MODEL_URL = "https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.caffemodel"  # noqa: E501
MODEL_LOCAL_PATH = ROOT / "./models/MobileNetSSD_deploy.caffemodel"
PROTOTXT_URL = "https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.prototxt.txt"  # noqa: E501
PROTOTXT_LOCAL_PATH = ROOT / "./models/MobileNetSSD_deploy.prototxt.txt"

CLASSES = [
    "background",
    "aeroplane",
    "bicycle",
    "bird",
    "boat",
    "bottle",
    "bus",
    "car",
    "cat",
    "chair",
    "cow",
    "diningtable",
    "dog",
    "horse",
    "motorbike",
    "person",
    "pottedplant",
    "sheep",
    "sofa",
    "train",
    "tvmonitor",
]


@st.experimental_singleton  # type: ignore  # See https://github.com/python/mypy/issues/7781, https://github.com/python/mypy/issues/12566  # noqa: E501
def generate_label_colors():
    return np.random.uniform(0, 255, size=(len(CLASSES), 3))


COLORS = generate_label_colors()

download_file(MODEL_URL, MODEL_LOCAL_PATH, expected_size=23147564)
download_file(PROTOTXT_URL, PROTOTXT_LOCAL_PATH, expected_size=29353)

DEFAULT_CONFIDENCE_THRESHOLD = 0.5


class Detection(NamedTuple):
    name: str
    prob: float


# Session-specific caching
cache_key = "object_detection_dnn"
if cache_key in st.session_state:
    net = st.session_state[cache_key]
else:
    net = cv2.dnn.readNetFromCaffe(str(PROTOTXT_LOCAL_PATH), str(MODEL_LOCAL_PATH))
    st.session_state[cache_key] = net

streaming_placeholder = st.empty()

confidence_threshold = st.slider(
    "Confidence threshold", 0.0, 1.0, DEFAULT_CONFIDENCE_THRESHOLD, 0.05
)


def _annotate_image(image, detections):
    # loop over the detections
    (h, w) = image.shape[:2]
    result: List[Detection] = []
    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]

        if confidence > confidence_threshold:
            # extract the index of the class label from the `detections`,
            # then compute the (x, y)-coordinates of the bounding box for
            # the object
            idx = int(detections[0, 0, i, 1])
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            name = CLASSES[idx]
            result.append(Detection(name=name, prob=float(confidence)))

            # display the prediction
            label = f"{name}: {round(confidence * 100, 2)}%"
            cv2.rectangle(image, (startX, startY), (endX, endY), COLORS[idx], 2)
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(
                image,
                label,
                (startX, y),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                COLORS[idx],
                2,
            )
    return image, result


result_queue: queue.Queue = (
    queue.Queue()
)  # TODO: A general-purpose shared state object may be more useful.


def callback(frame: av.VideoFrame) -> av.VideoFrame:
    image = frame.to_ndarray(format="bgr24")
    blob = cv2.dnn.blobFromImage(
        cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5
    )
    net.setInput(blob)
    detections = net.forward()
    annotated_image, result = _annotate_image(image, detections)

    # NOTE: This callback is called in another thread,
    # so it must be thread-safe.
    result_queue.put(result)  # TODO:

    return av.VideoFrame.from_ndarray(annotated_image, format="bgr24")


with streaming_placeholder.container():
    webrtc_ctx = webrtc_streamer(
        key="object-detection",
        mode=WebRtcMode.SENDRECV,
        rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
        video_frame_callback=callback,
        media_stream_constraints={"video": True, "audio": False},
        async_processing=True,
    )

if st.checkbox("Show the detected labels", value=True):
    if webrtc_ctx.state.playing:
        labels_placeholder = st.empty()
        # NOTE: The video transformation with object detection and
        # this loop displaying the result labels are running
        # in different threads asynchronously.
        # So the rendered video frames and the labels displayed here
        # are not strictly synchronized.
        while True:
            try:
                result = result_queue.get(timeout=1.0)
            except queue.Empty:
                result = None
            labels_placeholder.table(result)

st.markdown(
    "This demo uses a model and code from "
    "https://github.com/robmarkcole/object-detection-app. "
    "Many thanks to the project."
)
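The thread-safe queue hand-off above can be seen in isolation (an editor's sketch, not part of the commit): a producer thread, standing in for the video callback thread, puts results into a Queue, while the main thread polls it with a timeout, like the labels loop does.

import queue
import threading
import time

# Not part of the commit: the producer/consumer pattern used above.
result_queue: queue.Queue = queue.Queue()


def producer() -> None:
    for i in range(3):
        result_queue.put([("person", 0.9 - 0.1 * i)])
        time.sleep(0.1)


threading.Thread(target=producer, daemon=True).start()
for _ in range(3):
    try:
        print(result_queue.get(timeout=1.0))
    except queue.Empty:
        break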
pages/2_opencv_filters.py
ADDED
@@ -0,0 +1,62 @@
"""Video transforms with OpenCV"""

import av
import cv2
import streamlit as st
from streamlit_webrtc import WebRtcMode, webrtc_streamer

_type = st.radio("Select transform type", ("noop", "cartoon", "edges", "rotate"))


def callback(frame: av.VideoFrame) -> av.VideoFrame:
    img = frame.to_ndarray(format="bgr24")

    if _type == "noop":
        pass
    elif _type == "cartoon":
        # prepare color
        img_color = cv2.pyrDown(cv2.pyrDown(img))
        for _ in range(6):
            img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
        img_color = cv2.pyrUp(cv2.pyrUp(img_color))

        # prepare edges
        img_edges = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        img_edges = cv2.adaptiveThreshold(
            cv2.medianBlur(img_edges, 7),
            255,
            cv2.ADAPTIVE_THRESH_MEAN_C,
            cv2.THRESH_BINARY,
            9,
            2,
        )
        img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)

        # combine color and edges
        img = cv2.bitwise_and(img_color, img_edges)
    elif _type == "edges":
        # perform edge detection
        img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR)
    elif _type == "rotate":
        # rotate image
        rows, cols, _ = img.shape
        M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1)
        img = cv2.warpAffine(img, M, (cols, rows))

    return av.VideoFrame.from_ndarray(img, format="bgr24")


webrtc_streamer(
    key="opencv-filter",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
    video_frame_callback=callback,
    media_stream_constraints={"video": True, "audio": False},
    async_processing=True,
)

st.markdown(
    "This demo is based on "
    "https://github.com/aiortc/aiortc/blob/2362e6d1f0c730a0f8c387bbea76546775ad2fe8/examples/server/server.py#L34. "  # noqa: E501
    "Many thanks to the project."
)
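The "rotate" branch can be exercised on a synthetic frame (an editor's sketch, not part of the commit). In the callback, frame.time is the presentation timestamp in seconds, so the image spins at 45 degrees per second; here it is fixed at t = 1.0.

import cv2
import numpy as np

# Not part of the commit: the rotation branch above on a blank 640x480 frame.
img = np.zeros((480, 640, 3), dtype=np.uint8)
rows, cols, _ = img.shape
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 1.0 * 45, 1)
rotated = cv2.warpAffine(img, M, (cols, rows))
print(rotated.shape)  # (480, 640, 3)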
pages/3_audio_filter.py
ADDED
@@ -0,0 +1,38 @@
import av
import numpy as np
import pydub
import streamlit as st
from streamlit_webrtc import WebRtcMode, webrtc_streamer

gain = st.slider("Gain", -10.0, +20.0, 1.0, 0.05)


def process_audio(frame: av.AudioFrame) -> av.AudioFrame:
    raw_samples = frame.to_ndarray()
    sound = pydub.AudioSegment(
        data=raw_samples.tobytes(),
        sample_width=frame.format.bytes,
        frame_rate=frame.sample_rate,
        channels=len(frame.layout.channels),
    )

    sound = sound.apply_gain(gain)

    # Ref: https://github.com/jiaaro/pydub/blob/master/API.markdown#audiosegmentget_array_of_samples  # noqa
    channel_sounds = sound.split_to_mono()
    channel_samples = [s.get_array_of_samples() for s in channel_sounds]
    new_samples: np.ndarray = np.array(channel_samples).T
    new_samples = new_samples.reshape(raw_samples.shape)

    new_frame = av.AudioFrame.from_ndarray(new_samples, layout=frame.layout.name)
    new_frame.sample_rate = frame.sample_rate
    return new_frame


webrtc_streamer(
    key="audio-filter",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
    audio_frame_callback=process_audio,
    async_processing=True,
)
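The ndarray-to-pydub round trip above can be checked without WebRTC (an editor's sketch, not part of the commit), assuming 16-bit stereo samples in the packed (1, n_samples * channels) layout that av.AudioFrame.to_ndarray() typically returns for s16 audio.

import numpy as np
import pydub

# Not part of the commit: the gain round trip above on synthetic samples.
rng = np.random.default_rng(0)
raw_samples = rng.integers(-1000, 1000, (1, 2 * 480)).astype(np.int16)
sound = pydub.AudioSegment(
    data=raw_samples.tobytes(), sample_width=2, frame_rate=48000, channels=2
)
sound = sound.apply_gain(6.0)  # +6 dB roughly doubles the amplitude

channel_samples = [s.get_array_of_samples() for s in sound.split_to_mono()]
new_samples = np.array(channel_samples).T.reshape(raw_samples.shape)
print(new_samples.shape == raw_samples.shape)  # True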
pages/4_delayed_echo.py
ADDED
@@ -0,0 +1,41 @@
import asyncio
import logging
from typing import List

import av
import streamlit as st
from streamlit_webrtc import WebRtcMode, webrtc_streamer

logger = logging.getLogger(__name__)


delay = st.slider("Delay", 0.0, 5.0, 1.0, 0.05)


async def queued_video_frames_callback(
    frames: List[av.VideoFrame],
) -> List[av.VideoFrame]:
    logger.debug("Delay: %f", delay)
    # A standalone `await ...` is interpreted as an expression and
    # the Streamlit magic's target, which leads to implicit calls of `st.write`.
    # To prevent it, write it as `_ = await ...`, a statement.
    # See https://discuss.streamlit.io/t/issue-with-asyncio-run-in-streamlit/7745/15
    _ = await asyncio.sleep(delay)
    return frames


async def queued_audio_frames_callback(
    frames: List[av.AudioFrame],
) -> List[av.AudioFrame]:
    _ = await asyncio.sleep(delay)
    return frames


webrtc_streamer(
    key="delay",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
    queued_video_frames_callback=queued_video_frames_callback,
    queued_audio_frames_callback=queued_audio_frames_callback,
    async_processing=True,
)
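The delayed-echo idea reduces to a very small async pattern (an editor's sketch, not part of the commit): the callback holds each batch of frames for `delay` seconds and returns it unchanged, while streamlit_webrtc keeps feeding new batches in the meantime, which produces the echo.

import asyncio

# Not part of the commit: the delay-and-return pattern used above.
async def delayed_echo(frames: list, delay: float) -> list:
    _ = await asyncio.sleep(delay)  # `_ =` keeps Streamlit magic from echoing it
    return frames


print(asyncio.run(delayed_echo([1, 2, 3], 0.1)))  # [1, 2, 3], ~0.1 s later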
pages/5_fork_multi_outputs.py
ADDED
@@ -0,0 +1,105 @@
try:
    from typing import Literal
except ImportError:
    from typing_extensions import Literal  # type: ignore

from typing import cast

import av
import cv2
import streamlit as st
from streamlit_webrtc import WebRtcMode, webrtc_streamer

st.markdown(
    """
    Fork one input to multiple outputs with different video filters.
    """
)

VideoFilterType = Literal["noop", "cartoon", "edges", "rotate"]


def make_video_frame_callback(_type: VideoFilterType):
    def callback(frame: av.VideoFrame) -> av.VideoFrame:
        img = frame.to_ndarray(format="bgr24")

        if _type == "noop":
            pass
        elif _type == "cartoon":
            # prepare color
            img_color = cv2.pyrDown(cv2.pyrDown(img))
            for _ in range(6):
                img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
            img_color = cv2.pyrUp(cv2.pyrUp(img_color))

            # prepare edges
            img_edges = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img_edges = cv2.adaptiveThreshold(
                cv2.medianBlur(img_edges, 7),
                255,
                cv2.ADAPTIVE_THRESH_MEAN_C,
                cv2.THRESH_BINARY,
                9,
                2,
            )
            img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)

            # combine color and edges
            img = cv2.bitwise_and(img_color, img_edges)
        elif _type == "edges":
            # perform edge detection
            img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR)
        elif _type == "rotate":
            # rotate image
            rows, cols, _ = img.shape
            M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1)
            img = cv2.warpAffine(img, M, (cols, rows))

        return av.VideoFrame.from_ndarray(img, format="bgr24")

    return callback


COMMON_RTC_CONFIG = {"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]}

st.header("Input")
ctx = webrtc_streamer(
    key="loopback",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration=COMMON_RTC_CONFIG,
    media_stream_constraints={"video": True, "audio": False},
)

st.header("Forked output 1")
filter1_type = st.radio(
    "Select transform type",
    ("noop", "cartoon", "edges", "rotate"),
    key="fork-filter1-type",
)
callback = make_video_frame_callback(cast(VideoFilterType, filter1_type))
webrtc_streamer(
    key="filter1",
    mode=WebRtcMode.RECVONLY,
    video_frame_callback=callback,
    source_video_track=ctx.output_video_track,
    desired_playing_state=ctx.state.playing,
    rtc_configuration=COMMON_RTC_CONFIG,
    media_stream_constraints={"video": True, "audio": False},
)

st.header("Forked output 2")
filter2_type = st.radio(
    "Select transform type",
    ("noop", "cartoon", "edges", "rotate"),
    key="fork-filter2-type",
)
callback = make_video_frame_callback(cast(VideoFilterType, filter2_type))
webrtc_streamer(
    key="filter2",
    mode=WebRtcMode.RECVONLY,
    video_frame_callback=callback,
    source_video_track=ctx.output_video_track,
    desired_playing_state=ctx.state.playing,
    rtc_configuration=COMMON_RTC_CONFIG,
    media_stream_constraints={"video": True, "audio": False},
)
pages/6_mix_multi_inputs.py
ADDED
@@ -0,0 +1,182 @@
import math
from typing import List, cast

try:
    from typing import Literal
except ImportError:
    from typing_extensions import Literal  # type: ignore

import av
import cv2
import numpy as np
import streamlit as st
from streamlit_webrtc import (
    WebRtcMode,
    create_mix_track,
    create_process_track,
    webrtc_streamer,
)

st.markdown(
    """
    Mix multiple inputs with different video filters into one stream.
    """
)

VideoFilterType = Literal["noop", "cartoon", "edges", "rotate"]


def make_video_frame_callback(_type: VideoFilterType):
    def callback(frame: av.VideoFrame) -> av.VideoFrame:
        img = frame.to_ndarray(format="bgr24")

        if _type == "noop":
            pass
        elif _type == "cartoon":
            # prepare color
            img_color = cv2.pyrDown(cv2.pyrDown(img))
            for _ in range(6):
                img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
            img_color = cv2.pyrUp(cv2.pyrUp(img_color))

            # prepare edges
            img_edges = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img_edges = cv2.adaptiveThreshold(
                cv2.medianBlur(img_edges, 7),
                255,
                cv2.ADAPTIVE_THRESH_MEAN_C,
                cv2.THRESH_BINARY,
                9,
                2,
            )
            img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)

            # combine color and edges
            img = cv2.bitwise_and(img_color, img_edges)
        elif _type == "edges":
            # perform edge detection
            img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR)
        elif _type == "rotate":
            # rotate image
            rows, cols, _ = img.shape
            M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1)
            img = cv2.warpAffine(img, M, (cols, rows))

        return av.VideoFrame.from_ndarray(img, format="bgr24")

    return callback


def mixer_callback(frames: List[av.VideoFrame]) -> av.VideoFrame:
    buf_w = 640
    buf_h = 480
    buffer = np.zeros((buf_h, buf_w, 3), dtype=np.uint8)

    n_inputs = len(frames)

    n_cols = math.ceil(math.sqrt(n_inputs))
    n_rows = math.ceil(n_inputs / n_cols)
    grid_w = buf_w // n_cols
    grid_h = buf_h // n_rows

    for i in range(n_inputs):
        frame = frames[i]
        if frame is None:
            continue

        grid_x = (i % n_cols) * grid_w
        grid_y = (i // n_cols) * grid_h

        img = frame.to_ndarray(format="bgr24")
        src_h, src_w = img.shape[0:2]

        aspect_ratio = src_w / src_h

        window_w = min(grid_w, int(grid_h * aspect_ratio))
        window_h = min(grid_h, int(window_w / aspect_ratio))

        window_offset_x = (grid_w - window_w) // 2
        window_offset_y = (grid_h - window_h) // 2

        window_x0 = grid_x + window_offset_x
        window_y0 = grid_y + window_offset_y
        window_x1 = window_x0 + window_w
        window_y1 = window_y0 + window_h

        buffer[window_y0:window_y1, window_x0:window_x1, :] = cv2.resize(
            img, (window_w, window_h)
        )

    new_frame = av.VideoFrame.from_ndarray(buffer, format="bgr24")

    return new_frame


COMMON_RTC_CONFIG = {"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]}

st.header("Input 1")
input1_ctx = webrtc_streamer(
    key="input1_ctx",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration=COMMON_RTC_CONFIG,
    media_stream_constraints={"video": True, "audio": False},
)
filter1_type = st.radio(
    "Select transform type",
    ("noop", "cartoon", "edges", "rotate"),
    key="mix-filter1-type",
)
callback = make_video_frame_callback(cast(VideoFilterType, filter1_type))
input1_video_process_track = None
if input1_ctx.output_video_track:
    input1_video_process_track = create_process_track(
        input_track=input1_ctx.output_video_track,
        frame_callback=callback,
    )

st.header("Input 2")
input2_ctx = webrtc_streamer(
    key="input2_ctx",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration=COMMON_RTC_CONFIG,
    media_stream_constraints={"video": True, "audio": False},
)
filter2_type = st.radio(
    "Select transform type",
    ("noop", "cartoon", "edges", "rotate"),
    key="mix-filter2-type",
)
callback = make_video_frame_callback(cast(VideoFilterType, filter2_type))
input2_video_process_track = None
if input2_ctx.output_video_track:
    input2_video_process_track = create_process_track(
        input_track=input2_ctx.output_video_track, frame_callback=callback
    )

st.header("Input 3 (no filter)")
input3_ctx = webrtc_streamer(
    key="input3_ctx",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration=COMMON_RTC_CONFIG,
    media_stream_constraints={"video": True, "audio": False},
)

st.header("Mixed output")
mix_track = create_mix_track(kind="video", mixer_callback=mixer_callback, key="mix")
mix_ctx = webrtc_streamer(
    key="mix",
    mode=WebRtcMode.RECVONLY,
    rtc_configuration=COMMON_RTC_CONFIG,
    source_video_track=mix_track,
    desired_playing_state=input1_ctx.state.playing
    or input2_ctx.state.playing
    or input3_ctx.state.playing,
)

if mix_ctx.source_video_track and input1_video_process_track:
    mix_ctx.source_video_track.add_input_track(input1_video_process_track)
if mix_ctx.source_video_track and input2_video_process_track:
    mix_ctx.source_video_track.add_input_track(input2_video_process_track)
if mix_ctx.source_video_track and input3_ctx.output_video_track:
    # Input3 is sourced without any filter.
    mix_ctx.source_video_track.add_input_track(input3_ctx.output_video_track)
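The tiling math in mixer_callback is worth seeing in isolation (an editor's sketch, not part of the commit): n inputs are laid out on a ceil(sqrt(n)) by ceil(n / n_cols) grid inside the fixed 640x480 output buffer.

import math

# Not part of the commit: the grid-layout arithmetic from mixer_callback.
buf_w, buf_h = 640, 480
for n_inputs in (1, 2, 3, 4):
    n_cols = math.ceil(math.sqrt(n_inputs))
    n_rows = math.ceil(n_inputs / n_cols)
    print(
        f"{n_inputs} input(s): {n_cols}x{n_rows} grid, "
        f"cells of {buf_w // n_cols}x{buf_h // n_rows}"
    )
# 1 -> 1x1 (640x480), 2 -> 2x1 (320x480), 3 and 4 -> 2x2 (320x240)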
pages/7_record.py
ADDED
@@ -0,0 +1,65 @@
import uuid
from pathlib import Path

import av
import cv2
import streamlit as st
from aiortc.contrib.media import MediaRecorder
from streamlit_webrtc import WebRtcMode, webrtc_streamer


def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
    img = frame.to_ndarray(format="bgr24")

    # perform edge detection
    img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR)

    return av.VideoFrame.from_ndarray(img, format="bgr24")


RECORD_DIR = Path("./records")
RECORD_DIR.mkdir(exist_ok=True)


def app():
    if "prefix" not in st.session_state:
        st.session_state["prefix"] = str(uuid.uuid4())
    prefix = st.session_state["prefix"]
    in_file = RECORD_DIR / f"{prefix}_input.flv"
    out_file = RECORD_DIR / f"{prefix}_output.flv"

    def in_recorder_factory() -> MediaRecorder:
        return MediaRecorder(
            str(in_file), format="flv"
        )  # HLS does not work. See https://github.com/aiortc/aiortc/issues/331

    def out_recorder_factory() -> MediaRecorder:
        return MediaRecorder(str(out_file), format="flv")

    webrtc_streamer(
        key="record",
        mode=WebRtcMode.SENDRECV,
        rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
        media_stream_constraints={
            "video": True,
            "audio": True,
        },
        video_frame_callback=video_frame_callback,
        in_recorder_factory=in_recorder_factory,
        out_recorder_factory=out_recorder_factory,
    )

    if in_file.exists():
        with in_file.open("rb") as f:
            st.download_button(
                "Download the recorded video without video filter", f, "input.flv"
            )
    if out_file.exists():
        with out_file.open("rb") as f:
            st.download_button(
                "Download the recorded video with video filter", f, "output.flv"
            )


if __name__ == "__main__":
    app()
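The per-session naming scheme above also works standalone (an editor's sketch, not part of the commit): one UUID prefix per browser session keeps concurrent users' recordings from overwriting each other in the shared ./records directory.

import uuid
from pathlib import Path

# Not part of the commit: the per-session file naming used in app().
RECORD_DIR = Path("./records")
prefix = str(uuid.uuid4())
in_file = RECORD_DIR / f"{prefix}_input.flv"
out_file = RECORD_DIR / f"{prefix}_output.flv"
print(in_file.name, out_file.name)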
pages/8_media_files_streaming.py
ADDED
@@ -0,0 +1,128 @@
"""Media streaming"""
import logging
from pathlib import Path
from typing import Dict, Optional, cast

import av
import cv2
import streamlit as st
from aiortc.contrib.media import MediaPlayer
from streamlit_webrtc import WebRtcMode, WebRtcStreamerContext, webrtc_streamer

from sample_utils.download import download_file

HERE = Path(__file__).parent
ROOT = HERE.parent

logger = logging.getLogger(__name__)


MEDIAFILES: Dict[str, Dict] = {
    "big_buck_bunny_720p_2mb.mp4 (local)": {
        "url": "https://sample-videos.com/video123/mp4/720/big_buck_bunny_720p_2mb.mp4",  # noqa: E501
        "local_file_path": ROOT / "data/big_buck_bunny_720p_2mb.mp4",
        "type": "video",
    },
    "big_buck_bunny_720p_10mb.mp4 (local)": {
        "url": "https://sample-videos.com/video123/mp4/720/big_buck_bunny_720p_10mb.mp4",  # noqa: E501
        "local_file_path": ROOT / "data/big_buck_bunny_720p_10mb.mp4",
        "type": "video",
    },
    "file_example_MP3_700KB.mp3 (local)": {
        "url": "https://file-examples-com.github.io/uploads/2017/11/file_example_MP3_700KB.mp3",  # noqa: E501
        "local_file_path": ROOT / "data/file_example_MP3_700KB.mp3",
        "type": "audio",
    },
    "file_example_MP3_5MG.mp3 (local)": {
        "url": "https://file-examples-com.github.io/uploads/2017/11/file_example_MP3_5MG.mp3",  # noqa: E501
        "local_file_path": ROOT / "data/file_example_MP3_5MG.mp3",
        "type": "audio",
    },
    "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov": {
        "url": "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov",
        "type": "video",
    },
}
media_file_label = st.radio("Select a media source to stream", tuple(MEDIAFILES.keys()))
media_file_info = MEDIAFILES[cast(str, media_file_label)]
if "local_file_path" in media_file_info:
    download_file(media_file_info["url"], media_file_info["local_file_path"])


def create_player():
    if "local_file_path" in media_file_info:
        return MediaPlayer(str(media_file_info["local_file_path"]))
    else:
        return MediaPlayer(media_file_info["url"])

    # NOTE: To stream the video from webcam, use the code below.
    # return MediaPlayer(
    #     "1:none",
    #     format="avfoundation",
    #     options={"framerate": "30", "video_size": "1280x720"},
    # )


key = f"media-streaming-{media_file_label}"
ctx: Optional[WebRtcStreamerContext] = st.session_state.get(key)
if media_file_info["type"] == "video" and ctx and ctx.state.playing:
    _type = st.radio("Select transform type", ("noop", "cartoon", "edges", "rotate"))
else:
    _type = "noop"


def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
    img = frame.to_ndarray(format="bgr24")

    if _type == "noop":
        pass
    elif _type == "cartoon":
        # prepare color
        img_color = cv2.pyrDown(cv2.pyrDown(img))
        for _ in range(6):
            img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
        img_color = cv2.pyrUp(cv2.pyrUp(img_color))

        # prepare edges
        img_edges = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        img_edges = cv2.adaptiveThreshold(
            cv2.medianBlur(img_edges, 7),
            255,
            cv2.ADAPTIVE_THRESH_MEAN_C,
            cv2.THRESH_BINARY,
            9,
            2,
        )
        img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)

        # combine color and edges
        img = cv2.bitwise_and(img_color, img_edges)
    elif _type == "edges":
        # perform edge detection
        img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR)
    elif _type == "rotate":
        # rotate image
        rows, cols, _ = img.shape
        M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1)
        img = cv2.warpAffine(img, M, (cols, rows))

    return av.VideoFrame.from_ndarray(img, format="bgr24")


webrtc_streamer(
    key=key,
    mode=WebRtcMode.RECVONLY,
    rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
    media_stream_constraints={
        "video": media_file_info["type"] == "video",
        "audio": media_file_info["type"] == "audio",
    },
    player_factory=create_player,
    video_frame_callback=video_frame_callback,
)

st.markdown(
    "The video filter in this demo is based on "
    "https://github.com/aiortc/aiortc/blob/2362e6d1f0c730a0f8c387bbea76546775ad2fe8/examples/server/server.py#L34. "  # noqa: E501
    "Many thanks to the project."
)
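A minimal player-factory sketch (an editor's addition, not part of the commit): aiortc's MediaPlayer accepts either a local file path or a network URL, such as the RTSP source already listed in MEDIAFILES above, so a factory for any of those sources is a one-liner.

from aiortc.contrib.media import MediaPlayer

# Not part of the commit: a factory for the RTSP source from MEDIAFILES.
def create_rtsp_player() -> MediaPlayer:
    return MediaPlayer(
        "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"
    )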
pages/9_sendonly_video.py
ADDED
@@ -0,0 +1,34 @@
"""A sample to use WebRTC in sendonly mode to transfer frames
from the browser to the server and to render frames via `st.image`."""

import logging
import queue

import streamlit as st
from streamlit_webrtc import WebRtcMode, webrtc_streamer

logger = logging.getLogger(__name__)


webrtc_ctx = webrtc_streamer(
    key="video-sendonly",
    mode=WebRtcMode.SENDONLY,
    rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
    media_stream_constraints={"video": True},
)

image_place = st.empty()

while True:
    if webrtc_ctx.video_receiver:
        try:
            video_frame = webrtc_ctx.video_receiver.get_frame(timeout=1)
        except queue.Empty:
            logger.warning("Queue is empty. Abort.")
            break

        img_rgb = video_frame.to_ndarray(format="rgb24")
        image_place.image(img_rgb)
    else:
        logger.warning("VideoReceiver is not set. Abort.")
        break
sample_utils/__init__.py
ADDED
File without changes
sample_utils/download.py
ADDED
@@ -0,0 +1,50 @@
import urllib.request
from pathlib import Path

import streamlit as st


# This code is based on https://github.com/streamlit/demo-self-driving/blob/230245391f2dda0cb464008195a470751c01770b/streamlit_app.py#L48  # noqa: E501
def download_file(url, download_to: Path, expected_size=None):
    # Don't download the file twice.
    # (If possible, verify the download using the file length.)
    if download_to.exists():
        if expected_size:
            if download_to.stat().st_size == expected_size:
                return
        else:
            st.info(f"{url} is already downloaded.")
            if not st.button("Download again?"):
                return

    download_to.parent.mkdir(parents=True, exist_ok=True)

    # These are handles to two visual elements to animate.
    weights_warning, progress_bar = None, None
    try:
        weights_warning = st.warning("Downloading %s..." % url)
        progress_bar = st.progress(0)
        with open(download_to, "wb") as output_file:
            with urllib.request.urlopen(url) as response:
                length = int(response.info()["Content-Length"])
                counter = 0.0
                MEGABYTES = 2.0 ** 20.0
                while True:
                    data = response.read(8192)
                    if not data:
                        break
                    counter += len(data)
                    output_file.write(data)

                    # We perform animation by overwriting the elements.
                    weights_warning.warning(
                        "Downloading %s... (%6.2f/%6.2f MB)"
                        % (url, counter / MEGABYTES, length / MEGABYTES)
                    )
                    progress_bar.progress(min(counter / length, 1.0))
    # Finally, we remove these visual elements by calling .empty().
    finally:
        if weights_warning is not None:
            weights_warning.empty()
        if progress_bar is not None:
            progress_bar.empty()
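Usage sketch (an editor's addition; the values are taken from pages/1_object_detection.py above): the file is fetched once and skipped on later runs when its size matches expected_size.

from pathlib import Path

from sample_utils.download import download_file

# Not part of the commit: download_file as called by the object detection page.
download_file(
    "https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.prototxt.txt",  # noqa: E501
    Path("./models/MobileNetSSD_deploy.prototxt.txt"),
    expected_size=29353,
)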