"""Streamlit demo: capture and review images, audio, and video using five Python
libraries (OpenCV, PyAudio, ImageIO, PyAV, MoviePy), with upload, gallery, and history views."""
import streamlit as st
import pandas as pd
from datetime import datetime
import cv2
import pyaudio
import wave
import imageio
import av
import moviepy.editor as mp
import os
import numpy as np
from io import BytesIO

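# Initialize session-state containers for the capture history, ping code, and uploads.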
if 'file_history' not in st.session_state:
    st.session_state['file_history'] = []
if 'ping_code' not in st.session_state:
    st.session_state['ping_code'] = ""
if 'uploaded_files' not in st.session_state:
    st.session_state['uploaded_files'] = []

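# Record a captured/generated file (with timestamp) in the session history shown at the bottom of the page.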
def save_to_history(file_type, file_path):
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    st.session_state['file_history'].append({
        "Timestamp": timestamp,
        "Type": file_type,
        "Path": file_path
    })

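# Sidebar: capture configuration. Note that library_choice, resolution, and fps are
# collected here but not applied by the demo buttons below.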
st.sidebar.header("📸 Configuration")
library_choice = st.sidebar.selectbox(
    "Select Library",
    ["OpenCV", "PyAudio", "ImageIO", "PyAV", "MoviePy"]
)
resolution = st.sidebar.select_slider(
    "Resolution",
    options=["320x240", "640x480", "1280x720"],
    value="640x480"
)
fps = st.sidebar.slider("FPS", 1, 60, 30)

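# Illustrative sketch (not called anywhere in this demo): how the sidebar
# resolution/fps values could be applied to an OpenCV capture. The helper name
# is an assumption, not part of the original app.
def apply_capture_settings(cap):
    width, height = (int(v) for v in resolution.split("x"))
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)    # request frame width
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)  # request frame height
    cap.set(cv2.CAP_PROP_FPS, fps)              # request capture FPS (the driver may ignore this)
    return cap
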
st.sidebar.subheader("🔢 Device Ping Code")
col1, col2, col3, col4 = st.sidebar.columns(4)
with col1:
    digit1 = st.selectbox("D1", [str(i) for i in range(10)], key="d1")
with col2:
    digit2 = st.selectbox("D2", [str(i) for i in range(10)], key="d2")
with col3:
    digit3 = st.selectbox("D3", [str(i) for i in range(10)], key="d3")
with col4:
    digit4 = st.selectbox("D4", [str(i) for i in range(10)], key="d4")
ping_code = digit1 + digit2 + digit3 + digit4
st.session_state['ping_code'] = ping_code
st.sidebar.write(f"Ping Code: {ping_code}")

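# Main page: title plus a walkthrough of each library with runnable examples.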
st.title("📸 Camera & Audio Capture Tool")

st.header("📸 Top Five Python Libraries")

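# OpenCV: grab frames from the default webcam, save a grayscale still, and run Canny edge detection.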
with st.expander("1. 📷 OpenCV"):
    st.write("🎥 *Best Feature*: Real-time image processing and video capture.")
    st.subheader("Top 3 Function Examples")

    st.write("1. `cv2.VideoCapture()` - Capture and display video")
    if st.button("Run VideoCapture", key="opencv_1"):
        cap = cv2.VideoCapture(0)
        frame_placeholder = st.empty()
        for _ in range(50):
            ret, frame = cap.read()
            if ret:
                frame_placeholder.image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        cap.release()

    st.write("2. `cv2.imwrite()` - Save processed image")
    if st.button("Run imwrite", key="opencv_2"):
        cap = cv2.VideoCapture(0)
        ret, frame = cap.read()
        if ret:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            file_path = f"opencv_gray_{datetime.now().strftime('%Y%m%d_%H%M%S')}.jpg"
            cv2.imwrite(file_path, gray)
            save_to_history("Image", file_path)
            st.image(file_path, caption="Grayscale Image")
        cap.release()

    st.write("3. `cv2.Canny()` - Edge detection")
    if st.button("Run Canny", key="opencv_3"):
        cap = cv2.VideoCapture(0)
        ret, frame = cap.read()
        if ret:
            edges = cv2.Canny(frame, 100, 200)
            file_path = f"opencv_edges_{datetime.now().strftime('%Y%m%d_%H%M%S')}.jpg"
            cv2.imwrite(file_path, edges)
            save_to_history("Image", file_path)
            st.image(file_path, caption="Edge Detection")
        cap.release()

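# PyAudio: record a short WAV clip, enumerate audio devices, and read raw stream data.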
with st.expander("2. 🎙️ PyAudio"):
    st.write("🔊 *Best Feature*: Low-level audio input/output control.")
    st.subheader("Top 3 Function Examples")

    st.write("1. `PyAudio.open()` - Record audio")
    if st.button("Run Audio Record", key="pyaudio_1"):
        p = pyaudio.PyAudio()
        stream = p.open(format=pyaudio.paInt16, channels=1, rate=44100, input=True, frames_per_buffer=1024)
        frames = [stream.read(1024) for _ in range(int(44100 / 1024 * 3))]
        stream.stop_stream()
        stream.close()
        p.terminate()
        file_path = f"pyaudio_rec_{datetime.now().strftime('%Y%m%d_%H%M%S')}.wav"
        with wave.open(file_path, 'wb') as wf:
            wf.setnchannels(1)
            wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
            wf.setframerate(44100)
            wf.writeframes(b''.join(frames))
        save_to_history("Audio", file_path)
        st.audio(file_path)

    st.write("2. `PyAudio.get_device_info_by_index()` - List audio devices")
    if st.button("Run Device Info", key="pyaudio_2"):
        p = pyaudio.PyAudio()
        devices = [p.get_device_info_by_index(i) for i in range(p.get_device_count())]
        st.write("Available Audio Devices:", devices)
        p.terminate()

    st.write("3. `stream.read()` - Real-time audio data")
    if st.button("Run Stream Read", key="pyaudio_3"):
        p = pyaudio.PyAudio()
        stream = p.open(format=pyaudio.paInt16, channels=1, rate=44100, input=True, frames_per_buffer=1024)
        data = stream.read(1024)
        st.write("Sample audio data length:", len(data))
        stream.stop_stream()
        stream.close()
        p.terminate()

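# ImageIO: read webcam frames via the '<video0>' source (uses the FFmpeg plugin, so
# imageio-ffmpeg must be installed), save stills, and build a short GIF.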
with st.expander("3. 📹 ImageIO"):
    st.write("🎥 *Best Feature*: Simple and efficient image/video I/O.")
    st.subheader("Top 3 Function Examples")

    st.write("1. `imageio.get_reader()` - Read webcam frames")
    if st.button("Run get_reader", key="imageio_1"):
        reader = imageio.get_reader('<video0>')
        frame = reader.get_next_data()
        reader.close()  # release the webcam for the other examples
        file_path = f"imageio_frame_{datetime.now().strftime('%Y%m%d_%H%M%S')}.jpg"
        imageio.imwrite(file_path, frame)
        save_to_history("Image", file_path)
        st.image(file_path)

    st.write("2. `imageio.imwrite()` - Save image with compression")
    if st.button("Run imwrite", key="imageio_2"):
        reader = imageio.get_reader('<video0>')
        frame = reader.get_next_data()
        reader.close()
        file_path = f"imageio_comp_{datetime.now().strftime('%Y%m%d_%H%M%S')}.jpg"
        imageio.imwrite(file_path, frame, quality=85)
        save_to_history("Image", file_path)
        st.image(file_path, caption="Compressed Image")

    st.write("3. `imageio.mimwrite()` - Create GIF")
    if st.button("Run mimwrite", key="imageio_3"):
        reader = imageio.get_reader('<video0>')
        frames = [reader.get_next_data() for _ in range(10)]
        reader.close()
        file_path = f"imageio_gif_{datetime.now().strftime('%Y%m%d_%H%M%S')}.gif"
        imageio.mimwrite(file_path, frames, fps=5)
        save_to_history("GIF", file_path)
        st.image(file_path, caption="GIF")

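# PyAV: the examples below open Linux-style devices ('/dev/video0' via v4l2, ALSA for
# audio capture) and need different device names/formats on other platforms, e.g.
# (device names are machine-specific assumptions):
#   av.open('0', format='avfoundation')              # macOS webcam
#   av.open('video=<camera name>', format='dshow')   # Windows webcam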
with st.expander("4. 🎬 PyAV"):
    st.write("🎥 *Best Feature*: Powerful FFmpeg-based AV processing.")
    st.subheader("Top 3 Function Examples")

    st.write("1. `av.open()` - Capture video")
    if st.button("Run av.open", key="pyav_1"):
        container = av.open('/dev/video0', format='v4l2')
        stream = container.streams.video[0]
        file_path = f"pyav_vid_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4"
        output = av.open(file_path, 'w')
        out_stream = output.add_stream('h264', rate=30)
        # The output stream needs explicit dimensions and pixel format before encoding.
        out_stream.width = stream.width
        out_stream.height = stream.height
        out_stream.pix_fmt = 'yuv420p'
        for i, frame in enumerate(container.decode(stream)):
            if i > 30:
                break
            out_frame = frame.reformat(out_stream.width, out_stream.height, 'yuv420p')
            output.mux(out_stream.encode(out_frame))
        output.mux(out_stream.encode())  # flush the encoder
        output.close()
        container.close()
        save_to_history("Video", file_path)
        st.video(file_path)

    st.write("2. `container.decode()` - Extract audio")
    if st.button("Run decode", key="pyav_2"):
        # A video device has no audio track, so capture from the default ALSA input
        # instead (Linux-specific). Depending on the device's native sample format,
        # an av.AudioResampler may be needed before encoding.
        container = av.open('default', format='alsa')
        in_stream = container.streams.audio[0]
        file_path = f"pyav_audio_{datetime.now().strftime('%Y%m%d_%H%M%S')}.wav"
        output = av.open(file_path, 'w')
        out_stream = output.add_stream('pcm_s16le', rate=in_stream.rate)
        for i, frame in enumerate(container.decode(in_stream)):
            if i > 200:
                break
            output.mux(out_stream.encode(frame))
        output.mux(out_stream.encode())  # flush the encoder
        output.close()
        container.close()
        save_to_history("Audio", file_path)
        st.audio(file_path)

    st.write("3. `av.filter.Graph()` - Apply video filter")
    if st.button("Run filter", key="pyav_3"):
        container = av.open('/dev/video0', format='v4l2')
        stream = container.streams.video[0]
        file_path = f"pyav_filter_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4"
        output = av.open(file_path, 'w')
        out_stream = output.add_stream('h264', rate=30)
        out_stream.width = stream.width
        out_stream.height = stream.height
        out_stream.pix_fmt = 'yuv420p'
        # Build and configure a buffer -> negate -> buffersink filter graph.
        graph = av.filter.Graph()
        src = graph.add_buffer(template=stream)
        neg = graph.add("negate")
        sink = graph.add("buffersink")
        src.link_to(neg)
        neg.link_to(sink)
        graph.configure()
        for i, frame in enumerate(container.decode(stream)):
            if i > 30:
                break
            graph.push(frame)
            out_frame = graph.pull()
            output.mux(out_stream.encode(out_frame.reformat(out_stream.width, out_stream.height, 'yuv420p')))
        output.mux(out_stream.encode())  # flush the encoder
        output.close()
        container.close()
        save_to_history("Video", file_path)
        st.video(file_path)
        st.caption("Negated Video")

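# MoviePy: build clips from captured frames, resize a clip, and concatenate clips.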
with st.expander("5. 📼 MoviePy"):
    st.write("🎥 *Best Feature*: High-level video editing.")
    st.subheader("Top 3 Function Examples")

    st.write("1. `ImageSequenceClip()` - Video from frames")
    if st.button("Run ImageSequenceClip", key="moviepy_1"):
        cap = cv2.VideoCapture(0)
        frames = [cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2RGB) for _ in range(30)]
        cap.release()
        file_path = f"moviepy_seq_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4"
        clip = mp.ImageSequenceClip(frames, fps=15)
        clip.write_videofile(file_path)
        save_to_history("Video", file_path)
        st.video(file_path)

    st.write("2. `VideoFileClip()` - Load and resize video")
    if st.button("Run VideoFileClip", key="moviepy_2"):
        cap = cv2.VideoCapture(0)
        frames = [cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2RGB) for _ in range(30)]
        cap.release()
        temp_path = "temp.mp4"
        mp.ImageSequenceClip(frames, fps=15).write_videofile(temp_path)
        clip = mp.VideoFileClip(temp_path).resize((320, 240))
        file_path = f"moviepy_resized_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4"
        clip.write_videofile(file_path)
        save_to_history("Video", file_path)
        st.video(file_path)
        st.caption("Resized Video")

    st.write("3. `concatenate_videoclips()` - Combine clips")
    if st.button("Run concatenate", key="moviepy_3"):
        cap = cv2.VideoCapture(0)
        frames1 = [cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2RGB) for _ in range(15)]
        frames2 = [cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2RGB) for _ in range(15)]
        cap.release()
        clip1 = mp.ImageSequenceClip(frames1, fps=15)
        clip2 = mp.ImageSequenceClip(frames2, fps=15)
        final_clip = mp.concatenate_videoclips([clip1, clip2])
        file_path = f"moviepy_concat_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4"
        final_clip.write_videofile(file_path)
        save_to_history("Video", file_path)
        st.video(file_path)
        st.caption("Concatenated Video")

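# Upload handler: persist uploaded files to disk and track them in session state.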
st.header("📁 Upload Media Files")
uploaded_files = st.file_uploader("Upload Images, Audio, or Video", accept_multiple_files=True, type=['jpg', 'png', 'mp4', 'wav', 'mp3'])
if uploaded_files:
    for uploaded_file in uploaded_files:
        file_type = uploaded_file.type.split('/')[0]
        file_path = f"uploaded_{uploaded_file.name}"
        with open(file_path, 'wb') as f:
            f.write(uploaded_file.read())
        # Avoid duplicate gallery entries when Streamlit reruns the script.
        if not any(f['Path'] == file_path for f in st.session_state['uploaded_files']):
            st.session_state['uploaded_files'].append({
                "Name": uploaded_file.name,
                "Type": file_type,
                "Path": file_path
            })

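# Gallery: group uploads by type and render images, audio, and video.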
st.header("🖼️ Media Gallery")
if st.session_state['uploaded_files']:
    images = [f for f in st.session_state['uploaded_files'] if f['Type'] == 'image']
    audios = [f for f in st.session_state['uploaded_files'] if f['Type'] == 'audio']
    videos = [f for f in st.session_state['uploaded_files'] if f['Type'] == 'video']

    if images:
        st.subheader("Images")
        cols = st.columns(3)
        for i, img in enumerate(images):
            with cols[i % 3]:
                st.image(img['Path'], caption=img['Name'], use_column_width=True)

    if audios:
        st.subheader("Audio")
        for audio in audios:
            st.audio(audio['Path'], format=f"audio/{audio['Name'].split('.')[-1]}")
            st.write(f"Name: {audio['Name']}")

    if videos:
        st.subheader("Videos")
        for video in videos:
            st.video(video['Path'])
            st.write(f"Name: {video['Name']}")
else:
    st.write("No media uploaded yet.")

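# History table of everything captured or generated in this session.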
st.header("📋 File History")
if st.session_state['file_history']:
    df = pd.DataFrame(st.session_state['file_history'])
    st.dataframe(df)
else:
    st.write("No files captured yet.")