# demo-app/app.py
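"""MoilDev demo app.

Streamlit front end for the Moildev library: upload a camera-parameter JSON
and a fisheye image (or open a WebRTC stream) and remap it to an "anypoint"
view controlled by the Alpha/Beta/Zoom sliders, with optional brightness
adjustment.
"""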
import threading

import cv2
import numpy as np
import streamlit as st
from Moildev import Moildev
from PIL import Image
from streamlit_webrtc import webrtc_streamer
class demo_app:
    def __init__(self):
        # A camera-parameter JSON uploaded in 'image' mode replaces this
        # default Moildev instance.
        # self.dev = 'Raspi_Cam.json'
        self.moildev = Moildev()

    def brighten_image(self, image, amount):
        # Shift pixel intensities by `amount` (clipped to the valid range).
        img_bright = cv2.convertScaleAbs(image, beta=amount)
        return img_bright

    def anypoint_image(self, image, alpha, beta, zoom, mode):
        # Remap the fisheye image to an "anypoint" view at the given angles and zoom.
        any_image = self.moildev.anypoint(image, alpha, beta, zoom, mode)
        return any_image
    def main_loop(self):
        st.title("MoilDev Demo App")
        st.subheader("This app allows you to play with image filters!")
        st.text("We use OpenCV and Streamlit for this demo")

        # radio button to choose the input source
        source = st.sidebar.radio('Sources:', ('image', 'video', 'stream'))
        if source == 'image':
            # sliders
            zoom = st.sidebar.slider("Zoom", min_value=1.0, max_value=3.5, value=1.2)
            alpha = st.sidebar.slider("Alpha", min_value=0.0, max_value=180.0, value=60.0)
            beta = st.sidebar.slider("Beta", min_value=0.0, max_value=180.0, value=60.0)
            brightness_amount = st.sidebar.slider("Brightness", min_value=-50, max_value=50, value=0)
            # checkbox
            mode_filter = st.sidebar.checkbox('Car Mode')
            # camera-parameter JSON
            json_file = st.file_uploader("Upload Your Parameter", type=['json'])
            if not json_file:
                return None
            # Note: only the uploaded file's name is passed on, so the parameter
            # file must also exist under that name in the app's working directory.
            dev = json_file.name
            self.moildev = Moildev(dev)
            image_file = st.file_uploader("Upload Your Image", type=['jpg', 'png', 'jpeg'])
            if not image_file:
                return None
            original_image = Image.open(image_file)
            original_image = np.array(original_image)
            # mode 1 when 'Car Mode' is checked, mode 2 otherwise
            if mode_filter:
                processed_image = self.anypoint_image(original_image, alpha=alpha, beta=beta, zoom=zoom, mode=1)
            else:
                processed_image = self.anypoint_image(original_image, alpha=alpha, beta=beta, zoom=zoom, mode=2)
            processed_image = self.brighten_image(processed_image, brightness_amount)
            st.text("Original Image vs Processed Image")
            st.image([original_image, processed_image])
        elif source == 'video':
            # sliders (video processing itself is not implemented yet;
            # only the controls are shown for this source)
            zoom = st.sidebar.slider("Zoom", min_value=1.0, max_value=3.5, value=1.2)
            alpha = st.sidebar.slider("Alpha", min_value=0.0, max_value=180.0, value=60.0)
            beta = st.sidebar.slider("Beta", min_value=0.0, max_value=180.0, value=60.0)
            brightness_amount = st.sidebar.slider("Brightness", min_value=-50, max_value=50, value=0)
            # checkbox
            mode_filter = st.sidebar.checkbox('Car Mode')
        elif source == 'stream':
            # sliders
            zoom = st.sidebar.slider("Zoom", min_value=1.0, max_value=3.5, value=1.2)
            alpha = st.sidebar.slider("Alpha", min_value=0.0, max_value=360.0, value=60.0)
            beta = st.sidebar.slider("Beta", min_value=0.0, max_value=360.0, value=60.0)
            # checkbox
            mode_filter = st.sidebar.checkbox('Car Mode')

            # The WebRTC callback runs in a separate thread, so the latest frame
            # is handed to the main loop through a lock-protected container.
            lock = threading.Lock()
            img_container = {"img": None}

            def video_frame_callback(frame):
                img = frame.to_ndarray(format="bgr24")
                with lock:
                    img_container["img"] = img
                return frame

            ctx = webrtc_streamer(key="example", video_frame_callback=video_frame_callback)
            vsn_place = st.empty()
            while ctx.state.playing:
                cv2.waitKey(10)  # small delay between frames
                with lock:
                    img = img_container["img"]
                if img is None:
                    continue
                if mode_filter:
                    processed_image = self.anypoint_image(img, alpha=alpha, beta=beta, zoom=zoom, mode=1)
                else:
                    processed_image = self.anypoint_image(img, alpha=alpha, beta=beta, zoom=zoom, mode=2)
                # frames arrive in OpenCV's BGR order, so tell Streamlit explicitly
                vsn_place.image(processed_image, channels="BGR")
if __name__ == '__main__':
    da = demo_app()
    da.main_loop()
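# To try the demo locally (assuming streamlit, streamlit-webrtc, opencv-python,
# numpy, Pillow, and the Moildev package are installed):
#   streamlit run app.py
# In 'image' mode, upload the camera-parameter .json first, then the fisheye image.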