Spaces: wissemkarous (Sleeping)
Commit 975577a, committed by wissemkarous
1 Parent(s): 2a24b35
Commit message: update

app.py CHANGED
@@ -62,85 +62,8 @@
 
 ###########################
 
-# import streamlit as st
-# import os
-# from utils.demo import load_video, ctc_decode
-# from utils.two_stream_infer import load_model
-# from scripts.extract_lip_coordinates import generate_lip_coordinates
-# import options as opt
-
-# st.set_page_config(layout="wide")
-
-# model = load_model()
-
-# st.title("Lipreading final year project Demo")
-
-# st.info(
-#     "The inference speed is very slow on Huggingface spaces due to it being processed entirely on CPU ",
-#     icon="ℹ️",
-# )
-
-# # Generating a list of options or videos
-# options = sorted(os.listdir(os.path.join("app_input")))  # Ensure the list is sorted
-# selected_video = st.selectbox("Choose video", options)
-
-# # Find the index of the selected video and calculate the index of the next video
-# selected_index = options.index(selected_video)
-# next_video_index = (selected_index + 1) % len(options)  # Ensures looping back to start
-# next_video = options[next_video_index]
-
-# col1, col2 = st.columns(2)
-
-# # Function to display video in a column
-# def display_video(column, video_path, video_name):
-#     os.system(f"ffmpeg -i {video_path} -vcodec libx264 {video_name}.mp4 -y")
-#     video = open(f"{video_name}.mp4", "rb")
-#     video_bytes = video.read()
-#     column.video(video_bytes)
-
-# # Displaying the selected video in the first column
-# with col1:
-#     file_path = os.path.join("app_input", selected_video)
-#     video_name = selected_video.split(".")[0]
-#     display_video(col1, file_path, video_name)
-# # Displaying the next video in the second column
-# with col2:
-#     st.info("Expected Result !")
-#     next_file_path = os.path.join("app_input", next_video)
-#     next_video_name = next_video.split(".")[0]
-#     display_video(col2, next_file_path, next_video_name)
-
-
-# # Assuming further processing (like generating predictions) is only intended for the first (selected) video
-# with col1, st.spinner("Processing video..."):
-#     video, img_p, files = load_video(f"{video_name}.mp4", opt.device)
-#     coordinates = generate_lip_coordinates(f"{video_name}_samples")
-#     # Assuming 'frames_generated' and 'coordinates_generated' are used for control flow or further processing
-#     frames_generated = True
-#     coordinates_generated = True
-#     if frames_generated and coordinates_generated:
-#         st.markdown(f"Frames Generated for {video_name}:\n{files}")
-#         st.markdown(f"Coordinates Generated for {video_name}:\n{coordinates}")
-
-# with col2:
-#     st.info("Ready to make prediction!")
-#     generate = st.button("Generate")
-#     if generate:
-#         with st.spinner("Generating..."):
-#             y = model(
-#                 video[None, ...].to(opt.device),
-#                 coordinates[None, ...].to(opt.device),
-#             )
-#             txt = ctc_decode(y[0])
-#             st.text(txt[-1])
-
-# st.info("Author ©️ : Wissem Karous ")
-# st.info("Made with ❤️")
-##################
-
 import streamlit as st
 import os
-import cv2
 from utils.demo import load_video, ctc_decode
 from utils.two_stream_infer import load_model
 from scripts.extract_lip_coordinates import generate_lip_coordinates
@@ -150,7 +73,7 @@ st.set_page_config(layout="wide")
 
 model = load_model()
 
-st.title("Lipreading
+st.title("Lipreading final year project Demo")
 
 st.info(
     "The inference speed is very slow on Huggingface spaces due to it being processed entirely on CPU ",
@@ -168,27 +91,18 @@ next_video = options[next_video_index]
 
 col1, col2 = st.columns(2)
 
-# Function to display video in a column
+# Function to display video in a column
 def display_video(column, video_path, video_name):
-
-
-
-
-    new_height = int((new_width / width) * height)
-    while cap.isOpened():
-        ret, frame = cap.read()
-        if not ret:
-            break
-        frame = cv2.resize(frame, (new_width, new_height))
-        column.image(frame, channels="BGR")
-    cap.release()
+    os.system(f"ffmpeg -i {video_path} -vcodec libx264 {video_name}.mp4 -y")
+    video = open(f"{video_name}.mp4", "rb")
+    video_bytes = video.read()
+    column.video(video_bytes)
 
 # Displaying the selected video in the first column
 with col1:
     file_path = os.path.join("app_input", selected_video)
     video_name = selected_video.split(".")[0]
     display_video(col1, file_path, video_name)
-
 # Displaying the next video in the second column
 with col2:
     st.info("Expected Result !")
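Note: a standalone sketch of the new display_video helper introduced in the hunk above, as read from this diff (it assumes ffmpeg is installed on the Space, as the previously commented-out version already did; the context manager around the file read is my addition):

import os
import streamlit as st

def display_video(column, video_path, video_name):
    # Transcode the selected clip to H.264 MP4 so the browser player can handle it.
    os.system(f"ffmpeg -i {video_path} -vcodec libx264 {video_name}.mp4 -y")
    # Hand the raw bytes of the transcoded file to Streamlit's built-in video widget.
    with open(f"{video_name}.mp4", "rb") as f:
        column.video(f.read())

Compared with the removed cv2 loop, which resized and pushed frames one by one through column.image, this delegates playback to the browser and drops the OpenCV dependency, which matches the removal of "import cv2" in the first hunk.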
@@ -196,6 +110,7 @@ with col2:
     next_video_name = next_video.split(".")[0]
     display_video(col2, next_file_path, next_video_name)
 
+
 # Assuming further processing (like generating predictions) is only intended for the first (selected) video
 with col1, st.spinner("Processing video..."):
     video, img_p, files = load_video(f"{video_name}.mp4", opt.device)
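Note: for context on the processing block shown in the hunk above, a hedged sketch of the inference path as it appears in app.py (the load_video, generate_lip_coordinates, model(...) and ctc_decode calls and their arguments are copied from this diff; the clip name and the surrounding glue are illustrative assumptions only):

import streamlit as st
import options as opt
from utils.demo import load_video, ctc_decode
from utils.two_stream_infer import load_model
from scripts.extract_lip_coordinates import generate_lip_coordinates

model = load_model()
video_name = "sample"  # hypothetical clip stem; the app derives it from the selectbox choice

# Frame tensor and per-frame lip landmarks for the selected clip.
video, img_p, files = load_video(f"{video_name}.mp4", opt.device)
coordinates = generate_lip_coordinates(f"{video_name}_samples")

# Two-stream forward pass followed by CTC decoding of the output.
y = model(
    video[None, ...].to(opt.device),
    coordinates[None, ...].to(opt.device),
)
txt = ctc_decode(y[0])
st.text(txt[-1])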
@@ -221,4 +136,6 @@ with col2:
 
 st.info("Author ©️ : Wissem Karous ")
 st.info("Made with ❤️")
+##################
+
 