Update app.py
app.py CHANGED
@@ -8,15 +8,10 @@ import librosa.display
 import soundfile as sf
 import gradio as gr
 import tempfile
-import logging
-
-# Setup logging
-logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', handlers=[logging.FileHandler("video_processing.log"), logging.StreamHandler()])

 # Function for displaying progress
 def display_progress(percent, message, progress=gr.Progress()):
     progress(percent, desc=message)
-    logging.info(message)

 # Function for extracting audio from video
 def extract_audio(video_path, progress):
@@ -24,16 +19,13 @@ def extract_audio(video_path, progress):
     try:
         video = VideoFileClip(video_path)
         if video.audio is None:
-            raise ValueError("No audio found in the video
+            raise ValueError("No audio found in the video")
         audio_path = "extracted_audio.wav"
         video.audio.write_audiofile(audio_path)
         display_progress(0.2, "Audio extracted", progress)
-        logging.info(f"Audio extracted: {audio_path}")
         return audio_path
     except Exception as e:
-        error_message = f"Failed to extract audio: {e}"
-        display_progress(0.2, error_message, progress)
-        logging.error(error_message)
+        display_progress(0.2, f"Failed to extract audio: {e}", progress)
         return None

 # Function for dividing video into frames
@@ -41,8 +33,6 @@ def extract_frames(video_path, progress):
     display_progress(0.3, "Extracting frames from video", progress)
     try:
         video = cv2.VideoCapture(video_path)
-        if not video.isOpened():
-            raise ValueError("Failed to open video file. Please check if the file is accessible and valid.")
         frames = []
         success, frame = video.read()
         while success:
@@ -50,12 +40,9 @@ def extract_frames(video_path, progress):
             success, frame = video.read()
         video.release()
         display_progress(0.4, "Frames extracted", progress)
-        logging.info(f"Frames extracted: {len(frames)} frames")
         return frames
     except Exception as e:
-        error_message = f"Failed to extract frames: {e}"
-        display_progress(0.4, error_message, progress)
-        logging.error(error_message)
+        display_progress(0.4, f"Failed to extract frames: {e}", progress)
         return None

 # Convert frame to spectrogram
@@ -63,14 +50,12 @@ def frame_to_spectrogram(frame, sr=22050):
     gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
     S = np.flipud(gray_frame.astype(np.float32) / 255.0 * 100.0)
     y = librosa.griffinlim(S)
-    logging.info("Converted frame to spectrogram")
     return y

 # Saving audio
 def save_audio(y, sr=22050):
     audio_path = 'output_frame_audio.wav'
     sf.write(audio_path, y, sr)
-    logging.info(f"Audio saved: {audio_path}")
     return audio_path

 # Saving frame spectrogram
@@ -81,7 +66,6 @@ def save_spectrogram_image(S, frame_number, temp_dir):
     image_path = os.path.join(temp_dir, f'spectrogram_frame_{frame_number}.png')
     plt.savefig(image_path)
     plt.close()
-    logging.info(f"Spectrogram image saved: {image_path}")
     return image_path

 # Processing all video frames
@@ -95,9 +79,7 @@ def process_video_frames(frames, sr=22050, temp_dir=None, progress=gr.Progress()
         processed_frame = cv2.imread(image_path)
         processed_frames.append(processed_frame)
         display_progress(0.5 + int((i + 1) / total_frames * 0.7), f"Frame processing {i + 1}/{total_frames}", progress)
-        logging.info(f"Processed frame {i + 1}/{total_frames}")
     display_progress(0.8, "All frames processed", progress)
-    logging.info("All frames processed")
     return processed_frames

 # Saving video from frames
@@ -107,7 +89,6 @@ def save_video_from_frames(frames, output_path, fps=30):
     for frame in frames:
         video.write(frame)
     video.release()
-    logging.info(f"Video saved: {output_path}")

 # Adding audio back to video
 def add_audio_to_video(video_path, audio_path, output_path, progress):
@@ -118,33 +99,27 @@ def add_audio_to_video(video_path, audio_path, output_path, progress):
         final_video = video.set_audio(audio)
         final_video.write_videofile(output_path, codec='libx264', audio_codec='aac')
         display_progress(1, "Video's ready", progress)
-        logging.info(f"Final video saved: {output_path}")
     except Exception as e:
-        error_message = f"Failed to add audio to video: {e}"
-        display_progress(1, error_message, progress)
-        logging.error(error_message)
+        display_progress(1, f"Failed to add audio to video: {e}", progress)

 # Gradio interface
 def process_video(video_path, progress=gr.Progress()):
     try:
         video = VideoFileClip(video_path)
         if video.duration > 10:
-            logging.info(f"Video longer than 10 seconds, trimming to 10 seconds: {video_path}")
             video = video.subclip(0, 10)
             temp_trimmed_video_path = "trimmed_video.mp4"
             video.write_videofile(temp_trimmed_video_path, codec='libx264')
             video_path = temp_trimmed_video_path
     except Exception as e:
-        error_message = f"Failed to load video: {e}"
-        logging.error(error_message)
-        return error_message
+        return f"Failed to load video: {e}"

     audio_path = extract_audio(video_path, progress)
     if audio_path is None:
-        return "Failed to extract audio from video.
+        return "Failed to extract audio from video."
     frames = extract_frames(video_path, progress)
     if frames is None:
-        return "Failed to extract frames from video.
+        return "Failed to extract frames from video."

     # Creating a temporary folder for saving frames
     with tempfile.TemporaryDirectory() as temp_dir:
@@ -174,18 +149,14 @@ with gr.Blocks(title='Video from Spectrogram', theme=gr.themes.Soft(primary_hue=
             generate_button = gr.Button("Generate")
         with gr.Column(variant='panel'):
             video_output = gr.Video(label="VideoSpectrogram")
-            error_output = gr.HTML()

     def gradio_video_process_fn(video_input, progress=gr.Progress()):
-        result = process_video(video_input, progress)
-        if isinstance(result, str):
-            return "", result  # If there is an error, return the error message
-        return result, ""  # If no error, return the video path and empty error message
+        return process_video(video_input, progress)

     generate_button.click(
         gradio_video_process_fn,
         inputs=[video_input],
-        outputs=[video_output
+        outputs=[video_output]
     )

 iface.launch(share=True)
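
For context on the part of the file this change leaves untouched: frame_to_spectrogram treats each grayscale video frame as a magnitude spectrogram and inverts it to audio with Griffin-Lim, and save_audio writes the result with soundfile. Below is a minimal standalone sketch of that path; the synthetic 257x128 frame is an illustrative stand-in for a real video frame, and the 22050 Hz rate matches the sr=22050 default already used in the app.

# Minimal standalone sketch of the frame -> audio step (not the full app).
import numpy as np
import cv2
import librosa
import soundfile as sf

def frame_to_audio(frame_bgr):
    # Grayscale the frame, scale pixel values into a magnitude range, and flip
    # vertically so the bottom of the image maps to the lowest frequency bin.
    gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
    S = np.flipud(gray.astype(np.float32) / 255.0 * 100.0)
    # Griffin-Lim estimates a phase for the magnitude spectrogram and returns
    # a time-domain signal.
    return librosa.griffinlim(S)

if __name__ == "__main__":
    # Synthetic frame: 257 rows, so Griffin-Lim infers n_fft = 2 * (257 - 1) = 512.
    frame = np.random.randint(0, 256, size=(257, 128, 3), dtype=np.uint8)
    y = frame_to_audio(frame)
    sf.write("output_frame_audio.wav", y, 22050)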