# Hugging Face Space: builds a slideshow video from an uploaded MP3 and a set of images.
import gradio as gr | |
import moviepy.editor as mp | |
import numpy as np | |
from PIL import Image | |
import os | |
# Resize image while maintaining aspect ratio
def resize_image_with_aspect_ratio(img, target_size=(1280, 720), padding_color=(0, 0, 0)):
    """Letterbox *img* into *target_size* without distorting its aspect ratio.

    The image is scaled to the largest size that fits inside *target_size*,
    then centered on a canvas filled with *padding_color*.

    Args:
        img: source PIL.Image (any mode; converted to RGB before resizing).
        target_size: (width, height) of the output frame.
        padding_color: RGB fill for the letterbox bars.

    Returns:
        A new RGB PIL.Image of exactly *target_size*.
    """
    width, height = img.size
    target_width, target_height = target_size
    aspect_ratio = width / height
    target_aspect_ratio = target_width / target_height
    if aspect_ratio > target_aspect_ratio:
        # Image is wider than the target: fit to width.
        new_width = target_width
        # max(1, ...) guards against int() truncating to 0 for extreme ratios,
        # which PIL's resize() would reject.
        new_height = max(1, int(new_width / aspect_ratio))
    else:
        # Image is taller (or equal): fit to height.
        new_height = target_height
        new_width = max(1, int(new_height * aspect_ratio))
    # Convert to RGB first so palette/alpha images composite predictably, and
    # use LANCZOS explicitly for high-quality downscaling in video frames.
    img_resized = img.convert('RGB').resize((new_width, new_height), Image.LANCZOS)
    final_img = Image.new('RGB', target_size, padding_color)
    padding_left = (target_width - new_width) // 2
    padding_top = (target_height - new_height) // 2
    final_img.paste(img_resized, (padding_left, padding_top))
    return final_img
# Video generation function with debug logging
def process_and_generate_video(audio_file, images):
    """Generate a slideshow MP4 from *images* timed to span *audio_file*.

    Each successfully loaded image becomes one clip; the audio duration is
    split evenly across the clips, the clips are concatenated, and the
    result is written to /tmp.

    Args:
        audio_file: path to the uploaded audio file.
        images: list of image file paths.

    Returns:
        The output MP4 path on success, or an "Error: ..." string on failure
        (original contract preserved for the Gradio callback).
    """
    debug_log = []
    try:
        debug_log.append(f"Audio file received: {audio_file}")
        debug_log.append(f"Images received: {images}")
        # Fail early with a clear message instead of a ZeroDivisionError
        # when the upload list is empty or None.
        if not images:
            raise ValueError("No valid images provided for video generation.")
        # Process audio file
        audio = mp.AudioFileClip(audio_file)
        audio_duration = audio.duration
        image_clips = []
        for img_path in images:
            try:
                debug_log.append(f"Processing image: {img_path}")
                # 'with' closes the file handle promptly; PIL keeps it open lazily.
                with Image.open(img_path) as img:
                    resized_img = resize_image_with_aspect_ratio(img, target_size=(1280, 720))
                image_clips.append(mp.ImageClip(np.array(resized_img)).set_fps(24))
            except Exception as img_error:
                debug_log.append(f"Error processing image {img_path}: {str(img_error)}")
        if not image_clips:
            raise ValueError("No valid images provided for video generation.")
        # Split the audio across the clips that actually loaded, so the video
        # still matches the audio length even when some images failed above.
        image_duration = audio_duration / len(image_clips)
        image_clips = [clip.set_duration(image_duration) for clip in image_clips]
        debug_log.append(f"Created {len(image_clips)} image clips.")
        # Concatenate image clips
        video = mp.concatenate_videoclips(image_clips, method="compose")
        video = video.set_audio(audio)
        # Output video path
        output_path = '/tmp/generated_video.mp4'
        video.write_videofile(output_path, codec='libx264', audio_codec='aac')
        debug_log.append(f"Video successfully generated: {output_path}")
        print("\n".join(debug_log))  # Print logs to the console for debugging
        return output_path
    except Exception as e:
        debug_log.append(f"Error: {str(e)}")
        print("\n".join(debug_log))  # Print logs to the console for debugging
        return f"Error: {str(e)}"
# Gradio interface setup
def gradio_interface():
    """Build the Blocks UI (audio + image upload -> video) and launch it."""
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                audio_upload = gr.Audio(type="filepath", label="Upload MP3")
                # Allow additional formats like .jpeg
                image_uploads = gr.File(
                    type="filepath",
                    file_types=[".jpg", ".png", ".jpeg"],
                    label="Upload Images",
                    file_count="multiple",
                )
                run_button = gr.Button("Generate Video")
                video_out = gr.Video(label="Generated Video")
                run_button.click(
                    fn=process_and_generate_video,
                    inputs=[audio_upload, image_uploads],
                    outputs=video_out,
                )
    demo.launch()
# Run the interface only when executed as a script, so importing this module
# (e.g. from tests or tooling) does not launch the UI as a side effect.
if __name__ == "__main__":
    gradio_interface()