# ai-dreams-x / app.py
# Uploaded by AIVISIONDREAMS via huggingface_hub (revision 32d4b56, 3.47 kB).
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
import time
from moviepy.editor import VideoFileClip, concatenate_videoclips, AudioFileClip
from moviepy.video.fx.all import volumex
import os
import datetime
# --- One-time setup (runs at import) ---

# Stable Diffusion pipeline on CPU.
# NOTE(review): `pipe` is loaded but never used below — generate_video()
# only trims the sample clip. Kept for future use; confirm before removing.
model_id = "CompVis/stable-diffusion-v1-4"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
pipe = pipe.to("cpu")

# Source clip that generate_video() trims to simulate generated output.
# (Opened lazily inside the functions; opening it here leaked a reader
# into a module-level variable that was never used.)
sample_video_path = "sample_video.mp4"

# Where finished videos are written; created if missing.
output_directory = os.path.expanduser("~/Desktop/AI DREAMS & VISIONS/")
os.makedirs(output_directory, exist_ok=True)
# Function to generate video with visualizer
def generate_video(prompt, duration=10, frame_rate=24):
    """Simulate video generation by trimming the sample clip.

    Args:
        prompt: Text prompt; sanitized and used as the output filename.
        duration: Desired length in seconds; clamped to the sample clip's
            actual length so subclip() cannot run past the end.
        frame_rate: Frames per second for the written file.

    Returns:
        Tuple of (output_video_path, elapsed_time_str) where the second
        element is the wall-clock render time formatted as H:MM:SS.
    """
    start_time = time.time()
    # Sanitize the prompt into a safe filename; fall back for empty prompts.
    safe_name = prompt.strip().replace(' ', '_').replace(os.sep, '_') or "video"
    output_video_path = os.path.join(output_directory, f"{safe_name}.mp4")
    source_clip = VideoFileClip(sample_video_path)
    try:
        # Clamp so a duration longer than the sample does not raise.
        clip = source_clip.subclip(0, min(duration, source_clip.duration))
        audio_clip = clip.audio
        # Placeholder "visualizer": halve the clip's volume.
        visualizer = volumex(clip, 0.5)
        # NOTE(review): restoring the original audio here undoes the volumex
        # effect — kept as-is to preserve existing output; confirm intent.
        final_clip = visualizer.set_audio(audio_clip)
        final_clip.write_videofile(output_video_path, fps=frame_rate)
    finally:
        source_clip.close()
    time_taken = time.time() - start_time
    estimated_time = str(datetime.timedelta(seconds=int(time_taken)))
    return output_video_path, estimated_time
# Function to upload music and sync with video
def sync_music_to_video(video_path, music_path):
    """Replace a video's audio track with an uploaded music file.

    Args:
        video_path: Path to an existing .mp4 file.
        music_path: Path to the uploaded audio file.

    Returns:
        Path of the newly written '<video>_synced.mp4' file.
    """
    video_clip = VideoFileClip(video_path)
    audio_clip = AudioFileClip(music_path)
    try:
        # Trim the music to the video's length, but never past the end of
        # the music itself — subclip beyond the track fails on short uploads.
        end = min(video_clip.duration, audio_clip.duration)
        synced_audio_clip = audio_clip.subclip(0, end)
        # Apply the music to the video.
        final_clip = video_clip.set_audio(synced_audio_clip)
        synced_video_path = video_path.replace('.mp4', '_synced.mp4')
        final_clip.write_videofile(synced_video_path)
    finally:
        video_clip.close()
        audio_clip.close()
    return synced_video_path
# Define the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# AI DREAMS X Video Generator")
    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(label="Text Prompt")
            duration_input = gr.Slider(minimum=1, maximum=60, step=1, label="Duration (seconds)", value=10)
            frame_rate_input = gr.Slider(minimum=1, maximum=60, step=1, label="Frame Rate (fps)", value=24)
            music_upload = gr.File(label="Upload Music File")
            generate_button = gr.Button("Generate Video")
        with gr.Column():
            output_video = gr.Video(label="Generated Video")
            download_link = gr.File(label="Download Video")
            estimated_time = gr.Textbox(label="Estimated Time of Completion")

    def generate_and_display(prompt, duration, frame_rate, music_file):
        """Click handler: render the video, optionally swap in uploaded music."""
        # Local renamed from `estimated_time` to avoid shadowing the Textbox.
        video_path, time_taken = generate_video(prompt, duration, frame_rate)
        if music_file:  # gr.File yields a tempfile-like object with .name
            video_path = sync_music_to_video(video_path, music_file.name)
        # Same file feeds both the player and the download link.
        return video_path, video_path, time_taken

    generate_button.click(
        generate_and_display,
        inputs=[text_input, duration_input, frame_rate_input, music_upload],
        outputs=[output_video, download_link, estimated_time],
    )
    gr.Markdown("[Contact Us](mailto:aidreams@aidreams.company) | [Follow @TheKingofJewelz](https://x.com/TheKingofJewelz)")

# Guard the launch so importing this module (e.g. by a Space runner or tests)
# does not start the server as a side effect.
if __name__ == "__main__":
    demo.launch(share=True, server_name="0.0.0.0", server_port=7863)