"""
project @ images_to_video
created @ 2024-12-17
author @ github.com/ishworrsubedii
"""
import os
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.VideoClip import ImageClip, ColorClip, TextClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
from moviepy.video.compositing.concatenate import concatenate_videoclips
from moviepy.video.compositing.transitions import slide_in, crossfadein, make_loopable, crossfadeout, slide_out
from moviepy.video.fx.resize import resize
from moviepy.video.io.VideoFileClip import VideoFileClip
class EachVideoCreator:
    """Assemble a 1080x1080 promotional video for a set of necklaces.

    The final video is built by concatenating, in order: an optional intro
    video, per-necklace preview slides (one per configured background color),
    the grouped try-on output images (NTO, CTO, makeup), and an optional
    outro video.  A single transition type can be applied to every clip, and
    a background audio track can be mixed in.  Output is H.264/AAC.
    """

    def __init__(self, necklace_title, backgrounds: list[tuple], nto_title, cto_title, makeup_title,
                 intro_video_path=None, necklace_image=None,
                 nto_outputs=None,
                 nto_cto_outputs=None, makeup_outputs=None, font_path=None, output_path=None,
                 audio_path=None, image_display_duration=2.5, box_color=(131, 42, 48), box_opacity=0.8,
                 font_size=28, text_color="white", fps=1, outro_title=None, logo_image=None, address=None,
                 phone_numbers=None, transition_duration=0.5, transition_type='None', direction='left',
                 outro_video_path: str = ""):
        """Store all configuration for the render.

        Args:
            necklace_title: Per-necklace label list, indexed alongside
                ``necklace_image``.
            backgrounds: List of RGB tuples; one preview slide is created per
                background color for each necklace.
            nto_title / cto_title / makeup_title: Nested label lists,
                ``titles[necklace_idx][image_idx]``.
            intro_video_path / outro_video_path: Optional video files played
                before / after the slides.
            necklace_image: List of necklace image paths (one per necklace).
            nto_outputs / nto_cto_outputs / makeup_outputs: Nested lists of
                output image paths, ``outputs[necklace_idx][image_idx]``.
            font_path: Font file used for all text overlays.
            output_path: Destination video file path.
            audio_path: Optional background audio file.
            image_display_duration: Seconds each still slide is shown.
            box_color / box_opacity / font_size / text_color: Styling of the
                caption box drawn at the bottom of each slide.
            fps: Output frame rate.
            outro_title / logo_image / address / phone_numbers: Data for the
                (currently unused) composed outro card, see create_last_clip.
            transition_duration: Seconds each transition lasts.
            transition_type: One of 'crossfadein', 'crossfadeout', 'slidein',
                'slideout', 'loop', or anything else for no transition.
            direction: 'left' or other; controls the slide transition side.
        """
        self.intro_video_path = intro_video_path
        # Normalize the optional list arguments so iteration code never
        # has to special-case None.
        self.necklace_images = necklace_image if necklace_image else []
        self.nto_outputs = nto_outputs if nto_outputs else []
        self.nto_cto_outputs = nto_cto_outputs if nto_cto_outputs else []
        self.makeup_outputs = makeup_outputs if makeup_outputs else []
        self.output_video_path = output_path
        self.font_path = font_path
        self.audio_path = audio_path
        self.image_display_duration = image_display_duration
        self.box_color = box_color
        self.box_opacity = box_opacity
        self.font_size = font_size
        self.text_color = text_color
        self.fps = fps
        self.necklace_title = necklace_title
        self.nto_title = nto_title
        self.cto_title = cto_title
        self.makeup_title = makeup_title
        self.backgrounds = backgrounds
        self.outro_title = outro_title
        self.logo_image = logo_image
        self.address = address
        self.phone_numbers = phone_numbers
        self.transition_duration = transition_duration
        self.transition_type = transition_type
        self.direction = direction
        self.outro_video_path = outro_video_path

    def create_necklace_clips(self, necklace_image, index, label):
        """Build one labelled preview slide per configured background color.

        Args:
            necklace_image: Path to the necklace image; falsy values skip
                the necklace entirely.
            index: Zero-based necklace index (used only for logging).
            label: Caption text drawn at the bottom of each slide.

        Returns:
            List of CompositeVideoClip, one per entry in self.backgrounds
            (empty list when no image was provided).
        """
        if not necklace_image:
            print(f"Skipping necklace {index + 1}: No image provided.")
            return []
        necklace_clips = []
        for bg_color in self.backgrounds:
            # 'color' is the documented ColorClip parameter; the previous
            # 'col' spelling is a deprecated alias.
            bg_clip = ColorClip((1080, 1080), color=bg_color, duration=self.image_display_duration)
            necklace = resize(ImageClip(necklace_image), width=500)
            necklace = necklace.set_duration(self.image_display_duration).set_position('center')
            txt_overlay = self.create_text_overlay(f"{label}")
            final_clip = CompositeVideoClip([bg_clip, necklace, txt_overlay.set_position(('center', 'bottom'))])
            necklace_clips.append(final_clip)
        return necklace_clips

    def create_grouped_clips(self, grouped_images, label):
        """Build a labelled full-frame slide for every existing image.

        Args:
            grouped_images: List of groups, each a list of image paths.
                Non-existent paths and non-png/jpg/jpeg files are skipped.
            label: Caption applied to every slide produced by this call.

        Returns:
            Flat list of CompositeVideoClip slides.
        """
        clips = []
        for idx, group in enumerate(grouped_images):
            for img_path in group:
                if os.path.exists(img_path) and img_path.lower().endswith(('.png', '.jpg', '.jpeg')):
                    print(f"Processing {label} image: {img_path}")
                    img_clip = resize(ImageClip(img_path), (1080, 1080))
                    txt_overlay = self.create_text_overlay(f"{label}")
                    final_clip = CompositeVideoClip([
                        img_clip.set_duration(self.image_display_duration),
                        txt_overlay.set_position(('center', 'bottom'))
                    ])
                    clips.append(final_clip)
        return clips

    def create_text_overlay(self, text, duration=None):
        """Return a 1080x80 semi-transparent caption bar with centered text.

        Args:
            text: Caption string.
            duration: Clip duration in seconds; defaults to
                self.image_display_duration.
        """
        box = ColorClip((1080, 80), color=self.box_color, duration=duration or self.image_display_duration)
        box = box.set_opacity(self.box_opacity)
        txt = TextClip(text, font=self.font_path, fontsize=self.font_size, color=self.text_color)
        return CompositeVideoClip([box, txt.set_position('center')])

    def create_last_clip(self, title, logo_image, address, phone_numbers, font_path):
        """Compose a closing card: logo on top, then title, address, phone.

        Currently not called by create_final_video (an outro video file is
        used instead), but kept as an alternative outro.
        """
        # Background clip (1080x1080, light gray color)
        bg_clip = ColorClip((1080, 1080), color=(245, 245, 245), duration=self.image_display_duration)
        # Resize logo to fit well within the 1080x1080 frame
        logo = resize(ImageClip(logo_image), width=400)
        logo = logo.set_duration(self.image_display_duration)
        logo = logo.set_position(lambda t: ('center', 200))  # Place logo near top
        # Title overlay text
        txt_overlay_title = TextClip(title, fontsize=50, color='black', font=font_path)
        txt_overlay_title = txt_overlay_title.set_duration(self.image_display_duration)
        txt_overlay_title = txt_overlay_title.set_position(lambda t: ('center', 600))  # Below logo
        # Address overlay text
        txt_overlay_address = TextClip(address, fontsize=30, color='black', font=font_path)
        txt_overlay_address = txt_overlay_address.set_duration(self.image_display_duration)
        txt_overlay_address = txt_overlay_address.set_position(lambda t: ('center', 680))  # Below title
        # Phone number overlay text
        txt_overlay_phone = TextClip(phone_numbers, fontsize=30, color='black', font=font_path)
        txt_overlay_phone = txt_overlay_phone.set_duration(self.image_display_duration)
        txt_overlay_phone = txt_overlay_phone.set_position(lambda t: ('center', 730))  # Below address
        # Combine everything into the final clip
        final_clip = CompositeVideoClip([bg_clip, logo, txt_overlay_title, txt_overlay_address, txt_overlay_phone])
        return final_clip

    def apply_slideout_transition(self, clip, direction='left'):
        """Slide the clip out of frame.

        BUGFIX: this previously called slide_in (and the slide-in helper
        called slide_out), so the 'slideout' and 'slidein' transition types
        were swapped.  The direction-to-side mapping is preserved.
        """
        side = 'right' if direction == 'left' else 'left'
        return slide_out(clip, duration=self.transition_duration, side=side)

    def apply_slidein_transition(self, clip, direction='left'):
        """Slide the clip into frame (see BUGFIX note on slideout)."""
        side = 'right' if direction == 'left' else 'left'
        return slide_in(clip, duration=self.transition_duration, side=side)

    def apply_loopable_transition(self, clip):
        """Cross-fade the clip's end into its start so it loops seamlessly."""
        return make_loopable(clip, duration=self.transition_duration)

    def apply_crossfadein_transition(self, clip):
        """Fade the clip in from transparency."""
        return crossfadein(clip, self.transition_duration)

    def apply_crossfadeout_transition(self, clip):
        """Fade the clip out to transparency."""
        return crossfadeout(clip, self.transition_duration)

    def create_final_video(self):
        """Build, transition, concatenate, and render the final video.

        Writes the result to self.output_video_path.  All errors are caught
        and logged rather than raised (best-effort rendering).
        """
        try:
            print("Starting video creation...")
            clips = []
            # Step 1: Process Intro Video
            if self.intro_video_path and os.path.exists(self.intro_video_path):
                print(f"Adding intro video from path: {self.intro_video_path}")
                intro_clip = resize(VideoFileClip(self.intro_video_path), (1080, 1080))
                clips.append(intro_clip)
            else:
                print("Skipping intro video: Path not provided or invalid.")
            # Step 2: Process Necklaces and Associated Outputs
            for idx, necklace_image in enumerate(self.necklace_images):
                print(f"Processing Necklace {idx + 1}...")
                # Necklace preview clips
                necklace_clips = self.create_necklace_clips(necklace_image, idx, self.necklace_title[idx])
                if necklace_clips:
                    clips.extend(necklace_clips)
                else:
                    print(f"Skipping Necklace {idx + 1} preview: No valid clips created.")
                # NTO outputs.
                # NOTE(review): each idj iteration passes the WHOLE group to
                # create_grouped_clips, so every image is re-added once per
                # title (len(group)^2 slides).  Looks like it should pair
                # image idj with title idj — confirm intent before changing.
                if idx < len(self.nto_outputs):
                    for idj in range(len(self.nto_outputs[idx])):
                        print(f"Total NTO outputs{len(self.nto_outputs)}")
                        print(f"Adding NTO outputs for Necklace {idx + 1}")
                        nto_clips = self.create_grouped_clips([self.nto_outputs[idx]], self.nto_title[idx][idj])
                        if nto_clips:
                            clips.extend(nto_clips)
                if idx < len(self.nto_cto_outputs):
                    for idj in range(len(self.nto_cto_outputs[idx])):
                        print(f"Total CTO outputs{len(self.nto_cto_outputs)}")
                        print(f"Adding CTO outputs for Necklace {idx + 1}")
                        cto_clips = self.create_grouped_clips([self.nto_cto_outputs[idx]], self.cto_title[idx][idj])
                        if cto_clips:
                            clips.extend(cto_clips)
                        else:
                            print(f"No valid CTO clips for Necklace {idx + 1}")
                if idx < len(self.makeup_outputs):
                    for idj in range(len(self.makeup_outputs[idx])):
                        print(f"Total Makeup outputs{len(self.makeup_outputs)}")
                        print(f"Adding Makeup outputs for Necklace {idx + 1}")
                        makeup_clips = self.create_grouped_clips([self.makeup_outputs[idx]],
                                                                 self.makeup_title[idx][idj])
                        if makeup_clips:
                            clips.extend(makeup_clips)
                        else:
                            print(f"No valid Makeup clips for Necklace {idx + 1}")
            # Step 3: Process Outro Video
            if self.outro_video_path and os.path.exists(self.outro_video_path):
                print(f"Adding outro video from path: {self.outro_video_path}")
                outro_clip = resize(VideoFileClip(self.outro_video_path), (1080, 1080))
                clips.append(outro_clip)
            # Apply the configured transition to every clip; a failure on one
            # clip falls back to the untransitioned clip rather than aborting.
            final_clips = []
            for clip in clips:
                # NOTE(review): this forces intro/outro videos to the still
                # duration too, truncating or freezing them — confirm intended.
                clip = clip.set_duration(self.image_display_duration)
                try:
                    if self.transition_type == 'crossfadein':
                        clip_with_transition = self.apply_crossfadein_transition(clip)
                    elif self.transition_type == 'crossfadeout':
                        clip_with_transition = self.apply_crossfadeout_transition(clip)
                    elif self.transition_type == 'slideout':
                        clip_with_transition = self.apply_slideout_transition(clip, self.direction)
                    elif self.transition_type == 'slidein':
                        clip_with_transition = self.apply_slidein_transition(clip, self.direction)
                    elif self.transition_type == 'loop':
                        clip_with_transition = self.apply_loopable_transition(clip)
                    else:
                        clip_with_transition = clip
                    final_clips.append(clip_with_transition)
                except Exception as e:
                    print(f"Error applying transition: {e}")
                    final_clips.append(clip)
            clips = final_clips
            if not clips:
                print("No valid clips to combine. Exiting.")
                return
            print(f"Total clips to concatenate: {len(clips)}")
            final_video = concatenate_videoclips(clips, method="compose")
            # Background audio is trimmed to the video's duration.
            if self.audio_path and os.path.exists(self.audio_path):
                print(f"Adding background audio from path: {self.audio_path}")
                try:
                    audio = AudioFileClip(self.audio_path).subclip(0, final_video.duration)
                    final_video = final_video.set_audio(audio)
                except Exception as e:
                    print(f"Error adding audio: {e}")
            else:
                print("Skipping background audio: Path not provided or invalid.")
            print("Rendering final video...")
            final_video.write_videofile(
                self.output_video_path,
                fps=self.fps,
                codec="libx264",
                audio_codec="aac",
                threads=4,
                preset="ultrafast"
            )
            print(f"Video successfully saved to: {self.output_video_path}")
        except Exception as e:
            print(f"An error occurred during video creation: {e}")