import os
import random
import subprocess

import gradio as gr
import numpy as np
from moviepy.editor import VideoFileClip, CompositeVideoClip, ImageClip
from PIL import Image, ImageDraw, ImageFont


# Ensure ImageMagick is installed
def install_imagemagick():
    if not os.path.exists('/usr/bin/convert'):
        subprocess.run(['apt-get', 'update'])
        subprocess.run(['apt-get', 'install', '-y', 'imagemagick'])


install_imagemagick()


def create_text_clip(text, fontsize, color, size):
    """Render `text` centered on a black image of `size` and return it as a NumPy array."""
    img = Image.new('RGB', size, color='black')
    draw = ImageDraw.Draw(img)
    font_path = "arial.ttf"  # Make sure this file is in the root directory of your space
    font = ImageFont.truetype(font_path, fontsize)
    w, h = draw.textbbox((0, 0), text, font=font)[2:]
    draw.text(((size[0] - w) / 2, (size[1] - h) / 2), text, font=font, fill=color)
    return np.array(img)


def process_video(text):
    video_folder = "videos"
    video_files = [
        os.path.join(video_folder, f)
        for f in os.listdir(video_folder)
        if f.endswith(('mp4', 'mov', 'avi', 'mkv'))
    ]
    if not video_files:
        raise FileNotFoundError("No video files found in the specified directory.")

    # Pick a random video and cut a random clip of at most 60 seconds from it.
    selected_video = random.choice(video_files)
    video = VideoFileClip(selected_video)
    start_time = random.uniform(0, max(0, video.duration - 60))
    video = video.subclip(start_time, min(start_time + 60, video.duration))

    def resize_image(image, new_size):
        # Currently unused helper; expects new_size as (height, width).
        pil_image = Image.fromarray(image)
        resized_pil = pil_image.resize(new_size[::-1], Image.LANCZOS)
        return np.array(resized_pil)

    # Wrap the text so each line holds at most 8 words.
    text_lines = text.split()
    text = "\n".join([" ".join(text_lines[i:i + 8]) for i in range(0, len(text_lines), 8)])

    # Render the text and overlay it on the clip at 50% opacity.
    text_img = create_text_clip(text, fontsize=70, color='white', size=video.size)
    text_clip = (
        ImageClip(text_img)
        .set_duration(video.duration)
        .set_position(('center', 'center'))
        .set_opacity(0.5)
    )

    final_clip = CompositeVideoClip([video, text_clip])
    output_path = "output.mp4"
    final_clip.write_videofile(output_path, codec="libx264")
    return output_path


def chat_interface():
    # Note: generate_response is referenced here but not defined in this file;
    # it must be provided elsewhere (e.g. a GROQ-backed completion function).
    return gr.Interface(
        fn=generate_response,
        inputs=[
            gr.Textbox(label="Prompt"),
            gr.Textbox(label="History", type="text"),
            gr.Dropdown(
                choices=["llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768", "gemma-7b-it"],
                label="Model",
            ),
            gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Temperature"),
            gr.Slider(minimum=1, maximum=32192, step=1, label="Max Tokens"),
            gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Top P"),
            gr.Number(precision=0, label="Seed"),
        ],
        outputs=gr.Textbox(label="Response"),
        title="YTSHorts Maker - Chat Interface",
        description="Powered by GROQ.",
        live=True,
    )


def process_video_interface():
    return gr.Interface(
        fn=process_video,
        inputs=gr.Textbox(label="Text (8 words max per line)"),
        outputs=gr.Video(label="Processed Video"),
        title="YTSHorts Maker - Video Processing",
        description="Select a video file from 'videos' folder, add text, and process.",
    )


# Main app definition
with gr.Blocks(theme=gr.themes.Soft(primary_hue="red", secondary_hue="pink")) as demo:
    with gr.Tabs():
        # Video Processing tab
        with gr.TabItem("Video Processing"):
            text_input = gr.Textbox(lines=5, label="Text (8 words max per line)")
            process_button = gr.Button("Process Video")
            video_output = gr.Video(label="Processed Video")

            process_button.click(
                fn=process_video,
                inputs=text_input,
                outputs=video_output,
            )

# Launch the Gradio interface
if __name__ == "__main__":
    demo.launch()
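

# --- Hedged sketch (not part of the original script) --------------------------
# chat_interface() above wires its inputs to a generate_response function that
# the script never defines. The stub below is one possible implementation,
# assuming the Groq Python SDK (`pip install groq`) and a GROQ_API_KEY
# environment variable; the history input is accepted but ignored here. In
# practice this would be defined above chat_interface() and adapted to your
# actual GROQ setup.
def generate_response(prompt, history, model, temperature, max_tokens, top_p, seed):
    from groq import Groq  # assumed dependency, not imported at the top of the original file

    client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
    completion = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        temperature=temperature,
        max_tokens=int(max_tokens),
        top_p=top_p,
        seed=int(seed) if seed else None,
    )
    return completion.choices[0].message.content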