AIVISIONDREAMS commited on
Commit
1058225
1 Parent(s): 21425ba

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +187 -0
app.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from diffusers import StableDiffusionPipeline
import openai
import moviepy.editor as mp
import torch
import os
from datetime import datetime
import librosa

# Output folder on the user's desktop; created up front so every save below
# can assume it exists.
desktop_path = os.path.join(os.path.expanduser("~"), "Desktop", "DREAMS & VISIONS")
os.makedirs(desktop_path, exist_ok=True)

# Prefer GPU with half precision; fall back to CPU with full precision.
if torch.cuda.is_available():
    device = "cuda"
    dtype = torch.float16
else:
    device = "cpu"
    dtype = torch.float32

# Initialize the Stable Diffusion pipeline once at import time (the model
# download can take a while on first run).
pipeline = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=dtype
).to(device)
print("Stable Diffusion pipeline initialized.")

# SECURITY FIX: the OpenAI API key was hard-coded in source and committed to
# version control (the leaked key must be revoked). Read it from the
# environment instead of embedding the secret in code.
openai.api_key = os.getenv("OPENAI_API_KEY", "")
if not openai.api_key:
    print("WARNING: OPENAI_API_KEY is not set; text generation will fail.")
print("OpenAI API key set.")
30
def get_unique_filename(directory, base_name, extension):
    """Return a filename in *directory* that does not collide with an existing file.

    The first candidate is ``base_name.extension``; on collision,
    ``base_name_1.extension``, ``base_name_2.extension``, ... are tried in
    order and the first free name is returned.
    """
    candidate = f"{base_name}.{extension}"
    suffix = 0
    while os.path.exists(os.path.join(directory, candidate)):
        suffix += 1
        candidate = f"{base_name}_{suffix}.{extension}"
    return candidate
41
def generate_text(prompt):
    """Ask gpt-3.5-turbo for a short narrative based on *prompt*.

    Returns the model's reply stripped of surrounding whitespace, or a fixed
    error string if the API call (or response parsing) fails for any reason.
    """
    chat_messages = [{"role": "user", "content": prompt}]
    try:
        reply = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=chat_messages,
            max_tokens=150,
        )
        narrative = reply['choices'][0]['message']['content'].strip()
    except Exception as e:
        print(f"Error generating text: {e}")
        return "Error generating narrative text."
    print(f"Narrative generated: {narrative}")
    return narrative
def generate_images(prompt, num_images):
    """Run the global Stable Diffusion pipeline *num_images* times for *prompt*.

    Returns a list with the first image of each pipeline call, or an empty
    list if generation fails at any point.
    """
    try:
        results = [pipeline(prompt).images[0] for _ in range(num_images)]
    except Exception as e:
        print(f"Error generating images: {e}")
        return []
    print(f"{len(results)} images generated.")
    return results
def sync_video_with_audio(prompt, duration, frame_rate, audio_path=None):
    """Generate an AI video for *prompt* and optionally cut it to a song's beat.

    Args:
        prompt: Text description passed to the image generator for every frame.
        duration: Target video length in seconds.
        frame_rate: Frames per second; ``duration * frame_rate`` frames are generated.
        audio_path: Optional path to an audio file. When given, the video is
            re-cut at librosa-detected beat positions and the audio is attached.

    Returns:
        A ``(path, path)`` tuple (the same file path twice — one for the Gradio
        video preview, one for the download link), or ``(None, None)`` on failure.
    """
    frame_paths = []
    try:
        # One generated image per output frame.
        num_frames = int(duration * frame_rate)
        images = generate_images(prompt, num_frames)

        # Persist the frames to disk so moviepy can build clips from them.
        for i, image in enumerate(images):
            frame_path = os.path.join(desktop_path, f"frame_{i:04d}.png")
            image.save(frame_path)
            frame_paths.append(frame_path)

        clips = [mp.ImageClip(frame).set_duration(1 / frame_rate) for frame in frame_paths]
        video = mp.concatenate_videoclips(clips, method="compose")

        final_video = video
        if audio_path:
            # Beat-track the audio and re-cut the video at beat boundaries.
            y, sr = librosa.load(audio_path)
            tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)
            beat_times = librosa.frames_to_time(beat_frames, sr=sr)

            # FIX: with fewer than two detected beats the old code built an
            # empty clip list and crashed inside concatenate_videoclips,
            # returning (None, None) for perfectly valid input. Fall back to
            # the unsynced video in that case.
            if len(beat_times) >= 2:
                synced_clips = [
                    video.subclip(start, end)
                    for start, end in zip(beat_times[:-1], beat_times[1:])
                ]
                final_video = mp.concatenate_videoclips(synced_clips)
            audio = mp.AudioFileClip(audio_path)
            final_video = final_video.set_audio(audio)

        # Save the final video under a collision-free name.
        final_video_path = os.path.join(
            desktop_path, get_unique_filename(desktop_path, "final_synced_video", "mp4")
        )
        final_video.write_videofile(final_video_path, codec="libx264", audio_codec="aac", fps=frame_rate)

        print(f"Final synced video saved to: {final_video_path}")
        return final_video_path, final_video_path

    except Exception as e:
        print(f"Error syncing video with audio: {e}")
        return None, None
    finally:
        # FIX: the intermediate frame PNGs were previously left in the output
        # folder forever; remove them once the video is written (or on failure).
        for frame_path in frame_paths:
            try:
                os.remove(frame_path)
            except OSError:
                pass
def manual_edit_video(video_path, effects, transitions):
    """Apply the selected effects and transitions to an uploaded video.

    Args:
        video_path: Path of the video file to edit.
        effects: Iterable of effect names; recognised values are "fade_in",
            "fade_out", "black_and_white", "mirror" and "rotate".
        transitions: Iterable of transition names; recognised values are
            "crossfade" and "slide".

    Returns:
        A ``(path, path)`` tuple for the edited file (preview + download),
        or ``(None, None)`` on failure.
    """
    try:
        clip = mp.VideoFileClip(video_path)

        # Dispatch tables keep the application order identical to the
        # original if/if chain.
        effect_ops = (
            ("fade_in", lambda c: c.fadein(1)),
            ("fade_out", lambda c: c.fadeout(1)),
            ("black_and_white", lambda c: c.fx(mp.vfx.blackwhite)),
            ("mirror", lambda c: c.fx(mp.vfx.mirror_x)),
            ("rotate", lambda c: c.fx(mp.vfx.rotate, 180)),
        )
        transition_ops = (
            ("crossfade", lambda c: c.crossfadein(1)),
            ("slide", lambda c: c.fx(mp.vfx.slide_in, 'left')),
        )
        for name, op in effect_ops:
            if name in effects:
                clip = op(clip)
        for name, op in transition_ops:
            if name in transitions:
                clip = op(clip)

        edited_video_path = os.path.join(
            desktop_path, get_unique_filename(desktop_path, "edited_video", "mp4")
        )
        clip.write_videofile(edited_video_path, codec="libx264", audio_codec="aac")

        print(f"Edited video saved to: {edited_video_path}")
        return edited_video_path, edited_video_path

    except Exception as e:
        print(f"Error editing video: {e}")
        return None, None
# ---------------------------------------------------------------------------
# Gradio UI: a generation panel (prompt -> beat-synced video) and a manual
# editing panel (uploaded video + effect/transition checkboxes).
# ---------------------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# AI DREAMS & VISIONS Music Video Creator")
    gr.Markdown("Bring your dreams and visions to life with our AI-powered music video creator. Upload a song, image, or video and create a stunning, synchronized music video with AI-driven enhancements. Contact us at [aidreams@aidreams.company](mailto:aidreams@aidreams.company) for any inquiries. Follow us on [X @TheKingofJewelz](https://x.com/TheKingofJewelz) and get a chance to have your video featured!")

    with gr.Row():
        with gr.Column():
            # Inputs for AI video generation.
            prompt = gr.Textbox(label="Text Prompt", placeholder="Enter your video description here...")
            duration = gr.Slider(label="Duration (seconds)", minimum=1, maximum=30, step=1, value=10)
            frame_rate = gr.Slider(label="Frame Rate", minimum=1, maximum=60, step=1, value=24)
            audio_file = gr.Audio(label="Upload Audio (Optional)")
            # NOTE(review): image_file and video_file are collected but never
            # wired into any callback below — presumably planned features;
            # confirm before removing.
            image_file = gr.Image(label="Upload Image (Optional)", type="filepath")
            video_file = gr.Video(label="Upload Video (Optional)")
            generate_btn = gr.Button("Generate Video")

        with gr.Column():
            # Outputs: inline preview plus a downloadable file.
            video_output = gr.Video(label="Generated Video")
            download_link = gr.File(label="Download Video")

    # Generate button -> sync_video_with_audio, which returns the same path
    # twice (preview, download).
    generate_btn.click(
        sync_video_with_audio,
        inputs=[prompt, duration, frame_rate, audio_file],
        outputs=[video_output, download_link],
    )

    gr.Markdown("### Manual Video Editing")
    with gr.Row():
        with gr.Column():
            # Inputs for the manual editing pass over an existing video.
            video_input = gr.File(label="Upload Video for Editing", type="filepath")
            effects = gr.CheckboxGroup(label="Select Effects", choices=["fade_in", "fade_out", "black_and_white", "mirror", "rotate"])
            transitions = gr.CheckboxGroup(label="Select Transitions", choices=["crossfade", "slide"])
            edit_btn = gr.Button("Edit Video")

        with gr.Column():
            edited_video_output = gr.Video(label="Edited Video")
            download_edited_link = gr.File(label="Download Edited Video")

    # Edit button -> manual_edit_video (same preview/download pattern).
    edit_btn.click(
        manual_edit_video,
        inputs=[video_input, effects, transitions],
        outputs=[edited_video_output, download_edited_link],
    )

if __name__ == "__main__":
    # Binds on all interfaces and requests a public share link.
    demo.launch(server_name="0.0.0.0", server_port=8888, share=True)