AIVISIONDREAMS committed
Commit 32d4b56
1 Parent(s): e513631

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -1,35 +1,4 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
+ *.dylib filter=lfs diff=lfs merge=lfs -text
+ *.dylibs filter=lfs diff=lfs merge=lfs -text
+ ffmpeg-osx64-v4.2.2 filter=lfs diff=lfs merge=lfs -text
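Note: the replaced rules tracked generic binary formats; the new rules track only the media and native-library artifacts this repository actually ships. As a rough illustration (not part of the commit), gitattributes patterns behave much like shell globs, which Python's fnmatch approximates:

    # Sketch: approximate how the new LFS patterns classify file names.
    # fnmatch only approximates gitattributes glob semantics.
    from fnmatch import fnmatch

    LFS_PATTERNS = ["*.mp4", "*.dylib", "*.dylibs", "ffmpeg-osx64-v4.2.2"]

    def tracked_by_lfs(path: str) -> bool:
        name = path.rsplit("/", 1)[-1]
        return any(fnmatch(name, pattern) for pattern in LFS_PATTERNS)

    assert tracked_by_lfs("videos/sample_video.mp4")
    assert not tracked_by_lfs("app.py")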
.gitignore ADDED
@@ -0,0 +1,13 @@
+ # Ignore virtual environment
+ venv/
+
+ # Ignore Python cache files
+ __pycache__/
+ *.pyc
+
+ # Ignore large media, native libraries, and logs
+ *.mp4
+ *.dylib
+ *.so
+ *.a
+ *.log
Procfile ADDED
@@ -0,0 +1,2 @@
+ web: python app.py
+
README.md CHANGED
@@ -1,12 +1,6 @@
  ---
- title: Ai Dreams X
- emoji: 🦀
- colorFrom: gray
- colorTo: indigo
+ title: ai-dreams-x
+ app_file: gradio_interface_extended.py
  sdk: gradio
  sdk_version: 4.31.5
- app_file: app.py
- pinned: false
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,87 @@
+ import gradio as gr
+ import torch
+ from diffusers import StableDiffusionPipeline
+ import time
+ from moviepy.editor import VideoFileClip, concatenate_videoclips, AudioFileClip
+ from moviepy.audio.fx.all import volumex
+ import os
+ import datetime
+
+ # Load the pipeline
+ model_id = "CompVis/stable-diffusion-v1-4"
+ pipe = StableDiffusionPipeline.from_pretrained(model_id)
+ pipe = pipe.to("cpu")
+
+ # Load a sample video
+ sample_video_path = "sample_video.mp4"
+ video_clip = VideoFileClip(sample_video_path)
+
+ # Define paths
+ output_directory = os.path.expanduser("~/Desktop/AI DREAMS & VISIONS/")
+
+ # Ensure the output directory exists
+ os.makedirs(output_directory, exist_ok=True)
+
+ # Function to generate video with visualizer
+ def generate_video(prompt, duration=10, frame_rate=24):
+     start_time = time.time()
+
+     # Placeholder: simulate video generation by trimming the sample video
+     output_video_path = os.path.join(output_directory, f"{prompt.replace(' ', '_')}.mp4")
+     video_clip = VideoFileClip(sample_video_path).subclip(0, duration)
+
+     # Placeholder "visualizer": halve the audio volume
+     # (volumex applied to a video clip transforms the clip's audio track)
+     visualizer = volumex(video_clip, 0.5)
+     final_clip = concatenate_videoclips([visualizer])
+
+     final_clip.write_videofile(output_video_path, fps=frame_rate)
+
+     end_time = time.time()
+     time_taken = end_time - start_time
+     estimated_time = str(datetime.timedelta(seconds=int(time_taken)))
+
+     return output_video_path, estimated_time
+
+ # Function to upload music and sync it with the video
+ def sync_music_to_video(video_path, music_path):
+     video_clip = VideoFileClip(video_path)
+     audio_clip = AudioFileClip(music_path)
+
+     # Trim the music to the video duration
+     synced_audio_clip = audio_clip.subclip(0, video_clip.duration)
+
+     # Apply the music to the video
+     final_clip = video_clip.set_audio(synced_audio_clip)
+     synced_video_path = video_path.replace('.mp4', '_synced.mp4')
+     final_clip.write_videofile(synced_video_path)
+
+     return synced_video_path
+
+ # Define the Gradio interface
+ with gr.Blocks() as demo:
+     gr.Markdown("# AI DREAMS X Video Generator")
+     with gr.Row():
+         with gr.Column():
+             text_input = gr.Textbox(label="Text Prompt")
+             duration_input = gr.Slider(minimum=1, maximum=60, step=1, label="Duration (seconds)", value=10)
+             frame_rate_input = gr.Slider(minimum=1, maximum=60, step=1, label="Frame Rate (fps)", value=24)
+             music_upload = gr.File(label="Upload Music File")
+             generate_button = gr.Button("Generate Video")
+         with gr.Column():
+             output_video = gr.Video(label="Generated Video")
+             download_link = gr.File(label="Download Video")
+             estimated_time = gr.Textbox(label="Estimated Time of Completion")
+
+     def generate_and_display(prompt, duration, frame_rate, music_file):
+         video_path, time_taken = generate_video(prompt, duration, frame_rate)
+         if music_file:
+             video_path = sync_music_to_video(video_path, music_file)  # gr.File passes a filepath string in Gradio 4
+         return video_path, video_path, time_taken
+
+     generate_button.click(generate_and_display, inputs=[text_input, duration_input, frame_rate_input, music_upload], outputs=[output_video, download_link, estimated_time])
+
+     gr.Markdown("[Contact Us](mailto:aidreams@aidreams.company) | [Follow @TheKingofJewelz](https://x.com/TheKingofJewelz)")
+
+ demo.launch(share=True, server_name="0.0.0.0", server_port=7863)
+
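Note: the two helpers in app.py can be exercised without the UI. A minimal sketch, assuming sample_video.mp4 and a music.mp3 file exist next to the script:

    # Sketch: drive the helpers directly (assumes sample_video.mp4 and music.mp3 exist).
    video_path, elapsed = generate_video("ocean at sunset", duration=5, frame_rate=24)
    print(f"Wrote {video_path} in {elapsed}")  # elapsed is an H:MM:SS string
    synced_path = sync_music_to_video(video_path, "music.mp3")
    print(f"Music-synced version: {synced_path}")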
build.sh ADDED
@@ -0,0 +1,10 @@
+ #!/usr/bin/env bash
+
+ set -e
+
+ # Install system dependencies
+ apt-get update && apt-get install -y portaudio19-dev
+
+ # Install Python dependencies
+ pip install -r requirements.txt
+
deploy_loop.sh ADDED
@@ -0,0 +1,87 @@
+ #!/bin/bash
+
+ # Define the repository and branch
+ REPO_URL="https://github.com/UnseenSeven/GEMFINDER.git"
+ BRANCH="main"
+ APP_DIR="$HOME/ai-dreams-x"  # ~ does not expand inside quotes, so use $HOME
+
+ # Function to update the app.py file to remove the deprecated import
+ function fix_imports() {
+     # Navigate to the app directory
+     cd "$APP_DIR"
+
+     # Ensure the correct import
+     sed -i 's/from werkzeug.urls import url_quote/from urllib.parse import quote as url_quote/' app.py
+
+     # Stage and commit the changes
+     git add app.py
+     git commit -m "Remove deprecated Werkzeug import and use urllib.parse.quote"
+     git push origin $BRANCH
+ }
+
+ # Function to deploy the application
+ function deploy_app() {
+     cd "$APP_DIR"
+     # Force a new deployment by making a dummy change
+     touch deploy_trigger.txt
+     git add deploy_trigger.txt
+     git commit -m "Trigger redeployment"
+     git push origin $BRANCH
+ }
+
+ # Function to check deployment status
+ function check_deployment() {
+     # Check logs for specific errors
+     ERROR_LOG=$(render logs ai-dreams-x | grep "ImportError: cannot import name 'url_quote'")
+
+     if [ -n "$ERROR_LOG" ]; then
+         echo "Found ImportError, attempting to fix..."
+         return 1
+     else
+         echo "No ImportError found. Checking for other issues..."
+         return 0
+     fi
+ }
+
+ # Function to clean and redeploy
+ function clean_redeploy() {
+     # Clean and reinstall dependencies
+     rm -rf .venv
+     python3 -m venv .venv
+     source .venv/bin/activate
+     pip install -r requirements.txt
+
+     # Run the deployment
+     deploy_app
+ }
+
+ # Main loop
+ while true; do
+     # Pull latest changes
+     git pull origin $BRANCH
+
+     # Fix imports
+     fix_imports
+
+     # Deploy the application
+     deploy_app
+
+     # Wait for a few seconds to let deployment finish
+     sleep 60
+
+     # Check deployment status
+     check_deployment
+     DEPLOY_STATUS=$?
+
+     if [ $DEPLOY_STATUS -eq 0 ]; then
+         echo "Deployment successful!"
+         break
+     else
+         echo "Deployment failed. Retrying..."
+         clean_redeploy
+     fi
+
+     # Wait a bit before retrying
+     sleep 30
+ done
+
deploy_trigger.txt ADDED
File without changes
gradio_interface.py ADDED
@@ -0,0 +1,23 @@
+ import torch
+ from diffusers import StableDiffusionPipeline
+ import gradio as gr
+
+ model_id = "CompVis/stable-diffusion-v1-4"
+ pipe = StableDiffusionPipeline.from_pretrained(model_id)
+ pipe = pipe.to("cpu")  # Use CPU instead of CUDA
+
+ def generate_image(prompt):
+     image = pipe(prompt).images[0]
+     return image
+
+ demo = gr.Interface(
+     fn=generate_image,
+     inputs=gr.Textbox(label="Text Prompt"),
+     outputs="image",
+     title="AI Image Generator",
+     description="Generate images from text prompts using Stable Diffusion."
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
+
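Note: the same pipeline can be driven without launching the interface; a minimal sketch (on CPU a single 512x512 image can take minutes):

    # Sketch: call the handler directly; pipe(...) returns PIL images.
    image = generate_image("a crab made of stained glass")
    image.save("crab.png")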
gradio_interface_extended.py ADDED
@@ -0,0 +1,69 @@
+ import gradio as gr
+ from diffusers import StableDiffusionPipeline
+ import moviepy.editor as mp
+ import torch
+ import os
+ from datetime import datetime
+
+ def generate_video(prompt, duration, frame_rate, music_file):
+     # Initialize the pipeline
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+     if device == "cuda":
+         pipeline = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
+     else:
+         pipeline = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
+
+     pipeline = pipeline.to(device)
+
+     # Generate frames (sliders return floats, so cast the frame count to int)
+     num_frames = int(duration * frame_rate)
+     temp_dir = f"/tmp/{datetime.now().strftime('%Y%m%d%H%M%S')}"
+     os.makedirs(temp_dir, exist_ok=True)
+
+     for i in range(num_frames):
+         frame = pipeline(prompt).images[0]
+         frame_path = os.path.join(temp_dir, f"frame_{i:04d}.png")
+         frame.save(frame_path)
+
+     # Create video from frames
+     video_path = os.path.join(temp_dir, "video.mp4")
+     video_clip = mp.ImageSequenceClip(temp_dir, fps=frame_rate)
+
+     if music_file:
+         audio_clip = mp.AudioFileClip(music_file)
+         audio_clip = audio_clip.set_duration(video_clip.duration)
+         video_clip = video_clip.set_audio(audio_clip)
+
+     video_clip.write_videofile(video_path, codec="libx264")
+
+     return video_path
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# AI Dreams & Visions Video Generator")
+     gr.Markdown("Generate stunning videos from text prompts using AI technology. For inquiries, contact us at [aidreams@aidreams.company](mailto:aidreams@aidreams.company). Follow us on X: [@TheKingofJewelz](https://x.com/TheKingofJewelz).")
+
+     with gr.Row():
+         with gr.Column():
+             prompt = gr.Textbox(label="Text Prompt", placeholder="Enter your video description here...")
+             duration = gr.Slider(label="Duration (seconds)", minimum=1, maximum=30, step=1, value=5)
+             frame_rate = gr.Slider(label="Frame Rate", minimum=1, maximum=60, step=1, value=24)
+             music_file = gr.Audio(label="Music File (Optional)", type="filepath")
+             generate_btn = gr.Button("Generate Video")
+
+         with gr.Column():
+             video_output = gr.Video(label="Generated Video")
+             download_link = gr.File(label="Download Video")
+
+     def generate_and_display_video(prompt, duration, frame_rate, music_file):
+         video_path = generate_video(prompt, duration, frame_rate, music_file)
+         return video_path, video_path
+
+     generate_btn.click(
+         generate_and_display_video,
+         inputs=[prompt, duration, frame_rate, music_file],
+         outputs=[video_output, download_link],
+     )
+
+ if __name__ == "__main__":
+     demo.launch(share=True)
+
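Note: every frame above is a full Stable Diffusion run, so clip length multiplies directly into generation time. A back-of-the-envelope sketch (the per-frame figure is an assumption; measure on your own hardware):

    # Sketch: rough cost of the frame loop in generate_video.
    duration, frame_rate = 5, 24
    seconds_per_frame = 4.0  # assumed; varies wildly between CPU and GPU
    num_frames = int(duration * frame_rate)  # 120 frames
    print(f"{num_frames} frames -> ~{num_frames * seconds_per_frame / 60:.0f} min")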
gradio_interface_extended.py.save ADDED
@@ -0,0 +1,44 @@
+ import gradio as gr
+ import os
+ import moviepy.editor as mp
+ import torch
+ from diffusers import StableDiffusionPipeline
+
+ # Load the Stable Diffusion model
+ pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+ pipe.to("cpu")  # Change to "cuda" if you have a GPU
+
+ def generate_video(prompt, duration, framerate):
+     temp_dir = "/tmp/sd_frames"
+     if not os.path.exists(temp_dir):
+         os.makedirs(temp_dir, exist_ok=True)
+
+     # Generate frames using the prompt
+     for i in range(1, int(duration * framerate) + 1):
+         frame = pipe(prompt).images[0]
+         frame.save(f"{temp_dir}/frame_{i:04d}.png")
+
+     # Generate video from frames
+     video = mp.ImageSequenceClip(temp_dir, fps=framerate)
+     output_path = "/tmp/sd_video.mp4"
+     video.write_videofile(output_path, codec="libx264")
+
+     return output_path
+
+ iface = gr.Interface(
+     fn=generate_video,
+     inputs=[
+         gr.Textbox(label="Prompt"),
+         gr.Slider(label="Duration (seconds)", minimum=1, maximum=30, step=1, value=5),  # value=, not default=, in Gradio 3+
+         gr.Slider(label="Framerate (fps)", minimum=1, maximum=60, step=1, value=30)
+     ],
+     outputs=gr.Video(label="Generated Video"),
+     title="AI Dreams & Visions Video Generator",
+     description="Generate a video based on a prompt. Enter the prompt, set the duration and framerate, and click 'Generate Video'.",
+     theme="dark",
+     css="footer {visibility: hidden}"
+ )
+
+ if __name__ == "__main__":
+     iface.launch(share=True)
+
pipeline_utils.py ADDED
@@ -0,0 +1,15 @@
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
+     # Add debugging prints here
+     print(f"Loading model from path: {pretrained_model_name_or_path}")
+     print(f"Class: {cls}")
+     print(f"Model args: {model_args}")
+     print(f"Kwargs: {kwargs}")
+
+     load_method_name = kwargs.pop("_from_pretrained_load_method", "from_config")
+     if not isinstance(load_method_name, str):
+         raise TypeError("load_method_name must be a string")
+
+     load_method = getattr(cls, load_method_name)
+     return load_method(pretrained_model_name_or_path, *model_args, **kwargs)
+
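Note: the override above routes loading through getattr, so a caller can select an alternate class-level loader by name via the private _from_pretrained_load_method kwarg. A self-contained sketch of the same dispatch pattern on a hypothetical class:

    # Sketch: getattr-based loader dispatch, mirroring pipeline_utils.py.
    class DemoPipeline:
        @classmethod
        def from_config(cls, path, *args, **kwargs):
            print(f"from_config <- {path}")
            return cls()

        @classmethod
        def from_pretrained(cls, path, *args, **kwargs):
            # Pick the loader by name; default to from_config.
            load_method_name = kwargs.pop("_from_pretrained_load_method", "from_config")
            load_method = getattr(cls, load_method_name)
            return load_method(path, *args, **kwargs)

    DemoPipeline.from_pretrained("some/model")  # dispatches to from_config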
post_deploy.sh ADDED
@@ -0,0 +1,60 @@
+ #!/bin/bash
+
+ # Ensure dependencies are installed
+ pip install accelerate moviepy numpy
+
+ # Move cache to the right location if needed
+ python -c "from transformers.utils import move_cache; move_cache()"
+
+ # Install necessary Python packages
+ pip install transformers gradio torch
+
+ # Function to check and download the model
+ check_and_download_model() {
+     local model_name=$1
+     python -c "
+ import sys
+ from diffusers import StableDiffusionPipeline
+ import torch
+
+ try:
+     StableDiffusionPipeline.from_pretrained('${model_name}', torch_dtype=torch.float16)
+     sys.exit(0)
+ except Exception as e:
+     print(f'Error: {e}', file=sys.stderr)
+     sys.exit(1)
+ "
+ }
+
+ # Function to ensure a usable model is available
+ ensure_setup() {
+     local models=("stabilityai/stable-diffusion-2-1-base" "CompVis/stable-diffusion-v1-4" "runwayml/stable-diffusion-v1-5")
+
+     for model in "${models[@]}"
+     do
+         echo "Checking model: ${model}"
+         if check_and_download_model ${model}; then
+             echo "Model ${model} is ready."
+             export MODEL_NAME=${model}
+             return 0
+         fi
+     done
+
+     echo "No model could be loaded."
+     return 1
+ }
+
+ ensure_setup
+
+ # Retry loop for launching the Gradio interface
+ while true; do
+     python gradio_interface_extended.py
+     if [ $? -eq 0 ]; then
+         echo "Gradio interface started successfully."
+         break
+     else
+         echo "Gradio interface failed to start. Retrying in 10 seconds..."
+         sleep 10
+     fi
+ done
+
post_deploy.sh.save ADDED
File without changes
render.yaml ADDED
@@ -0,0 +1,16 @@
+ services:
+   - type: web
+     name: ai-dreams-x
+     env: python
+     runtime: python3
+     buildCommand: "pip install -r requirements.txt"
+     startCommand: "bash post_deploy.sh && python gradio_interface_extended.py"
+     region: oregon
+     plan: starter
+     disk:
+       name: persistent
+       size: 1GB
+     envVars:
+       - key: PORT
+         value: "8080"
+
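Note: render.yaml injects PORT=8080, but the launch calls in this commit bind fixed ports (7863 or Gradio's default). A hedged sketch of honoring the injected port, which would be an adjustment rather than part of this commit:

    # Sketch: read the PORT env var that render.yaml sets and pass it to Gradio.
    import os

    port = int(os.environ.get("PORT", 7860))  # 7860 is Gradio's usual default
    # demo.launch(server_name="0.0.0.0", server_port=port)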
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ gradio
+ torch
+ diffusers
+ moviepy
+
requirements.txt.save ADDED
File without changes
run_gradio.sh ADDED
@@ -0,0 +1,18 @@
+ #!/bin/bash
+
+ while true; do
+     echo "Checking and installing required packages..."
+     pip install torch torchvision torchaudio diffusers gradio --upgrade
+
+     echo "Running the gradio_interface.py script..."
+     python ~/ai-dreams-x/gradio_interface.py
+
+     if [ $? -eq 0 ]; then
+         echo "Script ran successfully!"
+         break
+     else
+         echo "An error occurred. Retrying in 5 seconds..."
+         sleep 5
+     fi
+ done
+
setup_and_run.sh ADDED
@@ -0,0 +1,27 @@
+ #!/bin/bash
+
+ while true; do
+     echo "Checking and installing required packages..."
+
+     # Install required packages
+     pip install --upgrade pip
+     pip install torch torchvision
+     pip install diffusers==0.27.2
+     pip install gradio==3.1.5
+     pip install moviepy
+     pip install opencv-python-headless==4.5.5.64
+     pip install huggingface-hub
+
+     # Run the Gradio interface
+     python ~/ai-dreams-x/gradio_interface.py
+
+     # Check if the last command was successful
+     if [ $? -eq 0 ]; then
+         echo "Gradio interface is running successfully."
+         break
+     else
+         echo "An error occurred. Retrying in 5 seconds..."
+         sleep 5
+     fi
+ done
+
setup_script.sh ADDED
@@ -0,0 +1,113 @@
+ #!/bin/bash
+
+ # Ensure we are in the correct directory
+ cd ~/ai-dreams-x
+
+ # Install required Python packages
+ pip install gradio torch diffusers moviepy sounddevice soundfile numpy librosa matplotlib
+
+ # Create the Python script for the Gradio interface (quoted EOF: no shell expansion inside)
+ cat <<'EOF' > gradio_interface_extended.py
+ import gradio as gr
+ import torch
+ from diffusers import StableDiffusionPipeline
+ import moviepy.editor as mp
+ import sounddevice as sd
+ import soundfile as sf
+ import numpy as np
+ import librosa
+ import librosa.display
+ import matplotlib.pyplot as plt
+ from pathlib import Path
+ import time
+
+ # Load the model
+ model_id = "runwayml/stable-diffusion-v1-5"
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ pipe = StableDiffusionPipeline.from_pretrained(model_id)
+ pipe.to(device)
+
+ # Function to generate video from text
+ def generate_video(prompt, duration, frame_rate, audio_file, mic_input):
+     if mic_input:
+         fs = 44100  # Sample rate
+         seconds = 10  # Duration of recording
+         print("Recording audio...")
+         audio_data = sd.rec(int(seconds * fs), samplerate=fs, channels=2)
+         sd.wait()  # Wait until recording is finished
+         audio_path = "mic_audio.wav"
+         sf.write(audio_path, audio_data, fs)
+     elif audio_file is not None:
+         audio_path = audio_file  # gr.File passes a filepath string in Gradio 4
+     else:
+         audio_path = None
+
+     if audio_path:
+         y, sr = librosa.load(audio_path)
+         tempo, _ = librosa.beat.beat_track(y=y, sr=sr)
+         duration = librosa.get_duration(y=y, sr=sr)
+     else:
+         sr = 22050
+         duration = float(duration)
+
+     frames = []
+
+     start_time = time.time()
+     for i in range(int(duration * frame_rate)):
+         frame = pipe(prompt).images[0]
+         frames.append(frame)
+
+     clip = mp.ImageSequenceClip([np.array(f) for f in frames], fps=frame_rate)
+     if audio_path:
+         audio_clip = mp.AudioFileClip(audio_path)
+         video = clip.set_audio(audio_clip)
+     else:
+         video = clip
+
+     # Add visualizer
+     if audio_path:
+         waveform = np.abs(librosa.stft(y))
+         plt.figure(figsize=(10, 4))
+         librosa.display.specshow(librosa.amplitude_to_db(waveform, ref=np.max), sr=sr, x_axis='time', y_axis='log')
+         plt.colorbar(format='%+2.0f dB')
+         plt.title('Power spectrogram')
+         plt.tight_layout()
+         visualizer_path = "/Users/unseenseven/Desktop/AI_DREAMS & VISIONS/visualizer.png"
+         plt.savefig(visualizer_path)
+         plt.close()
+
+     output_path = "/Users/unseenseven/Desktop/AI_DREAMS & VISIONS/generated_video.mp4"
+     video.write_videofile(output_path, codec="libx264")
+
+     estimated_time = time.time() - start_time
+     return output_path, f"Estimated time to completion: {estimated_time:.2f} seconds", output_path
+
+ # Gradio interface
+ with gr.Blocks() as demo:
+     gr.Markdown("# AI DREAMS & VISIONS Video Generator")
+     with gr.Row():
+         with gr.Column():
+             prompt = gr.Textbox(label="Text Prompt")
+             duration = gr.Slider(minimum=1, maximum=60, label="Duration (seconds)")
+             frame_rate = gr.Slider(minimum=1, maximum=60, label="Frame Rate (fps)")
+             audio_file = gr.File(label="Upload Audio File (optional)")
+             mic_input = gr.Checkbox(label="Use Microphone Input")
+             submit = gr.Button("Generate Video")
+             email = gr.Markdown("Contact us: [aidreams@aidreams.company](mailto:aidreams@aidreams.company)")
+         with gr.Column():
+             video_preview = gr.Video(label="Generated Video Preview")
+             estimated_time_output = gr.Textbox(label="Estimated Time to Completion", interactive=False)
+             download_link = gr.File(label="Download Video")
+
+     submit.click(
+         generate_video,
+         inputs=[prompt, duration, frame_rate, audio_file, mic_input],
+         outputs=[video_preview, estimated_time_output, download_link]
+     )
+
+ demo.launch(share=True)
+ EOF
+
+ # Run the Python script
+ python gradio_interface_extended.py
+
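Note: when audio is supplied, the generated script derives the clip duration (and a so-far-unused tempo) from the file via librosa. That step in isolation, assuming some local song.wav:

    # Sketch: audio-driven duration/tempo, as in the generated script.
    import librosa

    y, sr = librosa.load("song.wav")  # "song.wav" is an assumed local file
    tempo, _ = librosa.beat.beat_track(y=y, sr=sr)
    duration = librosa.get_duration(y=y, sr=sr)
    print(f"{duration:.1f}s of audio at ~{float(tempo):.0f} BPM")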
start.sh ADDED
@@ -0,0 +1,18 @@
+ #!/usr/bin/env bash
+
+ set -e
+
+ MAX_RETRIES=5
+ RETRY_DELAY=5
+
+ for i in $(seq 1 $MAX_RETRIES); do
+     python gradio_interface_extended.py && break || {
+         if [ "$i" -eq "$MAX_RETRIES" ]; then
+             echo "Reached maximum retries, exiting."
+             exit 1
+         fi
+         echo "Retrying in $RETRY_DELAY seconds..."
+         sleep $RETRY_DELAY
+     }
+ done
+
static/apple-touch-icon.png ADDED
static/favicon.ico ADDED
templates/access.html ADDED
@@ -0,0 +1,16 @@
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+     <meta charset="UTF-8">
+     <title>AI Dreams X - Movie Maker</title>
+ </head>
+ <body>
+     <h1>Welcome to AI Dreams X</h1>
+     <form action="{{ url_for('generate') }}" method="post">
+         <label for="prompt">Enter your movie idea:</label>
+         <textarea id="prompt" name="prompt" rows="4" cols="50"></textarea>
+         <button type="submit">Generate</button>
+     </form>
+ </body>
+ </html>
+
templates/payment.html ADDED
@@ -0,0 +1,85 @@
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+     <meta charset="UTF-8">
+     <meta name="viewport" content="width=device-width, initial-scale=1.0">
+     <title>AI DREAMS X Payment</title>
+     <style>
+         body {
+             font-family: Arial, sans-serif;
+             background-color: #121212;
+             color: #ffffff;
+             text-align: center;
+             padding: 50px;
+         }
+         .container {
+             background-color: #1e1e1e;
+             border-radius: 10px;
+             padding: 20px;
+             max-width: 600px;
+             margin: auto;
+         }
+         .logo {
+             width: 100px;
+             margin-bottom: 20px;
+         }
+         .mission-statement {
+             font-size: 18px;
+             margin: 20px 0;
+         }
+         .payment-link {
+             background-color: #007bff;
+             color: #ffffff;
+             padding: 10px 20px;
+             border-radius: 5px;
+             text-decoration: none;
+             font-size: 16px;
+         }
+         .payment-link:hover {
+             background-color: #0056b3;
+         }
+         .coupon {
+             margin: 20px 0;
+         }
+         .email {
+             margin-top: 20px;
+         }
+         .message {
+             margin-top: 10px;
+         }
+     </style>
+ </head>
+ <body>
+     <div class="container">
+         <img src="path/to/AI_DREAMS_X_logo.png" alt="AI DREAMS X Logo" class="logo">
+         <div class="mission-statement">
+             <p>Welcome to AI DREAMS X! Our mission is to harness the power of artificial intelligence to create stunning visual and audio experiences that transcend reality.</p>
+         </div>
+         <a href="{{ cash_app_url }}" class="payment-link">Pay with Cash App</a>
+         <div class="coupon">
+             <p>Enter coupon code for free access:</p>
+             <input type="text" id="coupon_code" placeholder="Coupon Code">
+             <button onclick="validateCoupon()">Submit</button>
+         </div>
+         <div class="message" id="message"></div>
+         <div class="email">
+             <p>Contact us: <a href="mailto:aidreams@aidreams.company">aidreams@aidreams.company</a></p>
+         </div>
+     </div>
+     <script>
+         function validateCoupon() {
+             const couponCode = document.getElementById('coupon_code').value;
+             const messageDiv = document.getElementById('message');
+             if (couponCode === 'FREEACCESS2024') {
+                 messageDiv.innerHTML = '<p style="color: green;">Coupon code accepted! You now have free access.</p>';
+                 setTimeout(() => {
+                     window.location.href = '/access'; // Redirect to the access page or whatever URL you use for free access
+                 }, 2000); // Delay to show the success message before redirecting
+             } else {
+                 messageDiv.innerHTML = '<p style="color: red;">Invalid coupon code. Please try again.</p>';
+             }
+         }
+     </script>
+ </body>
+ </html>
+
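Note: the coupon check above runs entirely in the browser, so the logic (and the coupon itself) is visible in the page source. The templates' url_for calls suggest a Flask backend; a hedged server-side sketch (the route name and coupon set are assumptions, not part of this commit):

    # Sketch: hypothetical server-side coupon validation in Flask.
    from flask import Flask, request, jsonify

    app = Flask(__name__)
    VALID_COUPONS = {"FREEACCESS2024"}

    @app.route("/validate_coupon", methods=["POST"])
    def validate_coupon():
        code = request.form.get("coupon_code", "")
        if code in VALID_COUPONS:
            return jsonify(ok=True, redirect="/access")
        return jsonify(ok=False), 403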
templates/result.html ADDED
@@ -0,0 +1,37 @@
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+     <meta charset="UTF-8">
+     <title>AI Dreams X - Movie Maker Results</title>
+ </head>
+ <body>
+     <h1>Generated Movie Content</h1>
+     <h2>Script:</h2>
+     <p>{{ script }}</p>
+     <h2>Scene Image:</h2>
+     <img src="{{ scene_image }}" alt="Scene Image">
+     <h2>Character Voice:</h2>
+     <audio controls>
+         <source src="{{ character_voice }}" type="audio/mpeg">
+         Your browser does not support the audio element.
+     </audio>
+     <h2>Background Music:</h2>
+     <audio controls>
+         <source src="{{ background_music }}" type="audio/mpeg">
+         Your browser does not support the audio element.
+     </audio>
+     <h2>Storyboard:</h2>
+     <video controls>
+         <source src="{{ storyboard }}" type="video/mp4">
+         Your browser does not support the video tag.
+     </video>
+     <h2>Video with Effects:</h2>
+     <video controls>
+         <source src="{{ video_with_effects }}" type="video/mp4">
+         Your browser does not support the video tag.
+     </video>
+     <h2>Sentiment Analysis:</h2>
+     <p>{{ sentiment }}</p>
+ </body>
+ </html>
+