Remsky committed
Commit 599abe6 • 0 Parent(s)

Add initial project structure with Docker support and UI components

- Create .gitignore to exclude environment and compiled files
- Add requirements.txt for project dependencies
- Implement Dockerfile for containerized application
- Set up docker-compose.yml for service orchestration
- Create empty __init__.py for lib package
- Add error_utils.py for error message formatting
- Include loading_messages.json for status messages
- Develop ui_components.py for Gradio UI layout
- Implement status_utils.py for progress tracking
- Add image_utils.py for image preparation and uploading
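
A minimal way to run this layout end to end (a sketch, assuming Docker with the Compose plugin is installed and you have a LumaAI API key to paste into the UI):

  docker compose up --build    # builds the image and starts the luma-gradio service from docker-compose.yml
  # then open http://localhost:7860 (the port mapped below)

The bind mount in docker-compose.yml (.:/usr/src/app) keeps the container's working directory in sync with the project folder, so code edits on the host are visible inside the container.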

.gitignore ADDED
@@ -0,0 +1,2 @@
+ *.env
+ *.pyc
Dockerfile ADDED
@@ -0,0 +1,16 @@
+ FROM python:3.10-slim
+
+ WORKDIR /usr/src/app
+
+ COPY requirements.txt .
+
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy the application code and its runtime assets (lib package and status messages)
+ COPY gradio_app.py loading_messages.json ./
+ COPY lib/ ./lib/
+
+ EXPOSE 7860
+ ENV GRADIO_SERVER_NAME="0.0.0.0"
+
+ CMD ["python", "gradio_app.py"]
docker-compose.yml ADDED
@@ -0,0 +1,14 @@
+ version: '3.8'
+
+ services:
+   luma-gradio:
+     build: .
+     ports:
+       - "7860:7860"
+     volumes:
+       - .:/usr/src/app
+     environment:
+       - GRADIO_SERVER_NAME=0.0.0.0
+       - PYTHONUNBUFFERED=1
+     restart: unless-stopped
+     container_name: luma-gradio
gradio_app.py ADDED
@@ -0,0 +1,134 @@
+ import gradio as gr
+ import time
+ import random
+ import requests
+ from lumaai import LumaAI
+ import traceback
+
+ from lib.status_utils import StatusTracker, load_messages
+ from lib.image_utils import prepare_image
+ from lib.api_utils import get_camera_motions
+ from lib.ui_components import create_input_column, create_output_column
+
+ def generate_video(api_key, prompt, camera_motion, image=None, progress=gr.Progress()):
+     if not api_key or not prompt:
+         return None, "Please provide both API key and prompt (I'm not a mind reader... yet)"
+
+     try:
+         status_tracker = StatusTracker(progress)
+         status_tracker.add_step("LumaAI initialized", 0.01)
+         client = LumaAI(auth_token=api_key)
+
+         # Prepare generation parameters
+         generation_params = {
+             "prompt": f"{prompt} {camera_motion if camera_motion != 'None' else ''}",
+             "loop": True,
+             "aspect_ratio": "1:1"  # Force square aspect ratio
+         }
+
+         # Handle image if provided
+         if image is not None:
+             try:
+                 cdn_url = prepare_image(image, status_tracker)
+                 generation_params["keyframes"] = {
+                     "frame0": {
+                         "type": "image",
+                         "url": cdn_url
+                     }
+                 }
+                 status_tracker.add_step("Image ready for its starring role", 0.1)
+             except Exception as e:
+                 return None, f"🎭 Drama in the image department: {str(e)}"
+
+         status_tracker.add_step("Sending your creative masterpiece to LumaAI", 0.15)
+         try:
+             generation = client.generations.create(**generation_params)
+         except Exception as e:
+             return None, f"🎬 LumaAI didn't like that: {str(e)}"
+
+         # Load and shuffle status messages
+         status_messages = load_messages()
+         random.shuffle(status_messages)
+
+         # Poll for completion
+         start_time = time.time()
+         message_index = 0
+         last_status = None
+
+         while True:
+             try:
+                 generation_status = client.generations.get(generation.id)
+                 status = generation_status.state
+                 elapsed_time = time.time() - start_time
+
+                 if status != last_status:
+                     status_tracker.add_step(f"Status: {status}", min(0.2 + (elapsed_time/300), 0.8))
+                     last_status = status
+
+                 current_message = status_messages[message_index % len(status_messages)]
+                 status_tracker.update_message(current_message, min(0.2 + (elapsed_time/300), 0.8))
+                 message_index += 1
+
+                 if status == 'completed':
+                     status_tracker.add_step("Generation completed!", 0.9)
+                     download_url = generation_status.assets.video
+                     break
+                 elif status == 'failed':
+                     failure_reason = generation_status.failure_reason or "It's not you, it's me"
+                     return None, f"🎭 Generation failed: {failure_reason}"
+
+                 if elapsed_time > 300:
+                     return None, "⏰ Generation timeout (5 minutes of awkward silence)"
+
+                 time.sleep(10)
+
+             except Exception as e:
+                 print(f"Error during generation polling: {str(e)}")
+                 print(traceback.format_exc())
+                 time.sleep(10)
+                 continue
+
+         # Download the video
+         status_tracker.update_message("Downloading your masterpiece...", 0.95)
+         try:
+             response = requests.get(download_url, stream=True, timeout=30)
+             response.raise_for_status()
+             file_path = "output_video.mp4"
+             with open(file_path, 'wb') as file:
+                 file.write(response.content)
+
+             status_tracker.add_step("🎉 Video ready!", 1.0)
+             return file_path, status_tracker.get_status()  # video path plus the accumulated status markdown
+         except Exception as e:
+             return None, f"📺 Video download failed: {str(e)}"
+
+     except Exception as e:
+         print(f"Error during generation: {str(e)}")
+         print(traceback.format_exc())
+         return None, f"🎪 The show must go on, but: {str(e)}"
+
+ # Create Gradio interface with a modern theme
+ with gr.Blocks(theme=gr.themes.Soft(
+     primary_hue="indigo",
+     secondary_hue="purple",
+ )) as app:
+     gr.Markdown(
+         """
+         # 🎬 LumaAI Video Generator
+         ### Transform your prompts into mesmerizing videos
+         """
+     )
+
+     with gr.Row():
+         # Create input and output columns
+         prompt, camera_motion, api_key, image_input, generate_btn, status_display = create_input_column()
+         video_output = create_output_column()
+
+     generate_btn.click(
+         fn=generate_video,
+         inputs=[api_key, prompt, camera_motion, image_input],
+         outputs=[video_output, status_display]
+     )
+
+ if __name__ == "__main__":
+     app.launch()
lib/__init__.py ADDED
@@ -0,0 +1 @@
+ # This file is intentionally empty to make the directory a Python package
lib/api_utils.py ADDED
@@ -0,0 +1,70 @@
+ import requests
+ import traceback
+ from typing import Optional
+
+ def upload_to_freeimage(file_path: str, status_tracker) -> str:
+     """
+     Upload a file to freeimage.host and return the direct image URL.
+
+     Args:
+         file_path: Path to the file to upload
+         status_tracker: StatusTracker instance for progress updates
+
+     Returns:
+         str: Direct URL for the uploaded image
+
+     Raises:
+         Exception: If upload fails for any reason
+     """
+     try:
+         # API endpoint
+         url = 'https://freeimage.host/api/1/upload'
+
+         # Read image file
+         with open(file_path, 'rb') as image_file:
+             # Prepare the files and data for upload
+             files = {
+                 'source': image_file
+             }
+             data = {
+                 'key': '6d207e02198a847aa98d0a2a901485a5'  # Free API key from freeimage.host
+             }
+
+             status_tracker.update_message("Uploading image to CDN...", 0.05)
+
+             # Make the request
+             response = requests.post(url, files=files, data=data, timeout=30)
+             response.raise_for_status()
+
+             # Get the direct image URL from response
+             result = response.json()
+             if result.get('status_code') == 200:
+                 image_url = result['image']['url']
+                 status_tracker.add_step("Image uploaded to CDN", 0.08)
+                 return image_url
+             else:
+                 raise Exception(f"Upload failed: {result.get('error', 'Unknown error')}")
+
+     except requests.Timeout:
+         raise Exception("CDN is taking a coffee break (timeout)")
+     except requests.ConnectionError:
+         raise Exception("Can't reach CDN (is the internet on vacation?)")
+     except Exception as e:
+         print(f"CDN upload error: {str(e)}")
+         print(traceback.format_exc())
+         raise Exception(f"CDN upload failed: {str(e)}")
+
+ def get_camera_motions() -> list:
+     """
+     Get list of available camera motions from LumaAI.
+
+     Returns:
+         list: List of camera motion options
+     """
+     try:
+         from lumaai import LumaAI
+         client = LumaAI()
+         motions = client.generations.camera_motion.list()
+         return ["None"] + motions
+     except Exception:
+         return ["None", "camera orbit left", "camera orbit right", "camera dolly in", "camera dolly out"]
lib/error_utils.py ADDED
@@ -0,0 +1,39 @@
+ def style_error(message: str) -> str:
+     """
+     Style an error message with a muted red background and border.
+
+     Args:
+         message: The error message to style
+
+     Returns:
+         str: HTML-formatted error message with styling
+     """
+     return f"""
+     <div style="padding: 1rem;
+                 border-radius: 0.5rem;
+                 background-color: #fee2e2;
+                 border: 1px solid #ef4444;
+                 margin: 1rem 0;">
+         <p style="color: #dc2626; margin: 0;">🎭 {message}</p>
+     </div>
+     """
+
+ def format_error(error: Exception, prefix: str = "") -> str:
+     """
+     Format an exception into a user-friendly error message.
+
+     Args:
+         error: The exception to format
+         prefix: Optional prefix for the error message
+
+     Returns:
+         str: Styled error message
+     """
+     error_msg = str(error)
+     if len(error_msg) > 100:
+         error_msg = error_msg[:100] + "..."
+
+     if prefix:
+         error_msg = f"{prefix}: {error_msg}"
+
+     return style_error(error_msg)
lib/image_utils.py ADDED
@@ -0,0 +1,69 @@
+ from PIL import Image
+ import io
+ from pathlib import Path
+ import time
+ import traceback
+ from typing import Union, Optional
+ from .api_utils import upload_to_freeimage
+
+ def prepare_image(image: Union[str, bytes, Image.Image, None], status_tracker) -> Optional[str]:
+     """
+     Prepare an image for use with LumaAI by resizing and uploading to CDN.
+
+     Args:
+         image: Input image (can be path, bytes, or PIL Image)
+         status_tracker: StatusTracker instance for progress updates
+
+     Returns:
+         Optional[str]: CDN URL of the prepared image, or None if no image provided
+
+     Raises:
+         Exception: If image preparation fails
+     """
+     if image is None:
+         return None
+
+     try:
+         status_tracker.update_message("Preparing your image for its big moment...", 0.01)
+
+         # Convert to PIL Image if needed
+         if isinstance(image, str):
+             image = Image.open(image)
+         elif isinstance(image, bytes):
+             image = Image.open(io.BytesIO(image))
+         elif not isinstance(image, Image.Image):
+             raise Exception("That doesn't look like an image (unless I need glasses)")
+
+         # Resize image to 512x512
+         image = image.resize((512, 512), Image.Resampling.LANCZOS)
+         status_tracker.add_step("Image resized to 512x512", 0.02)
+
+         # Convert to RGB if necessary
+         if image.mode not in ('RGB', 'RGBA'):
+             image = image.convert('RGB')
+
+         # Save to temporary file
+         temp_dir = Path("temp")
+         temp_dir.mkdir(exist_ok=True)
+         temp_path = temp_dir / f"temp_image_{int(time.time())}.png"
+         image.save(str(temp_path), format='PNG', optimize=True)
+
+         # Upload to freeimage.host
+         try:
+             cdn_url = upload_to_freeimage(temp_path, status_tracker)
+
+             # Clean up temporary file
+             if temp_path.exists():
+                 temp_path.unlink()
+
+             return cdn_url
+         except Exception as e:
+             # Clean up temporary file in case of error
+             if temp_path.exists():
+                 temp_path.unlink()
+             raise Exception(f"Image upload failed: {str(e)}")
+
+     except Exception as e:
+         print(f"Error preparing image: {str(e)}")
+         print(traceback.format_exc())
+         raise Exception(f"Image preparation failed: {str(e)}")
lib/status_utils.py ADDED
@@ -0,0 +1,70 @@
+ import time
+ import json
+ from pathlib import Path
+
+ def load_messages() -> list:
+     """Load status messages from JSON file."""
+     with open('loading_messages.json', 'r') as f:
+         return json.load(f)['messages']
+
+ class StatusTracker:
+     """
+     Track and display progress status for video generation.
+     """
+     def __init__(self, progress, status_box=None):
+         self.progress = progress
+         self.status_box = status_box
+         self.steps = []
+         self.current_message = ""
+         self._status_markdown = "### 🎬 Ready to Generate"
+
+     def add_step(self, message: str, progress_value: float):
+         """
+         Add a permanent step to the progress display.
+
+         Args:
+             message: Step description
+             progress_value: Progress value between 0 and 1
+         """
+         self.steps.append(f"✓ {message}")
+         self._update_display(progress_value)
+         time.sleep(0.5)  # Brief pause for visibility
+
+     def update_message(self, message: str, progress_value: float):
+         """
+         Update the current working message.
+
+         Args:
+             message: Current status message
+             progress_value: Progress value between 0 and 1
+         """
+         self.current_message = f"➀ {message}"
+         self._update_display(progress_value)
+
+     def _update_display(self, progress_value: float):
+         """
+         Update the status display with current progress.
+
+         Args:
+             progress_value: Progress value between 0 and 1
+         """
+         # Create markdown-formatted status display
+         status_md = "### 🎬 Generation Progress:\n"
+         for step in self.steps:
+             status_md += f"- {step}\n"
+         if self.current_message:
+             status_md += f"\n**Current Step:**\n{self.current_message}"
+
+         self._status_markdown = status_md
+         self.progress(progress_value)
+
+         # Only try to update status_box if it exists
+         if self.status_box is not None:
+             try:
+                 self.status_box.update(value=self._status_markdown)
+             except Exception:
+                 pass  # Silently handle if update fails
+
+     def get_status(self) -> str:
+         """Get the current status markdown."""
+         return self._status_markdown
lib/ui_components.py ADDED
@@ -0,0 +1,57 @@
+ import gradio as gr
+ from .api_utils import get_camera_motions
+
+ def create_input_column():
+     """Create the input column of the UI."""
+     with gr.Column(scale=1, min_width=400) as column:
+         # Main inputs
+         prompt = gr.Textbox(
+             label="Prompt",
+             placeholder="Describe your video scene here...",
+             lines=3
+         )
+         camera_motion = gr.Dropdown(
+             choices=get_camera_motions(),
+             label="Camera Motion",
+             value="None"
+         )
+
+         # Collapsible sections
+         with gr.Accordion("🔑 API Settings", open=False):
+             api_key = gr.Textbox(
+                 label="LumaAI API Key",
+                 placeholder="Enter your API key here",
+                 type="password"
+             )
+
+         with gr.Accordion("🖼️ Advanced Options", open=False):
+             image_input = gr.Image(
+                 label="Starting Image (will be resized to 512x512)",
+                 type="pil"
+             )
+
+         generate_btn = gr.Button("🚀 Generate Video", variant="primary", size="lg")
+
+         # Status display
+         status_display = gr.Markdown()
+
+         gr.Markdown(
+             """
+             ### 🎯 Pro Tips:
+             - Be specific and descriptive in your prompts
+             - Try different camera motions for dynamic effects
+             - Generation usually takes 1-3 minutes ☕
+             """
+         )
+
+     return prompt, camera_motion, api_key, image_input, generate_btn, status_display
+
+ def create_output_column():
+     """Create the output column of the UI."""
+     with gr.Column(scale=1, min_width=600) as column:
+         video_output = gr.Video(
+             label="Generated Video",
+             width="100%",
+             height="400px"
+         )
+     return video_output
loading_messages.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "messages": [
+     "AI is pondering the meaning of your prompt... and life",
+     "Converting caffeine into video frames...",
+     "Teaching pixels to dance to your prompt...",
+     "Negotiating with stubborn neurons...",
+     "Bribing the GPU with more electricity...",
+     "Asking ChatGPT for video editing advice (just kidding)",
+     "Performing ancient AI rituals for better results...",
+     "Consulting the sacred scrolls of deep learning...",
+     "Attempting to reason with random number generators...",
+     "Convincing the AI that your prompt is totally reasonable...",
+     "Feeding hamsters that power the GPU...",
+     "Downloading more RAM (don't tell Chrome)...",
+     "Reticulating splines in the neural network...",
+     "Teaching AI about color theory using memes...",
+     "Calculating the meaning of life (currently at 42)...",
+     "Asking senior AI for approval (they're on coffee break)...",
+     "Debugging quantum fluctuations in the matrix...",
+     "Optimizing neural pathways with rubber duck debugging...",
+     "Applying machine learning to procrastination...",
+     "Converting your prompt into interpretive dance...",
+     "Consulting with the AI elders...",
+     "Summoning the spirit of Alan Turing...",
+     "Teaching AI about human humor (still confused)...",
+     "Reorganizing bits into artistic arrangements...",
+     "Explaining art theory to silicon chips...",
+     "Motivating lazy neurons with inspirational quotes...",
+     "Performing ritual sacrifices to the GPU gods...",
+     "Translating prompt into binary and back again...",
+     "Asking AI to think outside the box (it's stuck)...",
+     "Generating random excuses for slow processing..."
+   ]
+ }
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ gradio
+ lumaai
+ requests
+ pillow
+ python-dotenv