Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -6,6 +6,7 @@ from longcat_image.models import LongCatImageTransformer2DModel
 from longcat_image.pipelines import LongCatImageEditPipeline
 import numpy as np
 import os
+import spaces
 
 # Global variables for model
 pipe = None
@@ -51,6 +52,7 @@ def initialize_model():
         print(f"❌ Error loading model: {e}")
         raise
 
+@spaces.GPU(duration=120)
 def edit_image(
     input_image: Image.Image,
     prompt: str,
@@ -81,7 +83,7 @@ def edit_image(
     progress(0.2, desc="Generating edited image...")
 
     # Set random seed for reproducibility
-    generator = torch.Generator("cpu").manual_seed(seed)
+    generator = torch.Generator("cuda" if torch.cuda.is_available() else "cpu").manual_seed(seed)
 
     # Run the pipeline
     with torch.inference_mode():
@@ -124,6 +126,9 @@ with gr.Blocks(fill_height=True) as demo:
             <p style="font-size: 14px; margin-top: 10px;">
                 Built with <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color: #4A90E2; text-decoration: none;">anycoder</a>
             </p>
+            <p style="font-size: 12px; color: #888; margin-top: 5px;">
+                ⚡ Powered by Zero-GPU
+            </p>
         </div>
     """)
 
@@ -179,6 +184,15 @@ with gr.Blocks(fill_height=True) as demo:
 
             edit_btn = gr.Button("✨ Edit Image", variant="primary", size="lg")
 
+            gr.Markdown("""
+            <div style="padding: 10px; background-color: #f0f7ff; border-radius: 8px; margin-top: 10px;">
+                <p style="margin: 0; font-size: 12px; color: #555;">
+                    ⏱️ <strong>Note:</strong> Zero-GPU provides 120 seconds of GPU time per request.
+                    Processing typically takes 30-60 seconds depending on settings.
+                </p>
+            </div>
+            """)
+
         with gr.Column(scale=1):
             gr.Markdown("### 🎯 Output")
             output_image = gr.Image(
@@ -195,6 +209,7 @@ with gr.Blocks(fill_height=True) as demo:
             - Supports both English and Chinese prompts
             - Try different guidance scales for varied results
             - Higher inference steps = better quality (but slower)
+            - GPU time is limited - optimize your settings for speed
             """)
 
     # Examples section
@@ -224,7 +239,7 @@ with gr.Blocks(fill_height=True) as demo:
     gr.HTML("""
     <div style="text-align: center; margin-top: 40px; padding: 20px; border-top: 1px solid #eee;">
         <p style="color: #666; font-size: 14px;">
-            Powered by LongCat Image Edit |
+            Powered by LongCat Image Edit with Zero-GPU |
             <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color: #4A90E2;">Built with anycoder</a>
         </p>
     </div>
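
Taken together, the commit follows the usual ZeroGPU recipe: import the `spaces` package, decorate the GPU-bound entry point with `@spaces.GPU(...)` so a GPU is attached only for the duration of that call, and pick the generator device at call time instead of hard-coding "cpu". Below is a minimal sketch of that recipe, assuming the `spaces` package that Hugging Face preinstalls on ZeroGPU Spaces; the `generate` function and its body are illustrative placeholders, not code from this app.py.

import torch
import spaces  # ZeroGPU helper package available on Hugging Face Spaces

@spaces.GPU(duration=120)  # request a GPU for up to 120 seconds per call
def generate(prompt: str, seed: int = 0):
    # On ZeroGPU, CUDA is only usable inside the decorated call,
    # so the generator device is chosen here rather than at import time.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    generator = torch.Generator(device).manual_seed(seed)
    # ... run the diffusion pipeline here, passing generator=generator ...
    return device

This is consistent with the other change in the diff: the seed generator moves from a hard-coded "cpu" device to a CUDA-aware one, since a GPU only exists while the decorated function is running.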