import gradio as gr
import numpy as np
import random
from PIL import Image, ImageDraw, ImageFont
import torch
from diffusers import DiffusionPipeline
import io
import time

# ===== CONFIG =====
# Print debug info
print(f"PyTorch version: {torch.__version__}")
print(f"CUDA available: {torch.cuda.is_available()}")
print(f"CUDA device count: {torch.cuda.device_count()}")

device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32

# Using SDXL Turbo for fastest generation
model_repo_id = "stabilityai/sdxl-turbo"
pipe = DiffusionPipeline.from_pretrained(
    model_repo_id,
    torch_dtype=torch_dtype,
    variant="fp16" if device == "cuda" else None
)
pipe.to(device)

# Enable optimizations only if GPU is available
if device == "cuda":
    try:
        pipe.enable_xformers_memory_efficient_attention()
        print("Enabled xformers memory efficient attention")
    except Exception as e:
        print(f"Could not enable xformers: {str(e)}")
    
    try:
        pipe.unet.to(memory_format=torch.channels_last)
        print("Enabled channels last memory format")
    except Exception as e:
        print(f"Could not enable channels last: {str(e)}")
else:
    print("Running on CPU - skipping GPU optimizations")

MAX_SEED = np.iinfo(np.int32).max
IMAGE_SIZE = 1024  # Note: SDXL-Turbo is trained at 512x512; 1024 works but is slower and off its native resolution
WATERMARK_TEXT = "SelamGPT"

# ===== OPTIMIZED WATERMARK FUNCTION =====
def add_watermark(image):
    """Optimized watermark function matching original style"""
    try:
        draw = ImageDraw.Draw(image)
        font_size = 24  # fixed size for a consistent corner stamp
        
        try:
            font = ImageFont.truetype("Roboto-Bold.ttf", font_size)
        except OSError:
            # load_default(size) needs Pillow >= 10.1; fall back for older versions
            try:
                font = ImageFont.load_default(font_size)
            except TypeError:
                font = ImageFont.load_default()
        
        text_width = draw.textlength(WATERMARK_TEXT, font=font)
        x = image.width - text_width - 10
        y = image.height - 34
        
        # Shadow effect (the alpha value in a fill is ignored on RGB images,
        # so plain black offset text serves as the shadow)
        draw.text((x + 1, y + 1), WATERMARK_TEXT, font=font, fill=(0, 0, 0))
        draw.text((x, y), WATERMARK_TEXT, font=font, fill=(255, 255, 255))
        
        return image
    except Exception as e:
        print(f"Watermark error: {str(e)}")
        return image
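
# Quick standalone check of the watermark (a hypothetical snippet, not part of
# the app flow):
#     add_watermark(Image.new("RGB", (512, 512), "gray")).show()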

# ===== ULTRA-FAST INFERENCE FUNCTION =====
def generate(
    prompt,
    negative_prompt="",
    seed=None,
    randomize_seed=True,
    guidance_scale=0.0,  # 0.0 for turbo models; negative_prompt has no effect at guidance <= 1
    num_inference_steps=1,  # Can be as low as 1-2 for turbo
    progress=gr.Progress(track_tqdm=True),
):
    if not prompt or not prompt.strip():  # guard against None as well as empty strings
        return None, "⚠️ Please enter a prompt"

    start_time = time.time()
    
    # Seed handling
    if randomize_seed or seed is None:
        seed = random.randint(0, MAX_SEED)
    
    # Use a dedicated Generator rather than reseeding torch's global RNG
    generator = torch.Generator(device=device).manual_seed(seed)
    
    try:
        # Ultra-fast generation with minimal steps
        result = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            width=IMAGE_SIZE,
            height=IMAGE_SIZE,
            guidance_scale=guidance_scale,
            num_inference_steps=max(1, int(num_inference_steps)),  # coerce slider value; minimum 1 step
            generator=generator,
        ).images[0]
        
        # Optimized watermark and JPG conversion
        watermarked = add_watermark(result)
        buffer = io.BytesIO()
        watermarked.save(buffer, format="JPEG", quality=85, optimize=True)
        buffer.seek(0)
        
        gen_time = time.time() - start_time
        status = f"✔️ Generated in {gen_time:.2f}s | Seed: {seed}"
        
        return Image.open(buffer), status
    
    except torch.cuda.OutOfMemoryError:
        return None, "⚠️ GPU out of memory - try a simpler prompt"
    except Exception as e:
        print(f"Generation error: {str(e)}")
        return None, f"⚠️ Error: {str(e)[:200]}"

# ===== EXAMPLES =====
examples = [
    ["An ancient Aksumite warrior in cyberpunk armor, 4k detailed"],
    ["Traditional Ethiopian coffee ceremony in zero gravity"],
    ["Portrait of a Habesha queen with golden jewelry"]
]

# ===== OPTIMIZED INTERFACE =====
theme = gr.themes.Default(
    primary_hue="emerald",
    secondary_hue="amber",
    font=[gr.themes.GoogleFont("Poppins"), "Arial", "sans-serif"]
)

with gr.Blocks(theme=theme, title="SelamGPT Turbo Generator") as demo:
    gr.Markdown("""
    # 🎨 SelamGPT Turbo Image Generator
    *Ultra-fast 1024x1024 image generation with SDXL-Turbo*
    """)
    
    with gr.Row():
        with gr.Column(scale=3):
            prompt = gr.Textbox(
                label="Describe your image",
                placeholder="A futuristic Ethiopian city with flying cars...",
                lines=3,
                max_lines=5
            )
            with gr.Row():
                generate_btn = gr.Button("Generate Image", variant="primary")
                clear_btn = gr.Button("Clear")
            
            gr.Examples(
                examples=examples,
                inputs=[prompt]
            )
            
        with gr.Column(scale=2):
            output_image = gr.Image(
                label="Generated Image",
                type="pil",
                format="jpeg",
                height=512
            )
            status_output = gr.Textbox(
                label="Status",
                interactive=False
            )
    
    with gr.Accordion("⚙️ Advanced Settings", open=False):
        negative_prompt = gr.Textbox(
            label="Negative Prompt",
            placeholder="What to avoid (optional)",
            max_lines=1
        )
        with gr.Row():
            randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
            seed = gr.Number(label="Seed", value=0, precision=0)
        guidance_scale = gr.Slider(0.0, 1.0, value=0.0, step=0.1, label="Guidance Scale")
        num_inference_steps = gr.Slider(1, 4, value=1, step=1, label="Inference Steps")

    generate_btn.click(
        fn=generate,
        inputs=[
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            guidance_scale,
            num_inference_steps
        ],
        outputs=[output_image, status_output]
    )
    
    clear_btn.click(
        fn=lambda: [None, ""],
        outputs=[output_image, status_output]
    )

if __name__ == "__main__":
    demo.queue(max_size=4)  # cap pending requests so wait times stay bounded under load
    demo.launch(server_name="0.0.0.0", server_port=7860)