"""Gradio demo: SDXL-Lightning 2-step text-to-image generation."""

import torch
import gradio as gr
from diffusers import EulerDiscreteScheduler, StableDiffusionXLPipeline

# --- Model loading -----------------------------------------------------------
# NOTE(review): "ByteDance/SDXL-Lightning" hosts distilled UNet checkpoints,
# not a complete diffusers pipeline, so from_pretrained on the repo root may
# fail (no model_index.json). The documented flow loads the SDXL base pipeline
# and swaps in the Lightning UNet — confirm against the model card.
MODEL_ID = "ByteDance/SDXL-Lightning"

# Fall back to CPU/fp32 so the script still starts on hosts without CUDA
# (fp16 inference is not supported on CPU).
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32

pipe = StableDiffusionXLPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=DTYPE,
    variant="fp16",
).to(DEVICE)

# SDXL-Lightning is distilled for trailing-timestep Euler sampling; the model
# card prescribes EulerDiscreteScheduler with timestep_spacing="trailing",
# not LCMScheduler.
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing"
)


def generate_image(prompt, seed=42, guidance_scale=0.0):
    """Generate one image from ``prompt`` using the 2-step Lightning model.

    Args:
        prompt: Text description of the desired image.
        seed: RNG seed; ``0`` draws a fresh random seed for this call.
        guidance_scale: CFG scale; the distilled model expects ``0.0``.

    Returns:
        A ``PIL.Image.Image`` with the generated picture.
    """
    # gr.Number may deliver a float even with precision=0; manual_seed
    # requires an int.
    seed = int(seed)
    if seed == 0:
        seed = torch.randint(0, 1_000_000, (1,)).item()
    generator = torch.Generator(device=DEVICE).manual_seed(seed)
    result = pipe(
        prompt=prompt,
        # The checkpoint is distilled specifically for 2 steps — do not tune.
        num_inference_steps=2,
        guidance_scale=guidance_scale,
        generator=generator,
    )
    return result.images[0]


# --- Gradio UI ----------------------------------------------------------------
with gr.Blocks(title="⚡ SDXL Lightning 2-step") as demo:
    gr.Markdown("# ⚡ SDXL Lightning — генерация за 2 шага!")
    gr.Markdown("Модель от ByteDance. Быстро. Качественно. Просто.")

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="A majestic lion in the savannah, cinematic lighting",
                lines=2,
            )
            seed = gr.Number(label="Seed (0 = random)", value=42, precision=0)
            guidance_scale = gr.Slider(
                label="Guidance Scale (лучше оставить 0.0)",
                minimum=0.0,
                maximum=5.0,
                value=0.0,
                step=0.1,
            )
            btn = gr.Button("🚀 Generate", variant="primary")
        with gr.Column():
            output_image = gr.Image(label="Generated Image")

    btn.click(
        fn=generate_image,
        inputs=[prompt, seed, guidance_scale],
        outputs=output_image,
    )

    gr.Examples(
        examples=[
            ["A cyberpunk city at night, neon lights, rain, 4K"],
            ["A cute corgi wearing sunglasses, beach background"],
            ["Portrait of a fantasy elf queen, intricate jewelry, soft glow"],
            ["A steampunk airship flying over mountains, sunset"],
        ],
        inputs=prompt,
    )

    gr.Markdown("📌 **Важно**: Модель обучена генерировать за 2 шага. Не меняйте `num_inference_steps` — это сломает качество.")

if __name__ == "__main__":
    demo.launch()