"""Gradio text-to-image demo: DreamShaper 7 accelerated with an LCM-LoRA adapter.

Loads the pipeline once at import time, fuses the LCM LoRA for fast 4-step
sampling, and exposes a simple prompt + guidance-scale web UI.
"""

import gradio as gr
import os
import torch
from diffusers import AutoPipelineForText2Image, LCMScheduler
from torchvision.transforms.functional import to_pil_image, center_crop, resize, to_tensor

# Pick the best available accelerator, falling back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'

model_id = "Lykon/dreamshaper-7"
adapter_id = "latent-consistency/lcm-lora-sdv1-5"

# Half precision is only supported on GPU backends; running fp16 weights on
# CPU raises "expected scalar type Half" errors, so fall back to fp32 there.
# The "fp16" variant still selects the smaller checkpoint files to download.
torch_dtype = torch.float16 if device in ('cuda', 'mps') else torch.float32
pipe = AutoPipelineForText2Image.from_pretrained(
    model_id, torch_dtype=torch_dtype, variant="fp16"
)
# LCM requires its dedicated scheduler for few-step sampling.
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.to(device)

# Load and fuse the LCM LoRA weights into the base model so inference pays
# no per-step LoRA overhead.
pipe.load_lora_weights(adapter_id)
pipe.fuse_lora()

# Default prompt shown in the UI textbox.
prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k"


@torch.no_grad()
def generate(prompt, guidance_scale):
    """Generate one image from *prompt* using the 4-step LCM pipeline.

    Args:
        prompt: Text description of the desired image.
        guidance_scale: Classifier-free guidance strength; higher values
            follow the prompt more literally.

    Returns:
        A ``PIL.Image.Image`` with the generated picture.
    """
    result = pipe(
        prompt=prompt,
        num_inference_steps=4,  # LCM-LoRA is tuned for ~4 steps
        guidance_scale=guidance_scale,
    )
    return result.images[0]


def app():
    """Build and return the Gradio interface wrapping :func:`generate`."""
    return gr.Interface(
        generate,
        [
            gr.Textbox(
                label="Prompt",
                info="Enter your prompt",
                lines=3,
                value=prompt,  # reuse the module-level default instead of duplicating it
            ),
            gr.Slider(
                2,
                20,
                value=7.5,
                label="Guidance Scale",
                info="Higher scale depicts more creativity",
            ),
        ],
        gr.Image(type="pil", height=512, width=512),
        allow_flagging='never',
        title='Gen Image',
    )


if __name__ == "__main__":
    app().launch()