import gradio as gr
import numpy as np
import random
from diffusers import DiffusionPipeline
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = "stabilityai/stable-diffusion-xl-base-1.0"

if torch.cuda.is_available():
    # GPU: load SDXL base in half precision, enable memory-efficient attention,
    # then attach the coffee-machine LoRA weights.
    pipe = DiffusionPipeline.from_pretrained(
        base_model,
        torch_dtype=torch.float16,
        variant="fp16",
        use_safetensors=True,
    )
    pipe.enable_xformers_memory_efficient_attention()
    pipe = pipe.to(device)
    pipe.load_lora_weights(
        "Fer14/sdxl-coffe-machines",
        weight_name="pytorch_lora_weights.safetensors",
    )
else:
    # CPU fallback: full precision, no xformers.
    pipe = DiffusionPipeline.from_pretrained(base_model, use_safetensors=True)
    pipe = pipe.to(device)
    pipe.load_lora_weights(
        "Fer14/sdxl-coffe-machines",
        weight_name="pytorch_lora_weights.safetensors",
    )

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024


def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
    # Optionally draw a fresh seed, then run the pipeline with a seeded generator
    # so results are reproducible for a given seed.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator().manual_seed(seed)

    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]

    return image


examples = [
    "A cream espresso machine with steam wand for milk frothing, retro-shaped, with no screen and three silver buttons",
    "A beige espresso coffee machine with a wooden handle, rectangular shaped, with no screen and three silver buttons",
    "A red espresso coffee machine with portafilter handle and milk frother, cylindrical shaped, with no screen and three silver buttons",
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""

if torch.cuda.is_available():
    power_device = "GPU"
else:
    power_device = "CPU"

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # Coffee Machine Generator ☕
        Currently running on {power_device}.
        """)

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                visible=False,
            )
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=7.5,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=40,
                    step=1,
                    value=30,
                )

        gr.Examples(examples=examples, inputs=[prompt])

    run_button.click(
        fn=infer,
        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result],
    )

demo.queue().launch()