#!/usr/bin/env python
from __future__ import annotations

import gc
import os
import random
import string
import threading

import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import AutoPipelineForImage2Image, DiffusionPipeline
from diffusers.utils import load_image

DESCRIPTION = "# Run any LoRA or SD Model"
if not torch.cuda.is_available():
    DESCRIPTION += (
        "\n⚠️ This Space is running on CPU, and this demo does not work on CPU 😞! "
        "Run it on a GPU by duplicating this Space, or try our website for free and "
        "unlimited use by clicking here, which provides these options and more."
    )
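
# Generation limits and optional features below are read from environment
# variables, so the Space can be reconfigured without editing this file.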
" MAX_SEED = np.iinfo(np.int32).max MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1824")) USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1" ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1" ENABLE_USE_LORA = os.getenv("ENABLE_USE_LORA", "1") == "1" ENABLE_USE_LORA2 = os.getenv("ENABLE_USE_LORA2", "1") == "1" ENABLE_USE_IMG2IMG = os.getenv("ENABLE_USE_IMG2IMG", "1") == "1" device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") def randomize_seed_fn(seed: int, randomize_seed: bool) -> int: if randomize_seed: seed = random.randint(0, MAX_SEED) return seed cached_pipelines = {} # Dicionário para armazenar os pipelines cached_loras = {} # Crie um objeto Lock pipeline_lock = threading.Lock() @spaces.GPU def generate( prompt: str = "", negative_prompt: str = "", prompt_2: str = "", negative_prompt_2: str = "", use_negative_prompt: bool = False, use_prompt_2: bool = False, use_negative_prompt_2: bool = False, seed: int = 0, width: int = 1024, height: int = 1024, guidance_scale_base: float = 5.0, num_inference_steps_base: int = 25, strength_img2img: float = 0.7, use_lora: bool = False, use_lora2: bool = False, model = 'stabilityai/stable-diffusion-xl-base-1.0', lora = '', lora2 = '', lora_scale: float = 0.7, lora_scale2: float = 0.7, use_img2img: bool = False, url = '', ): global cached_pipelines, cached_loras if torch.cuda.is_available(): # Construa a chave do dicionário baseada no modelo e no tipo de pipeline pipeline_key = (model, use_img2img) if pipeline_key not in cached_pipelines: if not use_img2img: cached_pipelines[pipeline_key] = DiffusionPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16, low_cpu_mem_usage=True) elif use_img2img: cached_pipelines[pipeline_key] = AutoPipelineForImage2Image.from_pretrained(model, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16, low_cpu_mem_usage=True) pipe = cached_pipelines[pipeline_key] # Usa o pipeline carregado da memória if use_img2img: init_image = load_image(url) if use_lora: lora_key = (lora, lora_scale) if lora_key not in cached_loras: adapter_name = ''.join(random.choice(string.ascii_letters) for _ in range(5)) pipe.load_lora_weights(lora, adapter_name=adapter_name) cached_loras[lora_key] = adapter_name else: adapter_name = cached_loras[lora_key] pipe.set_adapters(adapter_name, adapter_weights=[lora_scale]) if use_lora2: lora_key1 = (lora, lora_scale) lora_key2 = (lora2, lora_scale2) if lora_key1 not in cached_loras: adapter_name1 = ''.join(random.choice(string.ascii_letters) for _ in range(5)) pipe.load_lora_weights(lora, adapter_name=adapter_name1) cached_loras[lora_key1] = adapter_name1 else: adapter_name1 = cached_loras[lora_key1] if lora_key2 not in cached_loras: adapter_name2 = ''.join(random.choice(string.ascii_letters) for _ in range(5)) pipe.load_lora_weights(lora2, adapter_name=adapter_name2) cached_loras[lora_key2] = adapter_name2 else: adapter_name2 = cached_loras[lora_key2] pipe.set_adapters([adapter_name1, adapter_name2], adapter_weights=[lora_scale, lora_scale2]) pipe.enable_model_cpu_offload() generator = torch.Generator().manual_seed(seed) if not use_negative_prompt: negative_prompt = None # type: ignore if not use_prompt_2: prompt_2 = None # type: ignore if not use_negative_prompt_2: negative_prompt_2 = None # type: ignore with pipeline_lock: if use_img2img: result = pipe( prompt=prompt, image=init_image, strength=strength_img2img, negative_prompt=negative_prompt, prompt_2=prompt_2, 
        with pipeline_lock:
            if use_img2img:
                # img2img pipelines derive the output size from init_image,
                # so width/height are not passed here.
                result = pipe(
                    prompt=prompt,
                    image=init_image,
                    strength=strength_img2img,
                    negative_prompt=negative_prompt,
                    prompt_2=prompt_2,
                    negative_prompt_2=negative_prompt_2,
                    guidance_scale=guidance_scale_base,
                    num_inference_steps=num_inference_steps_base,
                    generator=generator,
                ).images[0]
            else:
                result = pipe(
                    prompt=prompt,
                    negative_prompt=negative_prompt,
                    prompt_2=prompt_2,
                    negative_prompt_2=negative_prompt_2,
                    width=width,
                    height=height,
                    guidance_scale=guidance_scale_base,
                    num_inference_steps=num_inference_steps_base,
                    generator=generator,
                ).images[0]

        # Memory cleanup: release cached CUDA memory before returning; the
        # pipeline itself stays in cached_pipelines so later calls can reuse it.
        torch.cuda.empty_cache()
        gc.collect()

        return result


with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
    gr.HTML(
        "