import torch  # needed for torch.manual_seed below; not guaranteed by the star import
import spaces
from PIL import Image

from sd.prompt_helper import helper
from sd.utils.utils import *
from utils.utils import sketch_process, prompt_preprocess

MODELS_NAMES = [
    "cagliostrolab/animagine-xl-3.1",
    "stabilityai/stable-diffusion-xl-base-1.0",
]
LORA_PATH = "sd/lora/lora.safetensors"

# Shared model components, loaded once at import time.
VAE = get_vae()
CONTROLNET = get_controlnet()
ADAPTER = get_adapter()
SCHEDULER = get_scheduler(model_name=MODELS_NAMES[1])
DETECTOR = get_detector()

# Pipeline 1: ControlNet + LoRA on Animagine XL 3.1 (sketch-guided generation).
FIRST_PIPE = get_pipe(
    vae=VAE,
    model_name=MODELS_NAMES[0],
    controlnet=CONTROLNET,
    lora_path=LORA_PATH,
)
# Pipeline 2: T2I-Adapter on SDXL base with a dedicated scheduler.
SECOND_PIPE = get_pipe(
    vae=VAE,
    model_name=MODELS_NAMES[1],
    adapter=ADAPTER,
    scheduler=SCHEDULER,
)


@spaces.GPU
def get_first_result(img, prompt, negative_prompt, controlnet_scale=0.5,
                     strength=1.0, n_steps=30, eta=1.0):
    """Generate an image from a sketch with the ControlNet pipeline."""
    substrate, resized_image = sketch_process(img["composite"])
    prompt = prompt_preprocess(prompt)
    # Keep the pipeline on the GPU only for the duration of the call.
    FIRST_PIPE.to("cuda")
    result = FIRST_PIPE(
        image=substrate,
        control_image=resized_image,
        strength=strength,
        prompt=prompt,
        negative_prompt=negative_prompt,
        controlnet_conditioning_scale=float(controlnet_scale),
        generator=torch.manual_seed(0),  # fixed seed for reproducible output
        num_inference_steps=n_steps,
        eta=eta,
    )
    FIRST_PIPE.to("cpu")
    return result.images[0]


@spaces.GPU
def get_second_result(img, prompt, negative_prompt, g_scale=7.5, n_steps=25,
                      adapter_scale=0.9, adapter_factor=1.0):
    """Generate an image with the T2I-Adapter pipeline from a detected line art."""
    DETECTOR.to("cuda")
    SECOND_PIPE.to("cuda")
    # Extract a grayscale control map from the input image.
    preprocessed_img = DETECTOR(
        img,
        detect_resolution=1024,
        image_resolution=1024,
        apply_filter=True,
    ).convert("L")
    result = SECOND_PIPE(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=preprocessed_img,
        guidance_scale=g_scale,
        num_inference_steps=n_steps,
        adapter_conditioning_scale=adapter_scale,
        adapter_conditioning_factor=adapter_factor,
        generator=torch.manual_seed(42),  # fixed seed for reproducible output
    )
    DETECTOR.to("cpu")
    SECOND_PIPE.to("cpu")
    return result.images[0]


def get_help_w_prompt(img):
    """Suggest a prompt for the given image (or editor dict with a "composite" layer)."""
    if isinstance(img, dict):
        return helper.get_help(img["composite"])
    return helper.get_help(img)
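
# --- Usage sketch (illustrative, not part of the original app wiring) ---
# A minimal local smoke test, assuming a sketch image exists at "sketch.png"
# and that the spaces.GPU decorator degrades gracefully outside a ZeroGPU
# Space. The file names and prompts below are hypothetical placeholders.
if __name__ == "__main__":
    sketch = Image.open("sketch.png").convert("RGB")

    # get_first_result expects a Gradio-style editor dict with a "composite" key.
    first = get_first_result(
        {"composite": sketch},
        prompt="1girl, watercolor, masterpiece",
        negative_prompt="lowres, bad anatomy",
    )
    first.save("first_result.png")

    # get_second_result takes the raw image and derives its own control map.
    second = get_second_result(
        sketch,
        prompt="detailed illustration, soft lighting",
        negative_prompt="blurry, lowres",
    )
    second.save("second_result.png")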