# Sketcher / app.py
from ui.gradio_ui import ui
import spaces
from sd import zerogpu_controller as controller
from sd.utils.utils import *
from utils.utils import sketch_process, prompt_preprocess
import torch  # used by the disabled pipeline code below if it is re-enabled
#from sd.sd_controller import Controller
#controller=Controller()
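# Generation is currently delegated to the sd.zerogpu_controller module
# (imported above as `controller`); the commented-out lines show an alternative
# local Controller. The triple-quoted block below is the earlier in-file
# pipeline code, kept disabled for reference.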
"""
MODELS_NAMES=["cagliostrolab/animagine-xl-3.1",
"stabilityai/stable-diffusion-xl-base-1.0"]
LORA_PATH='sd/lora/lora.safetensors'
VAE=get_vae()
CONTROLNET=get_controlnet()
ADAPTER=get_adapter()
SCHEDULER=get_scheduler(model_name=MODELS_NAMES[1])
DETECTOR=get_detector()
FIRST_PIPE=get_pipe(vae=VAE,
model_name=MODELS_NAMES[0],
controlnet=CONTROLNET,
lora_path=LORA_PATH)
SECOND_PIPE=get_pipe(vae=VAE,
model_name=MODELS_NAMES[1],
adapter=ADAPTER,
scheduler=SCHEDULER)
@spaces.GPU
def get_first_result(img, prompt, negative_prompt,
controlnet_scale=0.5, strength=1.0,n_steps=30,eta=1.0):
substrate, resized_image = sketch_process(img)
prompt=prompt_preprocess(prompt)
result=FIRST_PIPE(image=substrate,
control_image=resized_image,
strength=strength,
prompt=prompt,
negative_prompt = negative_prompt,
controlnet_conditioning_scale=float(controlnet_scale),
generator=torch.manual_seed(0),
num_inference_steps=n_steps,
eta=eta)
return result.images[0]
@spaces.GPU
def get_second_result(img, prompt, negative_prompt,
g_scale=7.5, n_steps=25,
adapter_scale=0.9, adapter_factor=1.0):
preprocessed_img=DETECTOR(img,
detect_resolution=1024,
image_resolution=1024,
apply_filter=True).convert("L")
result=SECOND_PIPE(prompt=prompt,
negative_prompt=negative_prompt,
image=preprocessed_img,
guidance_scale=g_scale,
num_inference_steps=n_steps,
adapter_conditioning_scale=adapter_scale,
adapter_conditioning_factor=adapter_factor,
generator = torch.manual_seed(42))
return result.images[0]
"""
ui(controller)  # alternative wiring for the disabled functions above: ui(get_first_result, get_second_result)
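# The actual Gradio layout lives in ui.gradio_ui. Purely as an illustration of
# how the disabled functions above could be wired to an interface if the
# alternative call noted in the comment were used (every widget name and the
# two-button layout here are assumptions, not the real ui.gradio_ui
# implementation), a minimal sketch might look like:
#
# import gradio as gr
#
# def ui(first_fn, second_fn):
#     with gr.Blocks() as demo:
#         sketch = gr.Image(label="Sketch", type="pil")
#         prompt = gr.Textbox(label="Prompt")
#         negative = gr.Textbox(label="Negative prompt")
#         out_first = gr.Image(label="ControlNet result")
#         out_second = gr.Image(label="Adapter result")
#         gr.Button("Generate (ControlNet)").click(
#             first_fn, inputs=[sketch, prompt, negative], outputs=out_first)
#         gr.Button("Generate (Adapter)").click(
#             second_fn, inputs=[sketch, prompt, negative], outputs=out_second)
#     demo.launch()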