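"""Text-to-image helper for a Hugging Face Space built on diffusers.

Lazily loads an SDXL pipeline (Animagine XL 3.0 by default), generates an
image for a prompt, and provides helpers to encode the result as base64 WebP.
"""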
import io
import base64

import torch
from diffusers import AutoPipelineForText2Image, AutoencoderKL, DPMSolverMultistepScheduler
# Lazily initialised pipeline; created on the first call to run().
pipe = None


def load_model(_model=None, _vae=None, loras=None):
    global pipe
    _model = _model or 'cagliostrolab/animagine-xl-3.0'
    if _vae:
        # e.g. "stabilityai/sdxl-vae"
        vae = AutoencoderKL.from_pretrained(_vae, torch_dtype=torch.float16)
        pipe = AutoPipelineForText2Image.from_pretrained(
            _model,
            torch_dtype=torch.float16,
            vae=vae,
        )
    else:
        pipe = AutoPipelineForText2Image.from_pretrained(
            _model,
            torch_dtype=torch.float16,
        )
    # DPM++ 2M SDE Karras (sde-dpmsolver++ with Karras sigmas)
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(
        pipe.scheduler.config,
        algorithm_type="sde-dpmsolver++",
        use_karras_sigmas=True,
    )
    # Load any LoRA weights shipped alongside the app as <name>.safetensors.
    for lora in loras or []:
        pipe.load_lora_weights(".", weight_name=lora + ".safetensors")
    if torch.cuda.is_available():
        pipe.to("cuda")
    # Decode latents in slices to reduce peak VRAM usage.
    pipe.enable_vae_slicing()
def pil_to_webp(img):
    """Serialise a PIL image to WebP bytes."""
    buffer = io.BytesIO()
    img.save(buffer, format='webp')
    return buffer.getvalue()


def bin_to_base64(data):
    """Encode raw bytes as an ASCII base64 string."""
    return base64.b64encode(data).decode('ascii')
def run(prompt=None, negative_prompt=None, model=None, guidance_scale=None, steps=None, seed=None):
    global pipe
    # model only takes effect on the first call, when the pipeline is created.
    if not pipe:
        load_model(model)
    _prompt = "masterpiece, best quality, 1girl, portrait"
    _negative_prompt = "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name"
    prompt = prompt or _prompt
    negative_prompt = negative_prompt or _negative_prompt
    guidance_scale = float(guidance_scale) if guidance_scale else 5.0
    steps = int(steps) if steps else 20
    seed = int(seed) if seed else -1
    # A seed of -1 means "random": leave the generator unset. Otherwise use a
    # dedicated Generator so the global torch RNG state is left untouched.
    generator = None
    if seed != -1:
        generator = torch.Generator().manual_seed(seed)
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=steps,
        clip_skip=2,
        generator=generator,
    ).images[0]
    return image
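

# --- Example usage: a minimal sketch, not part of the service API. The
# prompt and seed below are illustrative assumptions; run() returns a PIL
# image, which the helpers above turn into a base64-encoded WebP payload.
if __name__ == "__main__":
    img = run(prompt="masterpiece, best quality, 1girl, portrait", seed=42)
    payload = bin_to_base64(pil_to_webp(img))
    print(payload[:64] + "...")  # truncated preview of the base64 string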