from diffusers import (
    StableDiffusionXLPipeline,
    AutoencoderKL,
    UNet2DConditionModel,
    LCMScheduler,
    DPMSolverMultistepScheduler,
)
import torch

# Module-level cache so repeated calls reuse the already-loaded pipeline
loaded_pipe = None
loaded_pipe_id = None

def load_model(pipe_id, unet_model_id):
    """Load an SDXL pipeline with the given UNet, reusing the cached pipeline when possible."""
    global loaded_pipe, loaded_pipe_id
    if loaded_pipe_id != pipe_id:
        # Distilled LCM UNet, loaded in half precision
        unet = UNet2DConditionModel.from_pretrained(
            unet_model_id,
            torch_dtype=torch.float16,
            variant="fp16",
        )
        # fp16-safe VAE; the stock SDXL VAE can produce NaNs in half precision
        vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
        
        loaded_pipe = StableDiffusionXLPipeline.from_pretrained(
            pipe_id, unet=unet, vae=vae, torch_dtype=torch.float16, variant="fp16",
        ).to("cuda")
        loaded_pipe_id = pipe_id
    return loaded_pipe

def set_scheduler(pipe, scheduler_type):
    # Swap the scheduler in place, inheriting the pipeline's existing schedule config
    if scheduler_type == "LCM":
        pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
    elif scheduler_type == "DPM++ 2M Karras":
        # from_config keeps the pipeline's noise schedule; use_karras_sigmas expects a bool
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(
            pipe.scheduler.config, use_karras_sigmas=True
        )
    return pipe

def generate_image(prompt, num_inference_steps, seed, guidance_scale,
                   negative_prompt=None,
                   pipe_id="KBlueLeaf/kohaku-xl-beta7.1",
                   unet_model_id="latent-consistency/lcm-sdxl",
                   scheduler_type="LCM"):
    pipe = load_model(pipe_id, unet_model_id)
    pipe = set_scheduler(pipe, scheduler_type)
    # Seed the default generator so results are reproducible for a given seed
    generator = torch.manual_seed(seed)
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        generator=generator,
        guidance_scale=guidance_scale,
    ).images[0]
    return image
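
# --- Usage sketch ---
# Minimal example, assuming a CUDA device and the default LCM setup above.
# With an LCM scheduler, few steps (4-8) and a low guidance scale (~1.0-2.0)
# are typical; the prompt and output filename are illustrative placeholders.
if __name__ == "__main__":
    img = generate_image(
        prompt="a watercolor painting of a lighthouse at dawn",
        num_inference_steps=8,
        seed=42,
        guidance_scale=1.5,
    )
    img.save("lighthouse.png")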