#=============T2I Adapter============
from diffusers import StableDiffusionXLAdapterPipeline, T2IAdapter, EulerAncestralDiscreteScheduler, AutoencoderKL
from diffusers.utils import load_image, make_image_grid
from controlnet_aux.lineart import LineartDetector
import torch
from PIL import Image
from DWPose.inference import get_pose

# Load the OpenPose T2I-Adapter for SDXL in half precision.
# Fix: the keyword was misspelled "varient"; the documented parameter is
# "variant", so the fp16 weight files are actually selected.
adapter = T2IAdapter.from_pretrained(
    "t2i-adapter-openpose-sdxl-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    local_files_only=True,
)

# Checkpoint and scheduler configuration.
# config_id points at the shared SDXL config folder; model_id is the
# single-file .safetensors checkpoint loaded further below.
config_id = 'sdxl'
model_id = 'anime_illust_diffusion_xl/Nova_Flat_XL_02.safetensors'

# Euler Ancestral scheduler, initialized from the SDXL scheduler config.
euler_a = EulerAncestralDiscreteScheduler.from_pretrained(
    config_id, subfolder="scheduler"
)

# Build the SDXL + adapter pipeline from a single .safetensors checkpoint.
# Fix: from_single_file documents the dtype parameter as "torch_dtype",
# not "dtype" — with the wrong keyword the fp16 request was not applied
# as intended.
pipe = StableDiffusionXLAdapterPipeline.from_single_file(
    model_id,
    adapter=adapter,
    scheduler=euler_a,
    torch_dtype=torch.float16,
    config=config_id,
    variant="fp16",
    local_files_only=True,
)

# Offload submodules to CPU when idle to reduce peak VRAM usage.
pipe.enable_model_cpu_offload()


# Extract pose conditioning from the reference image via DWPose.
images = get_pose("reference_image/111.png")

# Fixed seed for reproducible sampling.
generator = torch.Generator("cuda").manual_seed(31)
prompt = "1girl, white background, full body, looking at viewer, smile, blush, purple eyes, white hair, wing hair ornament, very long hair, low twintails, hair bow, hairclip, pleated skirt, white collared shirt, open clothes, pink jacket, loose socks, white socks"
negative_prompt = "worst quality, low quality, lowres, messy, abstract, ugly, disfigured, bad anatomy, deformed hands, fused fingers, signature, text, multi views"

gen_images = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    image=images,
    num_inference_steps=28,
    # Fix: was 0, which requests zero images and leaves the save loop
    # below with nothing to write. Generate one image per prompt.
    num_images_per_prompt=1,
    adapter_conditioning_scale=1,
    generator=generator,
    guidance_scale=7,
).images

# Save every generated image to the working directory.
for i, img in enumerate(gen_images):
    img.save(f'out_lin_{i}.png')