import torch
from diffusers import AutoencoderKL, StableDiffusionXLPipeline, EulerDiscreteScheduler, ControlNetModel
import lpw_stable_diffusion_xl as lpw

from pipeline_stable_diffusion_xl_instantid import StableDiffusionXLInstantIDPipeline

# Written against the diffusers==0.26.1 API; switching to diffusers==0.25.1 raises errors immediately.
if __name__ == "__main__":
    # Generate SDXL images from a single-file checkpoint using long-prompt-weighted
    # (lpw) embeddings, then save each image to disk.
    #
    # clip_skip is 0-based here: 0 = skip nothing, 1 = skip one layer, and so on.
    # (webui counts from 1, so webui's clip_skip=1 corresponds to 0 here.)
    clip_skip = 0
    cur_device = 'cuda'

    vae_model_path = 'E:/data/ai/vae/sdxl_vae.safetensors'
    base_model_path = 'E:/data/ai/sdxl-model/LEOSAM_HelloWorld_SDXL_v4.0.safetensors'
    config_file = 'E:/data/ai/config/v1-inference.yaml'
    xl_config_file = 'E:/data/ai/config/sd_xl_base.yaml'
    control_net_config_file = 'E:/data/ai/config/instand_id_config.json'
    control_net_instant_id_path = 'E:/data/ai/instantID-model/checkpoints/ControlNetModel'
    face_adapter_path = 'E:/data/ai/instantID-model/ip-adapter_instant_id_sdxl.bin'

    # Load pipeline
    # controlnet = ControlNetModel.from_pretrained(control_net_instant_id_path,
    #                                              use_safetensors=True,
    #                                              local_files_only=True)
    # controlnet = ControlNetModel.from_single_file(control_net_instant_id_path,
    #                                               use_safetensors=True,
    #                                               local_files_only=True,
    #                                               original_config_file=control_net_config_file)
    # Loading this VAE caused problems at inference time, so it is disabled.
    # vae = AutoencoderKL.from_single_file(vae_model_path, use_safetensors=True,
    #                                      local_files_only=True,
    #                                      original_config_file=config_file
    #                                      )
    # vae.to(cur_device)
    xl_pipe = StableDiffusionXLPipeline.from_single_file(
        base_model_path,
        use_safetensors=True,
        local_files_only=True,
        original_config_file=xl_config_file,
        # NOTE(review): removed the invalid kwargs `from_safetensor=True` (misspelled
        # duplicate of `use_safetensors`) and `device='cuda'` (not a from_single_file
        # parameter) — device placement is done below via .to("cuda").
        load_safety_checker=False,
        torch_dtype=torch.float32,
        # vae=vae
    )

    # scheduler = EulerDiscreteScheduler.from_config(xl_pipe.scheduler.config)
    # xl_pipe.scheduler = scheduler

    # Fixed seed for reproducible outputs.
    generator = torch.Generator(device="cuda").manual_seed(1225561594)

    xl_pipe = xl_pipe.to("cuda")

    pos_prompt = 'leogirl, cute 1girl, long tousled black hair, biting lips, seductive forward lean, delicate skin, delicate face, real, realistic, 1girl,white hair,blue background,upper body,'
    neg_prompt = '(worst quality,low resolution,bad hands,open mouth),distorted,twisted,watermark,'
    width = 832
    height = 1256
    batch_size = 1
    num_inference_steps = 30
    guidance_scale = 7

    # Build weighted prompt embeddings so prompts longer than the CLIP token
    # limit (and attention-weight syntax) are handled correctly.
    (prompt_embeds, negative_prompt_embeds,
     pooled_prompt_embeds, negative_pooled_prompt_embeds) = lpw.get_weighted_text_embeddings_sdxl(
        pipe=xl_pipe,
        prompt=pos_prompt,
        neg_prompt=neg_prompt,
        clip_skip=clip_skip)

    images_style = xl_pipe(
        # Raw prompts intentionally omitted: the precomputed embeds above replace them.
        # prompt=pos_prompt,
        # negative_prompt=neg_prompt,
        height=height,
        width=width,
        guidance_scale=guidance_scale,
        generator=generator,
        num_inference_steps=num_inference_steps, num_images_per_prompt=batch_size,
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        negative_pooled_prompt_embeds=negative_pooled_prompt_embeds
    ).images
    print('exec end')
    # Save each generated image as image_of_<index>.png in the working directory.
    for index, image in enumerate(images_style):
        output_path = f"image_of_{index}.png"
        image.save(output_path)
    print('run end')

    # NOTE(review): the original commented-out construction swapped the tokenizer
    # and text-encoder arguments; corrected here so it works if re-enabled.
    # pipe = StableDiffusionXLInstantIDPipeline(
    #     vae=vae,
    #     text_encoder=xl_pipe.text_encoder,
    #     text_encoder_2=xl_pipe.text_encoder_2,
    #     tokenizer=xl_pipe.tokenizer,
    #     tokenizer_2=xl_pipe.tokenizer_2,
    #     unet=xl_pipe.unet,
    #     scheduler=scheduler,
    #     #controlnet=controlnet,
    # ).to(cur_device)

    # pipe.load_ip_adapter_instantid(face_adapter_path)