import torch
from diffusers import AutoencoderKL, StableDiffusionXLPipeline, EulerDiscreteScheduler, ControlNetModel, \
    StableDiffusionXLControlNetPipeline
import lpw_stable_diffusion_xl as lpw
from diffusers.utils import load_image
from insightface.app import FaceAnalysis
import cv2
import numpy as np
import PIL
from PIL import Image
from deepface import DeepFace

from pipeline_stable_diffusion_xl_instantid import StableDiffusionXLInstantIDPipeline, draw_kps


def pad64(x):
    """Return the number of pixels to add to x to reach the next multiple of 64.

    Returns 0 when x is already a multiple of 64.
    """
    rounded_up = np.ceil(float(x) / 64.0) * 64
    return int(rounded_up - x)


def resize_img3(input_image, max_side=1280, min_side=1024, size=None,
                pad_to_max_side=False, mode=PIL.Image.BILINEAR, base_pixel_number=64):
    """Resize so the shorter side reaches min_side, flooring both dimensions
    down to a multiple of base_pixel_number.

    If size is given it overrides the computed (width, height). When
    pad_to_max_side is True, the result is centered on a white
    max_side x max_side canvas.
    """
    width, height = input_image.size
    if size is not None:
        new_w, new_h = size
    else:
        scale = min_side / min(height, width)
        new_w = (round(scale * width) // base_pixel_number) * base_pixel_number
        new_h = (round(scale * height) // base_pixel_number) * base_pixel_number
    input_image = input_image.resize([new_w, new_h], mode)

    if pad_to_max_side:
        # White square canvas with the resized image centered on it.
        canvas = np.full([max_side, max_side, 3], 255, dtype=np.uint8)
        x0 = (max_side - new_w) // 2
        y0 = (max_side - new_h) // 2
        canvas[y0:y0 + new_h, x0:x0 + new_w] = np.array(input_image)
        input_image = Image.fromarray(canvas)
    return input_image


def resize_img2(input_image, max_side=1280, min_side=1024, size=None,
                pad_to_max_side=False, mode=PIL.Image.BILINEAR, base_pixel_number=64):
    """Resize so the shorter side reaches min_side, rounding both dimensions
    UP to a multiple of 64 via pad64.

    If size is given it overrides the computed (width, height). When
    pad_to_max_side is True, the result is centered on a white
    max_side x max_side canvas.

    NOTE(review): base_pixel_number is unused here — pad64 is hard-wired
    to 64.
    """
    width, height = input_image.size
    if size is not None:
        target_w, target_h = size
    else:
        scale = min_side / min(height, width)
        target_w = round(scale * width)
        target_h = round(scale * height)
        # Round each dimension up to the next multiple of 64.
        target_w += pad64(target_w)
        target_h += pad64(target_h)
    input_image = input_image.resize([target_w, target_h], mode)

    if pad_to_max_side:
        # White square canvas with the resized image centered on it.
        canvas = np.full([max_side, max_side, 3], 255, dtype=np.uint8)
        x0 = (max_side - target_w) // 2
        y0 = (max_side - target_h) // 2
        canvas[y0:y0 + target_h, x0:x0 + target_w] = np.array(input_image)
        input_image = Image.fromarray(canvas)
    return input_image


def resize_img(input_image, max_side=1280, min_side=1024, size=None,
               pad_to_max_side=False, mode=PIL.Image.BILINEAR, base_pixel_number=64):
    """Resize so the shorter side reaches min_side, then rescale so the longer
    side becomes max_side, flooring both dimensions down to a multiple of
    base_pixel_number.

    If size is given it overrides the computed (width, height). When
    pad_to_max_side is True, the result is centered on a white
    max_side x max_side canvas.
    """
    w, h = input_image.size
    if size is not None:
        w_resize_new, h_resize_new = size
    else:
        # Scale so the shorter side becomes exactly min_side.
        ratio = min_side / min(h, w)
        w, h = round(ratio * w), round(ratio * h)
        # Rescale so the longer side becomes exactly max_side (this may
        # enlarge or shrink the dimensions computed above).
        ratio = max_side / max(h, w)
        input_image = input_image.resize([round(ratio * w), round(ratio * h)], mode)
        # Floor each dimension down to a multiple of base_pixel_number.
        w_resize_new = (round(ratio * w) // base_pixel_number) * base_pixel_number
        h_resize_new = (round(ratio * h) // base_pixel_number) * base_pixel_number
    # NOTE(review): when size is None the image has already been resized once
    # above, so this second resize resamples it again — confirm intentional.
    input_image = input_image.resize([w_resize_new, h_resize_new], mode)

    if pad_to_max_side:
        # White square canvas with the resized image centered on it.
        res = np.ones([max_side, max_side, 3], dtype=np.uint8) * 255
        offset_x = (max_side - w_resize_new) // 2
        offset_y = (max_side - h_resize_new) // 2
        res[offset_y:offset_y + h_resize_new, offset_x:offset_x + w_resize_new] = np.array(input_image)
        input_image = Image.fromarray(res)
    return input_image


def convert_from_image_to_cv2(img: Image) -> np.ndarray:
    """Convert a PIL image (RGB channel order) to an OpenCV ndarray (BGR)."""
    rgb_array = np.array(img)
    return cv2.cvtColor(rgb_array, cv2.COLOR_RGB2BGR)


if __name__ == "__main__":
    # clip_skip is 0-based here: 0 means no CLIP layers skipped, 1 skips one
    # layer, and so on. (webui counts from 1, so webui's value 1 corresponds
    # to 0 here.)
    clip_skip = 0
    cur_device = 'cuda'
    torch_dtype = torch.float16

    # Local model / config paths.
    vae_model_path = 'E:/data/ai/vae/sdxl_vae.safetensors'
    base_model_path = 'E:/data/ai/sdxl-model/LEOSAM_HelloWorld_SDXL_v4.0.safetensors'
    config_file = 'E:/data/ai/config/v1-inference.yaml'
    xl_config_file = 'E:/data/ai/config/sd_xl_base.yaml'
    control_net_config_file = 'E:/data/ai/config/instand_id_config.json'
    control_net_instant_id_path = 'E:/data/ai/instantID-model/checkpoints/ControlNetModel'
    face_adapter_path = 'E:/data/ai/instantID-model/ip-adapter_instant_id_sdxl.bin'

    # Load the InstantID ControlNet (identity-conditioning branch).
    controlnet = ControlNetModel.from_pretrained(control_net_instant_id_path,
                                                 use_safetensors=True,
                                                 local_files_only=True,
                                                 torch_dtype=torch_dtype, )
    # NOTE: loading the standalone VAE caused problems, so the checkpoint's
    # built-in VAE is used instead.
    controlnet.to(cur_device)
    xl_pipe = StableDiffusionXLPipeline.from_single_file(
        base_model_path,
        use_safetensors=True,
        local_files_only=True,
        original_config_file=xl_config_file,
        # NOTE(review): 'from_safetensor' is not a documented from_single_file
        # argument (looks like a typo of use_safetensors) — confirm and remove.
        from_safetensor=True, device='cuda', load_safety_checker=False,
        torch_dtype=torch_dtype,
    )

    # Fixed seed for reproducible outputs.
    generator = torch.Generator(device="cuda").manual_seed(1451775822)

    xl_pipe = xl_pipe.to("cuda")

    pos_prompt = 'solo,1boy,realistic,male focus,black hair,looking at viewer,shirt,grey background,simple background,white shirt,signature,portrait,black eyes,lips,brown eyes,upper body,short hair,'
    neg_prompt = '(worst quality,low resolution,bad hands,open mouth),distorted,twisted,watermark,'
    width = 800
    height = 800
    batch_size = 1
    num_inference_steps = 30
    guidance_scale = 7

    # Weighted (long-prompt) text embeddings, honoring prompt attention
    # syntax and clip_skip.
    (prompt_embeds, negative_prompt_embeds,
     pooled_prompt_embeds, negative_pooled_prompt_embeds) = lpw.get_weighted_text_embeddings_sdxl(
        pipe=xl_pipe,
        prompt=pos_prompt,
        neg_prompt=neg_prompt,
        clip_skip=clip_skip)

    # Rebuild an InstantID pipeline reusing the already-loaded SDXL components
    # so the base checkpoint is only loaded once.
    pipe = StableDiffusionXLInstantIDPipeline(
        vae=xl_pipe.vae,
        text_encoder=xl_pipe.text_encoder,
        text_encoder_2=xl_pipe.text_encoder_2,
        tokenizer=xl_pipe.tokenizer,
        tokenizer_2=xl_pipe.tokenizer_2,
        unet=xl_pipe.unet,
        scheduler=xl_pipe.scheduler,
        controlnet=controlnet,
    )

    pipe.to(cur_device)

    pipe.load_ip_adapter_instantid(face_adapter_path)

    # Face detector/embedder; prepare 'antelopev2' under ./models
    app = FaceAnalysis(name='antelopev2', root='./', providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
    app.prepare(ctx_id=0, det_size=(640, 640))

    face_img_path = 'E:/data/test/skin_test_20231124104715.jpg'

    face_img = load_image(face_img_path)
    face_img.save('face_img_0.jpg')
    face_img = resize_img2(face_img, max_side=512, min_side=512)
    face_img.save('face_img_1.jpg')
    # Prepare the identity embedding from the detected face.
    face_image_cv2 = convert_from_image_to_cv2(face_img)
    face_img_temp = PIL.Image.fromarray(face_image_cv2).convert("RGB")
    face_img_temp.save("face_img_temp.jpg")
    face_info = app.get(face_image_cv2)
    # BUG FIX: the area key was missing parentheses around the height term
    # ((x2-x1)*y2 - y1 instead of (x2-x1)*(y2-y1)), so the "maximum face"
    # could be chosen incorrectly.
    face_info = sorted(face_info,
                       key=lambda x: (x['bbox'][2] - x['bbox'][0]) * (x['bbox'][3] - x['bbox'][1]))[
        -1]  # only use the maximum face
    face_emb = face_info['embedding']
    face_kps = draw_kps(face_img, face_info['kps'])
    face_kps.save('face_kps.jpg')
    pose_image_path = 'E:/data/ai/baidu-ai/515295e0a58a46809ec2c35bba3a06f1.png'

    if pose_image_path is not None:
        # When a pose image is given, its face keypoints replace the identity
        # image's keypoints as the ControlNet condition.
        pose_image = load_image(pose_image_path)
        pose_image.save('pose_image_0.jpg')
        pose_image = resize_img3(pose_image, min_side=512)
        pose_image.save('pose_image_1.jpg')
        pose_image_cv2 = convert_from_image_to_cv2(pose_image)

        face_info = app.get(pose_image_cv2)

        # Same bbox-area fix as above: select the largest detected face.
        face_info = sorted(face_info,
                           key=lambda x: (x['bbox'][2] - x['bbox'][0]) * (x['bbox'][3] - x['bbox'][1]))[-1]
        face_kps = draw_kps(pose_image, face_info['kps'])
        face_kps.save('post_face_kps.jpg')

    pipe.set_ip_adapter_scale(0.9)
    images_style = pipe(
        height=height,
        width=width,
        guidance_scale=guidance_scale,
        generator=generator,
        num_inference_steps=num_inference_steps, num_images_per_prompt=batch_size,
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        image_embeds=face_emb,
        image=face_kps,
        controlnet_conditioning_scale=0.6,
    ).images
    print('exec end')
    for index, image in enumerate(images_style):
        output_path = f"image_of_{index}.png"
        image.save(output_path)
    print('run end')