import os
import torch
import cv2
import numpy as np
from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, AutoencoderKL
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from controlnet_aux import OpenposeDetector
import lpw_stable_diffusion as lpw

# Half precision for all diffusion/ControlNet weights to reduce GPU memory use.
weight_dtype = torch.float16
# Lazily-initialized ControlNet model singletons; populated on first use by
# get_controlnet_unit() so unused units are never loaded.
model_canny = None
model_openpose = None
model_tile = None
model_color = None
# Lazily-initialized OpenPose detector; populated by get_controlnet_preprocess().
model_openpose_preprocess = None
# Local root directory that holds the ControlNet / detector weight folders.
abs_models_path = 'E:/data/open-source/EasyPhoto/model_data'


def resize_image(input_image, resolution, nearest=False, crop264=True):
    """Resize an HxWxC image so that its short side equals ``resolution``.

    Args:
        input_image: numpy array of shape (H, W, C).
        resolution: target length of the shorter side, in pixels.
        nearest: if True, use nearest-neighbour interpolation; otherwise
            Lanczos when upscaling and area resampling when downscaling.
        crop264: if True, round both output dimensions to the nearest
            multiple of 64 (required by the diffusion UNet).

    Returns:
        The resized numpy image.
    """
    src_h, src_w, _channels = input_image.shape
    scale = float(resolution) / min(float(src_h), float(src_w))
    new_h = float(src_h) * scale
    new_w = float(src_w) * scale

    if crop264:
        new_h = int(np.round(new_h / 64.0)) * 64
        new_w = int(np.round(new_w / 64.0)) * 64
    else:
        new_h = int(new_h)
        new_w = int(new_w)

    if nearest:
        interp = cv2.INTER_NEAREST
    else:
        # Lanczos preserves detail when enlarging; area averaging avoids
        # aliasing when shrinking.
        interp = cv2.INTER_LANCZOS4 if scale > 1 else cv2.INTER_AREA
    return cv2.resize(input_image, (new_w, new_h), interpolation=interp)


def get_controlnet_unit(unit):
    """Return the (lazily loaded, cached) ControlNet model for ``unit``.

    Supported units are "canny", "openpose", "color" and "tile".  Each model
    is loaded once from the local weights folder (named
    ``sd-controlnet-<unit>``) and cached in a module-level global so repeated
    calls reuse the same instance.

    Args:
        unit: ControlNet unit name.

    Returns:
        The ControlNetModel for ``unit``, or None if ``unit`` is unknown.
    """
    global model_canny, model_openpose, model_tile, model_color

    # Map unit name -> module-level cache variable.  The on-disk folder name
    # is always "sd-controlnet-<unit>", so it is derived from the unit name.
    cache_names = {
        "canny": "model_canny",
        "openpose": "model_openpose",
        "color": "model_color",
        "tile": "model_tile",
    }
    if unit not in cache_names:
        return None

    cache_name = cache_names[unit]
    if globals()[cache_name] is None:
        globals()[cache_name] = ControlNetModel.from_pretrained(
            os.path.join(abs_models_path, "Others", "bubbliiiing/controlnet_helper/controlnet",
                         "sd-controlnet-" + unit),
            torch_dtype=weight_dtype)
    return globals()[cache_name]


def get_controlnet_preprocess(unit, input_image):
    """Produce the conditioning image for a ControlNet ``unit``.

    Args:
        unit: one of "canny", "openpose", "color", "tile".
        input_image: the source image (PIL Image or compatible array).

    Returns:
        The preprocessed conditioning image (PIL Image for "canny" /
        "openpose" / "color"; the input unchanged for "tile").

    Raises:
        ValueError: if ``unit`` is not one of the supported names.
        (Previously an unknown unit left ``output_image`` unbound and
        raised a confusing NameError.)
    """
    global model_openpose_preprocess
    if unit == "canny":
        # Single-channel Canny edges replicated to 3 channels.
        canny_image = cv2.Canny(np.array(input_image, np.uint8), 100, 200)[:, :, None]
        output_image = Image.fromarray(np.concatenate([canny_image, canny_image, canny_image], axis=2))

    elif unit == "openpose":
        # Lazily load the OpenPose detector once and cache it globally.
        if model_openpose_preprocess is None:
            model_openpose_preprocess = OpenposeDetector.from_pretrained(
                os.path.join(abs_models_path, "Others", "bubbliiiing/controlnet_helper/", "controlnet_detector"))
        output_image = model_openpose_preprocess(input_image)

    elif unit == "color":
        # Build a heavily blurred color palette image: shrink by blur_ratio,
        # blow back up with nearest-neighbour to get large flat color cells,
        # then resize back to the original dimensions.
        blur_ratio = 24
        h, w, c = np.shape(input_image)
        color_image = np.array(input_image, np.uint8)

        color_image = resize_image(color_image, 1024)
        now_h, now_w = color_image.shape[:2]

        color_image = cv2.resize(color_image, (int(now_w // blur_ratio), int(now_h // blur_ratio)),
                                 interpolation=cv2.INTER_CUBIC)
        color_image = cv2.resize(color_image, (now_w, now_h), interpolation=cv2.INTER_NEAREST)
        color_image = cv2.resize(color_image, (w, h), interpolation=cv2.INTER_CUBIC)
        output_image = Image.fromarray(np.uint8(color_image))

    elif unit == "tile":
        # Tile ControlNet conditions directly on the source image.
        output_image = input_image

    else:
        raise ValueError(
            "Unknown controlnet unit '{}'; expected one of: canny, openpose, color, tile".format(unit))

    return output_image


def get_pipe(controlnet_units_list, device='cuda'):
    """Build a StableDiffusionControlNetInpaintPipeline from local files.

    Args:
        controlnet_units_list: list of ControlNetModel instances to attach
            to the pipeline (order matters; it must match the order of the
            control images passed at inference time).
        device: torch device string the VAE and text encoder are moved to.
            Defaults to 'cuda'.  (Previously this function read a
            ``cur_device`` global that only exists when the module is run
            as a script, so calling it from an import raised NameError.)

    Returns:
        The constructed pipeline (not yet moved to the GPU as a whole;
        callers do ``pipe.to(...)`` themselves).
    """
    # Load the SD checkpoint and auxiliary models (all from local files).
    base_model_path = 'E:/data/ai/sd-model/majicmixRealistic_v7.safetensors'
    vae_model_path = 'D:/deeplab/sd-webui-aki/sd-webui-aki-v4.1/models/VAE/vae-ft-mse-840000-ema-pruned.safetensors'
    cache_dir = 'E:/data/ai/lib'
    config_file = 'E:/data/ai/config/v1-inference.yaml'
    print(base_model_path)
    # NOTE(review): CLIPTextModel(config) creates a randomly initialized text
    # encoder; the checkpoint's weights are expected to be loaded into it by
    # from_single_file below — confirm this is the intended flow.
    config = CLIPTextConfig.from_pretrained("E:/data/ai/lib/clip-vit-large-patch14",
                                            local_files_only=True)
    text_model = CLIPTextModel(config)
    tokenizer = CLIPTokenizer.from_pretrained("E:/data/ai/lib/clip-vit-large-patch14", local_files_only=True)
    vae = AutoencoderKL.from_single_file(vae_model_path, config_file=config_file, use_safetensors=True,
                                         local_files_only=True)
    print('load text_model end')

    vae.to(device)
    text_model.to(device)

    pipe = StableDiffusionControlNetInpaintPipeline.from_single_file(
        base_model_path, safety_checker=None, use_safetensors=True,
        local_files_only=True,
        torch_dtype=weight_dtype,
        original_config_file=config_file,
        controlnet=controlnet_units_list,
        from_safetensor=True, device=device, load_safety_checker=False,
        text_encoder=text_model,
        tokenizer=tokenizer,
        vae=vae,
        feature_extractor=None,
    )
    print('load model end')
    return pipe


if __name__ == "__main__":

    # Two-pass ControlNet inpainting demo: a first diffusion guided by
    # canny/openpose/color, then a refinement pass guided by canny/tile.
    clip_skip = 1
    cur_device = 'cuda'

    temp_img_path = 'E:/data/code/facechain2/controlnet/1.jpg'
    temp_img = Image.open(temp_img_path)

    input_short_size = 512.0

    # Resize the input so its short side is ~512 px, preserving aspect ratio.
    short_side = min(temp_img.width, temp_img.height)
    resize = float(short_side / input_short_size)
    new_size = (int(temp_img.width // resize), int(temp_img.height // resize))
    input_image = temp_img.resize(new_size, Image.Resampling.LANCZOS)
    print("Start Image resize to {}.".format(input_short_size))
    # Inpainting mask and the image providing the pose for OpenPose guidance.
    mask_image_path = 'E:/data/code/facechain2/controlnet/mask_image.jpg'
    mask_image = Image.open(mask_image_path)
    replaced_input_img_path = 'E:/data/code/facechain2/controlnet/replaced_input_image.jpg'
    replaced_input_img = Image.open(replaced_input_img_path)

    # Each pair: (unit name, image the preprocessor runs on, conditioning scale).
    controlnet_pairs = [["canny", input_image, 0.50], ["openpose", replaced_input_img, 0.50],
                        ["color", input_image, 0.85]]

    # Build parallel lists of models, conditioning images and scales;
    # their order must stay aligned for the pipeline call below.
    controlnet_units_list = []
    controlnet_image = []
    controlnet_conditioning_scale = []
    for pair in controlnet_pairs:
        controlnet_units_list.append(
            get_controlnet_unit(pair[0])
        )
        controlnet_image.append(
            get_controlnet_preprocess(pair[0], pair[1])
        )
        controlnet_conditioning_scale.append(
            pair[2]
        )
    print("Start first diffusion.")
    pipe = get_pipe(controlnet_units_list)
    pipe = pipe.to("cuda")
    # Fixed seed for reproducible generations.
    seed = 12345
    generator = torch.Generator("cuda").manual_seed(int(seed))
    pos_prompt = ('easyphoto_face, easyphoto, 1person, masterpiece, handsome, cloth, best quality, realistic, '
                  'photo-realistic, detailed skin, rough skin, beautiful eyes, sparkling eyes, beautiful mouth, '
                  'finely detail, extremely detailed CG unity 8k wallpaper, huge filesize, best quality, realistic, '
                  'photo-realistic, ultra high res, raw photo, put on makeup')
    neg_prompt = ('bags under the eyes, bags under eyes, glasses, naked, nsfw, nude, breasts, penis, cum, over red '
                  'lips, bad lips, bad hair, bad teeth, worst quality, low quality, normal quality, lowres, '
                  'watermark, badhand, lowres, bad anatomy, bad hands, normal quality, mural,')

    # Long-prompt-weighting embeddings are used instead of plain prompt
    # strings (the prompt/negative_prompt kwargs below are left commented).
    pos_text_embeddings, neg_text_embeddings = lpw.get_weighted_text_embeddings(pipe, prompt=pos_prompt,
                                                                                uncond_prompt=neg_prompt,
                                                                                clip_skip=clip_skip)

    # Snap width/height down to multiples of 8 (latent-space requirement).
    w = int(input_image.width)
    h = int(input_image.height)
    width = int(w // 8 * 8)
    height = int(h // 8 * 8)
    cfg_scale = 7
    steps = 50
    denoising_strength = 0.45
    first_diffusion_output_image = pipe(
        # prompt=pos_prompt,
        # negative_prompt=neg_prompt,
        image=[input_image], mask_image=mask_image, control_image=controlnet_image,
        strength=denoising_strength,
        guidance_scale=cfg_scale, num_inference_steps=steps, generator=generator, height=height, width=width,
        controlnet_conditioning_scale=controlnet_conditioning_scale, guess_mode=True,
        prompt_embeds=pos_text_embeddings,
        negative_prompt_embeds=neg_text_embeddings
    ).images[0]
    first_diffusion_output_image.save('controlnet.jpg')

    print("Start Second diffusion.")
    # NOTE(review): this discards the first-pass result and feeds the resized
    # input into the second pass instead — looks like a debug override left
    # in; confirm whether it should be removed.
    first_diffusion_output_image = input_image
    controlnet_pairs = [["canny", first_diffusion_output_image, 1.00], ["tile", first_diffusion_output_image, 1.00]]

    second_input_mask_path = 'E:/data/code/facechain2/controlnet/second_input_mask.jpg'
    second_input_mask = Image.open(second_input_mask_path)
    # Rebuild the aligned model/image/scale lists for the refinement pass.
    controlnet_units_list = []
    controlnet_image = []
    controlnet_conditioning_scale = []
    for pair in controlnet_pairs:
        controlnet_units_list.append(
            get_controlnet_unit(pair[0])
        )
        controlnet_image.append(
            get_controlnet_preprocess(pair[0], pair[1])
        )
        controlnet_conditioning_scale.append(
            pair[2]
        )
    # Lighter, shorter second pass (cfg_scale/width/height reused from above).
    denoising_strength = 0.3
    steps = 20
    pipe = get_pipe(controlnet_units_list)
    pipe = pipe.to("cuda")
    pos_text_embeddings, neg_text_embeddings = lpw.get_weighted_text_embeddings(pipe, prompt=pos_prompt,
                                                                                uncond_prompt=neg_prompt,
                                                                                clip_skip=clip_skip)
    second_diffusion_output_image = pipe(
        # prompt=pos_prompt,
        # negative_prompt=neg_prompt,
        image=[first_diffusion_output_image], mask_image=second_input_mask, control_image=controlnet_image,
        strength=denoising_strength,
        guidance_scale=cfg_scale, num_inference_steps=steps, generator=generator, height=height, width=width,
        controlnet_conditioning_scale=controlnet_conditioning_scale, guess_mode=True,
        prompt_embeds=pos_text_embeddings,
        negative_prompt_embeds=neg_text_embeddings
    ).images[0]
    second_diffusion_output_image.save('second_controlnet.jpg')