import os
import yaml
import paddle
import argparse
import numpy as np
import paddle.nn.functional as nnf
from tqdm import tqdm
from PIL import Image
from ppdiffusers import StableDiffusionPipeline, DDIMScheduler
import ptp
import ptp_utils


def load_512(image_path, left=0, right=0, top=0, bottom=0):
    """Load an image, crop it by the given offsets, center-crop to square, resize to 512x512.

    Args:
        image_path: file path of an RGB(A) image, or an already-loaded HxWxC ndarray.
        left, right, top, bottom: number of pixels to crop from each side.

    Returns:
        np.ndarray of shape (512, 512, 3), dtype uint8.
    """
    if isinstance(image_path, str):
        image = np.array(Image.open(image_path))[:, :, :3]  # drop alpha channel if present
    else:
        image = image_path
    h, w, c = image.shape
    # Clamp crop offsets so at least one row/column survives the crop.
    left = min(left, w - 1)
    right = min(right, w - left - 1)
    top = min(top, h - 1)  # fixed: was `min(top, h - left - 1)` — clamped against `left` (copy-paste bug)
    bottom = min(bottom, h - top - 1)
    image = image[top:h-bottom, left:w-right]
    h, w, c = image.shape
    # Center-crop to a square along the longer axis.
    if h < w:
        offset = (w - h) // 2
        image = image[:, offset:offset + h]
    elif w < h:
        offset = (h - w) // 2
        image = image[offset:offset + w]
    image = np.array(Image.fromarray(image).resize((512, 512)))
    return image


class NullInversion:
    """Null-text inversion (Mokady et al., 2022) for Stable Diffusion.

    First DDIM-inverts a real image under its prompt, then optimizes a
    per-timestep unconditional ("null") text embedding so that
    classifier-free-guided sampling reconstructs the image.  The resulting
    latent and embeddings can be fed to Prompt-to-Prompt editing.
    """

    def __init__(self, model, num_steps=50, guidance_scale=7.5):
        """
        Args:
            model: StableDiffusionPipeline exposing unet / vae / text_encoder /
                tokenizer / scheduler (a DDIMScheduler is expected).
            num_steps: number of DDIM inference steps.
            guidance_scale: classifier-free guidance weight used when sampling backward.
        """
        self.model = model
        self.tokenizer = self.model.tokenizer
        self.model.scheduler.set_timesteps(num_steps)
        self.prompt = None
        self.context = None  # concat([uncond, cond]) embeddings, filled by init_prompt()
        self.num_steps = num_steps
        self.guidance_scale = guidance_scale

    def prev_step(self, model_output, timestep, sample):
        """One backward (denoising) DDIM step: x_t -> x_{t-1} (eta = 0)."""
        prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
        alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
        alpha_prod_t_prev = self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod
        beta_prod_t = 1 - alpha_prod_t
        # Predict x_0, then re-noise it toward the previous timestep.
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        return prev_sample

    def next_step(self, model_output, timestep, sample):
        """One forward (inversion) DDIM step: x_t -> x_{t+1}."""
        # Shift indices by one step; clamp to 999 (last training timestep index).
        timestep, next_timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999), timestep
        alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod
        alpha_prod_t_next = self.scheduler.alphas_cumprod[next_timestep]
        beta_prod_t = 1 - alpha_prod_t
        next_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output
        next_sample = alpha_prod_t_next ** 0.5 * next_original_sample + next_sample_direction
        return next_sample

    def get_noise_pred_single(self, latents, t, context):
        """UNet noise prediction for a single (non-duplicated) context batch."""
        noise_pred = self.model.unet(latents, t, encoder_hidden_states=context)["sample"]
        return noise_pred

    def get_noise_pred(self, latents, t, is_forward=True, context=None):
        """Classifier-free-guided noise prediction followed by one DDIM step.

        Forward (inversion) direction effectively disables guidance (scale 1);
        backward (sampling) direction uses self.guidance_scale.
        """
        latents_input = paddle.concat([latents] * 2, axis=0)
        if context is None:
            context = self.context
        guidance_scale = 1 if is_forward else self.guidance_scale
        noise_pred = self.model.unet(latents_input, t, encoder_hidden_states=context)["sample"]
        noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)
        if is_forward:
            latents = self.next_step(noise_pred, t, latents)
        else:
            latents = self.prev_step(noise_pred, t, latents)
        return latents

    @paddle.no_grad()
    def latent2image(self, latents, return_type='np'):
        """Decode VAE latents; return a HxWx3 uint8 array ('np') or the raw tensor."""
        latents = 1 / 0.18215 * latents.detach()  # undo SD latent scaling
        image = self.model.vae.decode(latents)['sample']
        if return_type == 'np':
            image = (image / 2 + 0.5).clip(0, 1)  # [-1, 1] -> [0, 1]
            image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
            image = (image * 255).astype(np.uint8)
        return image

    @paddle.no_grad()
    def image2latent(self, image):
        """Encode an image (PIL image, HxWx3 uint8 ndarray, or an already-encoded
        4-D latent tensor, which is passed through) into scaled VAE latents."""
        with paddle.no_grad():  # redundant with the decorator; kept as belt-and-braces
            # fixed: `type(image) is Image` compared against the PIL *module*
            # and was never True, so PIL inputs were never converted.
            if isinstance(image, Image.Image):
                image = np.array(image)
            if isinstance(image, paddle.Tensor) and image.dim() == 4:
                latents = image
            else:
                image = paddle.to_tensor(image, dtype="float32") / 127.5 - 1  # uint8 -> [-1, 1]
                image = image.permute(2, 0, 1).unsqueeze(0)  # HWC -> NCHW
                latents = self.model.vae.encode(image)['latent_dist'].mean
                latents = latents * 0.18215  # SD latent scaling factor
        return latents

    @paddle.no_grad()
    def init_prompt(self, prompt: str):
        """Embed the empty string and `prompt`; cache concat([uncond, cond]) in self.context."""
        uncond_input = self.model.tokenizer(
            [""], padding="max_length", max_length=self.model.tokenizer.model_max_length,
            return_tensors="pd"
        )
        uncond_embeddings = self.model.text_encoder(uncond_input.input_ids)[0]
        text_input = self.model.tokenizer(
            [prompt],
            padding="max_length",
            max_length=self.model.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pd",
        )
        text_embeddings = self.model.text_encoder(text_input.input_ids)[0]
        self.context = paddle.concat([uncond_embeddings, text_embeddings], axis=0)
        self.prompt = prompt

    @paddle.no_grad()
    def ddim_loop(self, latent):
        """Run DDIM inversion (conditional branch only); return latents for every step.

        Returns a list of num_steps + 1 latents, index 0 being the clean latent.
        """
        uncond_embeddings, cond_embeddings = self.context.chunk(2)
        all_latent = [latent]
        latent = latent.clone().detach()
        for i in range(self.num_steps):
            # Walk scheduler timesteps in reverse (low noise -> high noise).
            t = self.model.scheduler.timesteps[len(self.model.scheduler.timesteps) - i - 1]
            noise_pred = self.get_noise_pred_single(latent, t, cond_embeddings)
            latent = self.next_step(noise_pred, t, latent)
            all_latent.append(latent)
        return all_latent

    @property
    def scheduler(self):
        """Shortcut to the pipeline's scheduler."""
        return self.model.scheduler

    @paddle.no_grad()
    def ddim_inversion(self, image):
        """Encode `image`, return (VAE reconstruction, DDIM latent trajectory)."""
        latent = self.image2latent(image)
        image_rec = self.latent2image(latent)
        ddim_latents = self.ddim_loop(latent)
        return image_rec, ddim_latents

    def null_optimization(self, latents, num_inner_steps, epsilon):
        """Optimize one null-text embedding per timestep.

        Args:
            latents: DDIM latent trajectory from ddim_loop (clean -> noisy).
            num_inner_steps: max optimizer iterations per timestep.
            epsilon: early-stop MSE threshold (relaxed linearly per step).

        Returns:
            List of num_steps optimized unconditional embeddings.
        """
        uncond_embeddings, cond_embeddings = self.context.chunk(2)
        uncond_embeddings_list = []
        latent_cur = latents[-1]
        bar = tqdm(total=num_inner_steps * self.num_steps)
        for i in range(self.num_steps):
            uncond_embeddings = uncond_embeddings.clone().detach()
            uncond_embeddings.stop_gradient = False
            # Learning rate decays linearly with the (outer) step index.
            optimizer = paddle.optimizer.Adam(parameters=[uncond_embeddings], learning_rate=1e-2 * (1. - i / 100.))
            latent_prev = latents[len(latents) - i - 2]
            t = self.model.scheduler.timesteps[i]
            with paddle.no_grad():
                noise_pred_cond = self.get_noise_pred_single(latent_cur, t, cond_embeddings)
            j = -1  # fixed: guard against NameError in the catch-up loop when num_inner_steps == 0
            for j in range(num_inner_steps):
                noise_pred_uncond = self.get_noise_pred_single(latent_cur, t, uncond_embeddings)
                noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_cond - noise_pred_uncond)
                latents_prev_rec = self.prev_step(noise_pred, t, latent_cur)
                loss = nnf.mse_loss(latents_prev_rec, latent_prev)
                optimizer.clear_grad()
                loss.backward()
                optimizer.step()
                loss_item = loss.item()
                bar.update()
                if loss_item < epsilon + i * 2e-5:
                    break
            # Keep the progress bar consistent when we early-stopped.
            for j in range(j + 1, num_inner_steps):
                bar.update()
            uncond_embeddings_list.append(uncond_embeddings[:1].detach())
            with paddle.no_grad():
                context = paddle.concat([uncond_embeddings, cond_embeddings], axis=0)
                latent_cur = self.get_noise_pred(latent_cur, t, False, context)
        bar.close()
        return uncond_embeddings_list

    def invert(self, image_path: str, prompt: str, offsets=(0,0,0,0), num_inner_steps=10, early_stop_epsilon=1e-5, verbose=False):
        """Full pipeline: load image, DDIM-invert it, then run null-text optimization.

        Returns:
            ((ground-truth image, VAE reconstruction), final noisy latent,
             list of optimized null-text embeddings).
        """
        self.init_prompt(prompt)
        ptp_utils.register_attention_control(self.model, None)
        image_gt = load_512(image_path, *offsets)
        image_rec, ddim_latents = self.ddim_inversion(image_gt)
        if verbose:
            print("Null-text optimization...")
        uncond_embeddings = self.null_optimization(ddim_latents, num_inner_steps, early_stop_epsilon)
        return (image_gt, image_rec), ddim_latents[-1], uncond_embeddings
    

def parse_args():
    """Build and evaluate the command-line parser.

    Returns:
        argparse.Namespace with a single ``config`` attribute: the path of
        the YAML configuration file (required).
    """
    cli = argparse.ArgumentParser(description="Simple example of a NULL TEXT Prompt2Prompt.")
    cli.add_argument("--config", "-d", type=str, default=None, required=True,
                     help="A file path containing the config.")
    return cli.parse_args()


if __name__ == "__main__":
    args = parse_args()
    # Load all hyper-parameters from the YAML config file.
    # fixed: use a context manager so the config file handle is closed.
    with open(args.config, "rt") as f:
        param_dict = yaml.safe_load(f)
    pretrained_model_name_or_path = param_dict["pretrained_model_name_or_path"]
    LOW_RESOURCE = param_dict["low_resource"]
    NUM_DIFFUSION_STEPS = param_dict["num_diffusion_steps"]
    GUIDANCE_SCALE = param_dict["guidance_scale"]
    MAX_NUM_WORDS = param_dict["max_num_words"]
    # An image path switches the script into real-image (null-text) mode.
    image_path = param_dict["image_path"] if param_dict["image_path"] else None
    seed = param_dict["seed"]
    prompts = param_dict["prompts"]
    cross_replace_steps = param_dict["cross_replace_steps"]
    self_replace_steps = param_dict["self_replace_steps"]
    blend_word = param_dict["blend_word"] if param_dict["blend_word"] else None
    eq_params = param_dict["equilizer_params"] if param_dict["equilizer_params"] else None
    is_replace_controller = param_dict["is_replace_controller"]
    show_cross_attention = param_dict["show_cross_attention"]

    device = paddle.device.set_device('gpu:0') if paddle.device.is_compiled_with_cuda() else paddle.device.set_device('cpu')
    # NOTE(review): seed == 0 is falsy and silently disables seeding — confirm intent.
    generator = paddle.Generator().manual_seed(seed) if seed else None
    use_null_text = bool(image_path)  # idiom: was `True if image_path else False`

    if use_null_text:
        # null_text prompt2prompt: for real image
        # the param is same as pretrained_model_name_or_path's scheduler: PNDM
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, steps_offset=1)
        ldm_stable = StableDiffusionPipeline.from_pretrained(pretrained_model_name_or_path, scheduler=scheduler).to(device)
    else:
        # prompt2prompt: for generated image
        ldm_stable = StableDiffusionPipeline.from_pretrained(pretrained_model_name_or_path).to(device)

    # Null-text inversion requires vanilla attention so ptp can hook it.
    try:
        ldm_stable.disable_xformers_memory_efficient_attention()
    except AttributeError:
        print("Attribute disable_xformers_memory_efficient_attention() is missing")
    tokenizer = ldm_stable.tokenizer

    if use_null_text:
        # 1) Invert the real image to a latent + per-step null-text embeddings.
        null_inversion = NullInversion(ldm_stable, NUM_DIFFUSION_STEPS, GUIDANCE_SCALE)
        (image_gt, image_enc), x_t, uncond_embeddings = null_inversion.invert(image_path, prompts[0], offsets=(0, 0, 200, 0), verbose=True)
        print("Modify or remove offsets according to your image!")
        # 2) Reconstruct with the first prompt to validate the inversion.
        controller = ptp.AttentionStore(LOW_RESOURCE)
        image_inv, x_t = ptp.run_and_display(prompts[:1], ldm_stable, controller, NUM_DIFFUSION_STEPS, GUIDANCE_SCALE, generator=generator, run_baseline=False, latent=x_t, uncond_embeddings=uncond_embeddings, low_resource=LOW_RESOURCE, verbose=False)
        images = ptp_utils.view_images([image_gt, image_enc, image_inv[0]])
        # showing from left to right: ground truth, VAE reconstruction, null-text inversion
        # images.save("./{}_gt_vae_nt.png".format(os.path.basename(image_path).split(".")[0]))

        if show_cross_attention:
            images = ptp.show_cross_attention(prompts, controller, 16, ["up", "down"], select=0, tokenizer=tokenizer)
            images.save("./{}_null_text_cross_attention.png".format(os.path.basename(image_path).split(".")[0]))

        # 3) Edit with the full prompt list using a Prompt-to-Prompt controller.
        controller = ptp.make_controller(prompts, is_replace_controller, cross_replace_steps, self_replace_steps, blend_word, eq_params, NUM_DIFFUSION_STEPS, LOW_RESOURCE, tokenizer, MAX_NUM_WORDS)
        images, _ = ptp.run_and_display(prompts, ldm_stable, controller, NUM_DIFFUSION_STEPS, GUIDANCE_SCALE, generator=generator, run_baseline=False, latent=x_t, uncond_embeddings=uncond_embeddings, low_resource=LOW_RESOURCE, verbose=True)
        images.save("./{}_null_text_edit.png".format(os.path.basename(image_path).split(".")[0]))
    else:
        # Generated-image mode: sample from scratch, then edit that sample.
        controller = ptp.AttentionStore(LOW_RESOURCE)
        images, x_t = ptp.run_and_display(prompts[:1], ldm_stable, controller, NUM_DIFFUSION_STEPS, GUIDANCE_SCALE, latent=None, run_baseline=False, generator=generator, uncond_embeddings=None, low_resource=LOW_RESOURCE, verbose=True)
        # images.save("./ptp_generate.png")

        controller = ptp.make_controller(prompts, is_replace_controller, cross_replace_steps, self_replace_steps, blend_word, eq_params, NUM_DIFFUSION_STEPS, LOW_RESOURCE, tokenizer, MAX_NUM_WORDS)
        images, _ = ptp.run_and_display(prompts, ldm_stable, controller, NUM_DIFFUSION_STEPS, GUIDANCE_SCALE, generator=generator, run_baseline=False, latent=x_t, uncond_embeddings=None, low_resource=LOW_RESOURCE, verbose=True)
        images.save("./ptp_edit.png")

        if show_cross_attention:
            images = ptp.show_cross_attention(prompts, controller, 16, ["up", "down"], select=0, tokenizer=tokenizer)
            images.save("./ptp_cross_attention.png")
