
import abc
import paddle
import numpy as np
import paddle.nn.functional as nnf
from tqdm import tqdm
from PIL import Image
from typing import Optional, Union, Tuple, List, Callable, Dict
import ptp_utils
import seq_aligner


class LocalBlend:
    """Localized latent blending for prompt-to-prompt editing.

    After a warm-up period, builds a spatial mask from the cross-attention
    maps of the selected ``words`` and, outside that mask, resets every edited
    latent back to the source prompt's latent (row 0 of the batch).
    """

    def __init__(self, 
                 prompts: List[str], 
                 words: List[List[str]], 
                 substruct_words=None, 
                 start_blend=0.2, 
                 threshold=(.3, .3), 
                 max_num_words=77, 
                 tokenizer = None, 
                 num_steps=50):
        # One-hot weights (over token positions) marking the edited words for
        # each prompt; shaped to broadcast against the stored attention maps.
        alpha_layers = paddle.zeros([len(prompts),  1, 1, 1, 1, max_num_words])
        for i, (prompt, words_) in enumerate(zip(prompts, words)):
            if isinstance(words_, str):
                words_ = [words_]
            for word in words_:
                ind = ptp_utils.get_word_inds(prompt, word, tokenizer)
                alpha_layers[i, :, :, :, :, ind] = 1
        
        if substruct_words is not None:
            # Words whose attention region should be *excluded* from the mask.
            substruct_layers = paddle.zeros([len(prompts),  1, 1, 1, 1, max_num_words])
            for i, (prompt, words_) in enumerate(zip(prompts, substruct_words)):
                if isinstance(words_, str):
                    words_ = [words_]
                for word in words_:
                    ind = ptp_utils.get_word_inds(prompt, word, tokenizer)
                    substruct_layers[i, :, :, :, :, ind] = 1
            self.substruct_layers = substruct_layers
        else:
            self.substruct_layers = None
        self.alpha_layers = alpha_layers
        # Blending only starts after this many diffusion steps.
        self.start_blend = int(start_blend * num_steps)
        self.counter = 0 
        self.threshold = paddle.to_tensor(threshold, dtype="float32")
        self.max_num_words = max_num_words

    def get_mask(self, x_t, maps, alpha, use_pool):
        """Turn word-weighted attention maps into a boolean mask at x_t's spatial size."""
        k = 1
        maps = (maps * alpha).sum(-1).mean(1)
        if use_pool:
            # Slightly dilate the attention response before thresholding.
            maps = nnf.max_pool2d(maps, (k * 2 + 1, k * 2 + 1), (1, 1), padding=(k, k))
        mask = nnf.interpolate(maps, size=(x_t.shape[2:]))
        # Normalize each map to [0, 1] by its spatial maximum.
        mask = mask / mask.max(2, keepdim=True).max(3, keepdim=True)
        mask = mask.greater_than(self.threshold[1 - int(use_pool)])
        # Union with the source prompt's mask so the source region is always kept.
        mask = mask[:1].expand_as(mask) | mask
        return mask
    
    def __call__(self, x_t, attention_store):
        self.counter += 1
        if self.counter > self.start_blend:           
            # 16x16 cross-attention maps from selected down/up U-Net blocks.
            maps = attention_store["down_cross"][2:4] + attention_store["up_cross"][:3]
            maps = [item.reshape([self.alpha_layers.shape[0], -1, 1, 16, 16, self.max_num_words]) for item in maps]
            maps = paddle.concat(maps, axis=1)
            mask = self.get_mask(x_t, maps, self.alpha_layers, True)
            if self.substruct_layers is not None:
                # FIX: get_mask requires x_t as its first argument; it was
                # previously omitted here, shifting every argument by one.
                maps_sub = ~self.get_mask(x_t, maps, self.substruct_layers, False)
                mask = mask & maps_sub
            mask = mask.astype("float32")
            # Inside the mask keep the edited latents; outside, snap back to x_t[:1].
            x_t = x_t[:1] + mask * (x_t - x_t[:1])
        return x_t
    

class EmptyControl:
    """No-op controller: leaves both latents and attention maps untouched."""

    def step_callback(self, x_t):
        # Nothing to blend; pass the latent through unchanged.
        return x_t

    def between_steps(self):
        # No per-step bookkeeping required.
        return

    def __call__(self, attn, is_cross: bool, place_in_unet: str):
        # Attention is returned exactly as received.
        return attn


class AttentionControl(abc.ABC):
    """Base class that intercepts every attention layer of the diffusion U-Net.

    Subclasses implement ``forward`` to inspect/modify an attention map.  This
    class tracks which attention layer and which diffusion step is running
    and, in the default (full-batch) mode, only hands the conditional half of
    the batch to ``forward``.
    """

    def __init__(self, low_resource=False):
        self.cur_step = 0          # completed diffusion steps
        self.num_att_layers = -1   # set externally after hooks are registered
        self.cur_att_layer = 0     # attention-layer index within the current step
        self.low_resource = low_resource

    def step_callback(self, x_t):
        # Hook for latent post-processing between steps; identity by default.
        return x_t

    def between_steps(self):
        # Hook invoked once after all attention layers of a step have run.
        return

    @property
    def num_uncond_att_layers(self):
        # In low-resource mode the unconditional pass runs separately first,
        # so its layers must be skipped before editing starts.
        return self.num_att_layers if self.low_resource else 0

    @abc.abstractmethod
    def forward(self, attn, is_cross: bool, place_in_unet: str):
        raise NotImplementedError

    def __call__(self, attn, is_cross: bool, place_in_unet: str):
        if self.cur_att_layer >= self.num_uncond_att_layers:
            if self.low_resource:
                attn = self.forward(attn, is_cross, place_in_unet)
            else:
                # Batch layout is [uncond | cond]; edit only the conditional half.
                half = attn.shape[0] // 2
                attn[half:] = self.forward(attn[half:], is_cross, place_in_unet)
        self.cur_att_layer += 1
        if self.cur_att_layer == self.num_att_layers + self.num_uncond_att_layers:
            # Finished a full step: rewind the layer counter, advance the step.
            self.cur_att_layer = 0
            self.cur_step += 1
            self.between_steps()
        return attn

    def reset(self):
        # Restart step/layer bookkeeping for a fresh generation run.
        self.cur_step = 0
        self.cur_att_layer = 0


class AttentionStore(AttentionControl):
    """Accumulates attention maps across all layers and diffusion steps."""

    @staticmethod
    def get_empty_store():
        # One bucket per (U-Net location, attention type) combination.
        return {"down_cross": [], "mid_cross": [], "up_cross": [],
                "down_self": [],  "mid_self": [],  "up_self": []}

    def __init__(self, low_resource=False):
        super(AttentionStore, self).__init__(low_resource)
        self.step_store = self.get_empty_store()  # maps of the step in progress
        self.attention_store = {}                 # running sums across steps

    def forward(self, attn, is_cross: bool, place_in_unet: str):
        key = f"{place_in_unet}_{'cross' if is_cross else 'self'}"
        # Only keep maps up to 32x32 spatial size to avoid memory overhead.
        if attn.shape[1] <= 32 ** 2:
            self.step_store[key].append(attn)
        return attn

    def between_steps(self):
        if not self.attention_store:
            # First completed step seeds the accumulator directly.
            self.attention_store = self.step_store
        else:
            # Element-wise accumulate this step's maps into the running sums.
            for key, stored in self.attention_store.items():
                for i in range(len(stored)):
                    stored[i] += self.step_store[key][i]
        self.step_store = self.get_empty_store()

    def get_average_attention(self):
        # Divide the accumulated sums by the number of completed steps.
        return {key: [item / self.cur_step for item in maps]
                for key, maps in self.attention_store.items()}

    def reset(self):
        super(AttentionStore, self).reset()
        self.step_store = self.get_empty_store()
        self.attention_store = {}



class AttentionControlEdit(AttentionStore, abc.ABC):
    """Base class for prompt-to-prompt edit controllers.

    Stores attention like ``AttentionStore`` and, in addition, overwrites the
    attention of the edited prompts (batch rows 1..N) with attention derived
    from the source prompt (row 0), according to per-step replacement
    schedules for cross- and self-attention.
    """

    def __init__(self, 
                 prompts, 
                 num_steps: int,
                 cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]],
                 self_replace_steps: Union[float, Tuple[float, float]],
                 local_blend: Optional[LocalBlend],
                 low_resource=False,
                 tokenizer=None):
        super(AttentionControlEdit, self).__init__(low_resource)
        self.batch_size = len(prompts)
        # Per-step, per-token blending weights for cross-attention replacement.
        self.cross_replace_alpha = ptp_utils.get_time_words_attention_alpha(prompts, num_steps, cross_replace_steps, tokenizer)
        if isinstance(self_replace_steps, float):
            self_replace_steps = 0, self_replace_steps
        # Self-attention is replaced while cur_step lies in [start, end).
        self.num_self_replace = int(num_steps * self_replace_steps[0]), int(num_steps * self_replace_steps[1])
        self.local_blend = local_blend

    def step_callback(self, x_t):
        # Optionally restrict the edit spatially via LocalBlend.
        if self.local_blend is not None:
            x_t = self.local_blend(x_t, self.attention_store)
        return x_t

    def replace_self_attention(self, attn_base, att_replace, place_in_unet):
        # Only low-resolution self-attention (<= 32x32 queries) is replaced;
        # higher resolutions keep the edited prompt's own attention.
        if att_replace.shape[2] <= 32 ** 2:
            return attn_base.unsqueeze(0).expand([att_replace.shape[0], *attn_base.shape])
        return att_replace

    @abc.abstractmethod
    def replace_cross_attention(self, attn_base, att_replace):
        raise NotImplementedError

    def forward(self, attn, is_cross: bool, place_in_unet: str):
        # Still record the map for LocalBlend / visualization.
        super(AttentionControlEdit, self).forward(attn, is_cross, place_in_unet)
        in_self_window = self.num_self_replace[0] <= self.cur_step < self.num_self_replace[1]
        if is_cross or in_self_window:
            heads = attn.shape[0] // (self.batch_size)
            # Unflatten batch and head dimensions: (batch, heads, ...).
            attn = attn.reshape([self.batch_size, heads, *attn.shape[1:]])
            attn_base, attn_edit = attn[0], attn[1:]
            if is_cross:
                alpha_words = self.cross_replace_alpha[self.cur_step]
                replaced = self.replace_cross_attention(attn_base, attn_edit)
                # Token-wise blend of replaced and original attention.
                attn[1:] = replaced * alpha_words + (1 - alpha_words) * attn_edit
            else:
                attn[1:] = self.replace_self_attention(attn_base, attn_edit, place_in_unet)
            attn = attn.reshape([self.batch_size * heads, *attn.shape[2:]])
        return attn


class AttentionReplace(AttentionControlEdit):
    """Word-for-word replacement edit: the source prompt's cross-attention is
    projected onto the target prompt's token positions via a mapping matrix."""

    def __init__(self, 
                 prompts, 
                 num_steps: int, 
                 cross_replace_steps: float, 
                 self_replace_steps: float,
                 local_blend: Optional[LocalBlend] = None,
                 low_resource=False,
                 tokenizer=None,
                 max_num_words=77):
        super(AttentionReplace, self).__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend, low_resource, tokenizer)
        # Token-alignment matrix between the source and each edited prompt.
        self.mapper = seq_aligner.get_replacement_mapper(prompts, tokenizer, max_len=max_num_words)

    def replace_cross_attention(self, attn_base, att_replace):
        # Reroute the source attention columns to the mapped target tokens.
        return paddle.einsum('hpw,bwn->bhpn', attn_base, self.mapper)


class AttentionRefine(AttentionControlEdit):
    """Refinement edit: for tokens shared by both prompts, carry over the
    source prompt's cross-attention, weighted by per-token alphas."""

    def __init__(self, 
                 prompts, 
                 num_steps: int, 
                 cross_replace_steps: float, 
                 self_replace_steps: float,
                 local_blend: Optional[LocalBlend] = None,
                 low_resource=False,
                 tokenizer=None,
                 max_num_words=77):
        super(AttentionRefine, self).__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend, low_resource, tokenizer)
        self.mapper, alphas = seq_aligner.get_refinement_mapper(prompts, tokenizer, max_len=max_num_words)
        # Reshape alphas to broadcast over the (heads, pixels) dimensions.
        self.alphas = alphas.reshape([alphas.shape[0], 1, 1, alphas.shape[1]])

    def replace_cross_attention(self, attn_base, att_replace):
        # Gather the source attention at the mapped token positions.
        # FIX: paddle.Tensor has no `.permute(...)` (that is the torch API);
        # the equivalent paddle call is `.transpose(perm)` with the same axes.
        attn_base_replace = attn_base.unsqueeze(2).index_select(self.mapper.squeeze(), axis=3).transpose([2, 0, 1, 3])
        # Keep source attention where alpha==1, edited attention elsewhere.
        attn_replace = attn_base_replace * self.alphas + att_replace * (1 - self.alphas)
        return attn_replace


class AttentionReweight(AttentionControlEdit):
    """Reweighting edit: rescales the cross-attention of selected tokens with
    an equalizer, optionally stacked on top of another controller's edit."""

    def __init__(self, 
                 prompts, 
                 num_steps: int, 
                 cross_replace_steps: float, 
                 self_replace_steps: float, 
                 equalizer,
                 local_blend: Optional[LocalBlend] = None, 
                 controller: Optional[AttentionControlEdit] = None,
                 low_resource=False, 
                 tokenizer=None):
        super(AttentionReweight, self).__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend, low_resource, tokenizer)
        self.equalizer = equalizer         # per-token scale factors, shape (edits, words)
        self.prev_controller = controller  # chained replace/refine edit, if any

    def replace_cross_attention(self, attn_base, att_replace):
        # First let the chained controller transform the base attention.
        if self.prev_controller is not None:
            attn_base = self.prev_controller.replace_cross_attention(attn_base, att_replace)
        # Scale each token's attention column by its equalizer weight.
        return attn_base[None, :, :, :] * self.equalizer[:, None, None, :]


def get_equalizer(text: str, 
                  word_select: Union[int, Tuple[int, ...]], 
                  values: Union[List[float], Tuple[float, ...]],
                  tokenizer=None):
    """Build a (1, 77) per-token scale vector for AttentionReweight.

    Every position is 1 except the token positions of the selected words,
    which receive the corresponding entry of ``values``.
    """
    # A single word index / word string is promoted to a one-element tuple.
    if type(word_select) in (int, str):
        word_select = (word_select,)
    equalizer = paddle.ones([1, 77])
    for word, val in zip(word_select, values):
        inds = ptp_utils.get_word_inds(text, word, tokenizer)
        equalizer[:, inds] = paddle.to_tensor(val, dtype="float32")
    return equalizer


def aggregate_attention(prompts: List[str], attention_store: AttentionStore, res: int, from_where: List[str], is_cross: bool, select: int):
    """Average the stored attention maps at resolution ``res`` over the given
    U-Net locations for the prompt at index ``select``.

    Returns a CPU tensor of shape (res, res, num_tokens).
    """
    attention_maps = attention_store.get_average_attention()
    num_pixels = res ** 2
    collected = []
    for location in from_where:
        key = f"{location}_{'cross' if is_cross else 'self'}"
        for item in attention_maps[key]:
            # Keep only maps whose spatial size matches the requested resolution.
            if item.shape[1] == num_pixels:
                maps = item.reshape([len(prompts), -1, res, res, item.shape[-1]])[select]
                collected.append(maps)
    stacked = paddle.concat(collected, axis=0)
    averaged = stacked.sum(0) / stacked.shape[0]
    return averaged.cpu()


def make_controller(prompts: List[str], 
                    is_replace_controller: bool, 
                    cross_replace_steps: Dict[str, float], 
                    self_replace_steps: float, 
                    blend_words=None, 
                    equilizer_params=None,
                    num_steps=50,
                    low_resource=False,
                    tokenizer=None,
                    max_num_words=77,
                    ) -> AttentionControlEdit:
    """Assemble the appropriate edit controller for a prompt pair.

    Chooses Replace vs Refine, attaches a LocalBlend when ``blend_words`` is
    given, and wraps everything in a Reweight controller when
    ``equilizer_params`` (keys "words" and "values") is given.
    """
    if blend_words is None:
        lb = None
    else:
        lb = LocalBlend(prompts, blend_words, tokenizer=tokenizer, max_num_words=max_num_words, num_steps=num_steps)
    controller_cls = AttentionReplace if is_replace_controller else AttentionRefine
    controller = controller_cls(prompts, num_steps, cross_replace_steps=cross_replace_steps, self_replace_steps=self_replace_steps, local_blend=lb, low_resource=low_resource, tokenizer=tokenizer, max_num_words=max_num_words)
    if equilizer_params is not None:
        # Reweighting is layered on top of the replace/refine controller.
        eq = get_equalizer(prompts[1], equilizer_params["words"], equilizer_params["values"], tokenizer=tokenizer)
        controller = AttentionReweight(prompts, num_steps, cross_replace_steps=cross_replace_steps, self_replace_steps=self_replace_steps, equalizer=eq, local_blend=lb, controller=controller, low_resource=low_resource, tokenizer=tokenizer)
    return controller


def show_cross_attention(prompts: List[str], attention_store: AttentionStore, res: int, from_where: List[str], select: int = 0, tokenizer=None):
    """Render one cross-attention heat map per token of the selected prompt,
    with the decoded token written underneath, and display the image grid."""
    tokens = tokenizer.encode(prompts[select])["input_ids"]
    attention_maps = aggregate_attention(prompts, attention_store, res, from_where, True, select)
    images = []
    for i, token in enumerate(tokens):
        heat = attention_maps[:, :, i]
        heat = 255 * heat / heat.max()                       # normalize to 0..255
        heat = heat.unsqueeze(-1).expand([*heat.shape, 3])   # grayscale -> 3-channel
        heat = heat.numpy().astype(np.uint8)
        heat = np.array(Image.fromarray(heat).resize((256, 256)))
        heat = ptp_utils.text_under_image(heat, tokenizer.decode(int(token)))
        images.append(heat)
    return ptp_utils.view_images(np.stack(images, axis=0))


def run_and_display(prompts, ldm_stable, controller, num_steps=50, guidance_scale=7.5, latent=None, run_baseline=False, generator=None, uncond_embeddings=None, low_resource=False, verbose=True):
    """Generate images for ``prompts`` under ``controller``.

    When ``run_baseline`` is set, generates with an EmptyControl (no
    prompt-to-prompt editing) instead and returns that result.
    Returns (images, final_latent).
    """
    if run_baseline:
        # Uncontrolled pass: identical settings but with a no-op controller.
        print("w.o. prompt-to-prompt")
        return run_and_display(prompts, ldm_stable, EmptyControl(), num_steps, guidance_scale, latent=latent, run_baseline=False, generator=generator, uncond_embeddings=uncond_embeddings, low_resource=False, verbose=True)
    images, x_t = ptp_utils.text2image_ldm_stable(ldm_stable, prompts, controller, latent=latent, num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator, uncond_embeddings=uncond_embeddings, low_resource=low_resource, start_time=num_steps)
    if verbose:
        images = ptp_utils.view_images(images)
    return images, x_t