import torch
from .diffusers import AutoencoderKL, UNet2DConditionModel
from transformers import CLIPTextModel, CLIPTokenizer
from peft import LoraConfig
from .diffusers.utils import deprecate
import random
import numpy as np
import math
from utils.misc import NestedTensor
from .diffusers.configuration_utils import register_to_config
from .position_encoding import build_position_encoding
from .backbone import Joiner

class ProcessorWithAttnMap:
    """Drop-in replacement for diffusers' AttnProcessor2_0 that also returns
    the softmax attention weights.

    Attention is computed with an explicit pure-PyTorch scaled-dot-product
    (instead of F.scaled_dot_product_attention) so the per-head weights are
    available; ``__call__`` therefore returns ``(hidden_states, attn_weight)``
    and the patched ``forward`` installed on the attention module is expected
    to unpack both values.
    """

    def __call__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
        *args,
        **kwargs,
    ):
        """Run attention for ``attn`` (a diffusers ``Attention`` module).

        Returns:
            Tuple ``(hidden_states, attn_weight)`` where ``attn_weight`` has
            shape ``(batch, heads, query_len, key_len)``.
        """
        if len(args) > 0 or kwargs.get("scale", None) is not None:
            deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
            deprecate("scale", "1.0.0", deprecation_message)

        residual = hidden_states
        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            # Flatten spatial dims: (B, C, H, W) -> (B, H*W, C).
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )

        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects attention_mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            # Self-attention: keys/values come from the same sequence.
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        # (B, L, inner_dim) -> (B, heads, L, head_dim)
        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        if attn.norm_q is not None:
            query = attn.norm_q(query)
        if attn.norm_k is not None:
            key = attn.norm_k(key)

        def scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=0.0,
                                         is_causal=False, scale=None, enable_gqa=False):
            """Reference SDPA (mirrors torch.nn.functional.scaled_dot_product_attention)
            that additionally returns the attention weights."""
            L, S = query.size(-2), key.size(-2)
            scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale
            attn_bias = torch.zeros(L, S, dtype=query.dtype, device=query.device)
            if is_causal:
                assert attn_mask is None
                temp_mask = torch.ones(L, S, dtype=torch.bool, device=query.device).tril(diagonal=0)
                attn_bias.masked_fill_(temp_mask.logical_not(), float("-inf"))
                # Bug fix: the original discarded the result of `.to(...)`
                # (tensors are not converted in place); assign it back.
                attn_bias = attn_bias.to(query.dtype)

            if attn_mask is not None:
                if attn_mask.dtype == torch.bool:
                    attn_bias.masked_fill_(attn_mask.logical_not(), float("-inf"))
                else:
                    attn_bias += attn_mask

            if enable_gqa:
                # Grouped-query attention: expand kv heads to match q heads.
                key = key.repeat_interleave(query.size(-3) // key.size(-3), -3)
                value = value.repeat_interleave(query.size(-3) // value.size(-3), -3)

            attn_weight = query @ key.transpose(-2, -1) * scale_factor
            attn_weight += attn_bias
            attn_weight = torch.softmax(attn_weight, dim=-1)
            # train=True matches the PyTorch reference implementation; the only
            # call site below passes dropout_p=0.0, so this is a no-op.
            attn_weight = torch.dropout(attn_weight, dropout_p, train=True)

            return attn_weight @ value, attn_weight

        hidden_states, aw = scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        # (B, heads, L, head_dim) -> (B, L, heads*head_dim)
        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states, aw

import inspect
import types
class UNetWithAttnMap(UNet2DConditionModel):
    """UNet2DConditionModel that records cross-attention maps.

    After ``HookAttention`` is called, every attention module selected by
    ``which_attn`` gets a ``ProcessorWithAttnMap`` and a patched ``forward``;
    each subsequent UNet pass then fills ``self.attn_map`` with
    ``{dotted module path: head-averaged attention map}``.
    """

    def HookAttention(self):
        """Reset the map store and install hooks over the whole UNet."""
        self.attn_map = {}
        self.hook_attn(self)

    def which_attn(self, name):
        # Select cross-attention ('attn2') modules on the decoder ('up') path,
        # excluding the last up block and any LoRA sub-modules.
        return 'attn2' in name and ('up_blocks.3' not in name) and 'up' in name and ('lora' not in name) and ('sample_size' not in name)

    def hook_attn(self, module, prefix=''):
        """Recursively walk children and hook every module whose dotted path
        matches ``which_attn``.

        Note: paths are built as ``prefix + '.' + name`` so they carry a
        leading dot (e.g. '.up_blocks.1...'); ``which_attn`` only performs
        substring tests, so the leading dot is harmless.
        """
        if not hasattr(module, 'named_children'):
            return
        for name, child in module.named_children():
            full_name = prefix + '.' + name
            if self.which_attn(full_name):
                child.processor = ProcessorWithAttnMap()
                self.overload_forward(child, full_name)
            else:
                self.hook_attn(child, full_name)

    def overload_forward(self, attnclass, location):
        """Rebind ``attnclass.forward`` so the attention map produced by
        ``ProcessorWithAttnMap`` is stored into ``self.attn_map[location]``."""
        def f(
            cls,
            hidden_states: torch.Tensor,
            encoder_hidden_states=None,
            attention_mask=None,
            **cross_attention_kwargs,
        ) -> torch.Tensor:
            # Keep only kwargs the processor accepts (mirrors diffusers'
            # Attention.forward; the original also computed an `unused_kwargs`
            # list that was never used — removed as dead code).
            attn_parameters = set(inspect.signature(cls.processor.__call__).parameters.keys())
            cross_attention_kwargs = {k: w for k, w in cross_attention_kwargs.items() if k in attn_parameters}

            ret, attn_map = cls.processor(
                cls,
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
                **cross_attention_kwargs,
            )
            # Average over heads, then swap the last two axes: the processor's
            # weights are (batch, query_positions, key_positions).
            self.attn_map[location] = attn_map.mean(1).transpose(-2, -1)
            return ret

        attnclass.forward = types.MethodType(f, attnclass)

class DiffusionBackbone(torch.nn.Module):
    """Feature backbone built from a pretrained Stable-Diffusion pipeline.

    Images are encoded by a frozen VAE; a single UNet pass at timestep 0,
    conditioned on a learned text prompt, yields multi-scale features that
    are concatenated with the cross-attention maps collected by
    ``UNetWithAttnMap`` and returned as ``NestedTensor``s keyed '0'/'1'/'2'.
    """

    def __init__(self, pretrained_model_path, lora_rank=16, meta_prompt_num=100) -> None:
        """
        Args:
            pretrained_model_path: HF-style directory containing tokenizer /
                text_encoder / vae / unet subfolders.
            lora_rank: rank (and alpha) of the LoRA adapters added to the
                UNet attention projections.
            meta_prompt_num: number of prompt tokens; also widens the channel
                counts of the two feature levels that receive attention maps.
        """
        super().__init__()
        # NOTE(review): `revision=False` looks odd (revision is normally a
        # git ref string or None) — confirm the vendored diffusers accepts it.
        self.tokenizer = CLIPTokenizer.from_pretrained(
            pretrained_model_path, subfolder="tokenizer", revision=False, model_max_length=meta_prompt_num
        )
        self.text_encoder = CLIPTextModel.from_pretrained(
            pretrained_model_path, subfolder='text_encoder', revision=False, max_position_embeddings=meta_prompt_num
        )
        self.vae = AutoencoderKL.from_pretrained(
            pretrained_model_path, subfolder="vae", revision=False
        )
        self.unet = UNetWithAttnMap.from_pretrained(
            pretrained_model_path, subfolder="unet", revision=False
        )
        # Seed the prompt embedding from the fixed class-name caption.
        # NOTE(review): self.training is always True inside __init__
        # (nn.Module default), so this branch and the LoRA one below always run.
        caption_input_ids = self.tokenize_captions(["Car,Van,Truck,Pedestrian,Person,Cyclist,Tram,Misc"], self.training)
        encoder_hidden_states = self.text_encoder(caption_input_ids, return_dict=False)[0]

        # Only the UNet (via LoRA) and its time embedding are trained.
        self.vae.requires_grad_(False)
        self.text_encoder.requires_grad_(False)

        if self.training:
            unet_lora_config = LoraConfig(
                r=lora_rank,
                lora_alpha=lora_rank,
                init_lora_weights="gaussian",
                target_modules=["to_k", "to_q", "to_v", "to_out.0"],
            )
            self.unet.add_adapter(unet_lora_config)
        self.unet.HookAttention()
        self.unet.time_embedding.requires_grad_(True)

        # Channel counts of the three coarser up-block levels; the two levels
        # that get attention maps concatenated gain `meta_prompt_num` channels.
        self.num_channels = [self.unet.config.block_out_channels[i] for i in [1, 2, 3]]
        self.num_channels[1] += meta_prompt_num
        self.num_channels[2] += meta_prompt_num
        self.strides = [-1, -1, -1]

        # NOTE(review): Parameter(...).cuda() returns a plain non-leaf tensor,
        # so `batched_text_feat` is NOT registered as a module parameter —
        # confirm whether the prompt is meant to be trainable. Batch size 16
        # and the all-zero timesteps are hard-coded to the training setup.
        self.batched_text_feat = torch.nn.Parameter(torch.repeat_interleave(encoder_hidden_states, 16, 0)).cuda()
        self.timesteps = torch.zeros((16,)).long().cuda()

    def tokenize_captions(self, examples, is_train=True):
        """Tokenize a batch of captions.

        Each entry may be a string or a list/array of strings; for lists, a
        random caption is chosen when ``is_train`` else the first one.

        Returns:
            The tokenizer's ``input_ids``.

        Raises:
            ValueError: if an entry is neither a string nor a list/array.
        """
        captions = []
        for caption in examples:
            if isinstance(caption, str):
                captions.append(caption)
            elif isinstance(caption, (list, np.ndarray)):
                # take a random caption if there are multiple
                captions.append(random.choice(caption) if is_train else caption[0])
            else:
                # (fixed: the original used an f-string with no placeholders)
                raise ValueError(
                    "Caption column should contain either strings or lists of strings."
                )
        inputs = self.tokenizer(
            captions, max_length=self.tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
        )
        return inputs.input_ids

    def forward(self, images):
        """Extract multi-scale features for a batch of images.

        Args:
            images: (B, C, H, W) tensor on the VAE's device.
        Returns:
            dict of NestedTensor keyed '0' (coarsest) to '2' (finest);
            levels '1' and '2' carry attention maps as extra channels.
        """
        b, c, h, w = images.shape

        latents = self.vae.encode(images).latent_dist.sample()
        latents = latents * self.vae.config.scaling_factor

        # NOTE(review): assumes the vendored UNet returns a mutable sequence
        # of multi-scale feature maps (outs[0] finest) — a stock diffusers
        # UNet with return_dict=False returns a 1-tuple; confirm against
        # the local .diffusers package.
        outs = self.unet(latents, self.timesteps, self.batched_text_feat, return_dict=False)

        out = {}
        def get_m(p):
            # All-False padding mask matching the feature map's spatial size.
            return torch.zeros(p.shape[0], p.shape[2], p.shape[3]).to(torch.bool).to(p.device)

        # Split collected attention maps by the up block they came from.
        upblock1 = []
        upblock2 = []
        for k, v in self.unet.attn_map.items():
            if ('up_blocks.1' in k):
                upblock1.append(v)
            else:
                upblock2.append(v)

        # Average maps within a block and reshape to (B, tokens, H, W).
        # NOTE(review): transpose followed by reshape reinterprets the
        # (pixels, tokens) memory order — verify the layout is intended.
        upblock1attn = torch.stack(upblock1).mean(0).transpose(-2, -1).reshape(b, -1, outs[0].shape[-2], outs[0].shape[-1])
        upblock2attn = torch.stack(upblock2).mean(0).transpose(-2, -1).reshape(b, -1, outs[1].shape[-2], outs[1].shape[-1])
        outs[1] = torch.cat([outs[1], upblock2attn], dim=1)
        outs[0] = torch.cat([outs[0], upblock1attn], dim=1)
        out['0'] = (NestedTensor(outs[2], get_m(outs[2])))
        out['1'] = (NestedTensor(outs[1], get_m(outs[1])))
        out['2'] = (NestedTensor(outs[0], get_m(outs[0])))
        # out['3']=(NestedTensor(outs[3],get_m(outs[3]))) no 3 is the best

        return out

def build_diffusion_backbone(cfg):
    """Build the diffusion backbone wrapped in a Joiner with positional encoding.

    cfg keys used: 'backbone' (pretrained model path), 'lora_rank',
    'meta_prompt_num', plus whatever ``build_position_encoding`` reads.
    (The original also computed ``return_interm_layers`` from cfg['masks'] /
    cfg['num_feature_levels'] but never used it — removed as dead code.)
    """
    position_embedding = build_position_encoding(cfg)
    backbone = DiffusionBackbone(cfg['backbone'], cfg['lora_rank'], cfg['meta_prompt_num'])
    return Joiner(backbone, position_embedding)
