# Copyright (c) Microsoft, Inc. 2020
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Author: penhe@microsoft.com
# Date: 01/15/2020
#

"""
  Disentangled SelfAttention module
"""

import numpy as np
import math
import torch
from torch import nn, softmax
import functools
import pdb

from .ops import *
from .da_utils import build_relative_position

# BUG FIX: DisentangledSelfAttention2 is a public class defined in this module
# but was missing from __all__, so `from <module> import *` silently dropped it.
__all__ = ['DisentangledSelfAttention', 'DisentangledSelfAttention2']
class DisentangledSelfAttention(nn.Module):
    def __init__(self, d_model, n_head):
        super().__init__()
        self.num_attention_heads = n_head
        _attention_head_size = d_model // n_head
        self.attention_head_size = _attention_head_size
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query_proj = nn.Linear(d_model, self.all_head_size, bias=True)
        self.key_proj = nn.Linear(d_model, self.all_head_size, bias=True)
        self.value_proj = nn.Linear(d_model, self.all_head_size, bias=True)
        self.hidden_dropout_prob = 0.0
        self.attention_mask = None

        self.share_att_key = False
        self.pos_att_type = ['c2p', 'p2c', 'p2p']  # c2p|p2c
        # TODO it must to be True
        self.relative_attention = True

        if self.relative_attention:
            self.position_buckets = 129
            self.max_relative_positions = -1
            self.pos_ebd_size = self.max_relative_positions
            if self.position_buckets > 0:
                self.pos_ebd_size = self.position_buckets
                # For backward compitable

            self.pos_dropout = StableDropout(self.hidden_dropout_prob)

            if (not self.share_att_key):
                if 'c2p' in self.pos_att_type or 'p2p' in self.pos_att_type:
                    self.pos_key_proj = nn.Linear(d_model, self.all_head_size, bias=True)
                if 'p2c' in self.pos_att_type or 'p2p' in self.pos_att_type:
                    self.pos_query_proj = nn.Linear(d_model, self.all_head_size)

        self.dropout = StableDropout(self.attention_probs_dropout_prob)
        self._register_load_state_dict_pre_hook(self._pre_load_hook)

    def transpose_for_scores(self, x, attention_heads):
        new_x_shape = x.size()[:-1] + (attention_heads, -1)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1))

    def forward(self, hidden_states, query_states=None, relative_pos=None, rel_embeddings=None):
        if query_states is None:
            query_states = hidden_states
        query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads).float()
        key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads).float()
        value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)
        
        rel_att = None
        # Take the dot product between "query" and "key" to get the raw attention scores.
        scale_factor = 1
        if 'c2p' in self.pos_att_type:
            scale_factor += 1
        if 'p2c' in self.pos_att_type:
            scale_factor += 1
        if 'p2p' in self.pos_att_type:
            scale_factor += 1
        scale = 1/math.sqrt(query_layer.size(-1)*scale_factor)
        # Q K
        attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)*scale)
        if self.relative_attention:
            rel_embeddings = self.pos_dropout(rel_embeddings)
            rel_att = self.disentangled_attention_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)

        if rel_att is not None:
            attention_scores = (attention_scores + rel_att)
        attention_scores = (attention_scores - attention_scores.max(dim=-1, keepdim=True).values.detach()).to(hidden_states)
        attention_scores = attention_scores.view(-1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1))

        # bxhxlxd
        _attention_probs = XSoftmax.apply(attention_scores, self.attention_mask, -1)
        attention_probs = self.dropout(_attention_probs)
        context_layer = torch.bmm(attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer)
        context_layer = context_layer.view(-1, self.num_attention_heads, context_layer.size(-2), context_layer.size(-1)).permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (-1,)
        context_layer = context_layer.view(*new_context_layer_shape)

        return context_layer

    def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
        if relative_pos is None:
            q = query_layer.size(-2)
            relative_pos = build_relative_position(q, key_layer.size(-2), bucket_size = self.position_buckets, \
                max_position = self.max_relative_positions, device=query_layer.device)
        if relative_pos.dim()==2:
            relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
        elif relative_pos.dim()==3:
            relative_pos = relative_pos.unsqueeze(1)
        # bxhxqxk
        elif relative_pos.dim()!=4:
            raise ValueError(f'Relative postion ids must be of dim 2 or 3 or 4. {relative_pos.dim()}')

        att_span = self.pos_ebd_size
        relative_pos = relative_pos.long().to(query_layer.device)

        rel_embeddings = rel_embeddings[self.pos_ebd_size - att_span:self.pos_ebd_size + att_span, :].unsqueeze(0) #.repeat(query_layer.size(0)//self.num_attention_heads, 1, 1)
        if self.share_att_key:
            pos_query_layer = self.transpose_for_scores(self.query_proj(rel_embeddings), self.num_attention_heads)\
                .repeat(query_layer.size(0)//self.num_attention_heads, 1, 1) #.split(self.all_head_size, dim=-1)
            pos_key_layer = self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads)\
                .repeat(query_layer.size(0)//self.num_attention_heads, 1, 1) #.split(self.all_head_size, dim=-1)
        else:
            if 'c2p' in self.pos_att_type or 'p2p' in self.pos_att_type:
                pos_key_layer = self.transpose_for_scores(self.pos_key_proj(rel_embeddings), self.num_attention_heads)\
                    .repeat(query_layer.size(0)//self.num_attention_heads, 1, 1) #.split(self.all_head_size, dim=-1)
            if 'p2c' in self.pos_att_type or 'p2p' in self.pos_att_type:
                pos_query_layer = self.transpose_for_scores(self.pos_query_proj(rel_embeddings), self.num_attention_heads)\
                    .repeat(query_layer.size(0)//self.num_attention_heads, 1, 1) #.split(self.all_head_size, dim=-1)

        score = 0
        # content->position
        if 'c2p' in self.pos_att_type:
            scale = 1/math.sqrt(pos_key_layer.size(-1)*scale_factor)
            c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2).to(query_layer)*scale)
            c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span*2-1).squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)])
            c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_pos)
            score += c2p_att

        # position->content
        if 'p2c' in self.pos_att_type or 'p2p' in self.pos_att_type:
            scale = 1/math.sqrt(pos_query_layer.size(-1)*scale_factor)

        if 'p2c' in self.pos_att_type:
            p2c_att = torch.bmm(pos_query_layer.to(key_layer)*scale, key_layer.transpose(-1, -2))
            p2c_att = torch.gather(p2c_att, dim=-2, index=c2p_pos)
            score += p2c_att

        # position->position
        if 'p2p' in self.pos_att_type:
            pos_query = pos_query_layer[:,:,att_span:,:]
            p2p_att = torch.matmul(pos_query, pos_key_layer.transpose(-1, -2))
            p2p_att = p2p_att.expand(query_layer.size()[:2] + p2p_att.size()[2:])
            if query_layer.size(-2) != key_layer.size(-2):
                p2p_att = torch.gather(p2p_att, dim=-2, index=pos_index.expand(query_layer.size()[:2] + (pos_index.size(-2), p2p_att.size(-1))))
            p2p_att = torch.gather(p2p_att, dim=-1, index=c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)]))
            score += p2p_att

        return score

    def _pre_load_hook(self, state_dict, prefix, local_metadata, strict,
        missing_keys, unexpected_keys, error_msgs):
        self_state = self.state_dict()
        if ((prefix + 'query_proj.weight') not in state_dict) and ((prefix + 'in_proj.weight') in state_dict):
          v1_proj = state_dict[prefix+'in_proj.weight']
          v1_proj = v1_proj.unsqueeze(0).reshape(self.num_attention_heads, -1, v1_proj.size(-1))
          q,k,v=v1_proj.chunk(3, dim=1)
          state_dict[prefix + 'query_proj.weight'] = q.reshape(-1, v1_proj.size(-1))
          state_dict[prefix + 'key_proj.weight'] = k.reshape(-1, v1_proj.size(-1))
          state_dict[prefix + 'key_proj.bias'] = self_state['key_proj.bias']
          state_dict[prefix + 'value_proj.weight'] = v.reshape(-1, v1_proj.size(-1))
          v1_query_bias = state_dict[prefix + 'q_bias']
          state_dict[prefix + 'query_proj.bias'] = v1_query_bias
          v1_value_bias = state_dict[prefix +'v_bias']
          state_dict[prefix + 'value_proj.bias'] = v1_value_bias

          v1_pos_key_proj = state_dict[prefix + 'pos_proj.weight']
          state_dict[prefix + 'pos_key_proj.weight'] = v1_pos_key_proj
          v1_pos_query_proj = state_dict[prefix + 'pos_q_proj.weight']
          state_dict[prefix + 'pos_query_proj.weight'] = v1_pos_query_proj
          v1_pos_query_proj_bias = state_dict[prefix + 'pos_q_proj.bias']
          state_dict[prefix + 'pos_query_proj.bias'] = v1_pos_query_proj_bias
          state_dict[prefix + 'pos_key_proj.bias'] = self_state['pos_key_proj.bias']

          del state_dict[prefix + 'in_proj.weight']
          del state_dict[prefix + 'q_bias']
          del state_dict[prefix + 'v_bias']
          del state_dict[prefix + 'pos_proj.weight']
          del state_dict[prefix + 'pos_q_proj.weight']
          del state_dict[prefix + 'pos_q_proj.bias']


class DisentangledSelfAttention2(nn.Module):
    """Disentangled self-attention over a content stream and a position stream.

    Unlike ``DisentangledSelfAttention`` (which uses relative-position
    embedding tables), this variant receives an explicit position tensor
    ``pos_states`` and sums the score terms selected by ``att_type``:
    'c2c' (content->content), 'o2o' (position->position),
    'c2o' (content->position) and 'o2c' (position->content).

    Args:
        d_model: hidden size of the inputs.
        n_head: number of attention heads; must divide ``d_model``.
        att_type: non-empty list drawn from {'c2c', 'o2o', 'c2o', 'o2c'}.

    Raises:
        ValueError: if ``att_type`` is empty.
    """

    def __init__(self, d_model, n_head, att_type: list):
        super().__init__()
        # BUG FIX (robustness): an empty att_type previously crashed deep in
        # forward() (ZeroDivisionError in the scale, then None arithmetic).
        # Fail fast with a clear message instead.
        if not att_type:
            raise ValueError("att_type must contain at least one of 'c2c', 'o2o', 'c2o', 'o2c'")
        self.att_type = att_type
        self.num_attention_heads = n_head
        self.attention_head_size = d_model // n_head
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # Content projections.
        self.query_proj = nn.Linear(d_model, self.all_head_size, bias=True)
        self.key_proj = nn.Linear(d_model, self.all_head_size, bias=True)
        self.value_proj = nn.Linear(d_model, self.all_head_size, bias=True)

        # Position-stream projections.
        self.pos_query_proj = nn.Linear(d_model, self.all_head_size, bias=True)
        self.pos_key_proj = nn.Linear(d_model, self.all_head_size, bias=True)

        self.attention_mask = None

    def transpose_for_scores(self, x, attention_heads):
        """Reshape ``(batch, seq, hidden)`` to ``(batch * heads, seq, head_dim)``."""
        new_x_shape = x.size()[:-1] + (attention_heads, -1)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1))

    def forward(self, hidden_states, pos_states):
        """Attend over the content stream, biased by the position stream.

        Args:
            hidden_states: content tensor — presumably ``(batch, seq, d_model)``;
                confirm against callers.
            pos_states: position tensor with the same shape as ``hidden_states``.

        Returns:
            Tuple ``(context_layer, attention_probs)`` where ``context_layer``
            is ``(batch, seq, all_head_size)`` and ``attention_probs`` is
            ``(batch, heads, seq, seq)``.
        """
        query_layer = self.transpose_for_scores(self.query_proj(hidden_states), self.num_attention_heads).float()
        key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads).float()
        value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)

        pos_query = self.transpose_for_scores(self.pos_query_proj(pos_states), self.num_attention_heads).float()
        pos_key = self.transpose_for_scores(self.pos_key_proj(pos_states), self.num_attention_heads).float()

        # Every enabled term shares one 1/sqrt(d * #terms) scale so the summed
        # score keeps roughly unit variance.
        scale_factor = len(self.att_type)
        scale = 1 / math.sqrt(query_layer.size(-1) * scale_factor)

        # Query/key pair for each score term, in the original accumulation
        # order (c2c, o2o, c2o, o2c).
        # NOTE(review): 'o2c' multiplies pos_query with query_layer; a
        # position->content term would normally use key_layer — confirm intent.
        term_inputs = (
            ('c2c', query_layer, key_layer),
            ('o2o', pos_query, pos_key),
            ('c2o', query_layer, pos_key),
            ('o2c', pos_query, query_layer),
        )
        # BUG FIX: attention_scores used to start as None and was only
        # assigned in the 'c2c' branch, so any att_type without 'c2c'
        # crashed on `None + tensor`. Accumulate each enabled term instead.
        attention_scores = None
        for name, q, k in term_inputs:
            if name in self.att_type:
                term = torch.bmm(q, k.transpose(-1, -2) * scale)
                attention_scores = term if attention_scores is None else attention_scores + term

        # Subtract the detached per-row max for numerical stability, then cast
        # back to the input dtype/device.
        attention_scores = ((attention_scores - attention_scores.max(dim=-1, keepdim=True).values.detach()).to(
            hidden_states))
        attention_scores = attention_scores.view(-1, self.num_attention_heads, attention_scores.size(-2),
                                                 attention_scores.size(-1))

        # bxhxlxd
        attention_probs = softmax(attention_scores, dim=-1)
        context_layer = torch.bmm(attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)),
                                  value_layer)
        context_layer = context_layer.view(-1, self.num_attention_heads, context_layer.size(-2),
                                           context_layer.size(-1)).permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (-1,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer, attention_probs

    def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
        """Relative-position bias (copied from ``DisentangledSelfAttention``).

        NOTE(review): this method appears to be copy-pasted dead code — this
        class never defines ``position_buckets``, ``max_relative_positions``,
        ``pos_ebd_size``, ``share_att_key`` or ``pos_att_type``, so calling it
        raises AttributeError. Nothing in this class invokes it; confirm
        whether it should be removed or the attributes added.
        """
        if relative_pos is None:
            q = query_layer.size(-2)
            relative_pos = build_relative_position(q, key_layer.size(-2), bucket_size=self.position_buckets,
                                                   max_position=self.max_relative_positions, device=query_layer.device)
        if relative_pos.dim() == 2:
            relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
        elif relative_pos.dim() == 3:
            relative_pos = relative_pos.unsqueeze(1)
        # bxhxqxk
        elif relative_pos.dim() != 4:
            raise ValueError(f'Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}')

        att_span = self.pos_ebd_size
        relative_pos = relative_pos.long().to(query_layer.device)

        rel_embeddings = rel_embeddings[self.pos_ebd_size - att_span:self.pos_ebd_size + att_span, :].unsqueeze(
            0)  # .repeat(query_layer.size(0)//self.num_attention_heads, 1, 1)
        if self.share_att_key:
            pos_query_layer = self.transpose_for_scores(self.query_proj(rel_embeddings), self.num_attention_heads) \
                .repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)  # .split(self.all_head_size, dim=-1)
            pos_key_layer = self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads) \
                .repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)  # .split(self.all_head_size, dim=-1)
        else:
            if 'c2p' in self.pos_att_type or 'p2p' in self.pos_att_type:
                pos_key_layer = self.transpose_for_scores(self.pos_key_proj(rel_embeddings), self.num_attention_heads) \
                    .repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)  # .split(self.all_head_size, dim=-1)
            if 'p2c' in self.pos_att_type or 'p2p' in self.pos_att_type:
                pos_query_layer = self.transpose_for_scores(self.pos_query_proj(rel_embeddings),
                                                            self.num_attention_heads) \
                    .repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)  # .split(self.all_head_size, dim=-1)

        score = 0
        # content->position
        if 'c2p' in self.pos_att_type:
            scale = 1 / math.sqrt(pos_key_layer.size(-1) * scale_factor)
            c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2).to(query_layer) * scale)
            c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1).squeeze(0).expand(
                [query_layer.size(0), query_layer.size(1), relative_pos.size(-1)])
            c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_pos)
            score += c2p_att

        # position->content
        if 'p2c' in self.pos_att_type or 'p2p' in self.pos_att_type:
            scale = 1 / math.sqrt(pos_query_layer.size(-1) * scale_factor)

        if 'p2c' in self.pos_att_type:
            # NOTE(review): relies on c2p_pos from the 'c2p' branch — NameError
            # if 'p2c' is enabled without 'c2p'.
            p2c_att = torch.bmm(pos_query_layer.to(key_layer) * scale, key_layer.transpose(-1, -2))
            p2c_att = torch.gather(p2c_att, dim=-2, index=c2p_pos)
            score += p2c_att

        # position->position
        # NOTE(review): pos_index below is undefined (NameError) and the
        # four-index slice of a 3-d tensor raises IndexError; this branch
        # cannot run as written.
        if 'p2p' in self.pos_att_type:
            pos_query = pos_query_layer[:, :, att_span:, :]
            p2p_att = torch.matmul(pos_query, pos_key_layer.transpose(-1, -2))
            p2p_att = p2p_att.expand(query_layer.size()[:2] + p2p_att.size()[2:])
            if query_layer.size(-2) != key_layer.size(-2):
                p2p_att = torch.gather(p2p_att, dim=-2, index=pos_index.expand(
                    query_layer.size()[:2] + (pos_index.size(-2), p2p_att.size(-1))))
            p2p_att = torch.gather(p2p_att, dim=-1, index=c2p_pos.expand(
                [query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)]))
            score += p2p_att

        return score

    def _pre_load_hook(self, state_dict, prefix, local_metadata, strict,
                       missing_keys, unexpected_keys, error_msgs):
        """Remap DeBERTa-v1 checkpoint keys to the per-projection layout.

        NOTE(review): unlike ``DisentangledSelfAttention``, this class never
        calls ``_register_load_state_dict_pre_hook(self._pre_load_hook)``, so
        this hook does not run on ``load_state_dict`` — confirm whether the
        registration was dropped intentionally.
        """
        self_state = self.state_dict()
        if ((prefix + 'query_proj.weight') not in state_dict) and ((prefix + 'in_proj.weight') in state_dict):
            # Split the fused v1 QKV weight head-by-head into three matrices.
            v1_proj = state_dict[prefix + 'in_proj.weight']
            v1_proj = v1_proj.unsqueeze(0).reshape(self.num_attention_heads, -1, v1_proj.size(-1))
            q, k, v = v1_proj.chunk(3, dim=1)
            state_dict[prefix + 'query_proj.weight'] = q.reshape(-1, v1_proj.size(-1))
            state_dict[prefix + 'key_proj.weight'] = k.reshape(-1, v1_proj.size(-1))
            # v1 shipped no key bias; keep the current (initialised) one.
            state_dict[prefix + 'key_proj.bias'] = self_state['key_proj.bias']
            state_dict[prefix + 'value_proj.weight'] = v.reshape(-1, v1_proj.size(-1))
            v1_query_bias = state_dict[prefix + 'q_bias']
            state_dict[prefix + 'query_proj.bias'] = v1_query_bias
            v1_value_bias = state_dict[prefix + 'v_bias']
            state_dict[prefix + 'value_proj.bias'] = v1_value_bias

            # Position projections: v1 names -> v2 names.
            v1_pos_key_proj = state_dict[prefix + 'pos_proj.weight']
            state_dict[prefix + 'pos_key_proj.weight'] = v1_pos_key_proj
            v1_pos_query_proj = state_dict[prefix + 'pos_q_proj.weight']
            state_dict[prefix + 'pos_query_proj.weight'] = v1_pos_query_proj
            v1_pos_query_proj_bias = state_dict[prefix + 'pos_q_proj.bias']
            state_dict[prefix + 'pos_query_proj.bias'] = v1_pos_query_proj_bias
            state_dict[prefix + 'pos_key_proj.bias'] = self_state['pos_key_proj.bias']

            # Drop the translated v1 keys.
            del state_dict[prefix + 'in_proj.weight']
            del state_dict[prefix + 'q_bias']
            del state_dict[prefix + 'v_bias']
            del state_dict[prefix + 'pos_proj.weight']
            del state_dict[prefix + 'pos_q_proj.weight']
            del state_dict[prefix + 'pos_q_proj.bias']