import math
import torch
import torch.nn as nn
from utils.tensor import generate_relative_positions_matrix, relative_matmul


class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention, optionally augmented with
    relative position representations (used when ``max_relative_positions > 0``).

    Args:
        head_count : number of attention heads
        model_dim : total dim of the attn (must be divisible by ``head_count``)
        dropout : dropout probability applied to the attention weights
        max_relative_positions : clipping distance for relative positions;
            0 disables relative position representations
    Input:
        key : [batch, key_len, total_dim]
        value : [batch, key_len, total_dim]
        query : [batch, query_len, total_dim]
        mask : [batch, query_len, key_len]; True entries are masked out
        layer_cache : optional dict for incremental decoding; holds
            "self_keys"/"self_values" (attn_type "self") or
            "memory_keys"/"memory_values" (attn_type "context")
        attn_type : "self" or "context"
    Output:
        context : [batch, query_len, total_dim]
        attn : [batch, head, query_len, key_len]
    """

    def __init__(self,
                 head_count,
                 model_dim,
                 dropout,
                 max_relative_positions):
        assert model_dim % head_count == 0, \
            "model_dim must be divisible by head_count"
        super(MultiHeadAttention, self).__init__()

        self.dim_per_head = model_dim // head_count
        self.model_dim = model_dim
        self.head_count = head_count

        # Linear projections for keys, values and queries.
        self.wk = nn.Linear(model_dim, head_count * self.dim_per_head)
        self.wv = nn.Linear(model_dim, head_count * self.dim_per_head)
        self.wq = nn.Linear(model_dim, head_count * self.dim_per_head)

        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)
        self.final_linear = nn.Linear(model_dim, model_dim)

        self.max_relative_positions = max_relative_positions

        if max_relative_positions > 0:
            # One embedding per clipped relative offset in
            # [-max_relative_positions, +max_relative_positions].
            positions_choices = max_relative_positions * 2 + 1
            self.relative_positions_embeddings = nn.Embedding(
                positions_choices, self.dim_per_head)  # relative-position embedding matrix

    def forward(self, key, value, query, mask=None, layer_cache=None, attn_type=None):
        """Compute attention; see the class docstring for shapes."""
        # key : [batch, key_len, dim]
        # value : [batch, value_len, dim]
        # query : [batch, query_len, dim]
        batch_size = key.shape[0]
        dim_per_head = self.dim_per_head
        head_count = self.head_count

        def shape(x):
            # [batch, len, model_dim] -> [batch, head_count, len, dim_per_head]
            return x.view(batch_size, -1, head_count, dim_per_head).transpose(1, 2)

        def un_shape(x):
            # Inverse of shape(): back to [batch, len, head_count * dim_per_head].
            return x.transpose(1, 2).contiguous().view(batch_size, -1, head_count * dim_per_head)

        # Project key, value and query; maintain the cache when decoding step-by-step.
        if layer_cache is not None:
            if attn_type == "self":
                query, key, value = self.wq(query), self.wk(key), self.wv(value)
                key = shape(key)  # [batch, head_count, k_len, dim_per_head]
                value = shape(value)
                # Append the current step to the cached keys/values (along the length dim).
                if layer_cache["self_keys"] is not None:
                    key = torch.cat((layer_cache["self_keys"], key), dim=2)
                if layer_cache["self_values"] is not None:
                    value = torch.cat((layer_cache["self_values"], value), dim=2)
                layer_cache["self_keys"] = key
                layer_cache["self_values"] = value
            elif attn_type == "context":
                # BUG FIX: this branch previously called self.linear_query /
                # self.linear_keys / self.linear_values, which are never defined;
                # the projections are named wq / wk / wv in __init__.
                query = self.wq(query)
                if layer_cache["memory_keys"] is None:
                    # First step: project and cache the memory bank once.
                    key, value = self.wk(key), self.wv(value)
                    key = shape(key)
                    value = shape(value)
                else:
                    key, value = layer_cache["memory_keys"], layer_cache["memory_values"]
                layer_cache["memory_keys"] = key
                layer_cache["memory_values"] = value
        else:
            query, key, value = self.wq(query), self.wk(key), self.wv(value)
            key = shape(key)
            value = shape(value)

        # query [batch, q_len, head_count * dim_per_head] (not yet split into heads)
        # key [batch, head_count, k_len, dim_per_head]
        # value [batch, head_count, v_len, dim_per_head]

        if self.max_relative_positions > 0 and attn_type == "self":
            key_len = key.shape[2]

            relative_positions_matrix = generate_relative_positions_matrix(
                key_len,
                self.max_relative_positions,
                cache=layer_cache is not None)

            # Embeddings of the clipped relative offsets, used for both the
            # score term (keys) and the context term (values).
            relations_keys = self.relative_positions_embeddings(
                relative_positions_matrix.to(key.device))
            relations_values = self.relative_positions_embeddings(
                relative_positions_matrix.to(key.device))

        query = shape(query)  # query [batch, head_count, q_len, dim_per_head]
        key_len = key.shape[2]
        query_len = query.shape[2]

        # Scaled dot-product scores.
        query = query / math.sqrt(dim_per_head)
        query_key = torch.matmul(query, key.transpose(2, 3))  # [batch, head_count, q_len, k_len]

        if self.max_relative_positions > 0 and attn_type == "self":
            scores = query_key + relative_matmul(query, relations_keys, True)
        else:
            scores = query_key

        scores = scores.float()  # [batch, head_count, q_len, k_len]

        if mask is not None:
            mask = mask.unsqueeze(1)  # [batch, 1, q_len, k_len]; broadcasts over heads
            scores = scores.masked_fill(mask, float('-inf'))

        attn = self.softmax(scores).to(query.dtype)  # [batch, head_count, q_len, k_len]
        drop_attn = self.dropout(attn)

        # [batch, head_count, q_len, k_len] x [batch, head_count, v_len, dim_per_head]
        context_original = torch.matmul(drop_attn, value)  # [batch, head_count, q_len, dim_per_head]

        if self.max_relative_positions > 0 and attn_type == "self":
            context = un_shape(context_original + relative_matmul(drop_attn, relations_values, False))
        else:
            context = un_shape(context_original)

        # context [batch, q_len, head_count * dim_per_head]
        output = self.final_linear(context)
        # output [batch, q_len, head_count * dim_per_head]
        attns = attn.view(batch_size, head_count, query_len, key_len)
        return output, attns


if __name__ == '__main__':
    # Smoke test: run one forward pass on toy all-ones tensors.
    toy_key = torch.ones(4, 5, 16)
    toy_value = torch.ones(4, 5, 16)
    toy_query = torch.ones(4, 3, 16)

    attention = MultiHeadAttention(
        head_count=2,
        model_dim=16,
        dropout=0.,
        max_relative_positions=0,
    )

    attention(toy_key, toy_value, toy_query)
