# -*- coding: utf-8 -*-

import torch
import torch.nn as nn
import torch.nn.functional as F


class ScaleDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(q @ k^T / scale) @ v."""

    def __init__(self, scale_v, attn_dropout=0.1):
        super(ScaleDotProductAttention, self).__init__()
        # Divisor applied to the query before the score matmul.
        self.scale_v = scale_v
        self.attn_dropout = attn_dropout
        self.dropout = nn.Dropout(attn_dropout)

    def forward(self, q, k, v, mask=None):
        """
        :param q:    B, n_head, L, d_q
        :param k:    B, n_head, L, d_k  (d_q == d_k)
        :param v:    B, n_head, L, d_v
        :param mask: B, 1, 1, L — positions equal to 0 are masked out
                     (broadcast against the score matrix); optional
        :return: (attention weights  B, n_head, L, L,
                  attended values    B, n_head, L, d_v)
        """
        # Raw attention logits: B, n_head, L, L
        scores = torch.matmul(q / self.scale_v, k.transpose(2, 3))

        if mask is not None:
            # Drive masked logits toward -inf so softmax gives them ~0 weight.
            scores = scores.masked_fill(mask == 0, -1e9)

        weights = self.dropout(F.softmax(scores, dim=-1))

        ##########################
        # weights: B, n_head, L, L
        # v      : B, n_head, L, d_v
        # -->    : B, n_head, L, d_v
        ##########################
        attended = torch.matmul(weights, v)

        return weights, attended

class MultiHeadAttention(nn.Module):
    """Multi-head attention with residual connection and post-LayerNorm.

    Projects q/k/v into ``n_head`` subspaces, runs scaled dot-product
    attention per head, concatenates the heads, projects back to
    ``d_model``, then applies dropout, the residual add, and LayerNorm.
    """

    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super(MultiHeadAttention, self).__init__()
        self.n_head = n_head  # e.g. 8
        self.d_model = d_model  # e.g. 512
        self.d_k = d_k  # per-head query/key dim (d_k == d_q), e.g. 64
        self.d_v = d_v  # per-head value dim, e.g. 64
        self.dropout = nn.Dropout(dropout)

        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)

        # BUGFIX: "Attention Is All You Need" scales logits by sqrt(d_k)
        # (the per-head key dim), not sqrt(d_model). With d_model=512,
        # d_k=64 the old divisor (~22.6) over-flattened the softmax.
        self.scale_dot_product_attention = ScaleDotProductAttention(d_k ** 0.5,
                                                                    attn_dropout=dropout)
        self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
        # LayerNorm (NLP) // BatchNorm (CV)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)

    def forward(self, q, k, v, mask=None):
        """

        DEMO:
            I like China <pad> <pad> <pad> <pad> <pad> <pad> <pad>
            I like play with dog <pad> <pad> <pad> <pad> <pad>

        ==> token to index (int)
        ==> mask [B, L] torch.int32
            [[1 1 1 0 0 0 0 0 0 0],
            [1 1 1 1 1 0 0 0 0 0]]

        :param q:           B, L, H (=d_model)  --multi_head--> B, n_head, L, d_k
        :param k:           B, L, H (=d_model)  --multi_head--> B, n_head, L, d_k --transpose--> B, n_head, d_k, L
        :param v:           B, L, H (=d_model)
        :param mask:        B, 1, 1, L  (broadcast up to [B, n_head, L, L])
        :return: (normalized output B, L, H; attention weights B, n_head, L, L)
        """
        B, L = q.size(0), q.size(1)

        # Keep the raw query for the residual connection: B, L, H
        residual = q

        # 1. Split projections into heads.
        # q(B, L, H) --w_qs--> B, L, n_head * d_k --reshape--> B, L, n_head, d_k --transpose--> B, n_head, L, d_k
        multi_q = self.w_qs(q).reshape(B, L, self.n_head, self.d_k).transpose(1, 2)
        multi_k = self.w_ks(k).reshape(B, L, self.n_head, self.d_k).transpose(1, 2)
        multi_v = self.w_vs(v).reshape(B, L, self.n_head, self.d_v).transpose(1, 2)

        # Call the module (not .forward) so PyTorch hooks still fire.
        # queried_value: B, n_head, L, d_v
        attn_weight, queried_value = self.scale_dot_product_attention(multi_q,
                                                                      multi_k,
                                                                      multi_v,
                                                                      mask=mask)
        # d_k == d_q, but d_k need not equal d_v.
        # B, n_head, L, d_v --> B, L, n_head, d_v --> B, L, n_head * d_v
        queried_value = queried_value.transpose(1, 2).reshape(B, L, -1)
        # Project the concatenated heads back to d_model: B, L, H
        queried_value = self.dropout(self.fc(queried_value))

        # Add & Norm (post-norm transformer layer)
        q = queried_value + residual
        q = self.layer_norm(q)

        return q, attn_weight


def construct_mask_matrix(seq_len, device):
    """Build a (seq_len, seq_len) lower-triangular causal mask of 1.0/0.0.

    Entry (i, j) is 1.0 when query position i may attend to key
    position j, i.e. when j <= i.
    """
    positions = torch.arange(seq_len, device=device)  # 0, 1, ..., seq_len-1
    # (L, 1) >= (1, L) broadcasts to the full (L, L) comparison grid.
    return (positions.unsqueeze(1) >= positions.unsqueeze(0)).float()


class MaskedMultiHeadAttention(nn.Module):
    """Causal (decoder-side) multi-head attention.

    Wraps MultiHeadAttention and supplies a lower-triangular mask so each
    target position attends only to itself and earlier positions.
    """

    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super(MaskedMultiHeadAttention, self).__init__()
        self.multi_head_attention = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)

    def forward(self, q, k, v):
        """
        DEMO1:

            Mask matrix dimensions:

            B, n_head, L, L
            1, 1, L, L (L=6)

            <sos> Dongdong like Minmin <eos> <pad>

                        <sos>       Dongdong    like        Minmin      <eos>       <pad>
            <sos>       1           0           0           0           0           0
            Dong        1           1           0           0           0           0
            like        1           1           1           0           0           0
            Min         1           1           1           1           0           0
            <eos>       1           1           1           1           1           0
            <pad>       1           1           1           1           1           1


        :param q:       B, L, H (every L here is the target-sequence length)
        :param k:       B, L, H
        :param v:       B, L, H
        :return: (attended output B, L, H; attention weights B, n_head, L, L)
        """
        seq_len = q.size(1)
        # (L, L) causal mask, then (1, 1, L, L) so it broadcasts to (B, n_head, L, L).
        causal_mask = construct_mask_matrix(seq_len, q.device).unsqueeze(0).unsqueeze(1)
        queried_value, attn_weight = self.multi_head_attention(q, k, v, mask=causal_mask)
        return queried_value, attn_weight


if __name__ == '__main__':
    # BUGFIX: the previous demo called MultiHeadAttention() with no
    # arguments (the constructor requires n_head, d_model, d_k, d_v) and
    # forward() with no tensors, so it crashed. Run a real smoke test.
    torch.manual_seed(0)
    n_head, d_model, d_k, d_v = 8, 512, 64, 64
    batch_size, seq_len = 2, 10

    multi_head_attention = MultiHeadAttention(n_head, d_model, d_k, d_v)
    q = torch.randn(batch_size, seq_len, d_model)
    k = torch.randn(batch_size, seq_len, d_model)
    v = torch.randn(batch_size, seq_len, d_model)
    # Padding mask (B, 1, 1, L): first sequence has 3 real tokens, second has 5.
    mask = torch.tensor([[1] * 3 + [0] * 7,
                         [1] * 5 + [0] * 5]).reshape(batch_size, 1, 1, seq_len)

    out, attn_weight = multi_head_attention(q, k, v, mask=mask)
    print('output:   ', tuple(out.shape))          # (2, 10, 512)
    print('attention:', tuple(attn_weight.shape))  # (2, 8, 10, 10)
