#############################################################################
# This script demonstrates implementing the Encoder part of the Transformer
# model using PyTorch's built-in modules. Reference code:
# https://github.com/mochan-b/pytorch-tutorial1/blob/main/transformer_model.py
#
#
#                                                       Author: Xian Yang
#                                                     Date Time: 2023.11.25
##############################################################################

import math
import torch
import numpy        as np
import torch.nn     as nn
from   torchsummary import summary
from   torch.nn     import TransformerEncoder, TransformerEncoderLayer
import torch.autograd.variable as Variable     # torch中的变量封装函数

class PositionEmbedding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., "Attention Is All You Need").

    Adds a fixed, non-learned position signal to the token embeddings and
    applies dropout to the sum.
    """

    def __init__(self, d_model : int, dropout : float, max_len : int = 5000) -> None:
        r"""
        Args:
            d_model (int):           embedding (hidden) size; assumed even so the
                                     sin/cos interleave fills every column
            dropout (float):         probability of zeroing an element
            max_len (int, optional): maximum sentence length. Defaults to 5000.
        """
        super(PositionEmbedding, self).__init__()
        self._dropout = nn.Dropout(p = dropout)

        # Precompute the positional-encoding table once:
        #   pe[pos, 2i]   = sin(pos / 10000^(2i / d_model))
        #   pe[pos, 2i+1] = cos(pos / 10000^(2i / d_model))
        pe       = torch.zeros(max_len, d_model, dtype=torch.float32)
        pos      = torch.arange(0, max_len, dtype=torch.float32).reshape(-1, 1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(pos * div_term)
        pe[:, 1::2] = torch.cos(pos * div_term)
        # Prepend a size-1 batch dim -> [1, max_len, d_model] so the table
        # broadcasts over the batch dimension of the input.
        pe = pe.unsqueeze(0)

        # Register pe as a buffer: it is saved/loaded with the model's
        # state_dict but is not a Parameter, so the optimizer never updates
        # it and it never requires grad.
        self.register_buffer('pe', pe)

    def forward(self, x : torch.Tensor) -> torch.Tensor:
        """Add positional encodings to x and apply dropout.

        Args:
            x: [batch, seq_len, d_model] token embeddings.

        Returns:
            Tensor of the same shape with position information added
            (elements then zeroed with probability `dropout`).
        """
        # BUG FIX: the original indexed self.pe[:, x.size(1)], which picks the
        # single encoding at index seq_len and adds it to every token; we want
        # the first seq_len encodings. The deprecated Variable(...) wrapper is
        # also gone: the original imported the *module* torch.autograd.variable
        # (calling it raises TypeError) and `required` is not a valid keyword —
        # a registered buffer already has requires_grad=False.
        x = x + self.pe[:, :x.size(1)]
        return self._dropout(x)
    

class Transformer(nn.Module):
    """Encoder-only Transformer built from PyTorch's built-in modules.

    Pipeline: token embedding (scaled by sqrt(d_model)) -> sinusoidal
    positional encoding -> stacked TransformerEncoder layers -> linear
    projection back to the vocabulary.
    """

    def __init__(self, max_len : int, d_model : int, num_head : int,
                 d_ffn : int, num_layer : int, dropout : float = 0.1):
        """
        Args:
            max_len   (int):   vocabulary size (also reused as the maximum
                               sequence length for the positional table)
            d_model   (int):   embedding / hidden size
            num_head  (int):   number of attention heads per layer
            d_ffn     (int):   inner dimension of the feed-forward sublayer
            num_layer (int):   number of stacked encoder layers
            dropout   (float): dropout probability. Defaults to 0.1.
        """
        super(Transformer, self).__init__()

        self._model_type  = "Transformer"
        self._pos_encoder = PositionEmbedding(d_model, dropout, max_len)
        # BUG FIX: PositionEmbedding adds encodings along dim 1 (a batch-first
        # [batch, seq, d_model] layout), but TransformerEncoderLayer defaults
        # to seq-first input. batch_first=True makes the whole pipeline
        # consistently batch-first.
        encode_layer      = TransformerEncoderLayer(d_model, num_head, d_ffn,
                                                    dropout, batch_first=True)
        self._encoder     = TransformerEncoder(encode_layer, num_layer)
        self._embedding   = nn.Embedding(max_len, d_model)
        self._decoder     = nn.Linear(d_model, max_len)
        self._d_model     = d_model
        # NOTE(review): this causal mask is built but never passed to the
        # encoder in forward() — confirm whether full-context (bidirectional)
        # attention is intended here.
        self._mask        = Transformer.gen_mask(max_len)

        self.init_weights()

    def init_weights(self):
        """Uniformly initialize the embedding and output projection weights."""
        init_range = 0.1
        self._embedding.weight.data.uniform_(-init_range, init_range)
        self._decoder.bias.data.zero_()
        self._decoder.weight.data.uniform_(-init_range, init_range)

    @staticmethod
    def gen_mask(max_len : int) -> torch.Tensor:
        """Build a lower-triangular "no peeking ahead" attention mask.

        Args:
            max_len (int): side length of the square mask.

        Returns:
            uint8 tensor of shape [1, max_len, max_len] where entry (i, j)
            is 1 iff position i may attend to position j (j <= i), e.g. for
            max_len = 3:
                [[1, 0, 0],
                 [1, 1, 0],
                 [1, 1, 1]]
        """
        attn_shape = (1, max_len, max_len)
        # np.triu with k=1 marks the strictly-upper triangle (the "future");
        # 1 - mask then flips it so allowed positions carry 1.
        mask = np.triu(np.ones(shape=attn_shape), k = 1).astype(np.uint8)
        return torch.from_numpy(1 - mask)

    def forward(self, x : torch.Tensor) -> torch.Tensor:
        """Map token ids [batch, seq_len] to vocabulary logits
        [batch, seq_len, max_len]."""
        # Scale embeddings by sqrt(d_model), as in the original Transformer
        # paper, so they match the magnitude of the positional encodings.
        x = self._embedding(x) * math.sqrt(self._d_model)
        x = self._pos_encoder(x)
        x = self._encoder(x)
        return self._decoder(x)
    

if __name__ == "__main__":
    # BERT-Base-sized configuration. BUG FIX: the original set d_ffn = 3073,
    # but BERT-Base's feed-forward inner size is 3072 (= 4 * 768).
    max_len   = 30522   # BERT-Base vocabulary size
    d_model   = 768     # hidden size
    num_head  = 12      # attention heads per layer
    d_ffn     = 3072    # feed-forward inner dimension
    num_layer = 6       # encoder layers (note: BERT-Base itself uses 12)
    dropout   = 0.1
    model     = Transformer(max_len, d_model, num_head, d_ffn, num_layer, dropout)
    print(model)
