import copy
import math
import torch.nn.functional as F
import numpy as np
import torch
from matplotlib import pyplot as plt
import torch.nn as nn


# 定义词嵌入层
class Embeddings(nn.Module):
    """Token embedding layer that scales its output by sqrt(d_model)."""

    def __init__(self, vocab_size, d_model):
        super(Embeddings, self).__init__()
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=d_model)

    def forward(self, x):
        # Scale by sqrt(d_model) so embeddings and position encodings
        # are on comparable magnitudes (as in "Attention Is All You Need").
        return self.embedding(x) * math.sqrt(self.d_model)


# 定义位置编码层
class PositionalEncoding(nn.Module):
    """Adds fixed sinusoidal position encodings to the input, then dropout."""

    def __init__(self, d_model, dropout=0.1, max_len=60):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.max_len = max_len
        self.d_model = d_model

        # Column of positions [max_len, 1] and per-pair frequencies [d_model/2].
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(torch.arange(0, self.d_model, 2) * -(math.log(10000.0) / self.d_model))

        # Broadcast: [max_len, 1] * [d_model/2] -> [max_len, d_model/2] angles.
        angles = positions * freqs

        # Even feature indices get sin, odd indices get cos.
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(angles)
        pe[:, 1::2] = torch.cos(angles)

        # [max_len, d_model] -> [1, max_len, d_model]; registered as a buffer
        # so it moves with the module but is not a trainable parameter.
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        # Add the encodings for the first x.size(1) positions, then dropout.
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)


def create_position_x():
    """
    Build the encoder input: token ids -> word embeddings -> position encodings.
    :return: the position-encoded tensor, shape [2, 4, 512]
    """
    token_ids = torch.tensor([[100, 2, 421, 508], [491, 998, 1, 221]])  # [2,4]

    embed_layer = Embeddings(vocab_size=1000, d_model=512)
    pos_layer = PositionalEncoding(d_model=512, max_len=60, dropout=0.1)

    # [2,4] -> [2,4,512] -> [2,4,512] with position information added
    return pos_layer(embed_layer(token_ids))


def dm01_nptriu():
    """
    Demo of np.triu: how the diagonal offset k shapes a triangular mask.
    """
    matrix = np.array([[i] * 5 for i in range(1, 6)])  # rows of 1s..5s

    # k=1 drops the diagonal, k=0 keeps it, k=-1 keeps one subdiagonal too.
    for k in (1, 0, -1):
        print(f'k={k}\n', np.triu(matrix, k=k))


def subsequent_mask(size):
    """
    Build the decoder "subsequent positions" mask.

    Position i may attend to positions <= i (1 = visible, 0 = blocked).

    :param size: side length of the square mask
    :return: lower-triangular uint8 tensor of shape [1, size, size]
    """
    # torch.tril keeps the main diagonal and everything below it, which is
    # exactly 1 - triu(ones, k=1); doing it in torch avoids the numpy
    # round-trip and the local variable that shadowed this function's name.
    return torch.tril(torch.ones(1, size, size, dtype=torch.uint8))


def dm02_subsequent_mask():
    """
    Demo of the mask generator plus a visualization of the mask.
    """
    print(subsequent_mask(size=5))

    # Render a 20x20 mask as an image (lower triangle visible).
    plt.figure(figsize=(5, 5))
    plt.imshow(subsequent_mask(20)[0])
    plt.show()


def dm03_masked_fill():
    """
    Demo of masked_fill: replacing masked positions in a score tensor.
    """
    mask = torch.zeros(2, 4, 4)
    print(mask)
    print(mask == 1)
    print(mask == 0)

    # Mark one full row of the first matrix as "masked".
    mask[0, 1, :] = 1
    print(mask)
    print(mask.bool())

    scores = torch.rand(2, 4, 4)
    print(scores)

    # masked_fill requires a boolean mask, hence the mask.bool() conversion.
    scores = scores.masked_fill(mask.bool(), -1e9)
    print(scores)


def attention(query, key, value, mask=None, dropout=None):
    """
    Scaled dot-product attention.
    :param query: Q tensor
    :param key: K tensor
    :param value: V tensor
    :param mask: optional mask; positions where mask == 0 are blocked
    :param dropout: optional dropout module applied to the weights
    :return: (attention output for Q, attention weight distribution)
    """
    d_k = query.size(-1)

    # scores = Q @ K^T / sqrt(d_k)
    scores = torch.matmul(query, key.transpose(-1, -2)) / math.sqrt(d_k)

    # Push blocked positions to a large negative value so softmax ~ 0 there.
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)

    # Normalize scores into a probability distribution over keys.
    weights = F.softmax(scores, dim=-1)

    # Optional regularization of the attention weights.
    if dropout is not None:
        weights = dropout(weights)

    return torch.matmul(weights, value), weights


def dm04_attention():
    """
    Demo of the attention function, with and without a mask.
    """
    # position_x = word embedding + position encoding, shape [2,4,512]
    position_x = create_position_x()

    # Self-attention: Q, K and V are all the same tensor.
    q = k = v = position_x

    # Without a mask: results1 [2,4,512], p_attention1 [2,4,4].
    results1, p_attention1 = attention(q, k, v)
    print(f'results1:\n{results1}')
    print(f'results1.shape:{results1.shape}')
    print(f'p_attention1\n{p_attention1}')
    print(f'p_attention1.shape:{p_attention1.shape}')

    # With an all-zero mask (every position blocked -> uniform weights).
    results2, p_attention2 = attention(q, k, v, torch.zeros(2, 4, 4))
    print(f'results2:\n{results2}')
    print(f'results2.shape:{results2.shape}')
    print(f'p_attention2\n{p_attention2}')
    print(f'p_attention2.shape:{p_attention2.shape}')


def clones(module, N):
    """
    Produce N independent deep copies of a module.
    :param module: the module to copy
    :param N: number of copies
    :return: an nn.ModuleList holding the copies
    """
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))


# 多头注意力机制层
class MultiHeadAttention(nn.Module):
    """Multi-head attention: project Q/K/V, attend per head, concat, project."""

    def __init__(self, embedding_dim, head, dropout=0.1):
        super(MultiHeadAttention, self).__init__()
        self.embedding_dim = embedding_dim
        self.head = head

        # embedding_dim must split evenly across the heads.
        assert embedding_dim % head == 0
        # Per-head feature size, e.g. 512 / 8 = 64.
        self.d_k = self.embedding_dim // self.head

        # Four [512,512] linear layers: three input projections + one output.
        self.linears = clones(nn.Linear(self.embedding_dim, self.embedding_dim), 4)

        # Holds the most recent attention weight distribution.
        self.attention = None

        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        batch_size = query.size(0)

        if mask is not None:
            # [head, seq, seq] -> [1, head, seq, seq] to broadcast over batch.
            mask = mask.unsqueeze(0)

        # Project each of Q, K, V and split into heads:
        # [2,4,512] -> [2,4,512] -> [2,4,8,64] -> [2,8,4,64]
        # transpose(1,2) puts seq length next to the per-head features so
        # attention operates within each head.
        projected = []
        for linear, tensor in zip(self.linears, (query, key, value)):
            heads = linear(tensor).view(batch_size, -1, self.head, self.d_k)
            projected.append(heads.transpose(1, 2))
        query, key, value = projected

        # Attend per head; cache the weight distribution for inspection.
        results, self.attention = attention(query, key, value, mask, self.dropout)

        # Merge the heads back: [2,8,4,64] -> [2,4,8,64] -> [2,4,512].
        results = results.transpose(1, 2).contiguous().view(batch_size, -1, self.embedding_dim)

        # Final output projection through the fourth linear layer.
        return self.linears[-1](results)


def dm05_multi_head_attention():
    """
    Demo of the multi-head attention layer.
    """
    # 1. Word embedding + position encoding -> Q = K = V, shape [2,4,512].
    position_x = create_position_x()
    query = key = value = position_x

    # 2. Build the multi-head attention layer (8 heads over 512 dims).
    my_attention = MultiHeadAttention(embedding_dim=512, head=8, dropout=0.1)

    mask = torch.zeros(8, 4, 4)

    # 3. Forward pass returns the multi-head attention result.
    results = my_attention(query, key, value, mask)

    print(f'多头注意机制后的results.shape:{results.shape}')
    print(f'多头注意机制后的results:\n{results}')
    print(f'多头注意机制后的注意力权重分布attention.shape:{my_attention.attention.shape}')


# 前馈神经网络层
class FeedForward(nn.Module):
    """Position-wise feed-forward block: linear -> ReLU -> dropout -> linear."""

    def __init__(self, d_model, d_ff, dropout_p=0.1):
        super(FeedForward, self).__init__()
        self.linear1 = nn.Linear(d_model, d_ff)
        self.linear2 = nn.Linear(d_ff, d_model)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=dropout_p)

    def forward(self, x):
        # Expand to d_ff, activate and regularize, then project back to d_model.
        hidden = self.dropout(self.relu(self.linear1(x)))
        return self.linear2(hidden)


def dm06_feedforward():
    """
    Demo of the feed-forward layer applied after multi-head attention.
    """
    # 1. Word embedding + position encoding -> Q = K = V, shape [2,4,512].
    position_x = create_position_x()
    query = key = value = position_x

    # 2-3. Multi-head attention first.
    my_attention = MultiHeadAttention(embedding_dim=512, head=8, dropout=0.1)
    mask = torch.zeros(8, 4, 4)
    results = my_attention(query, key, value, mask)

    # 4. Then the feed-forward network.
    my_feedforward = FeedForward(d_model=512, d_ff=1024, dropout_p=0.1)
    results = my_feedforward(results)

    print(f'前馈神经网络层的结果results.shape:{results.shape}')
    print(f'前馈神经网络层的结果results:\n{results}')


# 规范化（归一化）层
class LayerNorm(nn.Module):
    """Layer normalization over the last dim, with learned scale and shift."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.features = features
        self.eps = eps

        # Learnable scale factor (gamma), initialized to ones.
        self.k = nn.Parameter(torch.ones(features))

        # Learnable shift factor (beta), initialized to zeros.
        self.b = nn.Parameter(torch.zeros(features))

    def forward(self, x):
        # Normalize over the feature dimension, then scale and shift.
        # NOTE(review): eps is added to the std (not the variance) and the
        # std is the unbiased estimate -- kept as-is to preserve behavior.
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.k * ((x - mean) / (std + self.eps)) + self.b


def dm07_layer_norm():
    """
    Demo of the layer-normalization layer.
    """
    # 1. Word embedding + position encoding -> Q = K = V, shape [2,4,512].
    position_x = create_position_x()
    query = key = value = position_x

    # 2-3. Multi-head attention.
    my_attention = MultiHeadAttention(embedding_dim=512, head=8, dropout=0.1)
    mask = torch.zeros(8, 4, 4)
    results = my_attention(query, key, value, mask)

    # 4. Feed-forward network.
    my_feedforward = FeedForward(d_model=512, d_ff=1024, dropout_p=0.1)
    results = my_feedforward(results)

    # 5. Layer normalization on top.
    my_layer_norm = LayerNorm(features=512, eps=1e-6)
    layer_norm_x = my_layer_norm(results)

    print(f'layer_norm_x.shape:{layer_norm_x.shape}')
    print(f'layer_norm_x:\n{layer_norm_x}')


# 子层连接层
class SublayerConnection(nn.Module):
    """Residual connection around a sublayer: x + dropout(norm(sublayer(x)))."""

    def __init__(self, size, dropout_p=0.1):
        super(SublayerConnection, self).__init__()
        self.size = size
        self.dropout = nn.Dropout(p=dropout_p)
        self.norm = LayerNorm(features=size, eps=1e-6)

    def forward(self, x, sublayer):
        # NOTE(review): the norm is applied to the sublayer OUTPUT here; the
        # canonical pre-norm variant applies it to the input -- confirm intent.
        sub_out = sublayer(x)
        return x + self.dropout(self.norm(sub_out))


def dm08_sublayer():
    """
    Demo of the sublayer connection wrapping multi-head attention.
    """
    # 1. Word embedding + position encoding, shape [2,4,512].
    position_x = create_position_x()

    # 2. Multi-head attention layer and its mask.
    my_attention = MultiHeadAttention(embedding_dim=512, head=8, dropout=0.1)
    mask = torch.zeros(8, 4, 4)

    # The sublayer is self-attention applied to the wrapper's input.
    def sublayer(x):
        return my_attention(x, x, x, mask)

    # 3. Wrap the attention sublayer in a residual connection.
    my_sublayer_connection = SublayerConnection(size=512, dropout_p=0.1)
    result = my_sublayer_connection(position_x, sublayer)

    print(f'第一个子层连接结构的输出结果result.shape:\n{result.shape}')
    print(f'第一个子层连接结构的输出结果result:\n{result}')


# 编码器层
class EncoderLayer(nn.Module):
    """One encoder layer: self-attention sublayer then feed-forward sublayer."""

    def __init__(self, size, self_attention, feed_forward, dropout_p=0.1):
        super(EncoderLayer, self).__init__()
        self.size = size
        self.self_attention = self_attention
        self.feed_forward = feed_forward
        # Two residual sublayer wrappers, one per sub-block.
        self.sublayers = clones(SublayerConnection(self.size, dropout_p), 2)

    def forward(self, x, mask):
        # Sublayer 1: multi-head self-attention (t avoids shadowing x).
        attended = self.sublayers[0](x, lambda t: self.self_attention(t, t, t, mask))
        # Sublayer 2: position-wise feed-forward.
        return self.sublayers[1](attended, self.feed_forward)


def dm09_encoder_layer():
    """
    Demo of a single encoder layer.
    """
    # 1. Word embedding + position encoding, shape [2,4,512].
    position_x = create_position_x()

    # 2. Multi-head attention layer and its mask.
    my_attention = MultiHeadAttention(embedding_dim=512, head=8, dropout=0.1)
    mask = torch.zeros(8, 4, 4)

    # 3. Feed-forward layer.
    my_feedforward = FeedForward(d_model=512, d_ff=1024, dropout_p=0.1)

    # 4. Assemble and run the encoder layer.
    my_encoder_layer = EncoderLayer(size=512, self_attention=my_attention, feed_forward=my_feedforward, dropout_p=0.1)
    result = my_encoder_layer(position_x, mask)

    print(f'第一个编码器层的结果result.shape:{result.shape}')
    print(f'第一个编码器层的结果result:{result}')


# 编码器
class Encoder(nn.Module):
    """A stack of N identical encoder layers followed by a final layer norm."""

    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(features=layer.size)

    def forward(self, x, mask):
        # Pass the input through each layer in sequence, then normalize.
        for encoder_layer in self.layers:
            x = encoder_layer(x, mask)
        return self.norm(x)


def dm10_encoder():
    """
    Demo of the full encoder (stack of encoder layers + final norm).
    """
    # 1. Word embedding + position encoding, shape [2,4,512].
    position_x = create_position_x()

    # 2-4. Build the pieces of one encoder layer.
    my_attention = MultiHeadAttention(embedding_dim=512, head=8, dropout=0.1)
    mask = torch.zeros(8, 4, 4)
    my_feedforward = FeedForward(d_model=512, d_ff=1024, dropout_p=0.1)
    my_encoder_layer = EncoderLayer(size=512, self_attention=my_attention, feed_forward=my_feedforward, dropout_p=0.1)

    # 5. Stack N = 1 encoder layers and run the forward pass.
    my_encoder = Encoder(layer=my_encoder_layer, N=1)
    result = my_encoder(position_x, mask)

    print(f'编码器的结果result.shape:{result.shape}')
    print(f'编码器的结果result:{result}')

    return result


if __name__ == '__main__':
    # Run each demo in teaching order. NOTE(review): dm02_subsequent_mask
    # calls plt.show(), which typically blocks until the window is closed.
    dm01_nptriu()
    dm02_subsequent_mask()
    dm03_masked_fill()
    dm04_attention()
    dm05_multi_head_attention()
    dm06_feedforward()
    dm07_layer_norm()
    dm08_sublayer()
    dm09_encoder_layer()
    dm10_encoder()
