import copy
import math

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from dm01_input import *

def subsequent_mask(size):
    """Build a (1, size, size) lower-triangular mask of dtype uint8.

    Entry (i, j) is 1 when j <= i, so each target position may attend
    only to itself and earlier positions.
    """
    full = torch.ones(1, size, size, dtype=torch.uint8)
    return torch.tril(full)

def attention(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention.

    Returns (output, weights): the attended values and the
    softmax-normalised attention weights. Positions where mask == 0
    are suppressed; dropout (a module), if given, is applied to the
    weights before they are used.
    """
    dim = query.size(-1)
    # similarity of every query with every key, scaled by sqrt(d_k)
    scores = torch.matmul(query, key.transpose(-1, -2)) / math.sqrt(dim)

    if mask is not None:
        # push masked positions towards -inf so softmax zeroes them out
        scores = scores.masked_fill(mask == 0, -1e9)

    weights = F.softmax(scores, dim=-1)

    if dropout is not None:
        weights = dropout(weights)

    return torch.matmul(weights, value), weights

def clones(module, N):
    """Return a ModuleList holding N independent deep copies of `module`."""
    copies = [copy.deepcopy(module) for _ in range(N)]
    return nn.ModuleList(copies)

class MultiHeadedAttention(nn.Module):
    """Multi-head attention: project q/k/v, attend per head, recombine."""

    def __init__(self, head, embedding_dim, dropout = 0.1):
        super(MultiHeadedAttention, self).__init__()
        # the model dimension must split evenly across the heads
        assert embedding_dim % head == 0
        self.d_k = embedding_dim // head
        self.embedding_dim = embedding_dim
        self.head = head
        # three input projections (q, k, v) plus one output projection
        self.linears = clones(nn.Linear(embedding_dim, embedding_dim), 4)
        self.dropout = nn.Dropout(p=dropout)
        self.atte = None  # last attention weights, cached for inspection

    def forward(self, query, key, value, mask=None):
        if mask is not None:
            # NOTE(review): unsqueeze(0) broadcasts one mask over the whole
            # batch; the classic formulation uses unsqueeze(1) (one mask per
            # batch item, broadcast over heads) — confirm the intended mask
            # shape with callers before changing.
            mask = mask.unsqueeze(0)

        self.batch_size = query.size(0)

        # project each input, split the last dim into (head, d_k), and move
        # the head axis next to batch: (batch, head, seq, d_k)
        projected = []
        for linear, tensor in zip(self.linears, (query, key, value)):
            shaped = linear(tensor).view(self.batch_size, -1, self.head, self.d_k)
            projected.append(shaped.transpose(1, 2))
        query, key, value = projected

        x, self.atte = attention(query, key, value, mask, dropout=self.dropout)

        # merge the heads back together: (batch, seq, head * d_k)
        merged = x.transpose(1, 2).contiguous().view(
            (self.batch_size, -1, self.embedding_dim))
        return self.linears[-1](merged)

class PositionwiseFeedForward(nn.Module):
    """Position-wise feed-forward net: Linear -> ReLU -> dropout -> Linear."""

    def __init__(self, d_model, d_ff, dropout = 0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)   # expand to the inner dimension
        self.w_2 = nn.Linear(d_ff, d_model)   # project back to the model dim
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = F.relu(self.w_1(x))
        return self.w_2(self.dropout(hidden))

class LayerNorm(nn.Module):
    """Layer normalisation over the last dim with learnable scale and shift.

    NOTE(review): eps is added to the std itself (not to the variance under
    a sqrt), and torch's default unbiased std is used — this reproduces the
    original block but differs slightly from nn.LayerNorm.
    """

    def __init__(self, feature, eps=1e-6):
        super(LayerNorm, self).__init__()
        # learnable gain and bias, one value per feature
        self.a = nn.Parameter(torch.ones(feature))
        self.b = nn.Parameter(torch.zeros(feature))
        self.eps = eps

    def forward(self, x):
        centered = x - x.mean(dim=-1, keepdim=True)
        scale = x.std(dim=-1, keepdim=True) + self.eps
        return self.a * centered / scale + self.b

class SublayerConnection(nn.Module):
    """Pre-norm residual wrapper: x + dropout(sublayer(norm(x)))."""

    def __init__(self, size, dropout=0.1):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        # normalise first, run the sub-layer, then add the residual input
        return x + self.dropout(sublayer(self.norm(x)))

def test():
    """Smoke-test: embedding -> positional encoding -> attention sublayer."""
    x = torch.tensor([[100, 2, 421, 508],
                     [491, 998, 1, 221]])

    # run the token ids through the embedding layer
    vocab_size, d_model = 1000, 512
    my_embedd = Embedding(vocab_size=vocab_size, d_model=d_model)
    embed_x = my_embedd(x)
    print(f'embedding之后的结果-->{embed_x.shape}')

    # add positional-encoding information to the embedded tokens
    my_position = PositionalEncoding(d_model=d_model, dropout=0.1, max_len=1000)
    position_x = my_position(embed_x)
    print(f'embedding+位置编码信息之后的结果--》{position_x.shape}')

    # wrap multi-head self-attention as the sub-layer callable
    my_attention = MultiHeadedAttention(embedding_dim=512, head=8, dropout=0.1)
    mask = torch.zeros(8, 4, 4)
    sublayer = lambda t: my_attention(t, t, t, mask)

    # run the residual sub-layer connection around the attention
    my_sublayer = SublayerConnection(size=512)
    result = my_sublayer(position_x, sublayer)
    print(result.shape)

class EncoderLayer(nn.Module):
    """One encoder layer: self-attention sub-layer, then feed-forward sub-layer."""

    def __init__(self, size, self_attn, feed_forward, dropout=0.1):
        super(EncoderLayer, self).__init__()
        self.size = size                  # embedding dimension
        self.self_attn = self_attn        # multi-head self-attention module
        self.feed_forward = feed_forward  # position-wise feed-forward module
        # one residual sub-layer connection per sub-layer
        self.sublaters = clones(SublayerConnection(size, dropout), 2)

    def forward(self, x, mask):
        # self-attention first, then the feed-forward network
        attended = self.sublaters[0](x, lambda t: self.self_attn(t, t, t, mask))
        return self.sublaters[1](attended, self.feed_forward)

class Encoder(nn.Module):
    """Stack of N identical encoder layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.laters = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, mask):
        out = x
        for enc_layer in self.laters:
            out = enc_layer(out, mask)
        return self.norm(out)


if __name__ == '__main__':
    # run the demo pipeline when this file is executed as a script
    test()




