import torch
import torch.nn as nn

from PE import Positional
from config import *

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# Scaled dot-product attention
def attention(q, k, v, mask=False, dk=None):
    """Scaled dot-product attention: softmax(q @ k^T / sqrt(dk)) @ v.

    :param q: query tensor, shape (..., len_q, dk)
    :param k: key tensor, shape (..., len_k, dk)
    :param v: value tensor, shape (..., len_k, dv)
    :param mask: if True, apply a causal (lower-triangular) mask so each
        position can only attend to itself and earlier positions
    :param dk: scaling dimension; defaults to q's last dimension, which is
        backward-compatible with the old config-global d_k because the
        projections in this file map queries/keys to d_k features
    :return: attention output, shape (..., len_q, dv)
    """
    """
    attention example:
    Q = torch.rand([1, 3, 3])
    K = torch.rand([1, 3, 3])
    V = torch.rand([1, 3, 3])
    o = attention(Q, K, V, mask=True)
    print(o)
    print(o.shape)
    """
    if dk is None:
        dk = q.size(-1)
    # (..., Lq, dk) @ (..., dk, Lk) -> (..., Lq, Lk), scaled by sqrt(dk)
    scores = torch.matmul(q, k.transpose(-1, -2)) / (float(dk) ** 0.5)
    if mask:
        # BUG FIX: the old code multiplied the scores by a 0/1 lower-
        # triangular matrix BEFORE softmax. A zeroed score still receives
        # exp(0) = 1 weight, so future positions leaked probability mass.
        # Masked positions must be -inf so softmax assigns them exactly 0.
        # Build the mask on the scores' own device (no global `device`).
        causal = torch.tril(torch.ones_like(scores, dtype=torch.bool))
        scores = scores.masked_fill(~causal, float("-inf"))
    weights = torch.softmax(scores, dim=-1)  # normalize over the key axis

    return torch.matmul(weights, v)


class MultiHeadAttention(nn.Module):
    """Multi-head attention built from head_en independent single heads.

    Each head owns its own q/k/v projection (d_model -> d_k, no bias);
    the per-head attention outputs are concatenated along the feature
    axis and mixed back to d_model by the output projection WO.

    example:
        mul_head = MultiHeadAttention()
        x = torch.ones([1, 2, d_model])
        z = mul_head(x, x, x)
        # z has the same shape as x if everything is wired correctly

    :param masked: if True, every head applies a causal mask.
    """

    def __init__(self, masked=False):
        super().__init__()
        self.masked = masked

        # One separate W_q / W_k / W_v per head. Do NOT collapse this into
        # a single Linear(d_model, d_k * head_en): keeping the heads as
        # distinct modules makes the multi-head structure explicit.
        def _heads():
            return nn.ModuleList(
                nn.Linear(d_model, d_k, bias=False) for _ in range(head_en)
            )

        self.WQ = _heads()
        self.WK = _heads()
        self.WV = _heads()

        # Output projection: concatenation of all heads -> d_model.
        self.WO = nn.Linear(d_k * head_en, d_model)

    def forward(self, q, k, v):
        # Run every head, then concatenate along the last (feature) axis.
        heads = [
            attention(q=wq(q), k=wk(k), v=wv(v), mask=self.masked)
            for wq, wk, wv in zip(self.WQ, self.WK, self.WV)
        ]
        return self.WO(torch.cat(heads, dim=-1))


class FeedForward(nn.Module):
    """Position-wise feed-forward network: Linear -> ReLU -> Linear.

    Expands each token's features to hidden_size and projects back, so
    the output shape always matches the input shape.

    :param input_output_size: feature size of input and output (d_model)
    :param hidden_size: width of the intermediate layer
    """

    def __init__(self, input_output_size=d_model, hidden_size=Hidden_size):
        super().__init__()
        layers = [
            nn.Linear(input_output_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, input_output_size),
        ]
        self.fc = nn.Sequential(*layers)

    def forward(self, x):
        out = self.fc(x)
        return out


class EncoderLayer(nn.Module):
    """One Transformer encoder layer (post-norm): self-attention and a
    feed-forward network, each wrapped in a residual connection followed
    by LayerNorm.

    example:
        encoder = EncoderLayer()
        x = torch.ones([1, Max_len, d_model])
        print(encoder(x))
        print(encoder(x).shape)   # same shape as x
    """

    def __init__(self):
        super().__init__()
        self.multiheadattention = MultiHeadAttention()
        # LayerNorm normalizes over the d_model features of each token.
        self.norm1 = nn.LayerNorm(normalized_shape=[d_model])
        self.feedforward = FeedForward()
        self.norm2 = nn.LayerNorm(normalized_shape=[d_model])

    def forward(self, x):
        # Residual + norm around self-attention (q = k = v = x).
        attn_out = self.multiheadattention(x, x, x)
        x = self.norm1(x + attn_out)
        # Residual + norm around the feed-forward sublayer.
        ffn_out = self.feedforward(x)
        x = self.norm2(x + ffn_out)
        return x


class Encoder(nn.Module):
    """Transformer encoder: token embedding + positional encoding,
    followed by a stack of en_N EncoderLayer blocks.

    example:
        encoder = Encoder()
        x = torch.ones([1, Max_len], dtype=torch.long)  # integer token ids
        print(encoder(x))
        print(encoder(x).shape)   # (1, Max_len, d_model)
    """

    def __init__(self):
        super().__init__()
        # An nn.ModuleList (not a plain Python list) is required so each
        # layer's parameters are registered with this module and appear in
        # the computation graph; every layer keeps independent weights.
        self.embedding = nn.Embedding(vocab_input_len, d_model)
        self.encoder_list = nn.ModuleList([EncoderLayer() for _ in range(en_N)])

    def forward(self, x):
        """
        :param x: integer token ids, shape (batch, seq_len)
        :return: encoded sequence, shape (batch, seq_len, d_model)
        """
        x = self.embedding(x)
        # Positional comes from the PE module; presumably it returns a
        # positional-encoding tensor matching x's shape — TODO confirm.
        x = x + Positional(x)
        # FIX (idiom): iterate the ModuleList itself instead of indexing by
        # the global en_N — stays correct even if the list length changes.
        for layer in self.encoder_list:
            x = layer(x)
        return x
