from . import part as pt

import copy

import torch
import torch.nn as nn


class EncoderLayer(nn.Module):
    """One Transformer encoder layer.

    Two residual sublayers: multi-head self-attention, then a position-wise
    feed-forward network. Each sublayer output is layer-normalized and
    dropout-regularized before being added back to its input.
    """

    def __init__(
            self,
            heads: int = 8,       # number of attention heads
            d_model: int = 512,   # dimensionality of each token embedding
            dff: int = 1024,      # hidden size of the feed-forward sublayer
            dropout: float = 0.1  # dropout probability
    ):
        super().__init__()

        # Sublayer 1: multi-head self-attention + norm + dropout.
        self.attention = pt.MultiHeadAttentionLayer(heads, d_model, dropout=dropout)
        self.norm1 = pt.LayerNormLayer(d_model)
        self.dropout1 = nn.Dropout(dropout)

        # Sublayer 2: position-wise feed-forward + norm + dropout.
        self.feed_forward = pt.FeedForwardLayer(d_model, d_ff=dff, dropout=dropout)
        self.norm2 = pt.LayerNormLayer(d_model)
        self.dropout2 = nn.Dropout(dropout)

    def forward(
            self,
            x: torch.Tensor,      # (batch_size, src_seq_len, d_model)
            mask: torch.Tensor,   # (batch_size, src_seq_len, src_seq_len)
    ) -> torch.Tensor:            # (batch_size, src_seq_len, d_model)
        # The original paper adds the residual first and normalizes after;
        # here each sublayer's output is normalized before the residual add,
        # which keeps the code compact and trains comparably well.
        attn_out = self.attention(x, x, x, mask)
        x = x + self.dropout1(self.norm1(attn_out))

        ff_out = self.feed_forward(x)
        return x + self.dropout2(self.norm2(ff_out))


class Encoder(nn.Module):
    """Transformer encoder: a stack of N EncoderLayer modules plus a final LayerNorm.

    The final norm is needed because each EncoderLayer normalizes its sublayer
    outputs *before* the residual add, so the stack's output itself is not yet
    normalized.
    """

    def __init__(
            self,
            N: int = 6,           # number of stacked encoder layers
            heads: int = 8,       # number of attention heads per layer
            d_model: int = 512,   # dimensionality of each token embedding
            dff: int = 1024,      # hidden size of each feed-forward sublayer
            dropout: float = 0.1  # dropout probability
    ):
        super().__init__()
        # Each iteration already constructs a brand-new EncoderLayer with its
        # own freshly initialized parameters, so the former
        # copy.deepcopy(...) around it was redundant and has been dropped.
        self.layers = nn.ModuleList(
            [EncoderLayer(heads, d_model, dff, dropout) for _ in range(N)]
        )
        self.norm = pt.LayerNormLayer(d_model)

    def forward(
            self,
            x: torch.Tensor,      # (batch_size, src_seq_len, d_model)
            mask: torch.Tensor,   # (batch_size, src_seq_len, src_seq_len)
    ) -> torch.Tensor:            # (batch_size, src_seq_len, d_model)
        # Pass the input through every layer in order, then normalize once.
        for layer in self.layers:
            x = layer(x, mask)
        # The original Transformer has no final norm here; it is required in
        # this variant because EncoderLayer norms before the residual add.
        return self.norm(x)


if __name__ == "__main__":

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("Device:", device)

    batch_size, seq_len, d_model = 2, 10, 512

    # Random input of shape (batch_size, seq_len, d_model).
    src = torch.randn(batch_size, seq_len, d_model).to(device)

    # Attention mask over the sequence positions.
    mask = pt.subsequentMask(seq_len, seq_len).to(device)

    # Build an Encoder with its default hyper-parameters.
    encoder = Encoder().to(device)

    # Xavier-uniform init for matrix-shaped parameters only
    # (dim > 1 skips biases and norm scale/shift vectors).
    for p in encoder.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)

    encoder.eval()

    out = encoder(src, mask)

    print("Input shape:", src.shape)
    print("Output shape:", out.shape)

    # Show the first 10 dims of the first token of the first batch element.
    print("Output sample:", out[0, 0, :10])