from . import part as pt

import copy

import torch
import torch.nn as nn


# ************************************************************************************
# Usage: a single Transformer decoder layer
# ====================================================================================
class DecoderLayer(nn.Module):
    """One Transformer decoder layer.

    Three sub-layers, each wrapped in a residual connection of the form
    ``x + dropout(norm(sublayer(...)))``:

    1. masked self-attention over the target sequence;
    2. cross-attention mixing target queries with the encoder output;
    3. a position-wise feed-forward network.
    """

    def __init__(
            self,
            heads: int = 8,       # number of attention heads
            d_model: int = 512,   # embedding dimension of each token vector
            dff: int = 1024,      # hidden size of the feed-forward sub-layer
            dropout: float = 0.1  # dropout probability
    ):
        super(DecoderLayer, self).__init__()
        # Sub-layer 1: masked self-attention over the target sequence.
        self.self_attention = pt.MultiHeadAttentionLayer(heads, d_model, dropout)
        self.norm1 = pt.LayerNormLayer(d_model)
        self.dropout1 = nn.Dropout(dropout)

        # Sub-layer 2: cross-attention — queries from the decoder,
        # keys/values from the encoder output.
        self.attention = pt.MultiHeadAttentionLayer(heads, d_model, dropout)
        self.norm2 = pt.LayerNormLayer(d_model)
        self.dropout2 = nn.Dropout(dropout)

        # Sub-layer 3: position-wise feed-forward network.
        self.feed_forward = pt.FeedForwardLayer(d_model, dff, dropout)
        self.norm3 = pt.LayerNormLayer(d_model)
        self.dropout3 = nn.Dropout(dropout)

    def forward(
        self,
        x: torch.Tensor,            # decoder input; assumed (batch, tgt_len, d_model) — TODO confirm
        m: torch.Tensor,            # encoder output; assumed (batch, src_len, d_model) — TODO confirm
        cross_mask: torch.Tensor,   # cross-attention mask hiding <pad> keys of the
                                    # encoder input from the decoder's queries;
                                    # assumed (batch, tgt_len, src_len) — TODO confirm
        tgt_mask: torch.Tensor,     # self-attention mask: padding mask combined with
                                    # the subsequent (causal) mask;
                                    # assumed (batch, tgt_len, tgt_len) — TODO confirm
    ) -> torch.Tensor:              # same shape as the decoder input x
        # Sub-layer 1: masked self-attention with a residual connection.
        self_attended = self.self_attention(x, x, x, tgt_mask)
        x = x + self.dropout1(self.norm1(self_attended))

        # Sub-layer 2: cross-attention — not self-attention: the query comes
        # from the decoder while key and value come from the encoder memory.
        cross_attended = self.attention(x, m, m, cross_mask)
        x = x + self.dropout2(self.norm2(cross_attended))

        # Sub-layer 3: position-wise feed-forward, again with a residual.
        transformed = self.feed_forward(x)
        return x + self.dropout3(self.norm3(transformed))


class Decoder(nn.Module):
    """A stack of N DecoderLayer modules followed by a final layer norm."""

    def __init__(
            self,
            N: int = 6,           # number of decoder layers in the stack
            heads: int = 8,       # number of attention heads per layer
            d_model: int = 512,   # embedding dimension of each token vector
            dff: int = 1024,      # hidden size of each feed-forward sub-layer
            dropout: float = 0.1  # dropout probability
    ):
        super(Decoder, self).__init__()

        # Build N independent layers. Note: every DecoderLayer(...) call in the
        # comprehension already constructs a fresh instance with its own
        # parameters, so the previous copy.deepcopy() of each freshly built
        # layer was redundant and has been removed.
        self.layers = nn.ModuleList(
            [DecoderLayer(heads, d_model, dff, dropout) for _ in range(N)]
        )

        # Final normalization applied after the last layer.
        self.norm = pt.LayerNormLayer(d_model)

    def forward(
        self,
        x: torch.Tensor,            # decoder input; assumed (batch, tgt_len, d_model) — TODO confirm
        m: torch.Tensor,            # encoder output; assumed (batch, src_len, d_model) — TODO confirm
        cross_mask: torch.Tensor,   # cross-attention mask hiding <pad> keys of the
                                    # encoder input from the decoder's queries;
                                    # assumed (batch, tgt_len, src_len) — TODO confirm
        tgt_mask: torch.Tensor,     # self-attention mask: padding mask combined with
                                    # the subsequent (causal) mask;
                                    # assumed (batch, tgt_len, tgt_len) — TODO confirm
    ) -> torch.Tensor:              # same shape as the decoder input x
        # Feed the output of each layer into the next, then normalize once.
        for layer in self.layers:
            x = layer(x=x, m=m, cross_mask=cross_mask, tgt_mask=tgt_mask)
        return self.norm(x)


if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("Device:", device)

    batch_size = 2
    seq_len_src = 10
    seq_len_tgt = 8
    d_model = 512

    # Random input simulating the decoder input (target sequence).
    decoder_input = torch.randn(batch_size, seq_len_tgt, d_model).to(device)
    # Random input simulating the encoder output (memory).
    memory = torch.randn(batch_size, seq_len_src, d_model).to(device)

    # Masks.
    # BUG FIX: the cross-attention mask must cover (tgt_seq_len, src_seq_len).
    # The original code passed subsequentMask(seq_len_src, seq_len_src), whose
    # (src, src) = (10, 10) shape cannot broadcast against the (tgt, src) =
    # (8, 10) cross-attention score matrix — and a causal cross mask is
    # conceptually wrong anyway. With random (unpadded) inputs nothing needs
    # to be hidden, so an all-visible mask of ones is used here.
    # NOTE(review): assumes the mask convention is "nonzero = attend" —
    # confirm against pt.MultiHeadAttentionLayer.
    cross_mask = torch.ones(1, seq_len_tgt, seq_len_src).to(device)
    # Causal mask for the decoder's self-attention.
    tgt_mask = pt.subsequentMask(seq_len_tgt, seq_len_tgt).to(device)

    decoder = Decoder().to(device)

    # Re-initialize weights so the random demo does not start degenerate.
    def initialize_weights(model):
        for p in model.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
    initialize_weights(decoder)

    decoder.eval()

    output = decoder(decoder_input, memory, cross_mask, tgt_mask)

    print("Decoder input shape:", decoder_input.shape)
    print("Memory (encoder output) shape:", memory.shape)
    print("Output shape:", output.shape)
    print("Output sample:", output[0, 0, :10])  # first 10 dims of batch 0, token 0