import torch
from torch import nn
from transformer.MultiHeadAttention import MultiHeadAttention
from transformer.AddNorm import AddNorm
from transformer.PointWiseFFN import PointWiseFFN


class DecoderBlock(nn.Module):
    """One Transformer decoder layer: masked self-attention -> encoder-decoder
    attention -> position-wise FFN, each followed by an AddNorm (residual +
    LayerNorm) sublayer.

    Args:
        query_size:  embedding dimension of the decoder inputs.
        key_size:    key dimension passed to MultiHeadAttention.
        value_size:  value dimension passed to MultiHeadAttention.
        num_head:    number of attention heads.
        hidden_dims: hidden width of the attention projections and the FFN.
        dropout:     dropout rate used in attention and AddNorm.
    """

    def __init__(self, query_size, key_size, value_size, num_head, hidden_dims, dropout):
        super().__init__()

        # Masked self-attention sublayer.
        self.attention1 = MultiHeadAttention(query_size, key_size, value_size, num_head, hidden_dims, dropout)
        self.add_norm1 = AddNorm(query_size, dropout)
        # Encoder-decoder attention sublayer.
        self.attention2 = MultiHeadAttention(query_size, key_size, value_size, num_head, hidden_dims, dropout)
        self.add_norm2 = AddNorm(query_size, dropout)
        # Position-wise feed-forward sublayer.
        self.ffn = PointWiseFFN(query_size, hidden_dims, query_size)
        self.add_norm3 = AddNorm(query_size, dropout)
        # Inference-time cache of previously decoded steps along dim=1.
        # NOTE(review): the cache is never reset here — the caller must create a
        # fresh block (or clear this attribute) before decoding a new sequence.
        self.key_values = None

    def forward(self, x, state):
        """Run one decoder layer.

        Args:
            x:     decoder input, (batch_size, seq_len, query_size).
            state: tuple of (enc_outputs, enc_valid_lens) from the encoder.

        Returns:
            (output, state) with output shaped like ``x``.
        """
        enc_outputs, enc_valid_lens = state[0], state[1]
        dec_valid_lens = None  # causal mask is only needed during training
        if self.training:
            # Training: the whole target sequence is given at once, so the
            # self-attention keys/values are just `x`, masked causally:
            # position t may attend to positions 1..t.
            batch_size, seq_len = x.shape[0], x.shape[1]
            dec_valid_lens = (
                torch.arange(1, seq_len + 1, device=x.device)  # keep mask on x's device
                .unsqueeze(0)
                .repeat(batch_size, 1)
            )  # (batch_size, seq_len)
            key_values = x
        else:
            # Inference: tokens arrive one step at a time, so accumulate them
            # along the sequence dimension; no causal mask is needed because
            # only past steps are ever in the cache.
            if self.key_values is None:
                self.key_values = x
            else:
                self.key_values = torch.cat([self.key_values, x], dim=1)
            key_values = self.key_values
        # Masked self-attention (decoder attends to its own prefix).
        y1 = self.attention1(x, key_values, key_values, dec_valid_lens)
        z1 = self.add_norm1(x, y1)
        # Encoder-decoder attention (queries from decoder, keys/values from encoder).
        y2 = self.attention2(z1, enc_outputs, enc_outputs, enc_valid_lens)
        z2 = self.add_norm2(z1, y2)
        # Position-wise FFN with its own residual + norm.
        out = self.add_norm3(z2, self.ffn(z2))
        return out, state


# if __name__ == '__main__':
#     # Encoder结果(batch_size,seq_len,embedding_dims)
#     enc_inputs = torch.randn(10, 9, 20)
#     enc_valid_lens = torch.randint(1, 6, (10,))
#     state = (enc_inputs, enc_valid_lens)
#
#     # Decoder输入(batch_size,seq_len,embedding_dims)
#     dec_inputs = torch.randn(10, 12, 20)
#
#     decoder_block = DecoderBlock(20, 20, 20, 2, 30, 0.5)
#     dec_outputs, state = decoder_block(dec_inputs, state)
#     print(dec_outputs.shape)  # (10, 12, 20)
