from d2l_learn.utils import *
from d2l import torch as d2l
# Transformer encoder block: multi-head self-attention followed by a
# position-wise feed-forward network, each sublayer wrapped in a residual
# connection + layer normalization (AddNorm).
class EncoderBlock(nn.Module):
    """One encoder layer of the Transformer.

    NOTE(review): query_size/key_size/value_size are accepted but unused here;
    this d2l.MultiHeadAttention variant only takes num_hiddens — confirm the
    intended d2l version.
    """
    def __init__(self, query_size, key_size, value_size, num_hiddens, norm_shape, ffn_num_inputs, ffn_num_outputs, ffn_num_hiddens, num_heads, dropout, use_bias=False, **kwargs):
        super(EncoderBlock, self).__init__(**kwargs)
        # Self-attention sublayer and its residual/LayerNorm wrapper.
        self.multiAttention = d2l.MultiHeadAttention(num_hiddens, num_heads, dropout, bias=use_bias)
        self.addNorm1 = AddNorm(norm_shape, dropout)
        # Position-wise FFN sublayer and its residual/LayerNorm wrapper.
        self.ffn = PositionWiseFFN(ffn_num_inputs, ffn_num_hiddens, ffn_num_outputs)
        self.addNorm2 = AddNorm(norm_shape, dropout)

    def forward(self, X, valid_lens):
        # Self-attention: queries, keys and values are all X.
        attn_out = self.multiAttention(X, X, X, valid_lens)
        normed = self.addNorm1(X, attn_out)
        # FFN sublayer with its own residual connection.
        return self.addNorm2(normed, self.ffn(normed))

def run_transformer():
    """Entry point for the transformer training demo; not implemented yet."""

# Encoder: positional encoding, embedding layer, then num_layers blocks of
# (multi-head attention + AddNorm + position-wise FFN + AddNorm).
class TransformerEncoder(nn.Module):
    """Stack of Transformer encoder blocks with embedding + positional encoding."""

    def __init__(self, vocab_size, key_size, query_size, value_size, num_hiddens, norm_shape, ffn_inputs, ffn_hiddens, num_heads, num_layers, dropout, use_bias=False, **kwargs):
        # Forward **kwargs to nn.Module (was silently dropped before).
        super(TransformerEncoder, self).__init__(**kwargs)
        self.num_hiddens = num_hiddens
        self.positionEncoder = PositionalEncoding(num_hiddens, dropout)
        self.embedding = nn.Embedding(vocab_size, num_hiddens)
        self.blks = nn.Sequential()
        for i in range(num_layers):
            # BUG FIX: the FFN output size must be num_hiddens so the residual
            # add inside the block's second AddNorm is shape-compatible;
            # previously ffn_hiddens was passed as ffn_num_outputs, which only
            # worked when ffn_hiddens happened to equal num_hiddens.
            self.blks.add_module("block" + str(i),
                                 EncoderBlock(query_size, key_size, value_size, num_hiddens, norm_shape,
                                              ffn_inputs, num_hiddens, ffn_hiddens, num_heads, dropout, use_bias=use_bias))

    def forward(self, X, valid_lens):
        # Scale embeddings by sqrt(num_hiddens) so they are on a comparable
        # magnitude to the positional encodings before the two are summed.
        X = self.embedding(X) * math.sqrt(self.num_hiddens)
        X = self.positionEncoder(X)
        # Capture each layer's attention weights for later visualization.
        self.attention_weights = [None] * len(self.blks)
        for i, blk in enumerate(self.blks):
            X = blk(X, valid_lens)
            self.attention_weights[i] = blk.multiAttention.attention.attention_weights
        return X

class DecoderBlock(nn.Module):
    """Transformer decoder block — placeholder, not yet implemented.

    BUG FIX: the class previously had no body at all, which made the whole
    file fail to parse (IndentationError). A documented placeholder body is
    added so the module imports cleanly.
    """
    # TODO: implement masked self-attention, encoder-decoder attention,
    # and the position-wise FFN, each wrapped in AddNorm.
    pass

if __name__ == '__main__':
    # Smoke test: a single encoder block must preserve the input shape
    # (batch=2, steps=100, hidden=24).
    batch = torch.ones((2, 100, 24))
    lens = torch.tensor([3, 2])
    blk = EncoderBlock(24, 24, 24, 24, [100, 24], 24, 24, 48, 8, 0.5)
    blk.eval()  # disable dropout for a deterministic forward pass
    print(blk(batch, lens).shape)

