import torch.nn as nn
from .coder import Encoder, Decoder


class Transformer(nn.Module):
    """Encoder-decoder Transformer mapping a source token sequence to
    per-position vocabulary logits for the target sequence.

    :param d_model: embedding dimension of a single token
    :param vocab_size: number of distinct tokens (a single vocab is shared by
        encoder, decoder and the output projection)
    :param src_context_length: maximum source (input) sequence length
    :param tgt_context_length: maximum target (output) sequence length
    :param heads_number: number of attention heads
    :param n_layers: number of encoder / decoder layers
    :param d_k: per-head key/query dimension (passed through to Encoder/Decoder)
    :param d_v: per-head value dimension (passed through to Encoder/Decoder)
    :param d_ff: feed-forward hidden size (passed through to Encoder/Decoder)
    :param dropout: dropout probability (passed through to Encoder/Decoder)
    """

    def __init__(self, d_model, vocab_size, src_context_length, tgt_context_length, heads_number, n_layers, d_k, d_v,
                 d_ff, dropout):
        super().__init__()
        # Encoder stack (embedding + positional encoding + n_layers of attention blocks).
        self.encoder = Encoder(d_model, vocab_size, src_context_length, heads_number, n_layers, d_k, d_v, d_ff, dropout)
        # Decoder stack; gets its own maximum length since source/target lengths may differ.
        self.decoder = Decoder(d_model, vocab_size, tgt_context_length, heads_number, n_layers, d_k, d_v, d_ff, dropout)
        # Final projection from model space to vocabulary logits.
        # bias=False is the conventional choice for an output head that may be
        # weight-tied with the token embedding.
        self.projection = nn.Linear(d_model, vocab_size, bias=False)

    def forward(self, encoder_inputs, decoder_inputs):
        """Run a full encode-decode pass.

        :param encoder_inputs: source token ids, shape (batch_size, context_length)
        :param decoder_inputs: target token ids, shape (batch_size, context_length)
        :return: 4-tuple of
            - logits flattened to (batch_size * tgt_len, vocab_size), ready for
              nn.CrossEntropyLoss against a flattened target
            - encoder self-attention maps, one per layer,
              each (batch_size, n_heads, len_q, len_k)
            - decoder self-attention maps (same per-layer layout)
            - decoder-encoder cross-attention maps (same per-layer layout)
        """
        # encoder_outputs: (batch_size, context_length, d_model) — each token encoded
        # with positional information baked in.
        encoder_outputs, encoder_self_attentions = self.encoder(encoder_inputs)

        # encoder_inputs is passed again so the decoder can derive the
        # cross-attention padding mask from the raw source ids — TODO confirm
        # against Decoder's implementation in .coder.
        decoder_outputs, decoder_self_attentions, decoder_encoder_attentions = self.decoder(
            decoder_inputs, encoder_inputs, encoder_outputs)

        # (batch_size, tgt_len, vocab_size) -> flatten batch and time dims together.
        decoder_logits = self.projection(decoder_outputs)
        flat_logits = decoder_logits.view(-1, decoder_logits.size(-1))

        return flat_logits, encoder_self_attentions, decoder_self_attentions, decoder_encoder_attentions
