import copy

import torch

from dm02_encoder import *
from dm03_decoder import *
from dm04_generator import *


class EncoderDecoder(nn.Module):
    """Generic encoder-decoder wrapper: embed, encode, decode, then project.

    All five components are injected, so this class only wires the data
    flow; it owns no layers of its own.
    """

    def __init__(self, encoder, decoder, src_embed, trg_embed, generator):
        super().__init__()
        self.encoder = encoder      # stack of encoder layers
        self.decoder = decoder      # stack of decoder layers
        self.src_embed = src_embed  # source token embedding (+ positional encoding)
        self.trg_embed = trg_embed  # target token embedding (+ positional encoding)
        self.generator = generator  # final projection over the target vocabulary

    def forward(self, src, tgt, src_mask, trg_mask):
        """Run a full forward pass and return the generator's output."""
        memory = self.encoder(self.src_embed(src), src_mask)
        decoded = self.decoder(self.trg_embed(tgt), memory, src_mask, trg_mask)
        return self.generator(decoded)


def make_model(source_vocab, target_vocab, N=6, d_model=512, d_ff=1024, head=8, dropout=0.1):
    """Assemble a full Transformer and return it ready for training.

    Args:
        source_vocab: size of the source-side vocabulary.
        target_vocab: size of the target-side vocabulary.
        N: number of stacked encoder and decoder layers.
        d_model: embedding / model dimension.
        d_ff: hidden width of the position-wise feed-forward sublayer.
        head: number of attention heads.
        dropout: dropout probability used throughout.

    Returns:
        An ``EncoderDecoder`` with Xavier-initialized weight matrices.
    """
    dup = copy.deepcopy

    # Prototype sublayers; each stack position receives its own deep copy
    # so no parameters are shared across layers.
    attention = MultiHeadedAttention(head=head, embedding_dim=d_model, dropout=dropout)
    feed_forward = PositionwiseFeedForward(d_model=d_model, d_ff=d_ff, dropout=dropout)
    src_embedding = Embedding(vocab_size=source_vocab, d_model=d_model)
    pos_encoding = PositionalEncoding(d_model=d_model, dropout=dropout, max_len=2000)
    trg_embedding = Embedding(vocab_size=target_vocab, d_model=d_model)
    vocab_projection = Generator(d_model=d_model, vocab_size=target_vocab)

    enc_layer = EncoderLayer(size=d_model, self_attn=dup(attention),
                             feed_forward=dup(feed_forward), dropout=dropout)
    dec_layer = DecoderLayer(d_model, dup(attention), dup(attention), dup(feed_forward), dropout)

    model = EncoderDecoder(
        encoder=Encoder(layer=enc_layer, N=N),
        decoder=Decoder(dec_layer, N=N),
        src_embed=nn.Sequential(src_embedding, dup(pos_encoding)),
        trg_embed=nn.Sequential(trg_embedding, dup(pos_encoding)),
        generator=vocab_projection,
    )

    # Xavier-initialize every weight matrix; 1-D parameters (biases,
    # layer-norm gains) keep their constructor defaults.
    for param in model.parameters():
        if param.dim() > 1:
            nn.init.xavier_uniform_(param)

    return model

if __name__ == '__main__':
    # Smoke test: build a small-vocabulary model and push one batch through.
    model = make_model(source_vocab=1000, target_vocab=1000)

    source_tokens = torch.tensor([[1, 2, 3, 4], [2, 4, 6, 8]])
    target_tokens = torch.tensor([[5, 2, 3, 4], [2, 40, 6, 80]])

    # NOTE(review): an all-zero mask likely masks *every* position under the
    # usual "0 = masked" convention — confirm the mask semantics in the
    # attention implementation (dm02). Shape (8, 4, 4) presumably broadcasts
    # over heads; verify as well.
    src_mask = trg_mask = torch.zeros(8, 4, 4)

    result = model(source_tokens, target_tokens, src_mask, trg_mask)
    print(result.shape)