import torch
from torch import nn
import torch.nn.functional as F
import decoder
import encoder
import embedding as emb
import projection_layer as pl
class Transformer(nn.Module):
    """Encoder-decoder Transformer (Vaswani et al., "Attention Is All You Need").

    Wires together a source embedding, a target embedding, an encoder stack,
    a decoder stack, and a final linear projection onto the target vocabulary.

    Args:
        src_seq_len: maximum source sequence length (sizes the positional encoding).
        tgt_seq_len: maximum target sequence length.
        enc_vocab_size: source (encoder-side) vocabulary size.
        dec_vocab_size: target (decoder-side) vocabulary size; also the
            output dimension of the projection layer.
        dim_model: model/embedding dimension shared by all sub-layers.
        head_num: number of attention heads.
        ffn_hidden: hidden width of the position-wise feed-forward networks.
        block_num: number of encoder and decoder blocks.
        dropout_p: dropout probability used throughout.
    """

    def __init__(self, src_seq_len, tgt_seq_len,
                 enc_vocab_size, dec_vocab_size,
                 dim_model, head_num, ffn_hidden,
                 block_num, dropout_p):
        # Python-3 zero-argument super() replaces the legacy two-argument form.
        super().__init__()
        self.encoder = encoder.Encoder(dim_model, ffn_hidden,
                                       head_num, block_num, dropout_p)
        self.decoder = decoder.Decoder(dim_model, ffn_hidden,
                                       head_num, block_num, dropout_p)
        self.src_embed = emb.TransformerEmbedding(enc_vocab_size, dim_model, src_seq_len, dropout_p)
        self.tgt_embed = emb.TransformerEmbedding(dec_vocab_size, dim_model, tgt_seq_len, dropout_p)
        self.projection_layer = pl.ProjectionLayer(dim_model, dec_vocab_size)

    def encode(self, src, src_mask):
        """Embed the source tokens and run them through the encoder stack."""
        src = self.src_embed(src)
        return self.encoder(src, src_mask)

    def decode(self, encoder_output, src_mask, tgt, tgt_mask):
        """Embed the target tokens and run them through the decoder stack,
        attending over ``encoder_output``."""
        tgt = self.tgt_embed(tgt)
        return self.decoder(tgt, encoder_output, src_mask, tgt_mask)

    def project(self, x):
        """Map decoder features to target-vocabulary logits."""
        return self.projection_layer(x)

    def forward(self, src, tgt, src_mask, tgt_mask):
        """Full pass: encode ``src``, decode ``tgt``, project to logits.

        Added so the module is callable directly (``model(src, tgt, ...)``)
        per the ``nn.Module`` contract; the explicit ``encode``/``decode``/
        ``project`` methods remain unchanged for step-wise inference.
        """
        encoder_output = self.encode(src, src_mask)
        decoder_output = self.decode(encoder_output, src_mask, tgt, tgt_mask)
        return self.project(decoder_output)

def build_transformer(src_vocab_size, tgt_vocab_size,
                    src_seq_len, tgt_seq_len,
                    dim_model, N=6, head_num=8, 
                    dropout_p=0.1, ffn_hidden=2048):
    """Construct a Transformer and Xavier-initialise its weight matrices.

    Args:
        src_vocab_size: source vocabulary size.
        tgt_vocab_size: target vocabulary size.
        src_seq_len: maximum source sequence length.
        tgt_seq_len: maximum target sequence length.
        dim_model: model/embedding dimension.
        N: number of encoder and decoder blocks (default 6, as in the paper).
        head_num: number of attention heads (default 8).
        dropout_p: dropout probability (default 0.1).
        ffn_hidden: feed-forward hidden width (default 2048).

    Returns:
        The initialised ``Transformer`` instance.
    """
    model = Transformer(
        src_seq_len, tgt_seq_len,
        src_vocab_size, tgt_vocab_size,
        dim_model, head_num, ffn_hidden, N, dropout_p,
    )

    # Xavier-uniform init for every matrix-shaped parameter; 1-D tensors
    # (biases, norm scales) keep their module defaults.
    for param in model.parameters():
        if param.dim() > 1:
            nn.init.xavier_uniform_(param)
    return model