import copy
import torch.nn as nn

from multi_headed_attention import MultiHeadedAttention
from positionwise_feed_forward import PositionwiseFeedForward
from encoder_layer import EncoderLayer
from encoder import Encoder
from decoder_layer import DecoderLayer
from decoder import Decoder
from encoder_decoder import EncoderDecoder
from embedding import Embeddings
from positional_encoding import PositionalEncoding
from generator import Generator


def make_model(source_vocab, target_vocab, N=6, feature_dim=512, d_ff=2048, num_head=8, dropout=0.1):
    """Build a full Transformer encoder-decoder model.

    Args:
        source_vocab: size of the source vocabulary.
        target_vocab: size of the target vocabulary.
        N: number of stacked encoder layers and decoder layers.
        feature_dim: model embedding/hidden size (``d_model`` in the paper).
        d_ff: inner dimension of the position-wise feed-forward network.
        num_head: number of attention heads.
        dropout: dropout probability used throughout the model.

    Returns:
        An ``EncoderDecoder`` instance with Xavier-initialized weight matrices.
    """

    c = copy.deepcopy

    # Prototype sub-modules; each layer template receives its own deep copy
    # so that no parameters are shared between attention/FFN instances.
    attn = MultiHeadedAttention(
        num_head=num_head,
        feature_dim=feature_dim,
        dropout=dropout,
    )

    feed_forward = PositionwiseFeedForward(
        feature_dim=feature_dim,
        d_ff=d_ff,
        dropout=dropout,
    )

    positional_encoding = PositionalEncoding(feature_dim=feature_dim, dropout=dropout)
    generator = Generator(feature_dim=feature_dim, vocab_size=target_vocab)

    encoder = Encoder(
        EncoderLayer(
            feature_dim=feature_dim,
            self_attn=c(attn),
            feed_forward=c(feed_forward),
            dropout=dropout,
        ),
        num_encoder_layer=N,
    )

    decoder = Decoder(
        DecoderLayer(
            feature_dim=feature_dim,
            self_attn=c(attn),
            src_attn=c(attn),
            # Fixed: deep-copy the feed-forward prototype, matching the
            # encoder path above — previously the shared instance was passed.
            feed_forward=c(feed_forward),
            dropout=dropout,
        ),
        num_decoder_layer=N,
    )

    model = EncoderDecoder(
        encoder=encoder,
        decoder=decoder,
        source_embed=nn.Sequential(Embeddings(feature_dim, source_vocab), c(positional_encoding)),
        target_embed=nn.Sequential(Embeddings(feature_dim, target_vocab), c(positional_encoding)),
        generator=generator,
    )

    # Xavier-uniform init for all weight matrices (dim > 1); biases and other
    # 1-D parameters keep their module defaults.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model
