import copy
import torch.nn as nn
from learn_embedding import Embeddings
from learn_positional import PositionalEncoding
from learn_feedforward import PositionwiseFeedForward
from learn_multihead_attention import MultiHeadedAttention
from learn_encoder import EncoderLayer
from learn_decoderlayer import DecoderLayer
from learn_encoder import Encoder
from learn_decoder import Decoder
from learn_generator import Generator
from learn_encoderdecoder import EncoderDecoder

def make_model(source_vocab, target_vocab, N=6,
               d_model=512, d_ff=2048, head=8, dropout=0.1):
    """Build a complete Transformer model.

    Args:
        source_vocab: size of the source-side vocabulary.
        target_vocab: size of the target-side vocabulary.
        N: number of stacked encoder and decoder layers.
        d_model: dimensionality of the token embeddings.
        d_ff: inner dimensionality of the position-wise feed-forward layer.
        head: number of attention heads.
        dropout: dropout (zeroing) probability.

    Returns:
        An ``EncoderDecoder`` model with Xavier-initialized weight matrices.
    """
    # Deep-copy shorthand: several sub-modules below are reused in multiple
    # places and must be independent copies so they do not share parameters.
    c = copy.deepcopy

    # Multi-headed attention template (deep-copied wherever it is needed).
    attn = MultiHeadedAttention(head, d_model)

    # Position-wise feed-forward sub-layer template.
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)

    # Positional encoding, applied on top of the embeddings.
    position = PositionalEncoding(d_model, dropout)

    # Assemble the outermost EncoderDecoder: encoder stack, decoder stack,
    # source/target embedding pipelines (embedding + positional encoding),
    # and the output generator (linear projection + softmax).
    model = EncoderDecoder(
        # Each encoder layer holds one attention sub-layer and one
        # feed-forward sub-layer.
        Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
        # Each decoder layer holds two attention sub-layers (self- and
        # source-attention) and one feed-forward sub-layer.
        Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
        # Source-side embedding followed by positional encoding.
        nn.Sequential(Embeddings(d_model, source_vocab), c(position)),
        # Target-side embedding followed by positional encoding.
        nn.Sequential(Embeddings(d_model, target_vocab), c(position)),
        # Generator: projects d_model features to target-vocabulary logits.
        Generator(d_model, target_vocab))

    # Initialize every parameter with more than one dimension (i.e. the
    # weight matrices) from a Xavier/Glorot uniform distribution.
    # BUG FIX: the original returned inside this loop, so only the first
    # matching parameter was initialized (and None was returned when no
    # parameter had dim > 1).
    for p in model.parameters():
        if p.dim() > 1:
            # In-place variant; the non-underscore `xavier_uniform` is
            # deprecated in modern PyTorch.
            nn.init.xavier_uniform_(p)
    return model


# Example configuration (all remaining make_model arguments use defaults):
source_vocab = 11
target_vocab = 11
N = 6

# Build and display the model when run as a script:
if __name__ == '__main__':
    res = make_model(source_vocab, target_vocab, N)
    print(res)


# Expected printed output (model structure):
'''
EncoderDecoder(
  (encoder): Encoder(
    (layers): ModuleList(
      (0-5): 6 x EncoderLayer(
        (self_attn): MultiHeadedAttention(
          (linears): ModuleList(
            (0-3): 4 x Linear(in_features=512, out_features=512, bias=True)
          )
          (dropout): Dropout(p=0.1, inplace=False)
        )
        (feed_forward): PositionwiseFeedForward(
          (w1): Linear(in_features=512, out_features=2048, bias=True)
          (w2): Linear(in_features=2048, out_features=512, bias=True)
          (dropout): Dropout(p=0.1, inplace=False)
        )
        (sublayer): ModuleList(
          (0-1): 2 x SublayerConnection(
            (norm): LayerNorm()
            (dropout): Dropout(p=0.1, inplace=False)
          )
        )
      )
    )
    (norm): LayerNorm()
  )
  (decoder): Decoder(
    (layers): ModuleList(
      (0-5): 6 x DecoderLayer(
        (self_attn): MultiHeadedAttention(
          (linears): ModuleList(
            (0-3): 4 x Linear(in_features=512, out_features=512, bias=True)
          )
          (dropout): Dropout(p=0.1, inplace=False)
        )
        (src_attn): MultiHeadedAttention(
          (linears): ModuleList(
            (0-3): 4 x Linear(in_features=512, out_features=512, bias=True)
          )
          (dropout): Dropout(p=0.1, inplace=False)
        )
        (feed_forward): PositionwiseFeedForward(
          (w1): Linear(in_features=512, out_features=2048, bias=True)
          (w2): Linear(in_features=2048, out_features=512, bias=True)
          (dropout): Dropout(p=0.1, inplace=False)
        )
        (sublayer): ModuleList(
          (0-2): 3 x SublayerConnection(
            (norm): LayerNorm()
            (dropout): Dropout(p=0.1, inplace=False)
          )
        )
      )
    )
    (norm): LayerNorm()
  )
  (src_embed): Sequential(
    (0): Embeddings(
      (lut): Embedding(11, 512)
    )
    (1): PositionalEncoding(
      (dropout): Dropout(p=0.1, inplace=False)
    )
  )
  (tgt_embed): Sequential(
    (0): Embeddings(
      (lut): Embedding(11, 512)
    )
    (1): PositionalEncoding(
      (dropout): Dropout(p=0.1, inplace=False)
    )
  )
  (generator): Generator(
    (project): Linear(in_features=512, out_features=11, bias=True)
  )
)
'''