# -*- coding: utf-8 -*-

import torch.nn as nn
from transformer.module.encoder_layer import EncoderLayer
from transformer.module.positional_encoding import PositionalEncoding

"""
embedding: 是否共享?
1. 机器翻译不共享      - en -> cn，不共享一套词表
2. 对话共享           - question & answer 同属于同一种语言，共享同一套词表
"""


class TransformerEncoder(nn.Module):
    """Transformer encoder: token embedding + positional encoding followed by
    a stack of ``n_layer`` identical :class:`EncoderLayer` blocks."""

    def __init__(self, v_size, d_model, n_head, n_layer, n_position,
                 dropout=0.1, embedding=None):
        """
        :param v_size:      vocabulary size (only used when ``embedding`` is None)
        :param d_model:     model/hidden dimension, e.g. 512
        :param n_head:      number of attention heads, e.g. 8
        :param n_layer:     number of stacked encoder layers, e.g. 6
        :param n_position:  maximum sequence length, e.g. 500
        :param dropout:     dropout probability, default 0.1
        :param embedding:   optional pre-built ``nn.Embedding`` to share with
                            another module (e.g. a decoder over the same
                            vocabulary); if None, a fresh table is created
        """
        super(TransformerEncoder, self).__init__()
        self.d_model = d_model
        self.n_head = n_head
        self.n_layer = n_layer  # number of encoder layers
        self.max_seq_len = n_position
        self.dropout = dropout

        # Word embedding: reuse a shared table if one was supplied,
        # otherwise build a new one for this encoder.
        if embedding is not None:
            self.embedding = embedding
        else:
            self.embedding = nn.Embedding(v_size, d_model)
        # Positional encoding (adds position information to the embeddings).
        self.positional_embedding = PositionalEncoding(d_model, n_position)

        # nn.ModuleList (unlike a plain Python list) registers every child
        # layer with this module, so their parameters are visible to
        # .parameters(), .to(device), .state_dict(), etc.
        self.encoder_layers = nn.ModuleList(
            EncoderLayer(self.d_model, self.n_head, self.dropout)
            for _ in range(self.n_layer)
        )

    def forward(self, src_idx, mask=None):
        """
        :param src_idx:     token indices, shape (B, L)
        :param mask:        optional attention mask, shape (B, 1, 1, L)
                            (e.g. len = 3 ==> 1 1 1 0 0); it is broadcast
                            against the (B, n_head, L, L) attention scores
                            produced by query (B, n_head, L, d_q) dot
                            key^T (B, n_head, d_k, L)
        :return:            encoder output, shape (B, L, d_model)
        """
        # token index --> token embedding --> positionally-encoded embedding
        src_emb = self.embedding(src_idx)
        output = self.positional_embedding(src_emb)

        # Pass through the layer stack.  Invoke each layer via __call__
        # (not .forward) so PyTorch hooks are honoured; iterate the
        # ModuleList directly instead of indexing by position.
        for layer in self.encoder_layers:
            output = layer(output, mask=mask)

        return output
