import torch
from torch import nn

from .modeling_utils import ModelBase, ModelConfig


class Seq2SeqConfig(ModelConfig):
    """Configuration for the LSTM seq2seq model.

    Args:
        vocab_size: vocabulary size; the embedding table is shared by the
            encoder and the decoder.
        embed_dim: embedding dimension.
        encoder_lstm_hidden: hidden size of the encoder LSTM.
        encoder_lstm_n_layer: number of encoder LSTM layers.
        encoder_lstm_bidirectional: whether the encoder LSTM is bidirectional.
        decoder_lstm_hidden: hidden size of the (single-layer) decoder LSTM.
        linear_size_list: sizes of the output linear stack. If the last entry
            is not ``vocab_size``, a final ``vocab_size`` layer is appended:
            ``None`` -> ``[vocab_size]``;
            ``[s1, s2]`` with ``s2 != vocab_size`` -> ``[s1, s2, vocab_size]``;
            ``[s1, s2]`` with ``s2 == vocab_size`` -> ``[s1, s2]``.
        batch_first: passed through to the LSTM modules.
        dropout: dropout probability (used between encoder LSTM layers).
        pad_token_id: padding token id (embedding ``padding_idx``).
        label_pad_id: label id ignored by the loss.
        start_token_id: token id that seeds decoding at inference time;
            ``None`` means the first token of the source input is used instead.
        seq_max_length: maximum decoded sequence length at inference time.
    """

    def __init__(self,
                 vocab_size=5002,
                 embed_dim=256,
                 encoder_lstm_hidden=256,
                 encoder_lstm_n_layer=2,
                 encoder_lstm_bidirectional=True,
                 decoder_lstm_hidden=256,
                 linear_size_list=None,
                 batch_first=True,
                 dropout=0.1,
                 pad_token_id=0,
                 label_pad_id=-100,
                 start_token_id=None,
                 seq_max_length=128,
                 **kwargs):
        super(Seq2SeqConfig, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim
        self.encoder_lstm_hidden = encoder_lstm_hidden
        self.encoder_lstm_n_layer = encoder_lstm_n_layer
        self.encoder_lstm_bidirectional = encoder_lstm_bidirectional
        self.decoder_lstm_hidden = decoder_lstm_hidden

        # The last linear layer must project onto the vocabulary, so make
        # sure the size list ends with vocab_size.
        if isinstance(linear_size_list, (list, tuple)):
            sizes = list(linear_size_list)
        else:
            sizes = []
        if not sizes or sizes[-1] != self.vocab_size:
            sizes.append(self.vocab_size)
        self.linear_size_list = sizes

        self.batch_first = batch_first
        self.dropout = dropout
        self.pad_token_id = pad_token_id
        self.label_pad_id = label_pad_id
        self.start_token_id = start_token_id
        self.seq_max_length = seq_max_length
        self._load_finish_()


class Seq2SeqLSTM(ModelBase):
    """LSTM encoder-decoder (seq2seq) model.

    The encoder reads the source sequence; its final hidden/cell states
    (summed over layers and directions) initialize a single-layer decoder
    that emits one token per step through a stack of linear layers ending
    in a ``vocab_size`` projection.

    Example 1 (training — note the return order is ``(loss, logits)``):
        config = Seq2SeqConfig(linear_size_list=[512, 1024, 512])
        model = Seq2SeqLSTM(config=config)
        i = torch.randint(0, 5000, (2, 30))
        loss, o = model(x=[i, i], label=i)

    Example 2 (no label -> logits only):
        config = Seq2SeqConfig(linear_size_list=[512, 1024, 512])
        model = Seq2SeqLSTM(config=config)
        i = torch.randint(0, 5000, (2, 30))
        o = model(x=[i, i])
    """

    config_class = Seq2SeqConfig

    def __init__(self, config: Seq2SeqConfig, **kwargs):
        super(Seq2SeqLSTM, self).__init__(config=config, **kwargs)

        self.config = config

        # The encoder's summed hidden/cell states are handed directly to the
        # decoder, so the two hidden sizes must agree; fail fast with a clear
        # message instead of a cryptic shape error inside nn.LSTM.
        if config.encoder_lstm_hidden != config.decoder_lstm_hidden:
            raise ValueError(
                "encoder_lstm_hidden must equal decoder_lstm_hidden to hand "
                f"the encoder state to the decoder (got "
                f"{config.encoder_lstm_hidden} != {config.decoder_lstm_hidden})")

        # Embedding table shared by encoder and decoder inputs.
        self.embedding = nn.Embedding(num_embeddings=self.config.vocab_size,
                                      embedding_dim=self.config.embed_dim,
                                      padding_idx=self.config.pad_token_id)

        # nn.LSTM warns when dropout is set on a single-layer LSTM.
        encoder_lstm_dropout = 0 if self.config.encoder_lstm_n_layer <= 1 else self.config.dropout
        self.encoder_lstm = nn.LSTM(input_size=self.config.embed_dim,
                                    hidden_size=self.config.encoder_lstm_hidden,
                                    num_layers=self.config.encoder_lstm_n_layer,
                                    bidirectional=self.config.encoder_lstm_bidirectional,
                                    dropout=encoder_lstm_dropout,
                                    batch_first=self.config.batch_first)
        self.decoder_lstm = nn.LSTM(input_size=self.config.embed_dim,
                                    hidden_size=self.config.decoder_lstm_hidden,
                                    num_layers=1,
                                    bidirectional=False,
                                    batch_first=self.config.batch_first)

        # Output projection stack. Bug fix: the first linear layer consumes
        # the DECODER output, so its input size is decoder_lstm_hidden (the
        # original used encoder_lstm_hidden, which only worked when the two
        # sizes happened to be equal).
        fc_in_list = [self.config.decoder_lstm_hidden] + self.config.linear_size_list[:-1]
        fc_out_list = self.config.linear_size_list
        self.fcs = nn.ModuleList(
            nn.Linear(in_features=in_features, out_features=out_features)
            for in_features, out_features in zip(fc_in_list, fc_out_list))

    def forward(self, x, label=None, training=True):
        """Run teacher-forced training or greedy decoding.

        Args:
            x: in training mode, a pair ``[src_input, trg_input]`` of
                ``(batch, seq)`` LongTensors; in inference mode a single
                ``(batch, seq)`` LongTensor used both as the source and —
                when ``start_token_id`` is None — to seed decoding.
            label: ``(batch, seq)`` LongTensor of target ids for the loss
                (entries equal to ``label_pad_id`` are ignored).
            training: True for teacher forcing over ``trg_input``; False for
                greedy decoding up to ``seq_max_length`` steps.

        Returns:
            ``(loss, logits)`` when ``training`` and ``label`` is given,
            otherwise logits of shape ``(batch, steps, vocab_size)``.
        """
        if training:
            src_input, trg_input = x
        else:
            src_input = x
            trg_input = x

        i_embed = self.embedding(src_input)
        _, (i_hidden, i_cell) = self.encoder_lstm(i_embed)
        # Collapse the (num_layers * num_directions) axis into the single
        # layer the decoder expects.
        hidden = torch.sum(i_hidden, dim=0, keepdim=True)
        cell = torch.sum(i_cell, dim=0, keepdim=True)

        outputs = []
        seq_length = trg_input.shape[-1] if training else self.config.seq_max_length
        for i in range(seq_length):
            if training:
                # Teacher forcing: always feed the ground-truth token.
                e_i = trg_input[:, i].unsqueeze(-1)
            elif i == 0:
                # Bug fix: compare against None, not truthiness, so a
                # start_token_id of 0 is honored.
                if self.config.start_token_id is not None:
                    template = trg_input[:, 0].unsqueeze(-1)
                    e_i = torch.full_like(template, self.config.start_token_id, dtype=torch.long)
                else:
                    # No start token configured: seed with the first source token.
                    e_i = trg_input[:, 0].unsqueeze(-1)
            # else: greedy decoding — e_i already holds the previous step's argmax.

            e_i_embed = self.embedding(e_i)
            out, (hidden, cell) = self.decoder_lstm(e_i_embed, (hidden, cell))
            for fc in self.fcs:
                out = fc(out)
            if not training:
                # The greedy choice becomes the next decoder input.
                e_i = torch.argmax(out, dim=-1)
            outputs.append(out)

        outputs = torch.cat(outputs, 1)
        if training and (label is not None):
            loss_fct = torch.nn.CrossEntropyLoss(ignore_index=self.config.label_pad_id)
            loss = loss_fct(outputs.view(-1, outputs.size(-1)), label.view(-1))
            return loss, outputs
        return outputs
