# -*- coding: utf-8 -*-

import torch
import torch.nn as nn
from transformer.transformer_encoder import TransformerEncoder
from transformer.transformer_decoder import TransformerDecoder


def shift(tensor, sos_idx):
    """Right-shift token ids by one position, prepending <sos>.

    Drops the last column so the output has the same shape as the input:
    ``[w1 w2 ... wL]`` becomes ``[<sos> w1 ... w(L-1)]``.

    :param tensor: [B, L] long tensor of token indices
    :param sos_idx: index of the <sos> token to prepend
    :return: [B, L] long tensor on the same device as ``tensor``
    """
    # torch.full is the idiomatic way to build a constant column
    # (replaces torch.ones(...) * sos_idx).
    batch_sos = torch.full([tensor.size(0), 1], sos_idx,
                           device=tensor.device, dtype=torch.long)
    return torch.cat([batch_sos, tensor[:, :-1]], -1)


class Translator(nn.Module):
    """Sequence-to-sequence Transformer translator.

    In training mode ``forward`` runs a teacher-forced pass and returns
    per-token log-probabilities over the target vocabulary. In eval mode
    it performs greedy auto-regressive decoding for up to ``max_gen_len``
    steps and returns the generated token indices.
    """

    def __init__(self, src_vocab_size, tgt_vocab_size, d_model, n_head,
                 n_layer, src_n_position, tgt_n_position, dropout=0.1,
                 max_gen_len=100, sos_idx=1, pad_idx=2):
        """
        :param src_vocab_size: source vocabulary size
        :param tgt_vocab_size: target vocabulary size
        :param d_model: hidden size of the Transformer
        :param n_head: number of attention heads
        :param n_layer: number of encoder/decoder layers
        :param src_n_position: max source positions (positional encoding)
        :param tgt_n_position: max target positions (positional encoding)
        :param dropout: dropout probability
        :param max_gen_len: max number of tokens generated at inference
        :param sos_idx: index of the <sos> token in the target vocabulary
        :param pad_idx: index of the <pad> token in the target vocabulary
        """
        super(Translator, self).__init__()
        self.encoder = TransformerEncoder(src_vocab_size, d_model, n_head,
                                          n_layer, src_n_position, dropout=dropout)
        self.decoder = TransformerDecoder(tgt_vocab_size, d_model, n_head,
                                          n_layer, tgt_n_position, dropout=dropout)
        # Two-layer head mapping decoder hidden states to per-token
        # log-probabilities over the target vocabulary.
        self.proj_head = nn.Sequential(nn.Linear(d_model, d_model),
                                       nn.ReLU(),
                                       nn.Linear(d_model, tgt_vocab_size),
                                       nn.LogSoftmax(-1))
        self.max_gen_len = max_gen_len
        self.sos_idx = sos_idx
        self.pad_idx = pad_idx

    def forward(self, src, shift_tgt=None, src_mask=None):
        """Teacher-forced pass (training) or greedy decoding (eval).

        :param src: [B, L_src]            I  like China <eos> <pad>
        :param shift_tgt: [B, L_tgt]      <sos> I like China <eos>
            (required in training mode, ignored in eval mode)
        :param src_mask: [B, L_src] attention mask over the source
        :return: training: [B, L_tgt, V] log-probabilities;
                 eval: [B, max_gen_len - 1] generated token indices
        """
        # Training and inference take different paths.
        if self.training:
            # B, L_src, H
            src_enc = self.encoder.forward(src, mask=src_mask)  # TODO: make the DataLoader supply the mask
            # B, L_tgt, H
            tgt_enc = self.decoder.forward(src_enc, shift_tgt, src_mask=src_mask)
            # B, L_tgt, V — map hidden representations to the vocabulary
            tgt_prob = self.proj_head(tgt_enc)
            return tgt_prob
        else:
            # Inference: greedy auto-regressive decoding. Gradients are
            # never needed here (argmax is non-differentiable), so skip
            # autograd bookkeeping entirely.
            device = src.device
            with torch.no_grad():
                # B, L_src, H
                src_enc = self.encoder.forward(src, mask=src_mask)  # I like China ==> target sentence

                # Start from an all-<pad> canvas of length max_gen_len ...
                response_input = torch.full((src.size(0), self.max_gen_len),
                                            self.pad_idx, device=device,
                                            dtype=torch.long)
                # ... then shift in <sos>: <sos> <pad> <pad> ... <pad>
                # B, L
                response_input = shift(response_input, sos_idx=self.sos_idx)

                # Auto-regressive loop: re-decode the whole prefix each
                # step and read off position i's prediction.
                for i in range(self.max_gen_len - 1):
                    # B, L, H
                    tgt_enc = self.decoder.forward(src_enc, response_input, src_mask=src_mask)
                    # B, 1, V — log-probs at time step i
                    i_step_prob = self.proj_head(tgt_enc[:, i:i + 1, :])
                    i_step_idx = i_step_prob.argmax(-1)  # B, 1
                    # Write the greedy choice into the next input slot.
                    response_input[:, i + 1: i + 2] = i_step_idx

                # Drop the leading <sos>.
                return response_input[:, 1:]


if __name__ == '__main__':
    # Smoke test: build a Translator and run one training-mode forward pass.
    max_vocab_size = 20000
    max_len = 200
    model = Translator(src_vocab_size=max_vocab_size,
                       tgt_vocab_size=max_vocab_size,
                       d_model=512,
                       n_head=8,
                       n_layer=6,
                       src_n_position=max_len,
                       tgt_n_position=max_len,
                       dropout=0.1,
                       max_gen_len=max_len)

    import numpy as np
    batch_size = 4
    src = torch.tensor(np.random.randint(0, max_vocab_size, (batch_size, max_len)))
    shift_tgt = torch.tensor(np.random.randint(0, max_vocab_size, (batch_size, max_len)))
    src_mask = torch.tensor(np.random.randint(0, 2, (batch_size, max_len))).unsqueeze(1).unsqueeze(2)
    tgt_prob = model(src, shift_tgt=shift_tgt, src_mask=src_mask)
    print(tgt_prob.shape)
