"""
@author : Hyunwoong
@when : 2019-12-18
@homepage : https://github.com/gusdnd852
"""
import torch
from torch import nn

from models.model.decoder import Decoder
from models.model.encoder import Encoder


class Transformer(nn.Module):
    """Sequence-to-sequence Transformer composed of an Encoder and a Decoder.

    Two modes of operation, selected by ``is_inference``:
      * training  — ``forward(src, trg)`` runs teacher-forced decoding and
        returns per-position logits over the target vocabulary.
      * inference — ``forward(src)`` runs greedy autoregressive decoding and
        returns the generated token ids (start token stripped).
    """

    def __init__(self, src_pad_idx, trg_pad_idx, trg_sos_idx, enc_voc_size, dec_voc_size, d_model, n_head, max_len,
                 ffn_hidden, n_layers, drop_prob, device, is_inference=False):
        """Build the encoder/decoder stacks.

        Args:
            src_pad_idx: padding token id in the source vocabulary.
            trg_pad_idx: padding token id in the target vocabulary.
            trg_sos_idx: start-of-sequence token id used to seed inference.
            enc_voc_size / dec_voc_size: vocabulary sizes for each side.
            d_model, n_head, max_len, ffn_hidden, n_layers, drop_prob:
                standard Transformer hyperparameters, forwarded verbatim to
                Encoder and Decoder.
            device: torch device on which masks are created.
            is_inference: if True, ``forward`` performs greedy decoding
                instead of teacher-forced training decoding.
        """
        super().__init__()
        self.is_inference = is_inference
        self.src_pad_idx = src_pad_idx
        self.trg_pad_idx = trg_pad_idx
        self.trg_sos_idx = trg_sos_idx
        self.device = device
        self.encoder = Encoder(d_model=d_model,
                               n_head=n_head,
                               max_len=max_len,
                               ffn_hidden=ffn_hidden,
                               enc_voc_size=enc_voc_size,
                               drop_prob=drop_prob,
                               n_layers=n_layers,
                               device=device)

        self.decoder = Decoder(d_model=d_model,
                               n_head=n_head,
                               max_len=max_len,
                               ffn_hidden=ffn_hidden,
                               dec_voc_size=dec_voc_size,
                               drop_prob=drop_prob,
                               n_layers=n_layers,
                               device=device)

    def forward(self, src, trg=None):
        """Encode ``src`` and decode, in training or inference mode.

        Args:
            src: (batch, src_len) source token ids.
            trg: (batch, trg_len) target token ids; required in training
                mode, ignored in inference mode.

        Returns:
            Training: decoder logits. Inference: generated token ids.

        Raises:
            ValueError: if ``trg`` is omitted in training mode (previously
                this crashed deep inside mask construction instead).
        """
        if not self.is_inference and trg is None:
            raise ValueError("trg must be provided when is_inference is False")

        src_mask = self.make_pad_mask(src, src, self.src_pad_idx, self.src_pad_idx)
        # Encode the source sequence once; both decode paths reuse it.
        enc_src = self.encoder(src, src_mask)

        if self.is_inference:
            return self.decode_inference(src, enc_src)
        else:
            return self.decode_train(src, enc_src, trg)

    def decode_train(self, src, enc_src, trg):
        """Teacher-forced decoding: feed the full target sequence at once."""
        # Cross-attention mask: target queries must not attend to source padding.
        src_trg_mask = self.make_pad_mask(trg, src, self.trg_pad_idx, self.src_pad_idx)

        # Self-attention mask: hide target padding AND future positions.
        # `&` is the logical-AND of the two boolean masks (equivalent to the
        # former `*`, but states the intent directly).
        trg_mask = self.make_pad_mask(trg, trg, self.trg_pad_idx, self.trg_pad_idx) & \
                   self.make_no_peak_mask(trg, trg)

        output = self.decoder(trg, enc_src, trg_mask, src_trg_mask)
        return output

    def decode_inference(self, src, enc_src):
        """Greedy autoregressive decoding from the start token.

        Returns the generated ids of shape (batch, max_len - 1), with the
        leading start token removed.
        """
        # Seed every sequence in the batch with the start-of-sequence token.
        trg = torch.ones((src.shape[0], 1), dtype=torch.long, device=self.device) * self.trg_sos_idx
        # Generation budget: heuristic upper bound of 1.5x the source length
        # (the target is assumed not to be much longer than the source).
        max_len = max(10, int(src.shape[1] * 1.5))
        # Generate one token per step, re-running the decoder on the full
        # prefix each time.
        for i in range(1, max_len):
            # Target self-attention mask: padding AND causal (no-peek).
            trg_mask = self.make_pad_mask(trg, trg, self.trg_pad_idx, self.trg_pad_idx) & \
                       self.make_no_peak_mask(trg, trg)
            # Cross-attention mask over the source sequence.
            src_trg_mask = self.make_pad_mask(trg, src, self.trg_pad_idx, self.src_pad_idx)
            output = self.decoder(trg, enc_src, trg_mask, src_trg_mask)
            # Logits for the last position only — the next-token prediction.
            pred = output[:, -1, :]
            # Greedy choice: take the highest-scoring token.
            pred = torch.argmax(pred, dim=1).unsqueeze(1)
            # Append the prediction and continue from the extended prefix.
            trg = torch.cat([trg, pred], dim=1)
        # Drop the start token before returning.
        return trg[:, 1:]

    def make_pad_mask(self, q, k, q_pad_idx, k_pad_idx):
        """Build a (batch, 1, len_q, len_k) boolean attention mask.

        An entry is True where BOTH the query position and the key position
        are real tokens (i.e. not equal to their respective padding ids).
        """
        len_q, len_k = q.size(1), k.size(1)

        # batch_size x 1 x 1 x len_k
        k = k.ne(k_pad_idx).unsqueeze(1).unsqueeze(2)
        # batch_size x 1 x len_q x len_k — materialized rather than relying
        # on broadcasting so the returned shape is explicit.
        k = k.repeat(1, 1, len_q, 1)

        # batch_size x 1 x len_q x 1
        q = q.ne(q_pad_idx).unsqueeze(1).unsqueeze(3)
        # batch_size x 1 x len_q x len_k
        q = q.repeat(1, 1, 1, len_k)

        mask = k & q
        return mask

    def make_no_peak_mask(self, q, k):
        """Build a (len_q, len_k) lower-triangular causal mask.

        True at (i, j) iff j <= i, i.e. position i may attend to j.
        Allocated directly on ``self.device`` with a bool dtype instead of
        the deprecated ``.type(torch.BoolTensor)`` + ``.to(device)`` pattern,
        which built the tensor on CPU and copied it every call.
        """
        len_q, len_k = q.size(1), k.size(1)

        mask = torch.tril(torch.ones(len_q, len_k, dtype=torch.bool, device=self.device))

        return mask
