import torch
from loguru import logger

from utils import LoadCoupletDataset, my_tokenizer
from model import CoupletModel
from config import cfg


def sampling_decode(model, src, max_len, start_symbol, config=None, end_symbol=None):
    """Autoregressively decode a target sequence for the given source tokens.

    NOTE: despite the name, this is *greedy* (argmax) decoding, not sampling.

    :param model: model exposing ``encoder``, ``decoder`` and ``classification``
    :param src: source token-index tensor, shape assumed [src_len, 1] — TODO confirm
    :param max_len: maximum number of decoding steps
    :param start_symbol: BOS token index used as the first decoder input
    :param config: config object providing ``device``
    :param end_symbol: optional EOS token index; when given, decoding stops
        early once it is produced (backward compatible: default keeps the
        original fixed-length behavior)
    :return: 1-D tensor of decoded token indices (including the start symbol)
    """
    src = src.to(config.device)
    # Inference only — no need to track gradients.
    with torch.no_grad():
        # Encode the source once; reuse the memory for every decoding step.
        memory = model.encoder(src).to(config.device)
        # First decoder input: the start symbol.
        ys = torch.full((1, 1), start_symbol, dtype=torch.long, device=config.device)

        for _ in range(max_len):
            # shape [tgt_len, 1, embed_dim]
            out = model.decoder(ys, memory)
            # shape [1, tgt_len, embed_dim]
            out = out.transpose(0, 1)
            # Classify only the last position to predict the next token.
            prob = model.classification(out[:, -1])
            # Greedy choice: take the highest-probability token.
            _, next_word = torch.max(prob, dim=1)
            next_word = next_word.item()
            # Append the prediction; the grown sequence is the next decoder input.
            ys = torch.cat([ys, torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=0)
            # Stop early once the end symbol is emitted (if one was supplied).
            if end_symbol is not None and next_word == end_symbol:
                break
    return ys.flatten()


def couplet(model, src, data_loader, config):
    """Generate the lower line of a couplet for one upper-line string.

    :param model: trained couplet model
    :param src: raw upper-line text (space-separated characters)
    :param data_loader: dataset helper providing vocab / tokenization
    :param config: config object providing ``device``
    :return: decoded lower line with ``<bos>``/``<eos>`` markers removed
    """
    model.eval()
    # Turn the raw text into a token-index sequence.
    token_seq = data_loader.make_inference_sample(src)

    # src_len is the leading dimension; decode at most that many steps.
    decoded = sampling_decode(model, token_seq,
                              max_len=token_seq.shape[0],
                              start_symbol=data_loader.BOS_IDX,
                              config=config)
    # Map token indices back to characters and drop the special markers.
    vocab = data_loader.vocab
    text = "".join(vocab.itos[tok] for tok in decoded)
    return text.replace("<bos>", "").replace("<eos>", "")


def do_couplet(srcs, config):
    """Run couplet inference over a list of upper-line strings.

    Rebuilds the data pipeline (for the vocabulary), restores the trained
    model weights, then decodes one lower line per input.

    :param srcs: list of upper-line strings (space-separated characters)
    :param config: config object with paths, model hyper-parameters and device
    :return: list of predicted lower-line strings, one per input
    """
    # NOTE(review): this reloads the training corpus just to rebuild the
    # vocabulary — fine for a one-shot script, wasteful if called repeatedly.
    data_loader = LoadCoupletDataset(config.train_corpus_file_paths,
                                     batch_size=config.batch_size,
                                     top_k=config.top_k)
    couplet_model = CoupletModel(vocab_size=len(data_loader.vocab),
                                 d_model=config.d_model,
                                 nhead=config.num_head,
                                 num_encoder_layers=config.num_encoder_layers,
                                 num_decoder_layers=config.num_decoder_layers,
                                 dim_feedforward=config.dim_feedforward,
                                 dropout=config.dropout)
    couplet_model = couplet_model.to(config.device)
    # Load the trained parameters. map_location makes a checkpoint saved on
    # one device (e.g. GPU) loadable on another (e.g. CPU-only machine);
    # without it torch.load fails when the saving device is unavailable.
    loaded_paras = torch.load(config.model_save_path, map_location=config.device)
    couplet_model.load_state_dict(loaded_paras)

    # Decode every sample.
    return [couplet(couplet_model, src, data_loader, config) for src in srcs]


if __name__ == '__main__':
    # Demo upper lines and their human-written reference lower lines.
    uppers = ["晚风摇树树还挺",
              "忽忽几晨昏，离别间之，疾病间之，不及终年同静好",
              "风声、雨声、读书声，声声入耳"]
    references = ["晨露润花花更红",
                  "茕茕小儿女，孱羸若此，娇憨若此，更烦二老费精神",
                  "家事、国事、天下事，事事关心"]
    # Space-separate the characters of each upper line before inference.
    spaced_uppers = [" ".join(line) for line in uppers]
    predictions = do_couplet(spaced_uppers, cfg)
    for upper, reference, prediction in zip(spaced_uppers, references, predictions):
        logger.info(f"上联：{''.join(upper.split())}")
        logger.info(f" AI：{prediction}")
        logger.info(f"下联：{reference}")
        logger.info("=======")
