# -*- coding:utf-8 -*-
import os
import json
import torch
import logging
import models.model_helper as model_helper
from utils.vocab import load_vocab
from utils.data_loader import padding


# Run inference on GPU when one is available, otherwise fall back to CPU.
USE_CUDA = torch.cuda.is_available()
# logging layout: timestamp, level, message.
LOG_FORMAT = '%(asctime)s %(levelname)s %(message)s'


class Predictor(object):
    """Load a trained pairwise matcher and score query/candidate sentence pairs."""

    def __init__(self, lib_dir, ckpt_file):
        """Build a predictor from a trained model directory.

        Args:
            lib_dir: directory containing config.json and a checkpoints/ subdir.
            ckpt_file: checkpoint file name inside lib_dir/checkpoints.

        Raises:
            ValueError: if ckpt_file is None or the checkpoint file is missing.
        """
        self._lib_dir = lib_dir

        config_path = os.path.join(lib_dir, "config.json")
        # Use a context manager so the config file handle is closed deterministically
        # (the original json.load(open(...)) leaked it until GC).
        with open(config_path, 'r', encoding='utf-8') as config_fp:
            config = json.load(config_fp)
        self._config = config

        logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
        for key in config:
            logging.info("......{}: {}".format(key, config[key]))

        # Validate explicitly: `assert` is stripped under `python -O`, which would
        # turn a None ckpt_file into an obscure os.path.join failure.
        if ckpt_file is None:
            raise ValueError("ckpt_file must not be None")
        ckpt_path = os.path.join(lib_dir, "checkpoints", ckpt_file)
        if not os.path.exists(ckpt_path):
            # Include the path so the failure is diagnosable from the message alone.
            raise ValueError("checkpoint not exist: {}".format(ckpt_path))

        matcher, _, _ = model_helper.load_or_build_pairwise_model(config, ckpt_path)

        self.device = torch.device("cuda" if USE_CUDA else "cpu")

        self.matcher = matcher.to(self.device)
        # Inference only: switch off dropout / batch-norm statistics updates.
        self.matcher.eval()

        self.vocab = load_vocab(os.path.join(config['data_dir'], config['vocab_file']))

    def predict(self, query_sent: list, candidate_sent: list) -> float:
        """Score one (query, candidate) pair of pre-tokenized sentences.

        Args:
            query_sent: query sentence as a list of tokens.
            candidate_sent: candidate sentence as a list of tokens.

        Returns:
            The model's matching score for the pair as a Python float.
        """
        query_ids = self.vocab.convert2idx(query_sent)
        candidate_ids = self.vocab.convert2idx(candidate_sent)

        with torch.no_grad():
            # Pad/truncate to the fixed lengths the model was trained with.
            padded_query_ids = padding(query_ids, self._config['sent1_maxlen'])
            padded_candidate_ids = padding(candidate_ids, self._config['sent2_maxlen'])

            # unsqueeze(0): add a batch dimension of size 1.
            query_tensor = torch.LongTensor(padded_query_ids).unsqueeze(0).to(self.device)
            candidate_tensor = torch.LongTensor(padded_candidate_ids).unsqueeze(0).to(self.device)

            # Call the module itself, not .forward(), so that any registered
            # forward hooks are honored (nn.Module.__call__ dispatches to forward).
            score = self.matcher(query_tensor, candidate_tensor)
            assert score.size(0) == 1  # internal invariant: batch of exactly one
            return score[0].item()


if __name__ == '__main__':
    # Smoke test: load an LSTM matcher checkpoint and score one sentence pair.
    model_dir = "../outputs/stc_lstm"
    ckpt_name = "2018_11_11_00_30_06_step_0.8308.tar".replace("step_", "step_45000_score_")
    predictor = Predictor(model_dir, "2018_11_11_00_30_06_step_45000_score_0.8308.tar")
    query_tokens = "我 爱 你".split()
    candidate_tokens = "我 喜欢 你 啊 ！".split()
    print(predictor.predict(query_tokens, candidate_tokens))



