#! -*- coding: utf-8 -*-
"""
@Info: 推理接口
"""
import json
import torch
from transformers import BertTokenizer

import model


# Second-level -> third-level intent mapping table, loaded once at import time.
# Each entry is expected to carry 'second_business' and 'operate_type' keys
# (see get_intent_mapping below).
with open('./data/intent_mapping.json', 'r', encoding='utf-8') as f:
    INTENT_MAPPING = json.load(f)  # json.load reads the file object directly


def get_intent_mapping(second_business):
    """Look up the third-level intents linked to a second-level intent.

    Args:
        second_business: dict carrying a 'label' key that names the
            second-level intent.

    Returns:
        The 'operate_type' value of the first matching mapping entry,
        or an empty list when no entry matches.
    """
    target = second_business['label']
    matches = (
        entry['operate_type']
        for entry in INTENT_MAPPING
        if entry['second_business'] == target
    )
    return next(matches, [])


class Inference(object):
    """Inference wrapper for the BERT+LSTM intent classifier.

    Loads the model weights and tokenizer once, then serves per-text
    predictions via :meth:`predict`.
    """

    def __init__(self, config):
        """Build the model, load weights, and prepare the tokenizer.

        Args:
            config: dict with at least 'model_path' (state-dict file),
                'pretrain_model_path' (BERT vocab dir) and 'labels'
                (ordered list of class names matching the model output).
        """
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = model.ClassifyBertLSTM(config)
        # map_location keeps GPU-trained checkpoints loadable on CPU-only hosts.
        self.model.load_state_dict(
            torch.load(config['model_path'], map_location=self.device))
        self.model.to(self.device)
        self.tokenizer = BertTokenizer.from_pretrained(config['pretrain_model_path'])
        self.labels = config['labels']
        self.id2label = {i: label for i, label in enumerate(self.labels)}
        self.max_length = 512  # BERT's hard limit on input sequence length

        self.model.eval()  # inference only: disable dropout etc.

    def _sort(self, scores, k=3, filter_labels=None):
        """Rank class scores descending and return the top-k entries.

        Args:
            scores: per-class probability list aligned with ``self.labels``.
            k: maximum number of results to return.
            filter_labels: optional whitelist of label names; when falsy,
                all labels are eligible.

        Returns:
            list of ``{'label': str, 'score': float}`` dicts, score rounded
            to 2 decimals, sorted by score descending, truncated to ``k``.
        """
        if not filter_labels:
            filter_labels = self.labels
        ranked = sorted(enumerate(scores), key=lambda x: x[1], reverse=True)
        result = [
            {'label': self.id2label[idx], 'score': round(score, 2)}
            for idx, score in ranked
            if self.id2label[idx] in filter_labels
        ]
        return result[:k]

    def predict(self, text, second_business=None):
        """Predict the top intents for ``text``.

        Args:
            text: input utterance; a trailing Chinese full stop is dropped.
            second_business: optional dict with a 'label' key; when given,
                candidate intents are restricted to those mapped to it.

        Returns:
            list of the top-scoring ``{'label', 'score'}`` dicts.
        """
        if text.endswith('。'):
            text = text[:-1]
        text_tokens = self.tokenizer.tokenize(text)

        # Reserve two positions for the [CLS]/[SEP] special tokens.
        if len(text_tokens) > self.max_length - 2:
            text_tokens = text_tokens[:self.max_length - 2]

        # BUG FIX: closing special token must be '[SEP]', not the plain
        # string 'SEP' (which the vocab would map to the wrong id).
        tokens = ['[CLS]'] + text_tokens + ['[SEP]']
        input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
        token_type_ids = [0] * (len(text_tokens) + 2)
        mask_ids = [1] * len(input_ids)
        with torch.no_grad():
            logits = self.model(input_ids=torch.LongTensor([input_ids]).to(self.device),
                                token_type_ids=torch.LongTensor([token_type_ids]).to(self.device),
                                attention_mask=torch.LongTensor([mask_ids]).to(self.device))
        scores = torch.softmax(logits, dim=-1).view(-1).detach().cpu().numpy().tolist()
        filter_labels = get_intent_mapping(second_business) if second_business else None
        former_three = self._sort(scores, k=5, filter_labels=filter_labels)
        return former_three


if __name__ == '__main__':
    # NOTE: json is already imported at module level; the local re-import
    # that used to live here was redundant and has been removed.
    from arguments import OPERATE_MODEL_CONFIG

    test_inputs = [
        '烦烦烦的',
        '大大顶顶顶顶22dd大',
        '查找任务编号为TASK202400000501的任务清单',
        '帮我把订单MO20240530尽量排到前面',
        '明天P9Can工作站员工缺勤，请重新安排顶岗，请重排任务'
    ]
    # Build the predictor once: constructing Inference reloads the model
    # weights from disk, so creating it inside the loop was wasteful.
    predictor = Inference(OPERATE_MODEL_CONFIG)
    for test_input in test_inputs:
        print(predictor.predict(test_input))
