import torch
import time
from PET.pet_config import ProjectConfig
from transformers import AutoModelForMaskedLM, AutoTokenizer
from PET.data_handle.data_dataset import convert_example
from PET.data_handle.template import HardTemplate
from PET.utils.common_utils import convert_logits_to_ids
from PET.utils.verbalizer import Verbalizer


def inference(contents):
    """Run prompt-based (PET) classification over a batch of texts.

    Loads the fine-tuned masked-LM checkpoint, wraps each input text in the
    hard prompt template, and reads the predicted label tokens at the [MASK]
    positions, mapping them back to main labels via the verbalizer.

    Args:
        contents (list[str]): raw text comments to classify.

    Returns:
        list[str]: predicted main-label text for each input, in order.
    """
    start_time = time.time()

    # Load project configuration (device, file paths, sequence lengths).
    pc = ProjectConfig()

    # Load the fine-tuned model and its tokenizer.
    model_path = './checkpoints/model_best'
    model = AutoModelForMaskedLM.from_pretrained(model_path)
    model = model.to(pc.device)
    model.eval()  # explicit inference mode (deterministic dropout etc.)
    tokenizer = AutoTokenizer.from_pretrained(model_path)

    # Verbalizer: maps predicted sub-label tokens to main labels.
    verbalizer = Verbalizer(verbalizer_file=pc.verbalizer,
                            tokenizer=tokenizer,
                            max_label_len=pc.max_label_len)

    # Read the single-line hard prompt template; use a context manager so
    # the file handle is closed deterministically (the original leaked it).
    with open(pc.prompt_file, 'r', encoding='utf8') as f:
        prompt = f.readline().strip()
    hard_template = HardTemplate(prompt=prompt)

    # Tokenize the inputs; the output also carries the [MASK] positions
    # that the label tokens must be read from.
    examples = {"text": contents}
    tokenized_output = convert_example(examples=examples,
                                       tokenizer=tokenizer,
                                       hard_template=hard_template,
                                       max_seq_len=pc.max_seq_len,
                                       max_label_len=pc.max_label_len,
                                       train_mode=False,
                                       return_tensor=True)

    with torch.no_grad():
        # logits: (batch, seq_len, vocab_size)
        logits = model(
            input_ids=tokenized_output['input_ids'].to(pc.device),
            token_type_ids=tokenized_output['token_type_ids'].to(pc.device),
            attention_mask=tokenized_output['attention_mask'].to(pc.device)
        ).logits

        # Select the predicted token ids at the mask positions.
        predictions = convert_logits_to_ids(logits, tokenized_output['mask_positions'])
        predictions = predictions.cpu().numpy().tolist()
        # Map predicted sub-label tokens back to their main label
        # (hard_mapping=True uses the exact verbalizer table).
        predictions = verbalizer.batch_find_main_label(predictions, hard_mapping=True)
        # Keep only the main-label text for each example.
        predictions = [e['label'] for e in predictions]

    used_time = time.time() - start_time
    print("inference time:%.4f" % used_time)
    return predictions


if __name__ == '__main__':
    from rich import print

    # Sample reviews spanning several product/service categories.
    samples = [
        '天台很好看，躺在躺椅上很悠闲，因为活动所以我觉得性价比还不错，适合一家出行，特别是去迪士尼也蛮近的，下次有机会肯定还会再来的，值得推荐',
        '环境，设施，很棒，周边配套设施齐全，前台小姐姐超级漂亮！酒店很赞，早餐不错，服务态度很好，前台美眉很漂亮。性价比超高的一家酒店。强烈推荐',
        "物流超快，隔天就到了，还没用，屯着出游的时候用的，听方便的，占地小",
        "福行市来到无早集市，因为是喜欢的面包店，所以跑来集市看看。第一眼就看到了，之前在微店买了小刘，这次买了老刘，还有一直喜欢的巧克力磅蛋糕。好奇老板为啥不做柠檬磅蛋糕了，微店一直都是买不到的状态。因为不爱碱水硬欧之类的，所以期待老板多来点其他小点，饼干一直也是大爱，那天好像也没看到",
        "服务很用心，房型也很舒服，小朋友很喜欢，下次去嘉定还会再选择。床铺柔软舒适，晚上休息很安逸，隔音效果不错赞，下次还会来"
    ]
    # Show the inputs, then the predicted category for each one.
    print("针对下面的文本评论，请分别给出对应所属类别：")
    print(samples)
    print(inference(samples))
