# -*- coding: UTF-8 -*-
"""
    @Author:YTQ
    @Time: 2022/11/28 20:45
    Description:
    
"""
import torch

from modle.TextCNN import TextCNN
from utils.utils import get_label
import logging
from transformers import BertTokenizer


def predict(modelPath, config):
    """Load a trained TextCNN checkpoint and log predicted labels for sample texts.

    Args:
        modelPath: path to a saved ``state_dict`` for :class:`TextCNN`.
        config: project config object; must provide ``device``, ``bert_path``,
            ``pad_size`` and whatever fields ``TextCNN`` reads.

    Side effects:
        Logs (via ``logging.info``) the predicted label for each sample text.
    """
    # Class labels (the second value from get_label() is unused here).
    labels, _label_dir = get_label()
    # Build the model and load the trained weights.
    model = TextCNN(config=config)
    model.load_state_dict(torch.load(modelPath, map_location=config.device))
    # Move the model to the target device and switch to inference mode —
    # without eval(), dropout stays active and predictions are stochastic.
    model.to(config.device)
    model.eval()
    tokenizer = BertTokenizer.from_pretrained(config.bert_path)
    # Sample texts to classify (hard-coded demo inputs).
    texts = ['词汇阅读是关键 08年考研暑期英语复习全指南', '“搞笑达人”欧弟炼成脱口秀最红主持', '美棉强劲回升 郑棉震荡收涨']
    batch_input_ids = []
    batch_mask = []
    for text in texts:
        tokened = tokenizer(text)
        input_ids = tokened['input_ids']
        mask = tokened['attention_mask']
        # Pad with 0 up to pad_size, then truncate to exactly pad_size so
        # every row in the batch has the same length.
        if len(input_ids) < config.pad_size:
            pad_len = config.pad_size - len(input_ids)
            input_ids += [0] * pad_len
            mask += [0] * pad_len
        batch_input_ids.append(input_ids[:config.pad_size])
        batch_mask.append(mask[:config.pad_size])

    # Build input tensors directly on the model's device to avoid a
    # CPU/GPU mismatch when config.device is CUDA.
    batch_input_ids = torch.tensor(batch_input_ids, device=config.device)
    batch_mask = torch.tensor(batch_mask, device=config.device)
    # Pure inference — disable gradient tracking to save memory.
    with torch.no_grad():
        pred = model(batch_input_ids, batch_mask)
    pred_ = torch.argmax(pred, dim=1)

    logging.info([labels[i] for i in pred_])
