# -*- coding:utf-8 -*-
from tqdm import tqdm
from model import MyModel
from config import parsers
import torch
from transformers import BertTokenizer
from log import logger_init
import logging
import time
import pandas as pd


def load_model(device, model_path):
    """Load trained weights into a MyModel instance and switch to eval mode.

    Args:
        device: target device string (e.g. "cuda:0" or "cpu").
        model_path: path to a state_dict checkpoint produced by torch.save.

    Returns:
        The model moved to *device*, in eval mode.
    """
    myModel = MyModel().to(device)
    # map_location keeps CPU-only machines from crashing on checkpoints
    # that were saved from a CUDA device.
    myModel.load_state_dict(torch.load(model_path, map_location=device))
    myModel.eval()
    return myModel


def process_text(text, bert_pred):
    """Encode *text* into padded token-id and attention-mask tensors.

    Args:
        text: raw input string to classify.
        bert_pred: path/name of the pretrained BERT tokenizer checkpoint.

    Returns:
        Tensor of shape (2, 1, args.max_len + 2): stacked token ids and
        attention mask, ready to be fed to the model.

    Note: relies on the module-global ``args`` for ``max_len``.
    """
    # Cache tokenizers per checkpoint: from_pretrained re-reads vocab files
    # from disk, which is wasteful inside the per-text prediction loop.
    cache = process_text.__dict__.setdefault("_tokenizers", {})
    tokenizer = cache.get(bert_pred)
    if tokenizer is None:
        tokenizer = cache[bert_pred] = BertTokenizer.from_pretrained(bert_pred)

    max_seq = args.max_len + 2  # room for [CLS] (and, presumably, [SEP] — only [CLS] is added here; TODO confirm training used the same scheme)
    token_id = tokenizer.convert_tokens_to_ids(["[CLS]"] + tokenizer.tokenize(text))
    # Truncate: without this, texts longer than max_seq tokens produced
    # negative padding counts and over-length tensors.
    token_id = token_id[:max_seq]
    pad = [0] * (max_seq - len(token_id))

    token_ids = torch.tensor(token_id + pad).unsqueeze(0)
    mask = torch.tensor([1] * len(token_id) + pad).unsqueeze(0)
    x = torch.stack([token_ids, mask])
    return x


def text_class_name(text, pred):
    """Log the predicted class name for *text* given the model output *pred*.

    Args:
        text: the original input string (used only for the log message).
        pred: model logits of shape (batch, num_classes); argmax over dim=1
            picks the class index of the first sample.

    Note: relies on the module-global ``args`` for the label file path.
    """
    result = torch.argmax(pred, dim=1)
    result = result.cpu().numpy().tolist()
    # `with` closes the label file; the original leaked the file handle.
    with open(args.classification, "r", encoding="utf-8") as f:
        classification = f.read().split("\n")
    classification_dict = dict(zip(range(len(classification)), classification))
    logging.info(f"文本：{text}\t预测的类别为：{classification_dict[result[0]]}")


def load_data(path):
    """Read a UTF-8 text file and return one whitespace-stripped string per line."""
    with open(path, 'r', encoding='utf-8') as handle:
        return [line.strip() for line in handle]


def predict_label_data(pred):
    """Return the predicted class name for the first sample in *pred*.

    Args:
        pred: model logits of shape (batch, num_classes); argmax over dim=1
            picks the class index of the first sample.

    Returns:
        The class-name string read from the label file at args.classification
        (one label per line, index = line number).

    Note: relies on the module-global ``args`` for the label file path.
    """
    result = torch.argmax(pred, dim=1)
    result = result.cpu().numpy().tolist()
    # `with` closes the label file; the original leaked the file handle.
    with open(args.classification, "r", encoding="utf-8") as f:
        classification = f.read().split("\n")
    classification_dict = dict(zip(range(len(classification)), classification))
    return classification_dict[result[0]]


if __name__ == "__main__":
    # Batch-label a text file with the trained classifier and dump the
    # (text, label) pairs to CSV.
    start = time.time()
    args = parsers()
    logger_init(log_level=logging.INFO)
    device = "cuda:0" if torch.cuda.is_available() else "cpu"

    model = load_model(device, args.save_model_best)
    Auto_label_path = 'Auto_label_data/bert_test_0719.txt'
    texts = load_data(Auto_label_path)

    labels = []
    # Inference only: no_grad avoids building autograd graphs per text.
    with torch.no_grad():
        for text in tqdm(texts):
            try:
                # .to(device) fixes a device mismatch: the model lives on
                # `device` but the input tensor was previously left on CPU.
                x = process_text(text, args.bert_pred).to(device)
                pred = model(x)
                label = predict_label_data(pred)
                labels.append(label)
            except Exception:
                # Best-effort batch labelling: keep going on a bad row, but
                # log the failure instead of silently swallowing it.
                logging.exception(f"预测失败，文本：{text}")
                labels.append('Wrong')

    df = pd.DataFrame({"text": texts, "label": labels})
    df.to_csv('Auto_label_data/result/bert_result_0719.csv')

    end = time.time()
    logging.info(f"耗时为：{end - start} s")