import os

import pandas as pd
import torch
from loguru import logger
from torchtext.data.utils import get_tokenizer

from config.config import Config
from model.TextSentiment import TextClassificationModel
from utils.utils import log_init, build_vocab


def predict(test_data):
    """Predict class labels for a batch of raw text samples.

    Relies on the module-level ``vocab``, ``tokenizer``, ``model`` and
    ``config`` objects being initialized first (see the ``__main__`` block).

    :param test_data: iterable of raw text strings.
    :return: 1-D CPU ``int64`` tensor of predicted class indices, one per sample.
    """
    # Flattened token ids of every sample, and each sample's start offset
    # (EmbeddingBag-style packed representation).
    text_list, offsets = [], [0]
    for _text in test_data:
        # text -> tokens -> vocabulary indices
        processed_text = vocab(tokenizer(_text))
        text_tensor = torch.tensor(processed_text, dtype=torch.int64)
        text_list.append(text_tensor)
        offsets.append(text_tensor.size(0))
    # Convert per-sample lengths into start offsets: drop the last length and
    # take the cumulative sum, giving [0, len0, len0+len1, ...].
    offsets = torch.tensor(offsets[:-1]).cumsum(dim=0).to(config.device)
    text_list = torch.cat(text_list).to(config.device)
    model.eval()
    # Inference only: disable autograd to avoid building a graph.
    with torch.no_grad():
        out = model(text_list, offsets)
    predict_label = out.argmax(axis=1)
    # Return on CPU so callers can compare against CPU tensors regardless of
    # which device the model ran on.
    return predict_label.cpu()


if __name__ == '__main__':
    # Initialize the logger.
    log_init('train_model', log_dir='./logs')
    # Instantiate the configuration object.
    config = Config()
    # Create the tokenizer.
    tokenizer = get_tokenizer('basic_english')
    # Build the vocabulary from the training split so token ids match training.
    vocab = build_vocab(config.dataset_dir, split='train', tokenizer=tokenizer)
    # Instantiate the text classification model.
    model = TextClassificationModel(vocab_size=len(vocab),
                                    embed_dim=config.embed_dim,
                                    num_class=config.num_class).to(config.device)
    model_file_path = os.path.join(config.model_save_dir, 'model.pkl')
    # map_location lets a checkpoint saved on GPU load on a CPU-only machine.
    checkpoint = torch.load(model_file_path, map_location=config.device)
    model.load_state_dict(checkpoint['model_state_dict'])

    # Load the test samples.
    test_file_path = os.path.join(config.dataset_dir, 'datasets/AG_NEWS/test.csv')
    test = pd.read_csv(test_file_path, header=None, names=['label', 'title', 'desc'])
    # Randomly pick 10 samples.
    random_samples = test.sample(n=10)
    # AG_NEWS labels in the CSV are 1-based; shift to 0-based class indices.
    labels = torch.tensor([label - 1 for label in random_samples['label'].tolist()], dtype=torch.int64)
    texts = random_samples['desc'].tolist()
    predict_label = predict(texts)
    logger.debug(f'true label: {labels}')
    logger.debug(f'predict label: {predict_label}')
    # Move predictions to CPU before comparing with the CPU label tensor
    # (no-op when the model already ran on CPU).
    acc = (predict_label.cpu() == labels).sum().item()
    logger.debug(f'accuracy: {acc / labels.size(0)}')
