import torch
from transformers import AutoTokenizer
from model import ReviewAnalyzeModel
from config import MODELS_DIR, SEQ_LEN, PRETRAINED_DIR


def predict_batch(model, input_ids, attention_mask):
    """Run inference on one batch and return per-sample positive-class probabilities.

    Args:
        model: network whose forward pass returns one logit per sample, shape [batch_size].
        input_ids: token-id tensor, shape [batch_size, seq_len].
        attention_mask: padding-mask tensor, shape [batch_size, seq_len].

    Returns:
        list[float]: sigmoid probabilities, one per input sample.
    """
    model.eval()  # disable dropout / batch-norm updates for inference
    with torch.no_grad():  # skip autograd bookkeeping — forward only
        logits = model(input_ids, attention_mask)  # [batch_size]
        scores = torch.sigmoid(logits)             # [batch_size], values in (0, 1)
    return scores.tolist()


def predict(user_input, model, tokenizer, device):
    """Tokenize a single review text and return its positive-class probability.

    Args:
        user_input: raw review string.
        model: trained classifier passed through to predict_batch.
        tokenizer: HuggingFace tokenizer producing input_ids / attention_mask.
        device: torch device the model's weights live on.

    Returns:
        float: probability that the review is positive.
    """
    # Encode to fixed-length tensors: pad/truncate every input to SEQ_LEN tokens.
    batch = tokenizer(
        user_input,
        max_length=SEQ_LEN,
        truncation=True,
        padding="max_length",
        return_tensors="pt",
    )
    ids = batch["input_ids"].to(device)        # [1, seq_len]
    mask = batch["attention_mask"].to(device)  # [1, seq_len]

    # Single-item batch: the first (only) probability is the answer.
    return predict_batch(model, ids, mask)[0]


def run_predict():
    """Interactive prediction loop: read review texts from stdin and print sentiment.

    Loads the tokenizer and the trained model once, then repeatedly prompts
    the user until 'q' or 'quit' is entered.
    """
    # Prefer GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Build the tokenizer from the local pretrained snapshot.
    tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_DIR / 'bert-base-chinese')

    # Build the model and load the trained weights.
    # map_location is required so a checkpoint saved on GPU still loads on a
    # CPU-only machine — without it torch.load fails when CUDA is unavailable,
    # defeating the CPU fallback chosen above.
    model = ReviewAnalyzeModel(freeze_bert=True).to(device)
    model.load_state_dict(torch.load(MODELS_DIR / 'model.pt', map_location=device))

    # Interactive client loop.
    print('请输入要预测的评论：（输入q或quit退出系统）')
    while True:
        # strip() so whitespace-only lines are treated as empty input
        # and ' q ' / trailing spaces still quit as intended.
        user_input = input('>').strip()
        if user_input in ('q', 'quit'):
            print('退出系统')
            break

        if not user_input:
            print('请输入要预测的评论：')
            continue

        prob = predict(user_input, model, tokenizer, device)
        # Model outputs P(positive); report the winning class with its confidence.
        if prob > 0.5:
            print(f'正面评价（置信度：{prob:.4f}）')
        else:
            print(f'负面评价（置信度：{1 - prob:.4f}）')


if __name__ == '__main__':
    # Entry point: launch the interactive prediction CLI.
    run_predict()
