import torch
from transformers import AutoTokenizer
from preprocess.dataset import get_dataset, DataType
from model.bert_classifier import BertClassifier
from configuration import config


def predict_batch(model, input_ids, attention_mask):
    """Run the classifier on one batch and return predicted class ids.

    model: classifier producing logits of shape [batch_size, num_classes].
    input_ids / attention_mask: [batch_size, seq_len] tensors.
    Returns a plain Python list with one predicted class id per sample.
    """
    model.eval()  # inference mode: disable dropout etc.
    with torch.no_grad():
        logits = model(input_ids=input_ids, attention_mask=attention_mask)
        class_ids = logits.argmax(dim=-1)  # [batch_size]
    return class_ids.tolist()


def predict(user_input, model, tokenizer, label_feature, device):
    """Classify a single title string.

    Tokenizes `user_input` to a fixed-length batch of one, runs the model,
    and maps the predicted id back to its label name via `label_feature`.
    Returns (pred_id, pred_label).
    """
    batch = tokenizer(
        [user_input],
        max_length=config.SEQ_LEN,
        padding="max_length",
        truncation=True,
        return_tensors="pt"
    )

    ids = batch['input_ids'].to(device)        # [1, seq_len]
    mask = batch['attention_mask'].to(device)  # [1, seq_len]

    pred_id = predict_batch(model, ids, mask)[0]
    return pred_id, label_feature.int2str(pred_id)


def run_predict():
    """Interactive loop: read product titles from stdin and print predicted classes.

    Loads tokenizer, label mapping, and trained weights once, then classifies
    each entered title until the user types 'q' or 'quit'.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    tokenizer = AutoTokenizer.from_pretrained(config.PRETRAINED_DIR / 'bert-base-chinese')

    # Take the id<->name label mapping from the training split so it matches
    # what the model was trained on.
    dataset = get_dataset(type=DataType.TRAIN)
    label_feature = dataset.features['label']

    model = BertClassifier(freeze_bert=True).to(device)
    # map_location: without it, a checkpoint saved on GPU fails to load on a
    # CPU-only machine (torch.load tries to deserialize onto CUDA).
    model.load_state_dict(torch.load(config.MODELS_DIR / 'model.pt', map_location=device))

    print('请输入商品标题：（输入q或quit退出系统）')
    while True:
        # strip(): treat whitespace-only input as empty instead of classifying it
        user_input = input('>').strip()
        if user_input.lower() in ['q', 'quit']:
            print('退出系统')
            break

        if not user_input:
            print('请输入商品标题：')
            continue

        pred_id, pred_label = predict(user_input, model, tokenizer, label_feature, device)
        print(f'类别id：{pred_id}, 类别名称：{pred_label}')
