#!/usr/bin/env python
# -*- encoding:utf-8 -*-

from typing import List, Tuple
from transformers import BertForSequenceClassification, Trainer, TrainingArguments, BertTokenizerFast
import torch

# Load the fine-tuned tokenizer and classifier from the local ./model directory.
tokenizer = BertTokenizerFast.from_pretrained('./model')
model = BertForSequenceClassification.from_pretrained('./model')

# Use the GPU when available. Models load on CPU by default, so the explicit
# .to("cpu") branch in the original was a no-op.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
# Disable dropout / switch batch-norm style layers to inference mode for
# deterministic predictions.
model.eval()


def predict(lines: List[str]) -> List[Tuple[str, str]]:
    """Classify each text line with the loaded BERT model.

    Args:
        lines: Raw text strings to classify.

    Returns:
        A list of ``(line, label)`` pairs in the same order as *lines*,
        where label is the model's ``id2label`` name for the argmax class.
    """
    encoding = tokenizer(
        lines,
        add_special_tokens=True,
        max_length=100,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    # Bug fix: move the input tensors to the model's device. The model is
    # moved to CUDA at module load when available, but the freshly tokenized
    # batch lives on CPU — calling the model with mismatched devices raises
    # a RuntimeError.
    encoding = {name: tensor.to(model.device) for name, tensor in encoding.items()}
    with torch.no_grad():
        output = model(**encoding)
    # Batched argmax instead of a per-row Python loop; the original loop
    # variable also shadowed this function's own name (`predict`).
    pred_ids = output.logits.argmax(dim=-1).tolist()
    labels = [model.config.id2label[i] for i in pred_ids]
    return list(zip(lines, labels))


if __name__ == "__main__":
    # Demo: label a few sample job-posting lines and print each result.
    samples = [
        "精通java",
        "精通go编程语言",
        "本科及以上学历，国际贸易、商务英语等相关专业优先",
        "发展路线:全网学习顾问-督导-储备经理-经理-总监-区域总经理一大区总经理一集团副总裁。高薪不是幻影， 能力有多高，平台就有多大！",
    ]
    for text, tag in predict(samples):
        print(text, tag)
