#!/usr/bin/env python
# -*- encoding:utf-8 -*-

from typing import List, Mapping, Tuple
from transformers import BertForTokenClassification, BertTokenizerFast
import torch
import re
import sys

# Mapping from model output indices to BIO tags for the single
# entity type ("MAJOR") this checkpoint was trained on.
id2label = {
    0: "O",
    1: "B-MAJOR",
    2: "I-MAJOR",
}

# Load the fine-tuned tokenizer/model pair from the local checkpoint
# directory and move the model onto the best available device.
tokenizer = BertTokenizerFast.from_pretrained('./model')
model = BertForTokenClassification.from_pretrained('./model')
_device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(_device)


def extractMajorTag(textList: List[str]) -> List[Tuple[str, List[str]]]:
    """Tag each input text and extract the MAJOR entity mentions.

    Args:
        textList: raw sentences; each is truncated/padded to 100 wordpieces.

    Returns:
        One ``(original_text, [entity_name, ...])`` pair per input sentence.
    """
    inputs = tokenizer(textList, add_special_tokens=True, max_length=100,
                       padding="max_length", truncation=True, return_tensors="pt")
    # Keep the batch on the same device the model was moved to.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    inputs = inputs.to(device)

    with torch.inference_mode():
        outputs = model(**inputs)
    # (batch, seq) tensor of predicted label ids per wordpiece.
    predictions = torch.argmax(outputs.logits, dim=2)

    resultList = []
    for index, prediction in enumerate(predictions):
        tokens = tokenizer.convert_ids_to_tokens(
            inputs["input_ids"][index].tolist())
        tmpList = []      # wordpieces of the entity currently being built
        tmpNameList = []  # completed entity strings for this sentence

        def _flush():
            # Close the current span. Strip the word-separator space first
            # so the length filter sees the real entity text (previously
            # a leading space let single-character names like " a" slip
            # through, and names began with a stray space).
            name = "".join(tmpList).strip()
            if len(name) > 1:
                tmpNameList.append(name)
            tmpList.clear()

        for i, tag in enumerate(prediction.tolist()):
            if tag in (1, 2):  # B-MAJOR / I-MAJOR
                if tokens[i].startswith("##"):
                    # Subword continuation: glue onto the previous piece.
                    tmpList.append(tokens[i][2:])
                elif re.match(r"^[a-zA-Z]+$", tokens[i]):
                    # Separate whole English words with a space.
                    tmpList.append(" " + tokens[i])
                else:
                    tmpList.append(tokens[i])
            elif tmpList:
                _flush()
        # BUG FIX: a span still open when the loop ends (entity running to
        # the final position) was silently dropped before — flush it too.
        if tmpList:
            _flush()

        resultList.append(tmpNameList)
    return list(zip(textList, resultList))


if __name__ == "__main__":
    lines = ["大学本科毕业，法律或计算机相关专业。"]
    results = extractMajorTag(lines)
    for (line, majors) in results:
        print(line, majors)
