#!/usr/bin/env python
# -*- encoding:utf-8 -*-

from typing import List, Mapping, Tuple
from transformers import BertForTokenClassification, BertTokenizerFast
import torch
import re

# Label-id → tag-name mapping for the token-classification head.
# NOTE(review): `predict` below compares raw ids (1, 2) directly; this map
# appears to be kept for reference/export — confirm it matches the model config.
id2label = {
    0: "O",
    1: "B-NAME",
    2: "I-NAME",
}

# Load the fine-tuned BERT tokenizer and token-classification model from the
# local ./model directory (must contain a HuggingFace-format checkpoint).
tokenizer = BertTokenizerFast.from_pretrained('./model')
model = BertForTokenClassification.from_pretrained('./model')
# Run on GPU when one is available, otherwise fall back to CPU.
_device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(_device)


def predict(textList: List[str]) -> List[Tuple[str, str]]:
    """Extract person names from each input string with the NER model.

    Args:
        textList: raw input strings (e.g. job-title lines).

    Returns:
        List of ``(original_text, extracted)`` pairs. ``extracted`` is the
        found name(s) joined by "/", the original text itself when nothing was
        found and the text is short (<= 10 chars), or "" otherwise.

    NOTE(review): inputs are truncated to max_length=100 tokens, so names past
    that point cannot be found — confirm this limit matches the training setup.
    """
    inputs = tokenizer(textList, add_special_tokens=True, max_length=100,
                       padding="max_length", truncation=True, return_tensors="pt")
    # Keep the tensors on the same device as the model.
    inputs = inputs.to("cuda" if torch.cuda.is_available() else "cpu")
    with torch.no_grad():
        outputs = model(**inputs)
    # Per-token predicted label id; 1 = B-NAME, 2 = I-NAME (see id2label).
    predictions = torch.argmax(outputs.logits, dim=2)
    # Hoisted out of the token loop: matches purely-ASCII-alphabetic tokens.
    alphaPattern = re.compile(r"^[a-zA-Z]+$")
    resultList = []
    for index, prediction in enumerate(predictions):
        tokens = tokenizer.convert_ids_to_tokens(
            inputs["input_ids"][index].tolist())
        tmpList = []
        tmpNameList = []

        def _flush():
            # Commit the accumulated token pieces as one name; single-char
            # candidates are discarded as noise.
            if tmpList:
                name = "".join(tmpList)
                if len(name) > 1:
                    tmpNameList.append(name)
                tmpList.clear()

        for i, tag in enumerate(prediction):
            if tag in (1, 2):
                token = tokens[i]
                if token.startswith("##"):
                    # WordPiece continuation: glue onto the previous piece.
                    tmpList.append(token.replace("##", ""))
                elif alphaPattern.match(token):
                    # Separate consecutive Latin-script tokens with a space.
                    tmpList.append(" " + token)
                else:
                    tmpList.append(token)
            else:
                _flush()
        # Bug fix: a name that runs up to the last token was previously
        # dropped because it was only flushed when a non-entity tag followed.
        _flush()
        if not tmpNameList:
            # Fallback: short inputs are assumed to already be a name.
            if len(textList[index]) <= 10:
                resultList.append(textList[index])
            else:
                resultList.append("")
        else:
            newName = "/".join(tmpNameList).replace(
                "/ ", "/").replace("[UNK]", "").replace(". ", ".").strip()
            resultList.append(newName)
    return list(zip(textList, resultList))


def loadCharMap(path: str = "./char_map.txt") -> Mapping[str, str]:
    """Load a character-normalization map from a UTF-8 text file.

    Each line has the form ``<src>→<dst>``; lines that do not split into
    exactly two parts on "→" are silently skipped.

    Args:
        path: file to read. Defaults to "./char_map.txt" so existing
            callers keep working.

    Returns:
        Dict mapping a source character/string to its replacement.
    """
    retMap = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            parts = line.strip().split("→")
            if len(parts) == 2:
                retMap[parts[0]] = parts[1]
    return retMap


def normalJobName(line: str, charMap: Mapping[str, str]) -> str:
    """Normalize a job-name string character by character.

    Each character present as a key in ``charMap`` is replaced by its mapped
    value; all other characters pass through unchanged. When any replacement
    happened, the before/after pair is printed (debug trace kept from the
    original implementation).

    Args:
        line: the raw job-name string.
        charMap: per-character replacement table (see ``loadCharMap``).

    Returns:
        The normalized string.
    """
    newLine = "".join(charMap.get(ch, ch) for ch in line)
    if newLine != line:
        print(line, newLine)
    return newLine


if __name__ == "__main__":
    # Demo run: normalize a batch of raw job-title strings, then extract
    # person names from each and print the (original, extracted) pairs.
    charMap = loadCharMap()
    rawJobNames = [
        "(09:00-17:00)周末双休+六险一金●运营客服",
        "(09:00-17:00)周末双休+法定节假日带薪休假+绝不加班+五险一金",
        "(09:00-17:00)周末双休不加班+六险一金",
        "(09:00-17:30)周末双休+六险一金/五百强岗 不加班 不内卷",
        "(09:00-18:00)周末双休+六险一金●运营客服",
        "(9:00-17:00)六险一金+周末双休不加班",
        "(国营企业330一天)+五险一金+包吃包住+空调车间",
        "0元入职上班，0成本0投入，司机只拿工资",
        "0押金0租金纯上班司机",
        "0押金不租不买司机保底8000",
        "0押金，0租金，纯上班司机",
        "0押金，0租金，纯上班小车司机",
        "0押，0租金，0违章金，无任何费用上班",
        "0费用接送乘客保底8000",
    ]
    normalizedNames = [normalJobName(name, charMap) for name in rawJobNames]
    for oldName, newName in predict(normalizedNames):
        print(oldName, newName)
