#!/usr/bin/env python
# -*- encoding:utf-8 -*-

from typing import List, Tuple
from transformers import BertForTokenClassification, Trainer, TrainingArguments, BertTokenizerFast
import os
import json
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
import random

# Integer label ids <-> BIO tag names for a single entity type (person NAME).
id2label = {
    0: "O",
    1: "B-NAME",
    2: "I-NAME",
}

label2id = {
    "O": 0,
    "B-NAME": 1,
    "I-NAME": 2,
}

tokenizer = BertTokenizerFast.from_pretrained('hfl/chinese-bert-wwm')
# Original vocabulary size, captured BEFORE the marker tokens are added:
# any id >= vocabLen is one of the markers and is filtered out of the
# input_ids during preprocessing (see loadAnnotations).
vocabLen = len(tokenizer)
# Preprocessing-only special tokens used to delimit name spans inline in the text.
tokenizer.add_tokens(["[S-NAME]", "[E-NAME]"], True)

model = BertForTokenClassification.from_pretrained(
    'hfl/chinese-bert-wwm', num_labels=3, id2label=id2label, label2id=label2id)
# NOTE(review): the embedding matrix is NOT resized after add_tokens; the code
# relies on the marker ids being stripped before they ever reach the model.
if torch.cuda.is_available():
    model = model.to("cuda")
else:
    model = model.to("cpu")

# Fine-tune only encoder layers 5..11; freeze every other BERT-body parameter.
for (name, param) in model.bert.named_parameters():
    param.requires_grad = False
    for nameKw in ["layer.5.", "layer.6.", "layer.7.", "layer.8.", "layer.9.", "layer.10.", "layer.11."]:
        if nameKw in name:
            param.requires_grad = True


def loadAnnotations(filePath: str) -> List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
    """Load one JSON annotation file into (input_ids, attention_mask, labels) triples.

    Each JSON object must contain "text"; optional "label" entries carry
    [start, end) character spans of names.  The spans are marked inline with
    the [S-NAME]/[E-NAME] special tokens, the marked text is re-tokenized,
    and the markers are then consumed to emit BIO label ids
    (0 = O, 1 = B-NAME, 2 = I-NAME).

    :param filePath: path to a JSON file holding a list of annotation objects
    :return: one (input_ids, attention_mask, labels) tensor triple per entry,
             each of fixed length 100
    """
    retList = []
    with open(filePath, "r", encoding="utf-8") as f:
        objList = json.load(f)
        for obj in objList:
            if "text" not in obj:
                continue
            tokenList = list(obj["text"])
            # Encoding of the UNMARKED text: its attention_mask is reused below,
            # because the marker tokens are stripped from the final input_ids
            # and the remaining length matches the unmarked encoding.
            oldEncode = tokenizer(obj["text"], add_special_tokens=True, max_length=100,
                                  padding="max_length", truncation=True)
            for label in obj.get("label", []):
                start = label["start"]
                end = label["end"]
                tokenList[start] = f"[S-NAME]{tokenList[start]}"
                # BUG FIX: append the end marker unconditionally.  The original
                # skipped it for single-character names (end-1 == start), so
                # inName below never reset and every following token in the
                # sequence was mislabelled as I-NAME.
                tokenList[end-1] = f"{tokenList[end-1]}[E-NAME]"
            text = "".join(tokenList)
            encode = tokenizer(text, add_special_tokens=True, max_length=100,
                               padding="max_length", truncation=True)
            tokens = tokenizer.convert_ids_to_tokens(encode["input_ids"])
            newLabels = []
            inName = False
            inNameCount = 0
            for token in tokens:
                # Markers toggle the in-span state and produce no label of their own.
                if token == "[S-NAME]":
                    inName = True
                    inNameCount = 0
                elif token == "[E-NAME]":
                    inName = False
                elif inName:
                    inNameCount += 1
                    # First token of the span is B-NAME, the rest I-NAME.
                    newLabels.append(1 if inNameCount == 1 else 2)
                else:
                    newLabels.append(0)
            # Drop the marker ids (they sit past the original vocab size) so the
            # model, whose embeddings were not resized, never sees them.
            newIds = [i for i in encode["input_ids"] if i < vocabLen]
            # Re-pad both sequences to the fixed length of 100 (pad id 0, label O).
            newIds += [0] * (100 - len(newIds))
            newLabels += [0] * (100 - len(newLabels))
            retList.append((torch.tensor(newIds), torch.tensor(
                oldEncode["attention_mask"]), torch.tensor(newLabels)))
    return retList


def loadData() -> List[Tuple[torch.Tensor, torch.Tensor]]:
    """Assemble the full training set and return it shuffled.

    Combines every annotation file under ./annotations (each oversampled 50x
    so the annotation count roughly matches the line count of
    ./data/jobname_ok.txt) with the raw job-name lines, where each whole line
    is labelled as one name: first real token B-NAME, the rest I-NAME.
    """
    allItemList = []
    for root, _, files in os.walk("./annotations"):
        for fileName in files:
            items = loadAnnotations(os.sep.join([root, fileName]))
            # Oversample so annotations balance the jobname_ok.txt lines.
            allItemList.extend(items * 50)
    print("annotation count", len(allItemList))
    # Each non-empty line of jobname_ok.txt is a full name on its own.
    with open("./data/jobname_ok.txt", "r", encoding="utf-8") as f:
        for rawLine in f:
            jobName = rawLine.strip()
            if not jobName:
                continue
            encode = tokenizer(jobName, add_special_tokens=True, max_length=100,
                               padding="max_length", truncation=True)
            labels = []
            seenFirst = False
            for tokenId in encode["input_ids"]:
                if tokenId in (0, 101, 102):  # [PAD], [CLS], [SEP] -> O
                    labels.append(0)
                elif not seenFirst:
                    seenFirst = True
                    labels.append(1)  # first content token -> B-NAME
                else:
                    labels.append(2)  # remaining content tokens -> I-NAME
            allItemList.append(
                (torch.tensor(encode["input_ids"]), torch.tensor(encode["attention_mask"]), torch.tensor(labels)))
    random.shuffle(allItemList)
    return allItemList


class MyDataset(Dataset):
    """Map-style dataset over pre-built (input_ids, attention_mask, labels) triples.

    Each item is returned as the keyword dict the HF Trainer feeds to the model.
    """

    def __init__(self, items):
        self.items = items

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        input_ids, attention_mask, labels = self.items[idx]
        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "labels": labels,
        }


if __name__ == "__main__":
    # Entry point: build the dataset, split 80/20 train/eval, fine-tune the model.
    allItemList = loadData()
    trainItems, testItems = train_test_split(allItemList, test_size=0.2)
    train_dataset = MyDataset(trainItems)
    test_dataset = MyDataset(testItems)

    training_args = TrainingArguments(
        output_dir='./results',          # output directory for checkpoints
        num_train_epochs=100,              # total # of training epochs
        per_device_train_batch_size=256,  # batch size per device during training
        per_device_eval_batch_size=64,   # batch size for evaluation
        warmup_steps=500,                # linear LR warmup over the first 500 steps
        weight_decay=0.01,               # strength of weight decay
        logging_dir='./logs',            # directory for storing logs
        eval_strategy="steps",           # run evaluation periodically during training
    )

    # Trainer uses its default data collator and loss (per-token cross-entropy).
    trainer = Trainer(
        model=model,
        args=training_args,                  # training arguments, defined above
        train_dataset=train_dataset,         # training dataset
        eval_dataset=test_dataset            # evaluation dataset
    )
    trainer.train()
