"""
@Time: 2021/1/25 下午 4:39
@Author: jinzhuan
@File: ner_cn_toolkit.py
@Desc: 
"""
from cognlp import *
from ..base_toolkit import BaseToolkit
import threading
import torch
import cognlp.io.processor.ner.msra as processor


class NerCNToolkit(BaseToolkit):
    """Singleton toolkit for Chinese named-entity recognition.

    Wraps a ``Bert4CNNer`` model (built on a Chinese RoBERTa checkpoint by
    default) and exposes :meth:`run`, which tags a raw sentence and returns
    the recognized entity spans.
    """

    # Lock used for double-checked locking in __new__ so the expensive
    # model is constructed at most once even under concurrent first calls.
    _instance_lock = threading.Lock()

    def __new__(cls, *args, **kwargs):
        # Double-checked locking: fast path avoids the lock once the
        # singleton exists; the inner check closes the creation race.
        if not hasattr(NerCNToolkit, "_instance"):
            with NerCNToolkit._instance_lock:
                if not hasattr(NerCNToolkit, "_instance"):
                    NerCNToolkit._instance = object.__new__(cls)
        return NerCNToolkit._instance

    def __init__(
            self,
            bert_model="hfl/chinese-roberta-wwm-ext",
            model_path=None,
            vocabulary_path=None,
            device=torch.device("cuda"),
            device_ids=None,
            max_seq_length=256
    ):
        """Build (once) the NER model and load its weights.

        :param bert_model: HuggingFace model name or path for the encoder.
        :param model_path: path to trained weights (handled by BaseToolkit).
        :param vocabulary_path: path to the label vocabulary.
        :param device: torch device the model runs on.
        :param device_ids: GPU ids; defaults to ``[0]``. ``None`` is used as
            the sentinel instead of a mutable ``[0]`` default to avoid the
            shared-mutable-default-argument pitfall.
        :param max_seq_length: maximum tokenized sequence length.
        """
        # __new__ always hands back the same instance, so skip the costly
        # re-initialization (model build + weight load) on repeated calls.
        if getattr(self, "_initialized", False):
            return
        if device_ids is None:
            device_ids = [0]
        super().__init__(bert_model, model_path, vocabulary_path, device, device_ids, max_seq_length)
        self.model = Bert4CNNer(self.vocabulary)
        self.load_model()
        self._initialized = True

    def run(self, sentence):
        """Tag *sentence* (a string or iterable of characters).

        :return: a list of span dicts with ``mention``/``start``/``end``/
            ``type`` keys, or ``[]`` when the model yields no prediction.
        """
        words = list(sentence)
        self.model.eval()
        # Placeholder labels: the processor requires a label sequence, but
        # the values are irrelevant at inference time.
        labels = ["O"] * len(words)
        input_id, attention_mask, segment_id, label_id, label_mask = \
            processor.process(words, labels, self.tokenizer, self.vocabulary, self.max_seq_length)

        # Wrap each feature in a batch dimension of 1 and move it to the
        # model's device.
        input_id = torch.tensor([input_id], dtype=torch.long, device=self.device)
        attention_mask = torch.tensor([attention_mask], dtype=torch.long, device=self.device)
        segment_id = torch.tensor([segment_id], dtype=torch.long, device=self.device)
        label_id = torch.tensor([label_id], dtype=torch.long, device=self.device)
        label_mask = torch.tensor([label_mask], dtype=torch.long, device=self.device)

        with torch.no_grad():
            prediction, valid_len = self.model.predict(
                [input_id, attention_mask, segment_id, label_id, label_mask])
        if len(prediction) == 0:
            return []
        prediction = prediction[0]
        valid_len = valid_len[0]
        # Hoist .item() out of the loop; skip the first and last valid
        # positions (presumably the [CLS]/[SEP] special tokens).
        n = valid_len.item()
        tag = [self.vocabulary.to_word(prediction[i].item()) for i in range(1, n - 1)]
        return _bio_tag_to_spans(words, tag)


def _bio_tag_to_spans(words, tags, ignore_labels=None):
    ignore_labels = set(ignore_labels) if ignore_labels else set()
    spans = []
    prev_bio_tag = None
    for idx, tag in enumerate(tags):
        tag = tag.lower()
        bio_tag, label = tag[:1], tag[2:]
        if bio_tag == "b":
            spans.append((label, [idx, idx]))
        elif bio_tag == "i" and prev_bio_tag in ("b", "i") and label == spans[-1][0]:
            spans[-1][1][1] = idx
        elif bio_tag == "o":  # o tag does not count
            pass
        else:
            spans.append((label, [idx, idx]))
        prev_bio_tag = bio_tag
    return [{"mention": words[span[1][0]:span[1][1] + 1], "start": span[1][0], "end": span[1][1] + 1, "type": span[0]} for span in spans
            if span[0] not in ignore_labels]
