"""
@Time: 2021/1/12 4:15 PM
@Author: jinzhuan
@File: ner_toolkit.py
@Desc: 
"""
from cognlp import *
from ..base_toolkit import BaseToolkit
import threading
import torch
import cognlp.io.processor.ner.conll2003 as processor


class NerToolkit(BaseToolkit):
    """Singleton toolkit for BERT-based named-entity recognition.

    Wraps a ``Bert4Ner`` model and exposes :meth:`run`, which tags a token
    sequence and returns entity spans.

    NOTE(review): the singleton only guards ``__new__`` — ``__init__`` still
    runs (and reloads the model) on every ``NerToolkit(...)`` call; confirm
    this is intentional before relying on repeated construction being cheap.
    """

    # Guards double-checked singleton creation in __new__.
    _instance_lock = threading.Lock()

    def __new__(cls, *args, **kwargs):
        # Classic double-checked locking: cheap unlocked check first, then
        # re-check under the lock so only one instance is ever created.
        if not hasattr(NerToolkit, "_instance"):
            with NerToolkit._instance_lock:
                if not hasattr(NerToolkit, "_instance"):
                    NerToolkit._instance = object.__new__(cls)
        return NerToolkit._instance

    def __init__(
            self,
            bert_model="bert-base-cased",
            model_path=None,
            vocabulary_path=None,
            device=torch.device("cuda"),
            device_ids=None,
            max_seq_length=256
    ):
        """Build tokenizer/vocabulary via BaseToolkit, then load the model.

        :param bert_model: HuggingFace model name for the BERT encoder.
        :param model_path: path to trained weights (loaded by ``load_model``).
        :param vocabulary_path: path to the label vocabulary.
        :param device: torch device to run inference on.
        :param device_ids: GPU ids; defaults to ``[0]``. (Kept as ``None``
            sentinel — a mutable ``[0]`` default would be shared across calls.)
        :param max_seq_length: maximum wordpiece sequence length.
        """
        if device_ids is None:
            device_ids = [0]
        super().__init__(bert_model, model_path, vocabulary_path, device, device_ids, max_seq_length)
        self.model = Bert4Ner(len(self.vocabulary))
        self.load_model()

    def run(self, words):
        """Tag ``words`` and return entity spans (see ``_bio_tag_to_spans``).

        :param words: sequence of already-tokenized words.
        :return: list of ``{"mention", "start", "end"}`` dicts; ``[]`` if the
            model produced no prediction.
        """
        self.model.eval()
        # processor.process requires gold labels even at inference time;
        # feed dummy "O" tags of the right length.
        labels = ["O"] * len(words)
        features = processor.process(
            list(words), labels, self.tokenizer, self.vocabulary, self.max_seq_length)
        # Wrap each feature in a batch dimension of 1 and move to the device.
        # Order matches processor.process: input_ids, attention_masks,
        # segment_ids, valid_masks, label_ids, label_masks.
        batch = [torch.tensor([f], dtype=torch.long, device=self.device) for f in features]

        with torch.no_grad():
            prediction, valid_len = self.model.predict(batch)
        if len(prediction) == 0:
            return []
        prediction = prediction[0]
        # Hoist .item() out of the loop — it was re-evaluated per iteration.
        n = valid_len[0].item()
        # Positions 0 and n-1 are skipped (presumably [CLS]/[SEP] markers —
        # confirm against the processor).
        tag = [self.vocabulary.to_word(prediction[i].item()) for i in range(1, n - 1)]
        return _bio_tag_to_spans(words, tag)

"""
Input:
Ontario is the most populous province in Canada.
Output:
[
    {
        "mention": [
            "Ontario"
        ],
        "start": 0,
        "end": 1
    },
    {
        "mention": [
            "Canada"
        ],
        "start": 7,
        "end": 8
    }
]
"""

def _bio_tag_to_spans(words, tags, ignore_labels=None):
    ignore_labels = set(ignore_labels) if ignore_labels else set()
    spans = []
    prev_bio_tag = None
    for idx, tag in enumerate(tags):
        tag = tag.lower()
        bio_tag, label = tag[:1], tag[2:]
        if bio_tag == "b":
            spans.append((label, [idx, idx]))
        elif bio_tag == "i" and prev_bio_tag in ("b", "i") and label == spans[-1][0]:
            spans[-1][1][1] = idx
        elif bio_tag == "o":  # o tag does not count
            pass
        else:
            spans.append((label, [idx, idx]))
        prev_bio_tag = bio_tag
    return [{"mention": words[span[1][0]:span[1][1] + 1], "start": span[1][0], "end": span[1][1] + 1} for span in spans
            if span[0] not in ignore_labels]
