"""
@Time: 2021/1/25 下午 4:56
@Author: jinzhuan
@File: ner_ace2005_toolkit.py
@Desc: 
"""
from cognlp import *
from ..base_toolkit import BaseToolkit
import threading
import torch
import cognlp.io.processor.ner.ace2005 as processor


class NerAce2005Toolkit(BaseToolkit):
    """Singleton toolkit for named-entity recognition on the ACE2005 label set.

    Wraps a ``BertSoftmax`` tagger loaded from ``model_path`` and exposes
    :meth:`run`, which maps a token sequence to a list of entity spans.
    """

    # Lock guarding lazy creation of the singleton instance.
    _instance_lock = threading.Lock()

    def __new__(cls, *args, **kwargs):
        # Double-checked locking: only the first caller pays for the lock
        # and allocation; later callers get the cached instance.
        # NOTE(review): __init__ still re-runs on every construction call,
        # reloading the model each time — confirm this is intended.
        if not hasattr(NerAce2005Toolkit, "_instance"):
            with NerAce2005Toolkit._instance_lock:
                if not hasattr(NerAce2005Toolkit, "_instance"):
                    NerAce2005Toolkit._instance = object.__new__(cls)
        return NerAce2005Toolkit._instance

    def __init__(
            self,
            bert_model="bert-base-cased",
            model_path=None,
            vocabulary_path=None,
            device=torch.device("cuda"),
            device_ids=None,
            max_seq_length=256
    ):
        """Build the toolkit and load the tagger weights.

        Args:
            bert_model: name of the underlying BERT checkpoint.
            model_path: path to the fine-tuned model weights.
            vocabulary_path: path to the label vocabulary.
            device: torch device the model and inputs are placed on.
            device_ids: GPU ids; defaults to ``[0]`` when omitted.
            max_seq_length: maximum subword sequence length fed to BERT.
        """
        # Avoid a mutable default argument ([0] as a literal default would be
        # shared across calls); None is the sentinel for the same behavior.
        if device_ids is None:
            device_ids = [0]
        super().__init__(bert_model, model_path, vocabulary_path, device, device_ids, max_seq_length)
        self.model = BertSoftmax(self.vocabulary)
        self.load_model()

    def run(self, words):
        """Tag *words* and return the entity spans found in them.

        Args:
            words: sequence of tokens to tag.

        Returns:
            List of span dicts (``mention``/``start``/``end``/``type``);
            empty list when the model yields no prediction.
        """
        self.model.eval()
        # Dummy all-"O" labels: the processor requires a label sequence even
        # at inference time.
        labels = ["O"] * len(words)
        input_id, attention_mask, segment_id, head_index, label_id, label_mask = \
            processor.process(list(words), labels, self.tokenizer, self.vocabulary, self.max_seq_length)

        # Wrap each feature in a batch dimension of 1 and move it to the
        # model's device.
        input_id = torch.tensor([input_id], dtype=torch.long, device=self.device)
        attention_mask = torch.tensor([attention_mask], dtype=torch.long, device=self.device)
        segment_id = torch.tensor([segment_id], dtype=torch.long, device=self.device)
        head_index = torch.tensor([head_index], dtype=torch.long, device=self.device)
        label_id = torch.tensor([label_id], dtype=torch.long, device=self.device)
        label_mask = torch.tensor([label_mask], dtype=torch.long, device=self.device)

        with torch.no_grad():
            prediction, valid_len = self.model.predict(
                [input_id, attention_mask, segment_id, head_index, label_id, label_mask])
        if len(prediction) == 0:
            return []
        # Single-example batch: take the first (only) row.
        prediction = prediction[0]
        valid_len = valid_len[0]
        tag = []
        # Convert predicted label ids back to BIO tag strings, up to the
        # valid (unpadded) length.
        for i in range(valid_len.item()):
            tag.append(self.vocabulary.to_word(prediction[i].item()))
        spans = _bio_tag_to_spans(words, tag)
        return spans

"""
Input: 
And these bozos let four armed Cubans land on our shores when they're trying to make a high terrorist alert.
Output:
[
    {
        "mention": [
            "these",
            "bozos"
        ],
        "start": 1,
        "end": 3,
        "type": "per:group"
    },
    {
        "mention": [
            "four",
            "armed",
            "Cubans"
        ],
        "start": 4,
        "end": 7,
        "type": "per:group"
    },
    {
        "mention": [
            "our",
            "shores"
        ],
        "start": 9,
        "end": 11,
        "type": "loc:region-general"
    },
    {
        "mention": [
            "they"
        ],
        "start": 12,
        "end": 13,
        "type": "per:group"
    },
    {
        "mention": [
            "terrorist"
        ],
        "start": 19,
        "end": 20,
        "type": "per:indeterminate"
    }
]
"""


def _bio_tag_to_spans(words, tags, ignore_labels=None):
    ignore_labels = set(ignore_labels) if ignore_labels else set()
    spans = []
    prev_bio_tag = None
    for idx, tag in enumerate(tags):
        tag = tag.lower()
        bio_tag, label = tag[:1], tag[2:]
        if bio_tag == "b":
            spans.append((label, [idx, idx]))
        elif bio_tag == "i" and prev_bio_tag in ("b", "i") and label == spans[-1][0]:
            spans[-1][1][1] = idx
        elif bio_tag == "o":  # o tag does not count
            pass
        else:
            spans.append((label, [idx, idx]))
        prev_bio_tag = bio_tag
    return [{"mention": words[span[1][0]:span[1][1] + 1], "start": span[1][0], "end": span[1][1] + 1, "type": span[0]} for span in spans
            if span[0] not in ignore_labels]
