"""
@Time: 2021/1/25 下午 12:45
@Author: jinzhuan
@File: tokenize_cn_toolkit.py
@Desc: 
"""
from cognlp import *
import torch
import cognlp.io.processor.ws.msra as processor
from ..base_toolkit import BaseToolkit
import threading


class TokenizeCNToolkit(BaseToolkit):
    """Thread-safe singleton toolkit for Chinese word segmentation.

    Wraps a BERT-based sequence-labeling model (``Bert4WS``) and decodes its
    BMES tag predictions into word spans.
    """

    _instance_lock = threading.Lock()

    def __new__(cls, *args, **kwargs):
        # Double-checked locking: only one instance is ever created, even when
        # several threads construct the toolkit concurrently.
        if not hasattr(TokenizeCNToolkit, "_instance"):
            with TokenizeCNToolkit._instance_lock:
                if not hasattr(TokenizeCNToolkit, "_instance"):
                    TokenizeCNToolkit._instance = object.__new__(cls)
        return TokenizeCNToolkit._instance

    def __init__(
            self,
            bert_model="hfl/chinese-roberta-wwm-ext",
            model_path=None,
            vocabulary_path=None,
            device=torch.device("cuda"),
            device_ids=None,
            max_seq_length=256
    ):
        """Build (once) the tokenizer toolkit and load the segmentation model.

        :param bert_model: pretrained BERT model name or path.
        :param model_path: path to the fine-tuned model weights (passed to base).
        :param vocabulary_path: path to the label vocabulary (passed to base).
        :param device: torch device the model runs on.
        :param device_ids: GPU ids; defaults to [0] when None (avoids the
            mutable-default-argument pitfall of the previous ``device_ids=[0]``).
        :param max_seq_length: maximum input sequence length.
        """
        # Because __new__ always returns the same singleton, __init__ would
        # otherwise re-run (and reload the model) on every TokenizeCNToolkit()
        # call; skip re-initialization after the first successful one.
        if getattr(self, "_initialized", False):
            return
        if device_ids is None:
            device_ids = [0]
        super().__init__(bert_model, model_path, vocabulary_path, device, device_ids, max_seq_length)
        self.model = Bert4WS(self.vocabulary)
        self.load_model()
        self._initialized = True

    def run(self, sentence):
        """Segment *sentence* and return a list of (word, start, end) spans.

        :param sentence: the input string; each character becomes one token.
        :return: list of ``(text, start, end)`` tuples (end is exclusive),
            or ``[]`` when the model produces no prediction.
        """
        self.model.eval()
        words = list(sentence)  # one character per token
        labels = ["S"] * len(words)  # placeholder labels; real tags come from the model
        input_id, attention_mask, segment_id, label_id, label_mask = processor.process(
            words, labels, self.tokenizer, self.vocabulary, self.max_seq_length)
        # Wrap each feature as a batch of size 1 on the configured device.
        input_id = torch.tensor([input_id], dtype=torch.long, device=self.device)
        attention_mask = torch.tensor([attention_mask], dtype=torch.long, device=self.device)
        segment_id = torch.tensor([segment_id], dtype=torch.long, device=self.device)
        label_id = torch.tensor([label_id], dtype=torch.long, device=self.device)
        label_mask = torch.tensor([label_mask], dtype=torch.long, device=self.device)

        with torch.no_grad():
            prediction, valid_len = self.model.predict(
                [input_id, attention_mask, segment_id, label_id, label_mask])
        if len(prediction) == 0:
            return []
        prediction = prediction[0]
        length = valid_len[0].item()  # hoisted: was re-evaluated every loop iteration
        # Skip the first and last valid positions (presumably the [CLS]/[SEP]
        # special tokens added by the processor — TODO confirm).
        tags = [self.vocabulary.to_word(prediction[i].item())
                for i in range(1, length - 1)]
        return _bmes_tag_to_spans(sentence, tags)


def _bmes_tag_to_spans(sentence, tags):
    spans = []
    prev_bmes_tag = None
    for idx, tag in enumerate(tags):
        tag = tag.lower()
        bmes_tag, label = tag[:1], tag[2:]
        if bmes_tag in ("b", "s"):
            spans.append((label, [idx, idx]))
        elif bmes_tag in ("m", "e") and prev_bmes_tag in ("b", "m") and label == spans[-1][0]:
            spans[-1][1][1] = idx
        else:
            spans.append((label, [idx, idx]))
        prev_bmes_tag = bmes_tag
    return [(sentence[span[1][0]:span[1][1] + 1], span[1][0], span[1][1] + 1) for span in spans]
