"""
@Time: 2021/1/12 下午 8:29
@Author: jinzhuan
@File: et_toolkit.py
@Desc: 
"""
from cognlp import *
from ..base_toolkit import BaseToolkit
import threading
import torch
import cognlp.io.processor.et.ontonotes as processor


class EtToolkit(BaseToolkit):
    """Singleton toolkit for entity typing (ET).

    Wraps a BERT-based entity-typing model (``Bert4Et``) behind the shared
    ``BaseToolkit`` loading machinery. Instances are process-wide singletons:
    repeated construction returns the same object and the model is only
    built/loaded once.
    """

    # Lock used for double-checked-locking singleton construction in __new__.
    _instance_lock = threading.Lock()

    def __new__(cls, *args, **kwargs):
        # Double-checked locking: fast path avoids the lock once the
        # singleton exists; the inner check guards against a race between
        # two threads that both passed the outer check.
        if not hasattr(EtToolkit, "_instance"):
            with EtToolkit._instance_lock:
                if not hasattr(EtToolkit, "_instance"):
                    EtToolkit._instance = object.__new__(cls)
        return EtToolkit._instance

    def __init__(
            self,
            bert_model="bert-base-cased",
            model_path=None,
            vocabulary_path=None,
            device=torch.device("cuda"),
            device_ids=None,
            max_seq_length=256
    ):
        """Build the ET model and load its weights (runs once per process).

        :param bert_model: HuggingFace model name for the BERT encoder.
        :param model_path: path to the trained model checkpoint.
        :param vocabulary_path: path to the type-label vocabulary.
        :param device: torch device to run inference on.
        :param device_ids: CUDA device ids; defaults to ``[0]``.
        :param max_seq_length: maximum tokenized sequence length.
        """
        # Since __new__ always returns the cached singleton, Python re-runs
        # __init__ on every EtToolkit(...) call. Guard so the (expensive)
        # model construction and checkpoint load happen only once.
        if getattr(self, "_initialized", False):
            return
        # Avoid the mutable-default-argument pitfall for device_ids.
        if device_ids is None:
            device_ids = [0]
        super().__init__(bert_model, model_path, vocabulary_path, device, device_ids, max_seq_length)
        self.model = Bert4Et(len(self.vocabulary))
        self.load_model()
        self._initialized = True

    def run(self, words, spans):
        """Predict entity types for each mention span in a sentence.

        :param words: sequence of tokens for one sentence.
        :param spans: list of dicts with ``"start"``/``"end"`` token indices
            delimiting each mention (end exclusive, per the slicing below).
        :return: list of dicts with keys ``mention``, ``start``, ``end`` and
            ``types`` (the predicted type labels), one per input span.
        """
        self.model.eval()
        # Placeholder labels: inference does not know gold types, but the
        # processor's interface still expects a label sequence.
        labels = ["<unk>"] * len(words)
        entities = []
        for span in spans:
            start, end = span["start"], span["end"]
            input_ids, attention_mask, start_pos, end_pos, label_ids = \
                processor.process(list(words), start, end, labels, self.vocabulary,
                                  self.tokenizer,
                                  self.max_seq_length)
            # Wrap each feature in a batch dimension of 1 and move to device.
            input_ids = torch.tensor([input_ids], dtype=torch.long, device=self.device)
            attention_mask = torch.tensor([attention_mask], dtype=torch.long, device=self.device)
            start_pos = torch.tensor([start_pos], dtype=torch.long, device=self.device)
            end_pos = torch.tensor([end_pos], dtype=torch.long, device=self.device)
            label_ids = torch.tensor([label_ids], dtype=torch.long, device=self.device)
            with torch.no_grad():
                output = self.model.predict([input_ids, attention_mask, start_pos, end_pos, label_ids])
            # NOTE(review): an empty prediction for ANY span aborts the whole
            # call and discards entities already collected. Preserved as-is
            # for backward compatibility — confirm whether `continue` was
            # intended instead.
            if len(output) == 0:
                return []
            # Multi-label output: index j is predicted iff output[0][j] == 1.
            prediction = [self.vocabulary.to_word(j)
                          for j, flag in enumerate(output[0]) if flag == 1]
            entities.append({"mention": words[start:end], "start": start,
                             "end": end, "types": prediction})
        return entities
