"""
@Time: 2021/1/24 9:55 PM
@Author: jinzhuan
@File: ee_toolkit.py
@Desc: Event-extraction (EE) toolkit — a singleton wrapper around a BERT-based
       trigger/argument extraction model trained on ACE-2005-style data.
"""
from cognlp import *
from ..base_toolkit import BaseToolkit
import threading
import torch
import cognlp.io.processor.ee.ace2005 as processor


class EeToolkit(BaseToolkit):
    """Singleton event-extraction toolkit built on a BERT encoder.

    Detects event triggers and their arguments in a tokenized sentence,
    given candidate entity spans.  At most one instance exists per process
    (see ``__new__``).  NOTE(review): ``__init__`` still re-runs on every
    ``EeToolkit(...)`` call, reloading vocabularies and the model on the
    shared instance — confirm callers construct it only once.
    """

    # Guards the double-checked singleton construction in __new__.
    _instance_lock = threading.Lock()

    def __init__(
            self,
            bert_model="bert-base-cased",
            model_path=None,
            trigger_vocabulary_path=None,
            argument_vocabulary_path=None,
            device=torch.device("cuda"),
            device_ids=None,
            max_seq_length=256
    ):
        """Load vocabularies, build the Bert4EE model and load its weights.

        :param bert_model: name of the BERT encoder passed to BaseToolkit.
        :param model_path: path to the trained model weights (or None).
        :param trigger_vocabulary_path: path to the trigger-label vocabulary.
        :param argument_vocabulary_path: path to the argument-role vocabulary.
        :param device: torch device to run on.
        :param device_ids: GPU ids for the base toolkit; defaults to [0].
        :param max_seq_length: maximum sub-token sequence length.
        """
        # Fix: the original signature used device_ids=[0], a mutable
        # default shared by every call to __init__.
        if device_ids is None:
            device_ids = [0]
        super().__init__(bert_model, model_path, None, device, device_ids, max_seq_length)
        # Fix: initialize both vocabularies unconditionally so a missing
        # path shows up as an explicit None at Bert4EE construction,
        # not as an AttributeError on self.trigger_vocabulary below.
        self.trigger_vocabulary = None
        self.argument_vocabulary = None
        if trigger_vocabulary_path:
            self.trigger_vocabulary = Vocabulary.load(trigger_vocabulary_path)
        if argument_vocabulary_path:
            self.argument_vocabulary = Vocabulary.load(argument_vocabulary_path)
        self.model = Bert4EE(self.trigger_vocabulary, self.argument_vocabulary)
        self.load_model()

    def run(self, words, spans):
        """Extract events from one tokenized sentence.

        :param words: list of word tokens (without [CLS]/[SEP]).
        :param spans: candidate entity spans, each a dict with word-index
            keys "start" and "end" (end exclusive).
        :return: list of events, each
            {"trigger": {start, end, label, mention},
             "arguments": [{start, end, label, mention}, ...]};
            empty list when the model produces no predictions.
        """
        self.model.eval()
        # ACE-2005-style gold structure expected by processor.process;
        # at inference time only the candidate spans are filled in
        # (type slot None) and there are no gold events.
        gold = {
            "candidates": [(span["start"], span["end"], None) for span in spans],
            "events": {},
        }
        tokens_x, triggers_y, arguments_2d, head_indexes, _, triggers = processor.process(
            ["[CLS]"] + words + ["[SEP]"], [], gold,
            self.tokenizer,
            self.trigger_vocabulary,
            self.max_seq_length)
        with torch.no_grad():
            predictions = self.model.predict(
                [[tokens_x], [head_indexes], [triggers_y], [arguments_2d], words, triggers])

        events = []
        if not predictions:
            return events
        # predictions[0]["events"] maps (start, end, trigger_type_str)
        # -> [(start, end, argument_role_idx), ...].
        for trig, args in predictions[0]["events"].items():
            events.append({
                "trigger": {
                    "start": trig[0], "end": trig[1], "label": trig[2],
                    "mention": words[trig[0]:trig[1]],
                },
                "arguments": [
                    {"start": a[0], "end": a[1],
                     "label": self.argument_vocabulary.to_word(a[2]),
                     "mention": words[a[0]:a[1]]}
                    for a in args
                ],
            })
        return events

    def __new__(cls, *args, **kwargs):
        """Thread-safe singleton: return the one shared instance."""
        if not hasattr(EeToolkit, "_instance"):
            with EeToolkit._instance_lock:
                # Double-checked locking: re-test after acquiring the lock
                # so only the first thread constructs the instance.
                if not hasattr(EeToolkit, "_instance"):
                    EeToolkit._instance = object.__new__(cls)
        return EeToolkit._instance

"""
Input:
Golden Example: And these bozos let four armed Cubans land on our shores when they're trying to make a high terrorist alert.
Output:
[
    {
        "trigger": {
            "start": 7,
            "end": 8,
            "label": "Movement:Transport",
            "mention": [
                "land"
            ]
        },
        "arguments": [
            {
                "start": 1,
                "end": 3,
                "label": "Agent",
                "mention": [
                    "these",
                    "bozos"
                ]
            },
            {
                "start": 4,
                "end": 7,
                "label": "Artifact",
                "mention": [
                    "four",
                    "armed",
                    "Cubans"
                ]
            },
            {
                "start": 9,
                "end": 11,
                "label": "Destination",
                "mention": [
                    "our",
                    "shores"
                ]
            }
        ]
    }
]
"""


