"""
@Time: 2021/1/18 上午 10:39
@Author: jinzhuan
@File: re_toolkit.py
@Desc: 
"""
from cognlp import *
from ..base_toolkit import BaseToolkit
import threading
import torch
import cognlp.io.processor.re.trex as processor


class ReToolkit(BaseToolkit):
    """Singleton toolkit for relation extraction (RE) with a BERT-based model.

    Instances are shared process-wide via a double-checked-locking singleton
    (see ``__new__``).  NOTE(review): ``__init__`` still runs on every
    construction of the shared instance, reloading the model each time —
    presumably intentional, but confirm against callers.
    """

    # Lock guarding lazy creation of the shared singleton instance.
    _instance_lock = threading.Lock()

    def __new__(cls, *args, **kwargs):
        # Double-checked locking: cheap unlocked test first, then re-check
        # under the lock so only one thread ever creates the instance.
        if not hasattr(ReToolkit, "_instance"):
            with ReToolkit._instance_lock:
                if not hasattr(ReToolkit, "_instance"):
                    ReToolkit._instance = object.__new__(cls)
        return ReToolkit._instance

    def __init__(
            self,
            bert_model="bert-base-cased",
            model_path=None,
            vocabulary_path=None,
            device=torch.device("cuda"),
            device_ids=None,
            max_seq_length=256
    ):
        """Build the RE model and load its weights.

        Args:
            bert_model: Hugging Face model name for the BERT encoder.
            model_path: Path to trained model weights (loaded by the base class).
            vocabulary_path: Path to the label vocabulary.
            device: Torch device to run inference on.
            device_ids: GPU device ids; defaults to ``[0]``.  Uses a ``None``
                sentinel instead of a mutable ``[0]`` default so the list is
                not shared across calls.
            max_seq_length: Maximum tokenized sequence length.
        """
        if device_ids is None:
            device_ids = [0]
        super().__init__(bert_model, model_path, vocabulary_path, device, device_ids, max_seq_length)
        self.model = Bert4Re(self.vocabulary)
        self.load_model()

    def run(self, words, spans):
        """Predict relations among the given entity spans of one sentence.

        Args:
            words: List of tokens for a single sentence.
            spans: List of dicts with ``"start"``, ``"end"`` and ``"mention"``
                keys describing candidate entity spans (token offsets).
                Each dict is mutated in place to add a ``"position"`` key.

        Returns:
            List of dicts, each with ``"head_entity"``, ``"tail_entity"``
            and ``"relations"`` (a one-element list of relation label names).
        """
        self.model.eval()
        # The processor expects each span to carry its [start, end] position.
        for span in spans:
            span["position"] = [span["start"], span["end"]]
        (input_ids, attention_mask, head_indexes, entity_mentions,
         relation_mentions, entity_mentions_mask, relation_mentions_mask) = \
            processor.process(words, spans, [], self.tokenizer, self.vocabulary, self.max_seq_length)

        def _batch_of_one(values):
            # Wrap a single example into a batch of size 1 on the target device.
            return torch.tensor([values], dtype=torch.long, device=self.device)

        input_ids = _batch_of_one(input_ids)
        attention_mask = _batch_of_one(attention_mask)
        head_indexes = _batch_of_one(head_indexes)
        entity_mentions = _batch_of_one(entity_mentions)
        relation_mentions = _batch_of_one(relation_mentions)
        entity_mentions_mask = _batch_of_one(entity_mentions_mask)
        relation_mentions_mask = _batch_of_one(relation_mentions_mask)

        with torch.no_grad():
            outputs = self.model.predict(
                [input_ids, attention_mask, head_indexes, entity_mentions, relation_mentions,
                 entity_mentions_mask, relation_mentions_mask])

        relations = []
        for output in outputs:
            # Fix: initialize per output.  Previously these names leaked from
            # the prior iteration (stale mention) or raised NameError when no
            # span matched the predicted offsets.
            head_entity_mention = None
            tail_entity_mention = None
            # output layout (from indexing below): output[1:3] = head span,
            # output[3:5] = tail span, output[-1] = relation label id.
            for span in spans:
                if span["start"] == output[1] and span["end"] == output[2]:
                    head_entity_mention = span["mention"]
                if span["start"] == output[3] and span["end"] == output[4]:
                    tail_entity_mention = span["mention"]
            head_entity = {"start": output[1], "end": output[2], "mention": head_entity_mention}
            tail_entity = {"start": output[3], "end": output[4], "mention": tail_entity_mention}
            relation = {"head_entity": head_entity,
                        "tail_entity": tail_entity,
                        "relations": [self.vocabulary.to_word(output[-1])]
                        }
            relations.append(relation)
        return relations

"""
Input:
Ontario is the most populous province in Canada.
Output:
[
    {
        "head_entity": {
            "start": 0,
            "end": 1,
            "mention": [
                "Ontario"
            ]
        },
        "tail_entity": {
            "start": 7,
            "end": 8,
            "mention": [
                "Canada"
            ]
        },
        "relations": [
            "http://www.wikidata.org/prop/direct/P131"
        ]
    },
    {
        "head_entity": {
            "start": 7,
            "end": 8,
            "mention": [
                "Canada"
            ]
        },
        "tail_entity": {
            "start": 0,
            "end": 1,
            "mention": [
                "Ontario"
            ]
        },
        "relations": [
            "http://www.wikidata.org/prop/direct/P150"
        ]
    }
]
"""