"""
@Time: 2020/12/2 8:40 PM
@Author: jinzhuan
@File: toolkit.py
@Desc: 
"""
import torch
from transformers import BertTokenizer
from cognlp import *
from cognlp.utils.util import module2parallel, load_model, load_json, get_all_forms
from cognlp.utils.vocabulary import Vocabulary
from cognlp.utils.cognet import CognetServer
# import blink.predictor as et_predictor
import nltk
import logging
import pickle
import cognlp.io.processor.ner.conll2003 as ner_processor
import cognlp.io.processor.et as et_processor
import cognlp.io.processor.rex as re_processor
import cognlp.io.processor.fn as fn_processor

# Configure root logging once at import time so every module in this process
# shares the same timestamped format.
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)


class Pipeline:
    """End-to-end information-extraction pipeline.

    Wraps four BERT-based models — NER, entity typing (ET), relation
    extraction (RE) and frame identification (FN) — plus an external
    entity-linking predictor, and exposes one ``predict_*`` method per task.
    All model and vocabulary paths are hard-coded relative paths, so the
    process must be started from the expected working directory.
    """

    def __init__(self, device=None, device_ids=None, max_seq_length=256):
        """
        Args:
            device: torch device input tensors are created on (``None`` = CPU).
            device_ids: if truthy, each model is wrapped for data parallelism
                via ``module2parallel``.
            max_seq_length: maximum wordpiece sequence length per example.
        """
        self.tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
        self.ner_vocabulary = Vocabulary.load('../data/ner/trex/model/vocabulary.txt')
        self.et_vocabulary = Vocabulary.load('../data/et/OntoNotes/model/vocabulary.txt')
        self.re_vocabulary = Vocabulary.load('../data/re/trexfull/data/vocabulary.txt')
        self.fn_vocabulary = Vocabulary.load('../data/fn/framenet/model/vocabulary.txt')
        self.ner_model = Bert4Ner(len(self.ner_vocabulary))
        self.et_model = Bert4Et(len(self.et_vocabulary))
        self.re_model = Bert4Re(len(self.re_vocabulary))
        self.fn_model = Bert4Fn(len(self.fn_vocabulary))
        self.device = device
        self.device_ids = device_ids
        self.max_seq_length = max_seq_length
        self.blank_padding = True
        # Mapping from surface word to FrameNet lexical-unit entries; only
        # words present here are sent to the frame model (see predict_frame).
        with open("../data/fn/framenet/data/lu_key_mapping.pkl", 'rb') as file:
            self.lu_dic = pickle.load(file)
        # NOTE(review): despite the name, this dict is looked up by Wikipedia
        # URL and yields a Wikidata id (see predict_linking) — confirm intent.
        self.wikidata2wikipedia = load_json('../data/cognet/wikidata2wikipedia.json')
        self.cognet = CognetServer()

        self.ner_model = load_model(self.ner_model, '../data/ner/trex/model/2020-12-04-03:46:31-model.pkl')
        self.et_model = load_model(self.et_model, '../data/et/OntoNotes/model/2020-12-06-14:19:35-model.pkl')
        self.re_model = load_model(self.re_model, '../data/re/trexfull/model/ace2005-new/2020-12-21-15:12:38/checkpoint-15799/model.pt')
        self.fn_model = load_model(self.fn_model,
                                   '../data/fn/framenet/model/2020-12-11-15:36:31/checkpoint-5960/model.pt')
        if self.device_ids:
            self.ner_model = module2parallel(self.ner_model, self.device_ids)
            self.et_model = module2parallel(self.et_model, self.device_ids)
            self.re_model = module2parallel(self.re_model, self.device_ids)
            self.fn_model = module2parallel(self.fn_model, self.device_ids)
        # NOTE(review): `et_predictor` is undefined — its import
        # (`import blink.predictor as et_predictor`) is commented out at the
        # top of the file, so this line raises NameError until it is restored.
        self.id2url, self.et_ner_model, self.et_models = et_predictor.get_et_predictor()

    def _long_tensor(self, values):
        # Wrap a single example's feature list as a (1, ...) LongTensor on the
        # pipeline device.
        return torch.tensor([values], dtype=torch.long, device=self.device)

    def _extract_entity_spans(self, words):
        """Run the NER model over tokenized *words* and decode entity spans.

        Returns the span dicts produced by ``_bio_tag_to_spans``
        ('mention' / 'start' / 'end', with 'end' exclusive).
        """
        labels = ['O'] * len(words)  # dummy gold labels for the processor
        input_ids, attention_masks, segment_ids, valid_masks, label_ids, label_masks = \
            ner_processor.process(list(words), labels, self.tokenizer, self.ner_vocabulary, self.max_seq_length)
        features = [self._long_tensor(f) for f in
                    (input_ids, attention_masks, segment_ids, valid_masks, label_ids, label_masks)]
        with torch.no_grad():
            prediction, valid_len = self.ner_model.predict(features)
        prediction = prediction[0]
        length = valid_len[0].item()
        # Skip positions 0 and length-1 — presumably the [CLS]/[SEP]
        # bookkeeping positions added by the processor.
        tag = [self.ner_vocabulary.to_word(prediction[i].item()) for i in range(1, length - 1)]
        return _bio_tag_to_spans(words, tag)

    def predict_ner(self, sentence):
        """Return named-entity spans found in *sentence*."""
        self.ner_model.eval()
        words = nltk.word_tokenize(sentence)
        return {'data': self._extract_entity_spans(words)}

    def predict_type(self, sentence):
        """Detect entity mentions and predict fine-grained types for each."""
        self.ner_model.eval()
        self.et_model.eval()
        words = nltk.word_tokenize(sentence)
        spans = self._extract_entity_spans(words)
        unk_labels = ['<unk>'] * len(words)  # dummy gold labels at inference
        entities = []
        for span in spans:
            input_ids, attention_mask, start_pos, end_pos, label_ids = \
                et_processor.process(list(words), span['start'], span['end'], unk_labels,
                                     self.et_vocabulary, self.tokenizer, self.max_seq_length)
            features = [self._long_tensor(f) for f in
                        (input_ids, attention_mask, start_pos, end_pos, label_ids)]
            with torch.no_grad():
                output = self.et_model.predict(features)
            output = output[0]
            # Multi-label head: every index whose value is 1 is a predicted type.
            types = [self.et_vocabulary.to_word(j) for j in range(len(output)) if output[j] == 1]
            entities.append({'mention': words[span['start']:span['end']], 'start': span['start'],
                             'end': span['end'], 'types': types})
        return {'data': entities}

    def predict_linking(self, sentence):
        """Link entity mentions in *sentence* to Wikipedia and CogNet entries.

        NOTE(review): depends on `et_predictor`, whose import is commented out
        at the top of the file — see __init__.
        """
        url = 'https://en.wikipedia.org/wiki/'
        links = et_predictor.run(10, *self.et_models, text=sentence, id2url=self.id2url, ner_model=self.et_ner_model)
        for link in links:
            forms = get_all_forms(link['title'])
            cognet_link = 'unk'
            # If several surface forms resolve, the last matching one wins.
            for form in forms:
                wikipedia = url + form
                if wikipedia in self.wikidata2wikipedia:
                    wikidata = self.wikidata2wikipedia[wikipedia]
                    cognet_link = self.cognet.query("<" + wikidata + ">")
            link['cognet_link'] = cognet_link
        return {'data': links}

    def predict_relation(self, sentence):
        """Predict relations between every nearby pair of detected entities."""
        self.ner_model.eval()
        self.et_model.eval()
        self.re_model.eval()
        words = nltk.word_tokenize(sentence)
        spans = self._extract_entity_spans(words)
        relations = []
        for head in spans:
            for tail in spans:
                # Only ordered pairs of distinct entities at most 5 tokens apart.
                if head == tail or abs(head['end'] - tail['start']) > 5:
                    continue
                indexed_tokens, pos1, pos2, att_mask = \
                    re_processor.process(words, [head['start'], head['end']],
                                         [tail['start'], tail['end']], self.tokenizer,
                                         self.max_seq_length, self.blank_padding)
                input_ids = self._long_tensor(indexed_tokens)
                attention_mask = self._long_tensor(att_mask)
                pos1_tensor = self._long_tensor(pos1)
                pos2_tensor = self._long_tensor(pos2)
                label_ids = self._long_tensor([])  # no gold labels at inference
                with torch.no_grad():
                    output = self.re_model.predict(
                        [input_ids, pos1_tensor, pos2_tensor, attention_mask, label_ids])
                relation = self.re_vocabulary.to_word(output[0].item())
                relations.append({'head_entity': head, 'tail_entity': tail, 'relations': [relation]})
        return {'data': relations}

    def predict_frame(self, sentence):
        """Predict a FrameNet frame for every word that is a known lexical unit."""
        self.fn_model.eval()
        words = nltk.word_tokenize(sentence)
        predictions = []
        for i, word in enumerate(words):
            if word not in self.lu_dic:
                continue
            # Position is offset by one — presumably to account for the leading
            # [CLS] token; confirm against fn_processor.process.
            input_ids, attention_mask, word_pos, label_ids = \
                fn_processor.process(list(words), '<unk>', i + 1, self.tokenizer,
                                     self.fn_vocabulary, self.max_seq_length)
            features = [self._long_tensor(f) for f in
                        (input_ids, attention_mask, word_pos, label_ids)]
            with torch.no_grad():
                output = self.fn_model.predict(features)
            predictions.append({'word': word, 'position': i,
                                'frame': self.fn_vocabulary.to_word(output.item())})
        return {'data': predictions}

    def predict(self, sentence):
        """Run all five tasks on *sentence*; returns a 5-tuple of result dicts."""
        return self.predict_ner(sentence), self.predict_type(sentence), self.predict_linking(
            sentence), self.predict_relation(sentence), self.predict_frame(sentence)


def _bio_tag_to_spans(words, tags, ignore_labels=None):
    ignore_labels = set(ignore_labels) if ignore_labels else set()
    spans = []
    prev_bio_tag = None
    for idx, tag in enumerate(tags):
        tag = tag.lower()
        bio_tag, label = tag[:1], tag[2:]
        if bio_tag == 'b':
            spans.append((label, [idx, idx]))
        elif bio_tag == 'i' and prev_bio_tag in ('b', 'i') and label == spans[-1][0]:
            spans[-1][1][1] = idx
        elif bio_tag == 'o':  # o tag does not count
            pass
        else:
            spans.append((label, [idx, idx]))
        prev_bio_tag = bio_tag
    return [{'mention': words[span[1][0]:span[1][1] + 1], 'start': span[1][0], 'end': span[1][1] + 1} for span in spans
            if span[0] not in ignore_labels]
