"""
@Time: 2021/2/5 下午 7:48
@Author: jinzhuan
@File: deepke.py
@Desc: 
"""
from ..processor import Processor
from cognlp import *


class DeepkeRelationProcessor(Processor):
    """Turn DeepKE-style relation samples into padded BERT input tensors.

    Each dataset item is a ``(sentence, label, head, tail)`` tuple; the
    processor locates the head/tail entity spans in the sentence, maps the
    relation label to an id, and delegates tokenization/padding to the
    module-level ``process`` helper.
    """

    def __init__(self, label_list=None, path=None, padding=None, unknown=None,
                 bert_model='hfl/chinese-roberta-wwm-ext',
                 max_length=256):
        super().__init__(label_list, path, padding, unknown, bert_model, max_length)

    def process(self, dataset):
        """Process every sample in *dataset* into a populated ``DataTable``.

        Samples whose head or tail entity cannot be found in the sentence,
        or whose tokenization exceeds ``self.max_length``, are skipped.
        """
        datable = DataTable()

        for i in range(len(dataset)):
            sentence, label, head, tail = dataset[i]
            # Idiomatic copy of the sentence's characters/words
            # (replaces a manual append loop).
            words = list(sentence)
            label_id = self.vocabulary.to_index(label)
            # Character offsets of the entity spans within the raw sentence.
            head_pos = get_position(sentence, head)
            tail_pos = get_position(sentence, tail)
            # Skip samples where either entity is absent from the sentence.
            if head_pos is None or tail_pos is None:
                continue
            relation_mentions = [[head_pos[0], head_pos[1], tail_pos[0], tail_pos[1], label_id]]
            input_ids, attention_mask, segment_ids, head_indexes, relation_mentions, relation_mentions_mask = \
                process(words, relation_mentions, self.tokenizer, self.max_length)
            # process() pads to max_length; only over-long sequences can still
            # exceed it, and those are dropped rather than truncated.
            if len(input_ids) <= self.max_length and len(head_indexes) <= self.max_length and len(relation_mentions) <= self.max_length:
                datable('input_ids', input_ids)
                datable('attention_mask', attention_mask)
                datable('segment_ids', segment_ids)
                datable('head_indexes', head_indexes)
                datable('relation_mentions', relation_mentions)
                datable('relation_mentions_mask', relation_mentions_mask)
        return datable


def get_position(text, word):
    """Return the [start, end) character span of *word* in *text*.

    Returns ``None`` when *word* does not occur in *text*.
    """
    try:
        start = text.index(word)
    except ValueError:
        return None
    return [start, start + len(word)]


def process(words, relation_mentions, tokenizer, max_seq_length):
    """Tokenize *words* and pad every model input to *max_seq_length*.

    Args:
        words: list of word strings (special tokens are added here).
        relation_mentions: list of
            ``[head_start, head_end, tail_start, tail_end, label_id]`` entries.
        tokenizer: BERT-style tokenizer exposing ``tokenize`` and
            ``convert_tokens_to_ids``.
        max_seq_length: target padded length of every returned sequence.

    Returns:
        Tuple ``(input_ids, attention_mask, segment_ids, head_indexes,
        relation_mentions, relation_mentions_mask)``, each padded to
        ``max_seq_length``. Sequences already longer than ``max_seq_length``
        are returned as-is (not truncated); the caller filters them out.
    """
    words = ['[CLS]'] + words + ['[SEP]']
    input_ids, is_heads = [], []
    for word in words:
        # Whitespace-only "words" would tokenize to nothing; map them to [UNK].
        if word in (' ', '', '\t'):
            word = '[UNK]'
        is_special = word in ('[CLS]', '[SEP]')
        token = [word] if is_special else tokenizer.tokenize(word)
        input_ids.extend(tokenizer.convert_tokens_to_ids(token))
        # Mark the first sub-token of each real word; special tokens get no head.
        is_heads.extend([0] if is_special else [1] + [0] * (len(token) - 1))

    # Positions of the first sub-token of every original word.
    head_indexes = [i for i, is_head in enumerate(is_heads) if is_head]

    attention_mask = [1] * len(input_ids)
    # NOTE(review): segment_ids are 1 for real tokens; standard BERT uses
    # token_type 0 for a single sequence — confirm the model expects 1 here.
    segment_ids = [1] * len(input_ids)
    relation_mentions_mask = [1] * len(relation_mentions)

    def _pad(seq, fill=0):
        # Right-pad to max_seq_length; a no-op for sequences at or over it.
        return seq + [fill] * (max_seq_length - len(seq))

    input_ids = _pad(input_ids)
    attention_mask = _pad(attention_mask)
    segment_ids = _pad(segment_ids)
    # NOTE(review): pad value 0 for head_indexes points at [CLS]; padded
    # slots are presumably ignored downstream — confirm.
    head_indexes = _pad(head_indexes)
    # Build a NEW list here: the original `+=` mutated the caller's
    # relation_mentions argument in place (side-effect bug). Fresh [-1]*5
    # rows are created per slot so padding entries never share one list.
    relation_mentions = relation_mentions + [
        [-1, -1, -1, -1, -1] for _ in range(max_seq_length - len(relation_mentions))
    ]
    relation_mentions_mask = _pad(relation_mentions_mask)

    return input_ids, attention_mask, segment_ids, head_indexes, relation_mentions, relation_mentions_mask
