"""
@Time: 2020/12/29 6:33 PM
@Author: jinzhuan
@File: ace2005.py
@Desc: 
"""
from ..processor import Processor
from cognlp import *
from tqdm import tqdm


class ACE2005RelationProcessor(Processor):
    """
    Processor for the ACE2005 relation-extraction dataset.

    Preprocessing follows https://github.com/Clearailhc/ACE2005-toolkit:
    each sample's words are sub-tokenized and its entity/relation mentions
    are mapped to sub-token spans, then padded to ``max_length``.
    """

    def __init__(self, label_list=None, path=None, padding=None, unknown='<unk>', bert_model='bert-base-cased',
                 max_length=256):
        # All configuration is handled by the shared Processor base class.
        super().__init__(label_list, path, padding, unknown, bert_model, max_length)

    def process(self, dataset):
        """Featurize every sample in *dataset* and collect the columns into a DataTable."""
        datable = DataTable()
        # Column names paired positionally with the tuple returned by the
        # module-level `process` helper.
        columns = ('input_ids', 'attention_mask', 'segment_ids',
                   'entity_mentions', 'relation_mentions',
                   'entity_mentions_mask', 'relation_mentions_mask')

        for sample in tqdm(dataset, desc='Processing Data'):
            features = process(sample['words'],
                               sample['entity_mentions'],
                               sample['relation_mentions'],
                               self.tokenizer,
                               self.vocabulary,
                               self.max_length)
            for column, value in zip(columns, features):
                datable(column, value)

        return datable


def process(words, raw_entity_mentions, raw_relation_mentions, tokenizer, vocabulary, max_seq_length):
    """Convert one ACE2005 sentence into fixed-length model features.

    The sentence is wrapped in [CLS]/[SEP], sub-tokenized, and every
    entity/relation mention's word-level span is re-indexed to sub-token
    offsets.  All outputs are clipped and padded to ``max_seq_length``.

    :param words: list of word strings for one sentence.
    :param raw_entity_mentions: dicts each carrying a word-level 'position' pair.
    :param raw_relation_mentions: dicts with 'relation-type' and exactly-two
        'arguments' (mentions with any other argument count are skipped).
    :param tokenizer: provides ``tokenize`` and ``convert_tokens_to_ids``.
    :param vocabulary: maps a relation-type string to an integer index.
    :param max_seq_length: target length for every returned sequence.
    :return: (input_id, attention_mask, segment_id, entity_mentions,
              relation_mentions, entity_mentions_mask, relation_mentions_mask)
    """
    tokens = []
    # offsets[i] = index of the first sub-token of the i-th wrapped word;
    # the final entry is the total sub-token count.
    offsets = [0]
    for word in ['[CLS]'] + words + ['[SEP]']:
        pieces = tokenizer.tokenize(word)
        offsets.append(offsets[-1] + len(pieces))
        tokens.extend(pieces)

    # Word position p maps to offsets[p + 1] because of the [CLS] shift;
    # the span end uses offsets[p + 2] (one past the last word's sub-tokens).
    entity_mentions = sorted(
        [offsets[mention['position'][0] + 1], offsets[mention['position'][1] + 2]]
        for mention in raw_entity_mentions
    )

    relation_mentions = []
    for rel in raw_relation_mentions:
        args = rel['arguments']
        if len(args) != 2:
            # Only binary relations are kept.
            continue
        span = []
        for arg in args:
            start, end = arg['position'][0], arg['position'][1]
            span.extend([offsets[start + 1], offsets[end + 2]])
        span.append(vocabulary.to_index(rel['relation-type']))
        relation_mentions.append(span)
    relation_mentions.sort()

    input_id = tokenizer.convert_tokens_to_ids(tokens)
    attention_mask = [1] * len(input_id)
    segment_id = [0] * len(input_id)
    entity_mentions_mask = [1] * len(entity_mentions)
    relation_mentions_mask = [1] * len(relation_mentions)

    def _clip_pad(seq, make_pad):
        # Truncate to max_seq_length, then pad with freshly-built filler
        # values (a factory keeps padded rows as distinct list objects).
        clipped = seq[:max_seq_length]
        return clipped + [make_pad() for _ in range(max_seq_length - len(clipped))]

    return (
        _clip_pad(input_id, lambda: 0),
        _clip_pad(attention_mask, lambda: 0),
        _clip_pad(segment_id, lambda: 0),
        _clip_pad(entity_mentions, lambda: [-1, -1]),
        _clip_pad(relation_mentions, lambda: [-1, -1, -1, -1, -1]),
        _clip_pad(entity_mentions_mask, lambda: 0),
        _clip_pad(relation_mentions_mask, lambda: 0),
    )
