"""
@Time: 2020/12/9 7:51 PM
@Author: jinzhuan
@File: fn.py
@Desc: 
"""
from .processor import Processor
from transformers import BertTokenizer
from tqdm import tqdm


class FrameNetProcessor(Processor):
    """Processor for a FrameNet frame-identification dataset.

    Tokenizes sentences with a cased BERT tokenizer and converts each
    (sentence, label, target-word position) triple into padded,
    model-ready features.
    """

    def __init__(self, label_list=None, path=None):
        """
        Args:
            label_list: optional label list forwarded to the base Processor.
            path: optional data path forwarded to the base Processor.
        """
        super().__init__(label_list, path)
        # Cased model: FrameNet target words are case-sensitive English tokens.
        self.tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
        self.max_length = 256

    def process(self, dataset):
        """Convert a raw dataset dict into padded BERT input features.

        Args:
            dataset: dict holding parallel lists under the keys
                'sentences', 'labels' and 'positions'.

        Returns:
            dict with per-example lists: 'input_ids', 'attention_mask',
            'word_pos' ([start, end) in sub-token space) and 'label_ids'.
        """
        sentences = dataset['sentences']
        labels = dataset['labels']
        positions = dataset['positions']
        all_data = {'input_ids': [], 'attention_mask': [], 'word_pos': [], 'label_ids': []}
        # zip() has no __len__, so without total= tqdm cannot show a
        # percentage/ETA; supply the known sentence count explicitly.
        for sentence, label, position in tqdm(zip(sentences, labels, positions),
                                              total=len(sentences),
                                              desc='Processing Data'):
            input_ids, attention_mask, word_pos, label_ids \
                = process(sentence, label, position, self.tokenizer, self.vocabulary, self.max_length)
            all_data['input_ids'].append(input_ids)
            all_data['attention_mask'].append(attention_mask)
            all_data['word_pos'].append(word_pos)
            all_data['label_ids'].append(label_ids)
        return all_data

    def get_labels(self):
        # self.label_set is presumably populated by the base Processor
        # (not visible here) -- TODO confirm against processor.py.
        """Return the collected label set as a list."""
        return list(self.label_set)


def process(sentence, label, position, tokenizer, vocabulary, max_length):
    """Tokenize one sentence and build padded BERT features for it.

    Args:
        sentence: list of word strings. Not modified; a wrapped copy is used.
        label: label value looked up through `vocabulary.to_index`.
        position: index (int or int-like string) of the target word.
        tokenizer: object providing `tokenize` and `convert_tokens_to_ids`.
        vocabulary: object providing `to_index(label)`.
        max_length: length to which ids and attention mask are padded
            (and now also truncated).

    Returns:
        Tuple `(input_ids, attention_mask, word_pos, label_ids)` where
        `word_pos` is `[start, end)` of the target word in sub-token space
        (empty list if `position` never matches).
    """
    # Build a wrapped copy instead of mutating the caller's list: the original
    # insert/append corrupted reused data by double-wrapping with [CLS]/[SEP].
    wrapped = ['[CLS]'] + list(sentence) + ['[SEP]']

    tokens = []
    word_pos = []
    target = int(position)
    for i, word in enumerate(wrapped):
        sub_tokens = tokenizer.tokenize(word)
        # NOTE(review): [CLS] at index 0 shifts every original word right by
        # one, yet the target is matched at the unshifted index -- confirm the
        # dataset's positions already account for the [CLS] offset.
        if i == target:
            word_pos = [len(tokens), len(tokens) + len(sub_tokens)]
        tokens.extend(sub_tokens)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    attention_mask = [1] * len(input_ids)
    label_ids = vocabulary.to_index(label)

    # Truncate over-long sequences: the original only padded, and padding with
    # a negative count is a silent no-op, so sequences longer than max_length
    # leaked downstream with inconsistent lengths. Then right-pad with zeros.
    input_ids = input_ids[:max_length]
    attention_mask = attention_mask[:max_length]
    input_ids += [0] * (max_length - len(input_ids))
    attention_mask += [0] * (max_length - len(attention_mask))

    return input_ids, attention_mask, word_pos, label_ids
