import torch
from torch.utils.data import Dataset
from transformers import BertTokenizer
import json
import numpy as np


class NerDataset(Dataset):
    """NER dataset over pre-built, parallel feature columns.

    ``all_data`` is a mapping holding six equally-long columns; items
    are returned exactly as stored (no tensor conversion here).
    """

    def __init__(self, all_data):
        # (attribute name, key in all_data) — note the singular/plural
        # mismatch for attention_mask vs. the 'attention_masks' key.
        for attr, key in (('input_ids', 'input_ids'),
                          ('attention_mask', 'attention_masks'),
                          ('segment_ids', 'segment_ids'),
                          ('valid_masks', 'valid_masks'),
                          ('label_ids', 'label_ids'),
                          ('label_masks', 'label_masks')):
            setattr(self, attr, all_data[key])
        self.length = len(self.input_ids)

    def __getitem__(self, index):
        """Return the six feature entries for one example, in fixed order."""
        return (self.input_ids[index],
                self.attention_mask[index],
                self.segment_ids[index],
                self.valid_masks[index],
                self.label_ids[index],
                self.label_masks[index])

    def __len__(self):
        return self.length


class RexDataset(Dataset):
    """Relation-extraction dataset producing per-item ``torch.long`` tensors.

    Parameters
    ----------
    all_data : mapping
        Holds the parallel columns 'indexed_tokens', 'pos1', 'pos2',
        'att_mask' and 'label_ids'.
    device : torch.device, optional
        Device the tensors are created on.  Defaults to CUDA (the
        previously hard-coded behavior); pass ``torch.device('cpu')``
        on hosts without a GPU.
    """

    def __init__(self, all_data, device=torch.device('cuda')):
        self.indexed_tokens = all_data['indexed_tokens']
        self.pos1 = all_data['pos1']
        self.pos2 = all_data['pos2']
        self.att_mask = all_data['att_mask']
        self.label_ids = all_data['label_ids']
        self.length = len(self.label_ids)
        # Fix: the device used to be rebuilt (and fixed to CUDA) on every
        # tensor construction in __getitem__; it is now configurable and
        # stored once, consistent with OntoNotesDataset/TrexNerDataset.
        self.device = device

    def __getitem__(self, index):
        """Return (tokens, pos1, pos2, att_mask, label) as long tensors."""
        return torch.tensor(self.indexed_tokens[index], dtype=torch.long, device=self.device), \
               torch.tensor(self.pos1[index], dtype=torch.long, device=self.device), \
               torch.tensor(self.pos2[index], dtype=torch.long, device=self.device), \
               torch.tensor(self.att_mask[index], dtype=torch.long, device=self.device), \
               torch.tensor(self.label_ids[index], dtype=torch.long, device=self.device)

    def __len__(self):
        return self.length


class OntoNotesDataset(Dataset):
    """OntoNotes examples served as ``torch.long`` tensors on ``device``."""

    def __init__(self, all_data, device=torch.device('cuda')):
        # Keep references to the prepared feature columns; tensors are
        # built lazily, one example at a time, in __getitem__.
        self.input_ids = all_data['input_ids']
        self.attention_mask = all_data['attention_mask']
        self.start_pos = all_data['start_pos']
        self.end_pos = all_data['end_pos']
        self.label_ids = all_data['label_ids']
        self.length = len(self.input_ids)
        self.device = device

    def __getitem__(self, index):
        """Materialize the ``index``-th example as five long tensors."""
        def _long(column):
            return torch.tensor(column[index], dtype=torch.long, device=self.device)

        return (_long(self.input_ids),
                _long(self.attention_mask),
                _long(self.start_pos),
                _long(self.end_pos),
                _long(self.label_ids))

    def __len__(self):
        return self.length


class TrexNerDataset(Dataset):
    """NER features pulled column-wise from a project ``datable`` object."""

    def __init__(self, datable, device=torch.device('cuda')):
        # (attribute, datable column) pairs; column names do not always
        # match the attribute names ('valid_mask' column -> valid_masks).
        for attr, col in (('input_ids', 'input_ids'),
                          ('attention_mask', 'attention_masks'),
                          ('segment_ids', 'segment_ids'),
                          ('valid_masks', 'valid_mask'),
                          ('label_ids', 'label_ids'),
                          ('label_masks', 'label_masks')):
            setattr(self, attr, datable.get_col(col))
        self.length = len(self.input_ids)
        self.device = device

    def __getitem__(self, index):
        """Return the six feature tensors (torch.long, on self.device)."""
        columns = (self.input_ids, self.attention_mask, self.segment_ids,
                   self.valid_masks, self.label_ids, self.label_masks)
        return tuple(torch.tensor(col[index], dtype=torch.long, device=self.device)
                     for col in columns)

    def __len__(self):
        return self.length


class FrameNetDataset(Dataset):
    """FrameNet examples served as ``torch.long`` tensors on ``device``."""

    def __init__(self, datable, device=torch.device('cuda')):
        # Feature columns are read straight out of the datable mapping.
        self.input_ids = datable['input_ids']
        self.attention_mask = datable['attention_mask']
        self.word_pos = datable['word_pos']
        self.label_ids = datable['label_ids']
        self.length = len(self.input_ids)
        self.device = device

    def __getitem__(self, index):
        """Materialize one example as four long tensors."""
        def _long(column):
            return torch.tensor(column[index], dtype=torch.long, device=self.device)

        return (_long(self.input_ids), _long(self.attention_mask),
                _long(self.word_pos), _long(self.label_ids))

    def __len__(self):
        return self.length


class ACE2005Dataset(Dataset):
    """ACE 2005 event-extraction dataset.

    Loads a JSON file of sentences annotated with golden entity and event
    mentions.  For each sentence it keeps:

    * ``sent_li``      — the words wrapped in [CLS] ... [SEP],
    * ``triggers_li``  — per-word BIO trigger labels ('O' outside spans),
    * ``arguments_li`` — candidate entity spans plus, per event key,
      the argument spans with their role indices.

    Parameters
    ----------
    fpath : str
        Path to the JSON data file.
    trigger_vocabulary, argument_vocabulary :
        Project objects exposing ``to_index(label) -> int``.
    bert_model : str
        Name of the pretrained BERT tokenizer to load.
    """

    def __init__(self, fpath, trigger_vocabulary, argument_vocabulary, bert_model='bert-base-cased'):
        self.sent_li, self.triggers_li, self.arguments_li = [], [], []
        self.tokenizer = BertTokenizer.from_pretrained(bert_model)
        self.trigger_vocabulary = trigger_vocabulary
        self.argument_vocabulary = argument_vocabulary
        NONE = 'O'
        CLS = '[CLS]'
        SEP = '[SEP]'

        # Fix: give open() an explicit encoding so JSON parsing does not
        # depend on the platform's default locale encoding.
        with open(fpath, 'r', encoding='utf-8') as f:
            data = json.load(f)

        for item in data:
            words = item['words']
            triggers = [NONE] * len(words)
            arguments = {
                'candidates': [
                    # ex. (5, 6, "entity_type_str"), ...
                ],
                'events': {
                    # ex. (1, 3, "trigger_type_str"): [(5, 6, "argument_role_idx"), ...]
                },
            }

            # Every golden entity mention is a potential argument span.
            for entity_mention in item['golden-entity-mentions']:
                arguments['candidates'].append(
                    (entity_mention['start'], entity_mention['end'], entity_mention['entity-type']))

            for event_mention in item['golden-event-mentions']:
                # BIO-tag the trigger span: B- on its first word, I- after.
                trigger_type = event_mention['event_type']
                for i in range(event_mention['trigger']['start'], event_mention['trigger']['end']):
                    if i == event_mention['trigger']['start']:
                        triggers[i] = 'B-{}'.format(trigger_type)
                    else:
                        triggers[i] = 'I-{}'.format(trigger_type)

                event_key = (
                    event_mention['trigger']['start'], event_mention['trigger']['end'], event_mention['event_type'])
                arguments['events'][event_key] = []
                for argument in event_mention['arguments']:
                    role = argument['role']
                    # Collapse 'Time-*' sub-roles (e.g. Time-Within) into 'Time'.
                    if role.startswith('Time'):
                        role = role.split('-')[0]
                    arguments['events'][event_key].append(
                        (argument['start'], argument['end'], self.argument_vocabulary.to_index(role)))

            self.sent_li.append([CLS] + words + [SEP])
            self.triggers_li.append(triggers)
            self.arguments_li.append(arguments)

    def __len__(self):
        return len(self.sent_li)

    def __getitem__(self, idx):
        """Wordpiece-tokenize the ``idx``-th sentence.

        Returns
        -------
        tokens_x : list[int]
            Wordpiece ids for the [CLS] ... [SEP] wrapped sentence.
        triggers_y : list[int]
            Trigger-label indices from ``trigger_vocabulary``.
        arguments : dict
            As built in ``__init__``.
        head_indexes : list[int]
            Wordpiece positions of each word's first piece; [CLS]/[SEP]
            are excluded (their is_head entry is 0).
        seqlen : int
            Number of wordpieces.
        words, triggers :
            The raw inputs, kept for decoding/debugging.
        """
        CLS = '[CLS]'
        SEP = '[SEP]'
        words, triggers, arguments = self.sent_li[idx], self.triggers_li[idx], self.arguments_li[idx]

        # We give credits only to the first piece of each word: is_heads
        # marks the first wordpiece with 1 and continuation pieces (plus
        # the special [CLS]/[SEP] tokens) with 0.
        tokens_x, is_heads = [], []
        for w in words:
            tokens = self.tokenizer.tokenize(w) if w not in [CLS, SEP] else [w]
            tokens_xx = self.tokenizer.convert_tokens_to_ids(tokens)

            if w in [CLS, SEP]:
                is_head = [0]
            else:
                is_head = [1] + [0] * (len(tokens) - 1)

            tokens_x.extend(tokens_xx)
            is_heads.extend(is_head)

        triggers_y = [self.trigger_vocabulary.to_index(t) for t in triggers]
        # Positions of the head wordpieces only.
        head_indexes = [i for i, is_head in enumerate(is_heads) if is_head]
        seqlen = len(tokens_x)
        return tokens_x, triggers_y, arguments, head_indexes, seqlen, words, triggers

    def to_dict(self, batch):
        """Collate a list of ``__getitem__`` tuples into padded lists.

        Pads tokens and head indexes with 0, trigger labels with the
        vocabulary's '<pad>' index, up to the batch's max wordpiece length.
        """
        tokens_x, triggers_y, arguments, head_indexes, seqlen, words, triggers = list(map(list, zip(*batch)))
        maxlen = np.array(seqlen).max()
        pad_id = self.trigger_vocabulary.to_index('<pad>')  # hoisted out of the loop
        for i in range(len(tokens_x)):
            tokens_x[i] = tokens_x[i] + [0] * (maxlen - len(tokens_x[i]))
            head_indexes[i] = head_indexes[i] + [0] * (maxlen - len(head_indexes[i]))
            triggers_y[i] = triggers_y[i] + [pad_id] * (maxlen - len(triggers_y[i]))
        return {
            'tokens_x': tokens_x,
            'triggers_y': triggers_y,
            'arguments': arguments,
            'head_indexes': head_indexes,
            'words': words,
            'triggers': triggers
        }

    def get_samples_weight(self):
        """Per-sentence sampling weights: 5.0 for sentences containing at
        least one trigger (a non-'O' label), else 1.0 — upweights the
        comparatively rare event-bearing sentences."""
        return np.array([
            5.0 if any(t != 'O' for t in triggers) else 1.0
            for triggers in self.triggers_li
        ])
