import torch
from torch.utils.data import Dataset
from tqdm import tqdm


class DSTC7TrainDataset(Dataset):
    """Training dataset for DSTC7 response generation.

    Each corpus line must hold 7 tab-separated fields; field 5 is the
    dialogue context (utterances joined by ' EOS ') and field 6 is the
    gold response. Every line becomes one training example with
    tokenized inputs, an all-ones attention mask, and tokenized labels.
    """

    def __init__(self, corpus_file, tokenizer, max_length):
        """Load and tokenize the corpus.

        Args:
            corpus_file: path to the tab-separated corpus file.
            tokenizer: a HF-style tokenizer (callable returning
                ``.input_ids``, with ``eos_token_id``).
            max_length: per-utterance / per-response truncation length.
        """
        self.data = list()

        # Explicit encoding so parsing does not depend on the platform default.
        with open(corpus_file, 'r', encoding='utf-8') as file:
            lines = file.readlines()

        self._parse_data(lines, tokenizer, max_length)

    def _parse_data(self, lines, tokenizer, max_length):
        """Tokenize every corpus line into model-ready tensors.

        Raises:
            ValueError: if a line does not have exactly 7 fields.
        """
        for line in tqdm(lines):
            split_line = line.split('\t')
            # Raise instead of assert: asserts are stripped under `python -O`,
            # which would let malformed lines crash later with an IndexError.
            if len(split_line) != 7:
                raise ValueError(
                    'expected 7 tab-separated fields, got %d' % len(split_line))

            # Strip whitespace — in particular the trailing newline on the
            # last field, which would otherwise be tokenized into the labels.
            # This also matches how TestDataset treats the same fields.
            context = split_line[5].strip()
            seq_out = split_line[6].strip()

            # Drop the leading 'START' marker; keep the real utterances.
            seq_in = context.split(' EOS ')[1:]
            input_ids = list()
            for utterance in seq_in:
                tokenized_u = tokenizer(utterance, add_special_tokens=False,
                                        truncation=True, max_length=max_length)
                input_ids.extend(tokenized_u.input_ids)
                # Separate utterances with the tokenizer's EOS id.
                input_ids.append(tokenizer.eos_token_id)

            labels = tokenizer(seq_out, add_special_tokens=False,
                               truncation=True,
                               max_length=max_length).input_ids
            labels.append(tokenizer.eos_token_id)

            # NOTE(review): each utterance is truncated to max_length, but the
            # concatenated input_ids may still exceed max_length — confirm the
            # collate/model side tolerates that (TestDataset tail-truncates).
            self.data.append({
                'input_ids': torch.tensor(input_ids, dtype=torch.long),
                'attention_mask': torch.ones(len(input_ids), dtype=torch.long),
                'labels': torch.tensor(labels, dtype=torch.long),
                'context': context,
                'seq_out': [seq_out]
            })

    def __getitem__(self, item):
        return self.data[item]

    def __len__(self):
        return len(self.data)


class TestDataset(Dataset):
    """Evaluation dataset for DSTC7 response generation.

    Keeps only dialogues that begin at the conversation start
    ('START EOS ' prefix) and groups all reference responses that share
    the same context, so each item carries one tokenized context and a
    list of gold responses.
    """

    def __init__(self, corpus_file, tokenizer, max_length, norm=True):
        """Load and tokenize the corpus.

        Args:
            corpus_file: path to the tab-separated corpus file.
            tokenizer: a HF-style tokenizer (callable returning
                ``.input_ids``, with ``eos_token`` / ``eos_token_id``).
            max_length: maximum number of context tokens kept (tail).
            norm: collapse runs of whitespace in context and responses.
        """
        self.data = list()

        # Explicit encoding so parsing does not depend on the platform default.
        with open(corpus_file, 'r', encoding='utf-8') as file:
            lines = file.readlines()

        self._parse_data(lines, tokenizer, max_length, norm)

    def _parse_data(self, lines, tokenizer, max_length, norm):
        """Tokenize lines, merging duplicate contexts into one item.

        Raises:
            ValueError: if a line does not have exactly 7 fields.
        """
        # Maps context string -> item dict; used to merge the reference
        # responses of lines that share the same context.
        context_data = dict()
        for line in tqdm(lines):
            split_line = line.split('\t')
            # Raise instead of assert: asserts are stripped under `python -O`,
            # which would let malformed lines crash later with an IndexError.
            if len(split_line) != 7:
                raise ValueError(
                    'expected 7 tab-separated fields, got %d' % len(split_line))

            context = split_line[5].strip()
            seq_out = split_line[6].strip()

            # Only evaluate dialogues that begin at the conversation start;
            # drop the marker itself from the context.
            if not context.startswith('START EOS '):
                continue
            context = context[len('START EOS '):]

            if norm:
                # Collapse any run of whitespace to a single space.
                context = ' '.join(context.split())
                seq_out = ' '.join(seq_out.split())

            # NOTE(review): replaces every 'EOS' substring, even one occurring
            # inside a word — assumed not to appear in this corpus; confirm.
            context = context.replace('EOS', tokenizer.eos_token)

            # Duplicate context: just collect the extra reference response.
            if context in context_data:
                context_data[context]['seq_out'].append(seq_out)
                continue

            input_ids = tokenizer(context, add_special_tokens=False).input_ids
            input_ids.append(tokenizer.eos_token_id)

            # Keep the most recent max_length tokens when the context is long.
            if len(input_ids) > max_length:
                input_ids = input_ids[-max_length:]

            context_data[context] = {
                'input_ids': torch.tensor(input_ids, dtype=torch.long),
                'attention_mask': torch.ones(len(input_ids), dtype=torch.long),
                'context': context,
                'seq_out': [seq_out]
            }

        # Preserve first-seen order of contexts (dicts are insertion-ordered).
        self.data.extend(context_data.values())

    def __getitem__(self, item):
        return self.data[item]

    def __len__(self):
        return len(self.data)
