import torch
from torch.utils.data import Dataset
from tqdm import tqdm


class TestDataset(Dataset):
    """Evaluation dataset mapping each unique input context to all of its
    reference outputs.

    The corpus is a tab-separated text file where each line has the form
    ``context<TAB>ref1<TAB>ref2...``. Lines that share the same context are
    merged so the context appears exactly once with the union of its
    references, in first-seen order.
    """

    def __init__(self, corpus_file, tokenizer, max_length, norm=True):
        """Load and parse *corpus_file*.

        Args:
            corpus_file: path to the tab-separated corpus; first field is the
                input context, remaining fields are reference outputs.
            tokenizer: HuggingFace-style tokenizer exposing ``eos_token``,
                ``eos_token_id`` and a ``__call__`` that returns an object
                with an ``input_ids`` list — assumed interface, confirm
                against the caller.
            max_length: maximum number of token ids kept per context; longer
                contexts are truncated from the left (oldest tokens dropped).
            norm: if True, collapse runs of whitespace in both the context
                and the references.
        """
        self.data = []

        # Explicit encoding so parsing does not depend on the platform locale.
        with open(corpus_file, 'r', encoding='utf-8') as file:
            lines = file.readlines()

        self._parse_data(lines, tokenizer, max_length, norm)

    def _parse_data(self, lines, tokenizer, max_length, norm):
        """Tokenize each unique context and collect its reference outputs."""
        context_data = dict()
        for line in tqdm(lines):
            # Skip blank lines so they do not produce empty-context entries.
            if not line.strip():
                continue

            contents = line.split('\t')
            seq_in, seq_out_all = contents[0], contents[1:]

            seq_in = seq_in.strip()
            seq_out_all = [seq_out.strip() for seq_out in seq_out_all]

            if norm:
                # Collapse any run of whitespace to a single space.
                seq_in = ' '.join(seq_in.split())
                seq_out_all = [' '.join(seq_out.split()) for seq_out in seq_out_all]

            # The corpus presumably marks turn boundaries with the literal
            # string 'EOS'; swap in the tokenizer's own EOS token. This is a
            # no-op when 'EOS' is absent. TODO(review): confirm corpus format.
            seq_in = seq_in.replace('EOS', tokenizer.eos_token)

            # Duplicate context: merge its references into the existing entry.
            if seq_in in context_data:
                context_data[seq_in]['seq_out'].extend(seq_out_all)
                continue

            input_ids = tokenizer(seq_in, add_special_tokens=False).input_ids
            input_ids.append(tokenizer.eos_token_id)

            # Left-truncate so the most recent tokens (and EOS) are kept.
            # (This slice form, unlike [-max_length:], is also correct for
            # the degenerate max_length == 0 case.)
            if len(input_ids) > max_length:
                input_ids = input_ids[len(input_ids) - max_length:]

            context_data[seq_in] = {
                'input_ids': torch.tensor(input_ids, dtype=torch.long),
                'attention_mask': torch.ones(len(input_ids), dtype=torch.long),
                'context': seq_in,
                'seq_out': seq_out_all
            }

        # Dicts preserve insertion order, so contexts keep first-seen order.
        self.data.extend(context_data.values())

    def __getitem__(self, item):
        return self.data[item]

    def __len__(self):
        return len(self.data)
