from torch.utils.data import Dataset, DataLoader
import logging
from typing import Dict, Sequence
import torch

class PreprocessedDataset(Dataset):
    """Dataset for loading preprocessed data.

    Expects ``data_path`` to point at a ``torch.save``-d dict with keys:
      - ``'input_ids'``: sequence of 1-D token tensors
      - ``'labels'``:    sequence of 1-D label tensors
      - ``'task_tags'``: optional; per-example lists of string tags

    Inputs/labels are shifted by one for next-token prediction: each
    input drops its last token and each label drops its first.
    """

    def __init__(self, data_path: str):
        super().__init__()
        logging.warning(f"Loading preprocessed data from {data_path}...")
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # files from trusted sources.
        data = torch.load(data_path)
        # Shift by one for causal LM training: predict token t+1 from token t.
        self.input_ids = [x[:-1] for x in data['input_ids']]
        self.labels = [x[1:] for x in data['labels']]
        if 'task_tags' in data:
            str_tags = data['task_tags']
            # Collect the tag vocabulary, then assign ids in sorted order so
            # the mapping is deterministic across runs (enumerating a raw set
            # made the saved mapping and the encoded tags non-reproducible).
            unique_tags = {tag for tags in str_tags for tag in tags}
            tag_mapping = {tag: idx for idx, tag in enumerate(sorted(unique_tags))}
            # Persist the mapping next to the data file for later decoding.
            with open(f'{data_path}.tag_mapping.txt', 'w') as f:
                f.write(str(tag_mapping))
            self.task_tags = [torch.tensor([tag_mapping[tag] for tag in tag_line]) for tag_line in str_tags]
        else:
            # No tags provided: one dummy zero tag per example.
            self.task_tags = [torch.tensor([0], dtype=torch.long) for _ in data['labels']]
        # Placeholders only; the real loss_mask / attention_mask / position_ids
        # are computed downstream (by get_ltor_masks...).  Built with
        # comprehensions, not `[t] * n`, so each entry is an independent tensor
        # rather than n aliases of one shared tensor (in-place mutation of one
        # entry must not leak into every example).
        self.loss_mask = [torch.tensor([0], dtype=torch.long) for _ in data['labels']]
        self.attention_mask = [torch.tensor([0], dtype=torch.long) for _ in data['labels']]
        self.position_ids = [torch.tensor([0], dtype=torch.long) for _ in data['labels']]

    def __len__(self):
        """Number of examples in the dataset."""
        return len(self.input_ids)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        """Return example *i* as a dict of tensors keyed for the trainer."""
        return dict(
            tokens=self.input_ids[i],
            labels=self.labels[i],
            task_tags=self.task_tags[i],
            loss_mask=self.loss_mask[i],
            attention_mask=self.attention_mask[i],
            position_ids=self.position_ids[i],
        )

# # Example usage
# def create_preprocessed_dataloader(data_path, batch_size=8):
#     dataset = PreprocessedDataset(data_path)
#     dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
#     return dataloader

# # Load preprocessed data
# dataloader = create_preprocessed_dataloader('processed_data.pt')
