from datasets import load_dataset
from pyparsing import line_end

# import torch
# from torch.utils.data import DataLoader
from utils.model_path_getter import load_yaml


class WordsDataset:
    """Slice-based access to a locally stored parquet text corpus.

    Reads the corpus directory from the ``dataset_path`` key of the
    project YAML config (via ``load_yaml``) and loads the train split
    with ``datasets.load_dataset``.
    """

    def __init__(self, default='wiki'):
        """Load the train split of the configured parquet dataset.

        Args:
            default: Corpus name tag; stored on the instance for reference.
                NOTE(review): previously this argument was accepted but
                ignored — only the hard-coded train shard below is loaded
                regardless of its value. TODO: confirm whether other
                corpora should select different files.
        """
        self.default = default
        # Directory holding the parquet shards, from the project config.
        self.path = load_yaml()["dataset_path"]
        # Load the dataset, pointing at the local parquet file path.
        # (validation/test shards exist in some dumps but are not loaded here.)
        self.dataset = load_dataset('parquet', data_files={
            'train': self.path + '/train-00000-of-00001.parquet',
        })
        # Echo the dataset structure so a misconfigured path is obvious.
        print(self.dataset)
        self.train_dataset = self.dataset['train']

    def load_texts(self, start=0, end=100):
        """Return the 'text' column for rows [start, end) of the train split."""
        return self.train_dataset[start:end]['text']



# def load_texts(start=0, end=100):
#     path = load_yaml()["dataset_path"]
#     # Load the dataset, specifying the file paths
#     dataset = load_dataset('parquet', data_files={
#         'train': path+'/train-00000-of-00001.parquet',
#         'validation':path+'/validation-00000-of-00001.parquet',
#         'test': path+'/test-00000-of-00001.parquet'
#     })
#
#     # Print the dataset structure
#     # print(dataset)
#     train_dataset = dataset['train']
#     # validation_dataset = dataset['validation']
#     # test_dataset = dataset['test']
#
#     # Print the first few samples of the training set
#     # print(train_dataset[:100]['text'])
#     return train_dataset[start:end]['text']




# # Define the PyTorch DataLoaders
# train_dataloader = DataLoader(train_dataset, batch_size=8, shuffle=True)
# validation_dataloader = DataLoader(validation_dataset, batch_size=8)
# test_dataloader = DataLoader(test_dataset, batch_size=8)
#
# # Example: fetch one batch from the DataLoader
# for batch in train_dataloader:
#     print(batch)
#     break

if __name__ == '__main__':
    # Smoke test: load the corpus and show the first two texts.
    words = WordsDataset()
    sample = words.load_texts(0, 2)
    print(sample)