import time

from fsspec.implementations.dirfs import DirFileSystem
from huggingface_hub import HfFileSystem
from zkl_aiutils_datasets import ChainedIndexedNamedDatasets, MappedDataset, ThreadedBufferedDataset, \
    load_and_split_parquet_files


def load_text_dataset(parquet_files_dir_path="datasets/wikimedia/wikipedia/20231101.en",
                      split_ratios=None):
    """Load a parquet text dataset from the Hugging Face Hub as named splits.

    Each sample is mapped to a single plain-text string of the form
    ``"<title>\\n\\n<text>"`` (assumes every sample dict carries 'title'
    and 'text' keys — true for the wikipedia default; verify for other
    datasets).

    :param parquet_files_dir_path: Hub path of the directory containing the
        parquet files (default: the 2023-11-01 English wikipedia dump).
    :param split_ratios: mapping of split name to fraction; defaults to
        ``{"train": 0.8, "valid": 0.1, "test": 0.1}``.
    :return: a ``ChainedIndexedNamedDatasets`` keyed by split name, each
        split yielding text strings.
    """
    # Avoid a mutable default argument; build the default ratios per call.
    if split_ratios is None:
        split_ratios = {"train": 0.8, "valid": 0.1, "test": 0.1}
    # Scope the HF filesystem to the dataset directory so the loader sees
    # only that directory's parquet files.
    fs = DirFileSystem(parquet_files_dir_path, HfFileSystem())
    splits_dataset = load_and_split_parquet_files(fs, split_ratios)
    # Wrap each split so iteration yields "<title>\n\n<text>" strings
    # instead of raw sample dicts.
    return ChainedIndexedNamedDatasets({
        split_name: MappedDataset(split_dataset,
                                  lambda sample: sample['title'] + "\n\n" + sample['text'])
        for split_name, split_dataset in splits_dataset.named_children.items()})


if __name__ == '__main__':
    # Smoke-test the pipeline: stream samples through a 3000-item
    # background-filled buffer, printing each one at a throttled pace.
    buffered_dataset = ThreadedBufferedDataset(load_text_dataset(), 3000)
    i = 0  # name matters: f"{i=}" embeds the variable name in the output
    for sample in buffered_dataset:
        print(sample)
        print(f"{i=}")
        i += 1
        time.sleep(0.1)
