import numpy as np
import torch
from zkl_aiutils_datasets import ChannelsBatchedDataset, Dataset, MappedDataset, StridingBatchedDataset


def wrap_text_dataset_for_training(
    dataset: Dataset[str], *,
    chunk_tokens_n: int = 1,
    batch_samples_n: int,
    device: torch.device | str | None = None,
):
    """Turn a dataset of text samples into batched next-token-prediction triples.

    Each string is byte-level tokenized (one token per UTF-8 byte, ids 0-255),
    wrapped with BOS (256) / EOS (257) markers, sliced into overlapping windows
    of ``chunk_tokens_n + 1`` tokens, batched across ``batch_samples_n``
    samples, and finally split into ``(inputs, targets, None)`` where targets
    are inputs shifted by one token.

    Args:
        dataset: source dataset yielding ``str`` samples.
        chunk_tokens_n: number of tokens per training chunk (model context length).
        batch_samples_n: number of sample streams combined per batch.
        device: optional device for the produced int64 tensors.

    Returns:
        A dataset yielding ``(inputs, targets, None)`` int64 tensor triples,
        with ``inputs``/``targets`` offset by one token along the last axis.
    """
    # tokenize: UTF-8 bytes as token ids. Widen to int64 so the out-of-byte-range
    # BOS/EOS markers (256/257) fit; the asarray copy also makes the array
    # writable (np.frombuffer returns a read-only view over the bytes object).
    dataset = MappedDataset(dataset, lambda sample: bytes(sample, encoding='utf-8'))
    dataset = MappedDataset(dataset, lambda sample: np.frombuffer(sample, dtype=np.uint8))
    dataset = MappedDataset(dataset, lambda sample: np.asarray(sample, dtype=np.int64))

    # format: surround each sample with BOS (256) and EOS (257) markers.
    # np.concatenate, not np.concat: the latter is a NumPy 2.0-only alias and
    # raises AttributeError on NumPy 1.x.
    dataset = MappedDataset(dataset, lambda sample: np.concatenate([[256], sample, [257]]))

    # chunk: windows of chunk_tokens_n + 1 tokens striding by chunk_tokens_n, so
    # adjacent windows overlap by exactly one token — the extra token supplies
    # the final prediction target after the input/target split below.
    dataset = MappedDataset(dataset, lambda sample: StridingBatchedDataset(
        sample, batch_items_n=chunk_tokens_n + 1, stride_items_n=chunk_tokens_n))

    # batch: combine batch_samples_n chunk streams into one batched stream
    # (presumably one channel per sample stream — see ChannelsBatchedDataset).
    dataset = ChannelsBatchedDataset(dataset, channels_n=batch_samples_n)
    dataset = MappedDataset(dataset, lambda sample: np.asarray(sample, dtype=np.int64))

    # load: materialize each batch as an int64 torch tensor on the target device.
    dataset = MappedDataset(dataset, lambda sample: torch.asarray(sample, dtype=torch.int64, device=device))

    # split: teacher-forcing pairs — inputs drop the last token, targets drop
    # the first; the trailing None is a placeholder (no mask/aux data here).
    dataset = MappedDataset(dataset, lambda sample: (sample[..., :-1], sample[..., +1:], None))

    return dataset
