import os
import sys
from typing import Iterable

import numpy as np
from tqdm import tqdm
from zkl_datasets import ConcatenatedDataset, Dataset, MappedDataset, RandomGatheringDataset, ShuffledDataset

from .vocab import SimpleVocabWithCount
from .writer import MultiSplitsTokenizedDatasetWriter


def build_preprocessed_dataset(
    path: os.PathLike | str,
    text_datasets: Iterable[Dataset[str]],
    splits_ratio: dict[str, float],
) -> None:
    """Build a tokenized, multi-split dataset on disk from raw text datasets.

    Pipeline:
      1. One full pass over all text samples to build a count-based vocab.
      2. Sort the vocab by token frequency.
      3. Lazily define the preprocessing chain: per-dataset shuffling,
         random interleaving across datasets, and mapping each sample to an
         ``int32`` array of token indices.
      4. Second full pass, streaming tokenized samples into a multi-split
         writer at *path*.

    Args:
        path: Destination for the written dataset, passed straight to
            ``MultiSplitsTokenizedDatasetWriter``.
        text_datasets: Datasets yielding text samples; each sample is
            iterated token-by-token (for plain ``str`` samples this means
            per character).
        splits_ratio: Mapping of split name to its relative share of
            samples, forwarded to the writer.
    """
    # Materialize so we can iterate the collection twice (vocab pass + write pass).
    text_datasets = tuple(text_datasets)

    # vocab: full pass over all text to count token occurrences
    print("Building vocab...", file=sys.stderr)
    vocab = SimpleVocabWithCount()
    concat_text_dataset = ConcatenatedDataset(text_datasets)
    for text in tqdm(concat_text_dataset):
        for token in text:
            vocab.add_token(token)

    # Fixed: this progress message went to stdout while every other one
    # goes to stderr; keep all progress reporting on stderr.
    print("Sorting vocab...", file=sys.stderr)
    vocab = vocab.sort_by_count()

    # defining merging & shuffling & tokenizing (lazy -- nothing is read yet)
    print("Defining merging & shuffling & tokenizing...", file=sys.stderr)

    # Per-dataset shuffle with a 256-sample buffer, then random interleaving
    # across the shuffled datasets.
    text_datasets = [ShuffledDataset(text_dataset, 256) for text_dataset in text_datasets]
    text_dataset = RandomGatheringDataset(text_datasets)
    tokenized_dataset = MappedDataset(text_dataset,
        lambda sample: np.asarray([vocab.index_token(token) for token in sample], dtype=np.int32))

    # processing & writing: second full pass, streaming samples to disk
    print("Processing & writing samples...", file=sys.stderr)

    writer = MultiSplitsTokenizedDatasetWriter(path, splits_ratio, vocab)
    for sample in tqdm(tokenized_dataset):
        assert isinstance(sample, np.ndarray)
        writer.write(sample)
    # NOTE(review): writer is never explicitly closed/flushed here -- confirm
    # MultiSplitsTokenizedDatasetWriter needs no explicit close (or add one /
    # use it as a context manager if it supports that).

    # done
    print("Done.", file=sys.stderr)
