import sys
from concurrent.futures.thread import ThreadPoolExecutor
from typing import Mapping

from zkl_pyutils_fsspec import FsLike, resolve_fs

from zkl_aiutils_datasets.grouping import ChainedIndexedNamedDatasets
from zkl_aiutils_datasets.processing import KnownSizeChainedDatasets
from zkl_aiutils_datasets.utils.split import random_split
from .parquet_file import ParquetFileDataset


def load_parquet_files(fs: FsLike):
    """Chain every parquet file reachable under *fs* into one dataset.

    Equivalent to the split-aware loader with no splits requested.
    """
    dataset = _load_and_split_parquet_files(fs)
    return dataset


def load_and_split_parquet_files(
    fs: FsLike,
    weights: Mapping[str, float], *,
    random_seed: int = 42,
):
    """Load parquet files under *fs* and partition them into named splits.

    *weights* maps each split name to its relative weight; *random_seed*
    makes the partitioning reproducible.
    """
    return _load_and_split_parquet_files(
        fs,
        splits_weight=weights,
        split_random_seed=random_seed,
    )


def _load_and_split_parquet_files(
    fs: FsLike, *,
    splits_weight: Mapping[str, float] | None = None,
    split_random_seed: int = 42,
):
    """Discover all ``*.parquet`` files under *fs* and wrap them as datasets.

    With ``splits_weight=None`` the files are chained into a single
    dataset. Otherwise the per-file row-group children are randomly
    partitioned into named splits according to the given weights.
    """
    fs = resolve_fs(fs)

    file_paths = fs.glob("**/*.parquet")

    # Imported lazily so the module loads without pyarrow installed.
    from pyarrow.parquet import ParquetFile

    def open_file_dataset(path: str):
        # NOTE(review): the ParquetFile handle is built through a deferred
        # factory — whether "loaded" here means the file was actually read
        # depends on ParquetFileDataset's constructor; confirm upstream.
        ds = ParquetFileDataset(lambda: ParquetFile(path, filesystem=fs))
        print(f"loaded {path}", file=sys.stderr)
        return ds

    # map() preserves input order, so datasets line up with file_paths.
    with ThreadPoolExecutor() as pool:
        file_datasets = list(pool.map(open_file_dataset, file_paths))

    if splits_weight is None:
        return KnownSizeChainedDatasets(file_datasets)

    # Flatten each file into its row-group children before splitting, so
    # the split granularity is a row group rather than a whole file.
    row_group_datasets = []
    for file_dataset in file_datasets:
        row_group_datasets.extend(file_dataset.children)

    named_chunks = random_split(
        row_group_datasets, splits_weight, random_seed=split_random_seed)
    return ChainedIndexedNamedDatasets({
        split_name: KnownSizeChainedDatasets(chunks)
        for split_name, chunks in named_chunks.items()})
