import abc
import os
from functools import cached_property
from typing import Iterable, Iterator

import numpy as np
from zkl_datasets import ChunkedBinaryDataset, Dataset, DatasetIterator, DirGroupedDatasets, MappedDataset, sum_or_none

from .vocab import SimpleVocabWithCount


class TokenizedDataset(Dataset[np.ndarray], abc.ABC):
    """Abstract interface for a dataset whose samples are arrays of token ids.

    Concrete implementations yield one ``np.ndarray`` per document and expose
    the tokenizer vocabulary plus token-count metadata alongside the samples.
    """

    @property
    @abc.abstractmethod
    def vocab(self) -> SimpleVocabWithCount:
        """The vocabulary the token ids refer to."""
        pass

    @property
    @abc.abstractmethod
    def total_tokens_n(self) -> int | None:
        """Total number of tokens across all documents, or ``None`` if unknown."""
        pass

    @abc.abstractmethod
    def iter_docs_tokens_n(self) -> Iterator[int]:
        """Yield the token count of each document, in dataset iteration order."""
        pass


class SingleSplitTokenizedDataset(MappedDataset[np.ndarray, bytes], TokenizedDataset):
    """Tokenized dataset backed by a single chunked binary file.

    Each raw sample is the byte serialization of one document's token ids;
    it is decoded lazily into an ``np.ndarray`` on access.
    """

    # On-disk token representation. The byte→token conversions below derive
    # their divisor from this single definition instead of repeating a magic
    # constant 4 that was implicitly coupled to the decode dtype.
    TOKEN_DTYPE: np.dtype = np.dtype(np.int32)

    def __init__(self, vocab: SimpleVocabWithCount, dataset: ChunkedBinaryDataset):
        """
        :param vocab: vocabulary the token ids were produced with.
        :param dataset: binary dataset holding one serialized document per sample.
        """
        # Bind the dtype locally so the mapping closure does not capture self.
        token_dtype = self.TOKEN_DTYPE
        super().__init__(dataset, lambda sample: np.frombuffer(sample, dtype=token_dtype))
        self._vocab = vocab
        self._dataset = dataset

    @property
    def vocab(self) -> SimpleVocabWithCount:
        return self._vocab

    @property
    def dataset(self) -> ChunkedBinaryDataset:
        """The underlying binary dataset."""
        return self._dataset

    @property
    def total_tokens_n(self) -> int | None:
        # Total byte size divided by bytes-per-token gives the token count.
        return self._dataset.total_size // self.TOKEN_DTYPE.itemsize

    def iter_docs_tokens_n(self) -> Iterator[int]:
        """Yield each document's token count, derived from its byte size."""
        itemsize = self.TOKEN_DTYPE.itemsize
        return map(lambda size: size // itemsize, self._dataset.iter_samples_size())


class MultiSplitsTokenizedDataset(DirGroupedDatasets[np.ndarray, SingleSplitTokenizedDataset], TokenizedDataset):
    """Tokenized dataset assembled from per-split subdirectories.

    Every child directory under ``path`` is loaded as a
    ``SingleSplitTokenizedDataset``; all splits share the vocabulary stored
    in ``vocab.csv`` at the root path.
    """

    def __init__(self, path: os.PathLike | str, keys: Iterable[str] | None = None):
        """
        :param path: root directory containing the split subdirectories.
        :param keys: optional subset of split names to load; ``None`` loads all.
        """
        def build_split(split_path: os.PathLike | str) -> SingleSplitTokenizedDataset:
            # self.vocab is cached, so the file is read at most once even
            # though every split references it.
            return SingleSplitTokenizedDataset(self.vocab, ChunkedBinaryDataset(split_path))

        super().__init__(path, keys, build_split)

    @cached_property
    def vocab(self) -> SimpleVocabWithCount:
        """Vocabulary shared by every split, read from ``vocab.csv`` at the root."""
        vocab_path = os.path.join(self._path, "vocab.csv")
        return SimpleVocabWithCount.load_from_file(vocab_path)

    @cached_property
    def total_tokens_n(self) -> int | None:
        """Combined token count across splits, via ``sum_or_none``."""
        per_split_counts = (split.total_tokens_n for split in self.children.values())
        return sum_or_none(per_split_counts)

    def iter_docs_tokens_n(self) -> Iterator[int]:
        """Yield per-document token counts, split by split."""
        for split in self.children.values():
            yield from split.iter_docs_tokens_n()


class TokenizedDatasetIterator(DatasetIterator[np.ndarray]):
    """Iterator over a ``TokenizedDataset`` that tracks consumed tokens.

    Advances the dataset's sample iterator in lockstep with its per-document
    token counts, so both sample- and token-based progress stay in sync
    whether samples are yielded or skipped.
    """

    def __init__(self, dataset: TokenizedDataset):
        self._dataset = dataset
        self._dataset_iterator = DatasetIterator.wrap(iter(dataset))
        self._samples_tokens_n_iterator = dataset.iter_docs_tokens_n()
        self._iterated_tokens_n = 0

    @property
    def dataset(self) -> TokenizedDataset:
        """The dataset being iterated."""
        return self._dataset

    @property
    def iterated_samples_n(self) -> int:
        """Number of samples consumed so far (yielded or skipped)."""
        return self._dataset_iterator.iterated_samples_n

    @property
    def iterated_tokens_n(self) -> int:
        """Number of tokens consumed so far (yielded or skipped)."""
        return self._iterated_tokens_n

    def _skip_one_sample(self) -> None:
        # Advance past one sample without decoding it, keeping the token
        # tally in step with the sample iterator.
        self._dataset_iterator.skip(1)
        self._iterated_tokens_n += next(self._samples_tokens_n_iterator)

    def __next__(self) -> np.ndarray:
        # Pull the sample first so an exhausted iterator raises StopIteration
        # before the token tally is touched.
        sample = next(self._dataset_iterator)
        self._iterated_tokens_n += next(self._samples_tokens_n_iterator)
        return sample

    def skip(self, samples_n: int):
        """Skip the next ``samples_n`` samples, still counting their tokens."""
        for _ in range(samples_n):
            self._skip_one_sample()

    def skip_by_tokens_n(self, tokens_n: int):
        """Skip whole samples until at least ``tokens_n`` more tokens are consumed."""
        target = self._iterated_tokens_n + tokens_n
        while self._iterated_tokens_n < target:
            self._skip_one_sample()
