from typing import Iterable, Iterator

import numpy as np
import torch
import torch.distributed

from llmpt.preprocess import TokenizedDataset, TokenizedDatasetIterator, Vocab
from .vocab import VocabForNLP
from .vocab_transform import make_vocab_transform

DataBatch = tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]


class PreprocessedDatasetIterator(Iterator[DataBatch]):
    """Iterates model-ready batches built from a tokenized dataset.

    Each batch is (in_tokens, in_positions, out_tokens, out_tokens_mask,
    progress_tokens_n). Rank 0 reads, optionally remaps, tiles, and batches
    the documents; in a multi-process group the other ranks receive their
    shard of every batch via torch.distributed.scatter.
    """

    def __init__(self, *,
        dataset: TokenizedDataset,
        vocab: VocabForNLP | None = None,

        limit_repeats_n: int | None = 1,
        context_tokens_n: int,
        striding_tokens_n: int,
        batch_samples_n: int,
        device: str | torch.device | None = None,

        skip_docs_n: int = 0,
    ):
        # Detect the process group explicitly instead of catching the
        # exception get_rank() raises without an initialized group: the
        # exception type differs across torch versions (RuntimeError in older
        # releases, ValueError in newer ones), so catching only one of them
        # silently breaks on the other.
        if torch.distributed.is_available() and torch.distributed.is_initialized():
            process_rank = torch.distributed.get_rank()
            process_group_size = torch.distributed.get_world_size()
        else:
            process_rank = 0
            process_group_size = 1

        # Documents consumed so far, resuming from skip_docs_n. Only rank 0
        # advances this counter, because only rank 0 reads the dataset.
        self._iterated_docs_n = skip_docs_n

        def iter_docs_from_dataset_with_recording():
            for sample in iter_docs_from_dataset(dataset, skip_docs_n, limit_repeats_n):
                self._iterated_docs_n += 1
                yield sample

        if process_rank == 0:
            iterator = iter_docs_from_dataset_with_recording()
            iterator = iter_and_transform(iterator, dataset.vocab, vocab) if vocab is not None else iterator
            # Tiles are context_tokens_n + 1 long so each position has both an
            # input token and a next-token target after the shift-by-one split.
            # NOTE(review): vocab.BEGIN_INDEX/END_INDEX/PADDING_INDEX are read
            # here even though vocab defaults to None — calling with vocab=None
            # on rank 0 raises AttributeError; confirm whether dataset.vocab
            # was meant as the fallback.
            iterator = iter_batched_tiles_from_docs(iterator,
                batch_samples_n * process_group_size, context_tokens_n + 1, striding_tokens_n,
                vocab.BEGIN_INDEX, vocab.END_INDEX, vocab.PADDING_INDEX)
            iterator = iter_and_load(iterator, device)
        else:
            iterator = None

        if process_group_size > 1:
            iterator = iter_and_scatter(iterator, batch_samples_n, context_tokens_n, device)

        iterator = iter_and_postprocess(iterator)
        self._iterator = iterator

    @property
    def iterated_docs_n(self) -> int:
        # Number of documents consumed from the dataset, including the
        # initially skipped ones (meaningful on rank 0 only).
        return self._iterated_docs_n

    def __next__(self) -> DataBatch:
        return next(self._iterator)


def compute_total_tokens_n(
    dataset: TokenizedDataset, *,
    limit_tokens_n: int | None = None,
    limit_repeats_n: int | None = 1,
) -> int | None:
    """Compute the total number of tokens the iteration will cover.

    The result is the tightest of the explicit token limit and the dataset
    size multiplied by the repeat limit.

    :param dataset: dataset whose ``total_tokens_n`` is the tokens per pass.
    :param limit_tokens_n: optional hard cap on the token count.
    :param limit_repeats_n: optional cap on full passes over the dataset.
    :return: total token count, or None when no finite bound applies.
    """
    total_tokens_n = np.inf
    if limit_tokens_n is not None:
        total_tokens_n = min(total_tokens_n, limit_tokens_n)
    if limit_repeats_n is not None:
        # The dataset size may be unknown: the property may raise TypeError
        # or report None — treat both as "unbounded" instead of letting the
        # multiplication below fail on None.
        try:
            repeat_tokens_n = dataset.total_tokens_n
        except TypeError:
            repeat_tokens_n = None
        if repeat_tokens_n is None:
            repeat_tokens_n = np.inf
        total_tokens_n = min(total_tokens_n, repeat_tokens_n * limit_repeats_n)
    return int(total_tokens_n) if np.isfinite(total_tokens_n) else None


def iter_docs_from_dataset(
    dataset: TokenizedDataset,
    skip_docs_n: int = 0,
    limit_repeats_n: int | None = None,
) -> Iterator[np.ndarray]:
    """Yield documents from the dataset, cycling through it repeatedly.

    :param dataset: source dataset to read documents from.
    :param skip_docs_n: number of documents already consumed; used to resume
        mid-pass (modulo the dataset length) at the right repeat count.
    :param limit_repeats_n: stop after this many full passes; None iterates
        forever.
    """
    docs_per_pass = len(dataset)
    iterator = TokenizedDatasetIterator(dataset)
    iterator.skip(skip_docs_n % docs_per_pass)

    iterated_repeats_n = skip_docs_n // docs_per_pass
    while True:
        if limit_repeats_n is not None and iterated_repeats_n >= limit_repeats_n:
            return
        try:
            doc = next(iterator)
        except StopIteration:
            # End of one pass: rewind to the start and count the repeat.
            iterator = TokenizedDatasetIterator(dataset)
            iterated_repeats_n += 1
            continue
        yield doc


def iter_and_transform(
    docs: Iterable[np.ndarray],
    org_vocab: Vocab,
    dst_vocab: Vocab,
) -> Iterator[np.ndarray]:
    """Remap every document's token ids from org_vocab to dst_vocab."""
    # Build the mapping lazily (on first iteration), then apply it per doc.
    transform = make_vocab_transform(org_vocab, dst_vocab)
    yield from map(transform, docs)


def iter_tiles_from_docs(
    docs: Iterable[np.ndarray],
    tile_size: int,
    tile_stride: int,
    begin_token: int,
    end_token: int,
    padding_token: int,
) -> Iterator[tuple[np.ndarray, int, int]]:
    """Flatten a stream of documents into a stream of (tile, offset, progress)."""
    for document in docs:
        tiles = iter_tiles_from_doc(
            document, tile_size, tile_stride, begin_token, end_token, padding_token)
        yield from tiles


def iter_batched_tiles_from_docs(
    docs: Iterable[np.ndarray],
    batch_size: int,
    tile_size: int,
    tile_stride: int,
    begin_token: int,
    end_token: int,
    padding_token: int,
) -> Iterator[tuple[np.ndarray, np.ndarray, np.ndarray]]:
    """Tile every document and group the tiles into fixed-size batches.

    Each of the batch_size "slots" consumes documents from the shared docs
    iterator independently, so consecutive tiles of one document stay in the
    same slot (same row index across successive batches).

    Yields (tiles, offsets, progresses) with shapes
    [batch_size, tile_size], [batch_size], [batch_size].

    NOTE: when docs runs out mid-batch, the partially assembled batch — and
    any tiles still pending in the other slots — is dropped, not padded.
    """
    docs_iterator = iter(docs)
    # One tile iterator per slot; start each one exhausted so every slot
    # pulls its first document on demand.
    samples_iterator = [iter(()) for _ in range(batch_size)]

    while True:
        batch = []
        for sample_i in range(batch_size):
            while True:
                try:
                    sample = next(samples_iterator[sample_i])
                    batch.append(sample)
                    break
                except StopIteration:
                    # This slot finished its current document: fetch the next.
                    try:
                        doc = next(docs_iterator)
                    except StopIteration:
                        # No documents left: stop (discards the partial batch).
                        return

                    samples_iterator[sample_i] = iter_tiles_from_doc(doc,
                        tile_size, tile_stride, begin_token, end_token, padding_token)
                    continue

        # Transpose the list of (tile, offset, progress) into three stacked arrays.
        batch_tile, batch_offset, batch_progress = zip(*batch)
        batch_tile = np.stack(batch_tile, axis=0)
        batch_offset = np.stack(batch_offset, axis=0)
        batch_progress = np.stack(batch_progress, axis=0)
        yield batch_tile, batch_offset, batch_progress


def iter_tiles_from_doc(
    doc: np.ndarray,
    tile_size: int,
    tile_stride: int,
    begin_token: int,
    end_token: int,
    padding_token: int,
) -> Iterator[tuple[np.ndarray, int, int]]:
    """Split one tokenized document into fixed-size, possibly overlapping tiles.

    Yields (tile, offset, progress) triples: `tile` is an int32 array of
    tile_size tokens, `offset` is the document position the tile starts at
    (the begin marker counts as position 0), and `progress` counts the tokens
    newly covered relative to the previous tile (it can be 0 for a final tile
    that only appends the end marker).
    """
    tokens = np.asarray(doc, dtype=np.int32)
    n = len(tokens)

    # Whole document plus both markers fits in a single padded tile.
    if n + 2 <= tile_size:
        tile = np.full([tile_size], padding_token, dtype=np.int32)
        tile[0] = begin_token
        tile[1:n + 1] = tokens
        tile[n + 1] = end_token
        yield tile, 0, n
        return

    # Opening tile: begin marker followed by the first tile_size - 1 tokens.
    head = np.empty([tile_size], dtype=np.int32)
    head[0] = begin_token
    head[1:] = tokens[:tile_size - 1]
    yield head, 0, tile_size - 1

    # Interior tiles: strided full-size windows over the token array.
    pos = tile_stride - 1
    while pos + tile_size <= n:
        yield tokens[pos:pos + tile_size], pos + 1, tile_stride
        pos += tile_stride

    # Closing tile: remaining tokens, the end marker, then padding.
    remaining = n - pos
    tail = np.full([tile_size], padding_token, dtype=np.int32)
    tail[:remaining] = tokens[pos:]
    tail[remaining] = end_token
    yield tail, pos + 1, tile_stride - pos - tile_size + n


def iter_and_load(
    tiles: Iterable[tuple[np.ndarray, np.ndarray, np.ndarray]],
    device: str | torch.device | None,
) -> Iterator[tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
    """Convert numpy tile batches into torch tensors on the target device."""
    for tile_np, offset_np, progress_np in tiles:
        yield (
            torch.asarray(tile_np, dtype=torch.int64, device=device),
            # Offsets become float so they can be added to float positions later.
            torch.asarray(offset_np, dtype=torch.float32, device=device),
            torch.asarray(progress_np, dtype=torch.int64, device=device),
        )


def iter_and_scatter(
    tiles: Iterator[tuple[torch.Tensor, torch.Tensor, torch.Tensor]] | None,
    batch_size: int,
    context_size: int,
    device: str | torch.device | None = None,
) -> Iterator[tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
    """Distribute batches from rank 0 to every process in the group.

    Rank 0 passes the real tile iterator; every other rank passes None and
    receives its shard of each batch via torch.distributed.scatter. All ranks
    must step this generator in lockstep, since each step performs collective
    broadcast/scatter calls.

    :param tiles: source iterator on rank 0 — its batches must have
        batch_size * world_size rows; None on all other ranks.
    :param batch_size: per-rank batch size after scattering.
    :param context_size: scattered tiles have context_size + 1 tokens.
    :param device: device for the control flag and receive buffers.
        NOTE(review): presumably this must match the device the rank-0
        tensors live on for the collectives to work — confirm.
    """
    while True:
        # Rank 0 pulls the next batch; other ranks have nothing to pull.
        if tiles is not None:
            try:
                # noinspection PyTypeChecker
                sample = next(tiles)
            except StopIteration:
                sample = None
        else:
            sample = None

        # Broadcast from rank 0 whether another batch exists, so every rank
        # terminates at the same step.
        has_next = sample is not None
        has_next = torch.asarray(has_next, dtype=torch.bool, device=device)
        torch.distributed.broadcast(has_next, 0)
        has_next = bool(has_next.cpu())
        if not has_next:
            return

        if sample is not None:
            # Rank 0: split the global batch into per-rank chunks along the
            # batch dimension; chunk 0 is rank 0's own share.
            tile, offset, progress_tokens_n = sample
            tile_list = list(torch.split(tile, batch_size, dim=0))
            offset_list = list(torch.split(offset, batch_size, dim=0))
            progress_tokens_n_list = list(torch.split(progress_tokens_n, batch_size, dim=0))
            tile = tile_list[0]
            offset = offset_list[0]
            progress_tokens_n = progress_tokens_n_list[0]
        else:
            # Non-zero ranks: allocate receive buffers of the per-rank shape
            # and pass no scatter list (only the source rank provides one).
            tile_list = None
            offset_list = None
            progress_tokens_n_list = None
            tile = torch.empty([batch_size, context_size + 1], dtype=torch.int64, device=device)
            offset = torch.empty([batch_size], dtype=torch.float32, device=device)
            progress_tokens_n = torch.empty([batch_size], dtype=torch.int64, device=device)
        # Scatter from the default source rank 0, one collective per field.
        torch.distributed.scatter(tile, tile_list)
        torch.distributed.scatter(offset, offset_list)
        torch.distributed.scatter(progress_tokens_n, progress_tokens_n_list)
        yield tile, offset, progress_tokens_n


def iter_and_postprocess(
    tiles: Iterable[tuple[torch.Tensor, torch.Tensor, torch.Tensor]]
) -> Iterator[tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]:
    """Split tiles into shifted input/target pairs with positions and a target mask."""
    for tile, offset, progress_tokens_n in tiles:
        inputs = tile[:, :-1]
        targets = tile[:, 1:]
        # Absolute positions: per-sample offset plus the index within the tile.
        steps = torch.arange(inputs.shape[-1], dtype=offset.dtype, device=offset.device)
        positions = torch.unsqueeze(offset, dim=-1) + steps
        # Targets below 1 are masked out — presumably index 0 is padding;
        # confirm against the vocab definition.
        mask = targets >= 1
        yield inputs, positions, targets, mask, progress_tokens_n
