import warnings
from typing import Iterable, Iterator

import numpy as np
import torch
from numpy.typing import ArrayLike

from .rewriting import TokenizedRewriting


def iter_rewritings_cache(
    cache: tuple[torch.Tensor, torch.Tensor, torch.Tensor], *,
    batch_size: int | None = None,
) -> Iterator[tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]:
    cache_tokens_wid, cache_prompt_len, cache_target_len = cache
    device = cache_tokens_wid.device
    cache_size = cache_tokens_wid.shape[0]
    context_size = cache_tokens_wid.shape[1] - 1
    batch_size = cache_size if batch_size is None else batch_size

    for head in range(0, cache_size, batch_size):
        tail = min(head + batch_size, cache_size)

        batch_tokens_index = torch.arange(context_size, device=device)
        batch_tokens_wid = cache_tokens_wid[head:tail]
        batch_prompt_len = cache_prompt_len[head:tail]
        batch_target_len = cache_target_len[head:tail]

        batch_tokens_in_wid = batch_tokens_wid[:, :-1]
        batch_tokens_in_mask = (
            batch_tokens_index < torch.unsqueeze(batch_prompt_len + batch_target_len - 1, dim=1))
        batch_tokens_out_wid = batch_tokens_wid[:, 1:]
        batch_tokens_out_mask = torch.logical_and(
            batch_tokens_index < torch.unsqueeze(batch_prompt_len + batch_target_len - 1, dim=1),
            batch_tokens_index >= torch.unsqueeze(batch_prompt_len - 1, dim=1))

        yield (batch_tokens_in_wid,
               batch_tokens_in_mask,
               batch_tokens_out_wid,
               batch_tokens_out_mask)


def make_rewritings_cache(
    rewritings: Iterable[TokenizedRewriting], *,
    context_size: int,
    pad_wid: ArrayLike = 0,
    device: torch.device | str | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Pack tokenized rewritings into fixed-size tensors for batched iteration.

    Each rewriting's prompt and target token ids are concatenated into one row
    of ``context_size + 1`` ids, right-padded with ``pad_wid``. Samples that do
    not fit are truncated (prompt first, then target) with a warning.

    Args:
        rewritings: Rewritings exposing ``prompt`` and ``target`` token-id
            sequences (assumed 1-D integer array-likes — see ``TokenizedRewriting``).
        context_size: Model context length; rows hold ``context_size + 1`` ids.
        pad_wid: Token id used to fill unused trailing positions.
        device: Device for the returned tensors.

    Returns:
        ``(cache_tokens_wid, cache_prompt_len, cache_target_len)`` — int64
        tensors of shapes ``[N, context_size + 1]``, ``[N]``, ``[N]`` —
        consumable by ``iter_rewritings_cache``.
    """
    rewritings = tuple(rewritings)
    pad_wid = torch.asarray(pad_wid, dtype=torch.int64, device=device)

    cache_tokens_wid = torch.empty([len(rewritings), context_size + 1], dtype=torch.int64, device=device)
    cache_prompt_len = torch.empty([len(rewritings)], dtype=torch.int64, device=device)
    cache_target_len = torch.empty([len(rewritings)], dtype=torch.int64, device=device)
    for sample_i, rewriting in enumerate(rewritings):
        prompt_len = len(rewriting.prompt)
        target_len = len(rewriting.target)
        if prompt_len + target_len > context_size + 1:
            warnings.warn(
                f"The context_size={context_size} is too small to hold the whole rewriting, "
                f"which requires at least {prompt_len + target_len - 1}. "
                f"Please consider increasing the context_size.")
        if prompt_len > context_size + 1:
            # Even the prompt alone overflows: truncate it and drop the target.
            sample_prompt_len = context_size + 1
            sample_target_len = 0
            sample_tokens = rewriting.prompt[:sample_prompt_len]
        elif prompt_len + target_len > context_size + 1:
            # Prompt fits; truncate the target to the remaining room.
            sample_prompt_len = prompt_len
            sample_target_len = context_size + 1 - prompt_len
            sample_tokens = np.concatenate([rewriting.prompt, rewriting.target[:sample_target_len]])
        else:
            # Whole rewriting fits.
            sample_prompt_len = prompt_len
            sample_target_len = target_len
            sample_tokens = np.concatenate([rewriting.prompt, rewriting.target])

        sample_len = sample_prompt_len + sample_target_len
        cache_tokens_wid[sample_i, :sample_len] = torch.asarray(
            sample_tokens, dtype=torch.int64, device=device)
        cache_tokens_wid[sample_i, sample_len:] = pad_wid
        # Plain Python ints assign directly into int64 tensors; no torch.asarray needed.
        cache_prompt_len[sample_i] = sample_prompt_len
        cache_target_len[sample_i] = sample_target_len

    return cache_tokens_wid, cache_prompt_len, cache_target_len
