from collections.abc import Iterable, Iterator, Mapping

import numpy as np

from .tokenizer import TextTokenizer


class SpecialsTextTokenizer(TextTokenizer):
    """Tokenizer wrapper that extends a base tokenizer with special tokens.

    Two flavors of specials are supported:
      * *added* specials — appended after the base vocabulary, so they get
        fresh word ids starting at ``base.vocab_tokens_n``;
      * *reused* specials — names mapped onto word ids the base tokenizer
        already owns.

    When the wrapped ``base`` is itself a ``SpecialsTextTokenizer``, its
    specials are folded in as reused ones (explicit ``reused_specials``
    entries take precedence on name collisions).
    """

    def __init__(self,
        base: TextTokenizer, *,
        added_specials: Iterable[str] | None = None,
        reused_specials: Mapping[str, int] | None = None,
    ):
        self._base = base
        self._added_specials = () if added_specials is None else tuple(added_specials)

        reused = {} if reused_specials is None else dict(reused_specials)
        if isinstance(base, SpecialsTextTokenizer):
            # Inherit the wrapped tokenizer's specials; caller-supplied
            # entries override inherited ones on name collision.
            reused = {**base.specials, **reused}
        self._reused_specials = reused

        # Build both lookup directions in one pass. Order matters: reused
        # entries come last so they win over added ones when either a name
        # or a wid collides (mirrors dict overwrite semantics).
        offset = base.vocab_tokens_n
        pairs = [(name, offset + pos) for pos, name in enumerate(self._added_specials)]
        pairs.extend(reused.items())

        self._specials_name_to_wid = {}
        self._specials_wid_to_name = {}
        for name, wid in pairs:
            self._specials_name_to_wid[name] = wid
            self._specials_wid_to_name[wid] = name

    @property
    def base(self) -> TextTokenizer:
        """The wrapped base tokenizer."""
        return self._base

    @property
    def specials(self) -> Mapping[str, int]:
        """Mapping of special-token name to word id (added and reused)."""
        return self._specials_name_to_wid

    @property
    def vocab_tokens_n(self) -> int:
        """Total vocabulary size: base vocabulary plus added specials."""
        return self._base.vocab_tokens_n + len(self._added_specials)

    def get_special_wid(self, name: str) -> int:
        """Return the word id for special ``name`` (KeyError if unknown)."""
        return self._specials_name_to_wid[name]

    def get_special_name(self, wid: int) -> str:
        """Return the special name for ``wid`` (KeyError if not a special)."""
        return self._specials_wid_to_name[wid]

    def transform(self, tokens: np.ndarray) -> np.ndarray:
        """Hook for subclasses to post-process encoded tokens; identity here."""
        return tokens

    def encode(self, text: str) -> np.ndarray:
        """Encode ``text`` with the base tokenizer, then apply ``transform``."""
        return self.transform(self._base.encode(text))

    def decode(self, tokens: np.ndarray) -> str:
        """Decode ``tokens`` to text, rendering this layer's specials inline."""
        return "".join(self._iter_decode(tokens))

    def _iter_decode(self, tokens: np.ndarray) -> Iterator[str]:
        """Yield decoded fragments, splitting around this layer's added specials.

        Word ids at or above the base vocabulary size belong to this layer's
        added specials and are rendered as markers; everything between them
        (including any reused-special wids) is delegated to the base decoder.
        """
        boundary = self._base.vocab_tokens_n
        marker_positions, = np.where(tokens >= boundary)

        start = 0
        for pos in marker_positions:
            if pos > start:
                yield self._base.decode(tokens[start:pos])
            yield f"<|{self._specials_wid_to_name[tokens[pos]]}|>"
            start = pos + 1

        if start < len(tokens):
            yield self._base.decode(tokens[start:])
