import numpy as np
from .tokenizer import TextTokenizer

from .specials import SpecialsTextTokenizer

# Registry key under which the unknown-token special is stored in
# SpecialsTextTokenizer (passed via added_specials/reused_specials and
# looked up again through get_special_wid).
unk_name = "unk"


class TruncatedTextTokenizer(SpecialsTextTokenizer):
    """Tokenizer wrapper that truncates the base vocabulary.

    Token ids below ``vocab_tokens_n`` pass through unchanged; every id at
    or above that cutoff is replaced with the word id of a single "unk"
    special token. The "unk" special is either registered as a new special
    on top of the base tokenizer (``unk_wid is None``) or bound to an
    existing base word id (``unk_wid`` given).
    """

    def __init__(self, base: TextTokenizer, vocab_tokens_n: int, unk_wid: int | None = None):
        """
        :param base: tokenizer whose vocabulary is being truncated.
        :param vocab_tokens_n: truncated vocabulary size; must be strictly
            smaller than ``base.vocab_tokens_n``.
        :param unk_wid: existing base word id to reuse for the "unk"
            special, or ``None`` to register a fresh special token.
        :raises ValueError: if ``vocab_tokens_n`` does not actually
            truncate the base vocabulary.
        """
        # Explicit raise instead of `assert`: input validation must still
        # run under `python -O`, which strips assert statements.
        if base.vocab_tokens_n <= vocab_tokens_n:
            raise ValueError(
                f"vocab_tokens_n ({vocab_tokens_n}) must be strictly smaller "
                f"than the base vocabulary size ({base.vocab_tokens_n})")
        if unk_wid is None:
            added_specials = [unk_name]
            reused_specials = {}
        else:
            added_specials = []
            reused_specials = {unk_name: unk_wid}

        super().__init__(
            base=base,
            added_specials=added_specials,
            reused_specials=reused_specials)
        self._vocab_tokens_n = vocab_tokens_n

    @property
    def vocab_tokens_n(self) -> int:
        """Truncated vocabulary size (shadows the base tokenizer's)."""
        return self._vocab_tokens_n

    @property
    def unk_wid(self) -> int | None:
        """Word id of the "unk" special token.

        NOTE(review): presumably always an int after __init__ registers the
        special; the ``int | None`` return mirrors ``get_special_wid`` —
        confirm against SpecialsTextTokenizer.
        """
        return self.get_special_wid(unk_name)

    def transform(self, tokens: np.ndarray) -> np.ndarray:
        """Run the base transform, then clamp out-of-vocabulary ids.

        Every token id >= ``vocab_tokens_n`` is replaced by ``unk_wid``.
        """
        tokens = super().transform(tokens)
        # NOTE(review): a reused unk_wid that is itself >= vocab_tokens_n
        # would survive here as an out-of-range id — presumed intentional;
        # verify callers always reuse an in-range id.
        tokens = np.where(tokens < self.vocab_tokens_n, tokens, self.unk_wid)
        return tokens
