from collections import Counter
from collections.abc import Iterable

import numpy as np
from tqdm import tqdm

from zkl_llmpt_iterator.tokenizer.tokenizer import TextTokenizer
from .vocab import SimpleVocabWithCount, Vocab


class UnicodeTokenizer(TextTokenizer):
    """Character-level tokenizer: each Unicode character maps to one vocab id."""

    def __init__(self, vocab: Vocab):
        # Vocabulary that provides char -> id and id -> char lookups.
        self._vocab = vocab

    @property
    def vocab(self) -> Vocab:
        """The underlying vocabulary object."""
        return self._vocab

    @property
    def vocab_tokens_n(self) -> int:
        """Number of distinct tokens (characters) in the vocabulary."""
        return len(self._vocab)

    def encode(self, text: str) -> np.ndarray:
        """Map every character of *text* to its vocab id as an int64 array."""
        index_of = self._vocab.index_token
        return np.fromiter((index_of(ch) for ch in text), dtype=np.int64, count=len(text))

    def decode(self, tokens: np.ndarray) -> str:
        """Concatenate the characters looked up for each id in *tokens*."""
        return "".join(map(self._vocab.get_token, tokens))


def build_unicode_tokenizer(texts: Iterable[str]) -> UnicodeTokenizer:
    """Build a character-level tokenizer from a corpus of texts.

    Counts every character across *texts* (with a tqdm progress bar),
    then fills a SimpleVocabWithCount in descending-frequency order.
    """
    char_counts: Counter = Counter()
    for sample in tqdm(texts):
        char_counts.update(sample)

    vocab = SimpleVocabWithCount()
    # most_common() yields characters from most to least frequent, so
    # low ids correspond to frequent characters.
    for char, freq in char_counts.most_common():
        vocab.add_token(char, freq)

    return UnicodeTokenizer(vocab)
