from os.path import abspath, join

import numpy as np

from backend.experiment.pangu.tokenization_jieba import JIEBATokenizer
from backend.experiment.framework.tokenizer import BaseTokenizer


class PanguTokenizer(BaseTokenizer):
    """Tokenizer backed by the pre-trained vocabulary shipped next to this module.

    Wraps the project's ``JIEBATokenizer`` (loaded from the sibling
    ``tokenizer`` directory) behind the ``BaseTokenizer`` interface.
    """

    def __init__(self):
        # Resolve the vocab directory relative to this file:
        # <this_file>/../tokenizer normalized to an absolute path.
        vocab_dir = abspath(join(__file__, '../tokenizer'))
        self.__tokenizer = JIEBATokenizer(
            join(vocab_dir, 'vocab.vocab'),
            join(vocab_dir, 'vocab.model'),
        )
        # NOTE(review): token id 0 serves as both the sample-start and the
        # unknown token here — presumably the vocabulary's convention;
        # confirm against the vocab file.
        super().__init__(
            sample_start_token=[0],
            paragraph_sep_token=self.__tokenizer.tokenize('\n\n'),
            unknown_token=[0],
        )

    def tokenize(self, text: str) -> np.ndarray:
        """Encode *text* and return the token ids as a numpy array."""
        ids = self.__tokenizer.tokenize(text)
        return np.array(ids)

    def token_texts(self, tokens: np.ndarray) -> str:
        """Map an array of token ids back to their surface strings.

        NOTE(review): ``convert_ids_to_tokens`` looks like it returns a
        list of strings rather than a single ``str`` — the return
        annotation may be inaccurate; verify against ``JIEBATokenizer``.
        """
        return self.__tokenizer.convert_ids_to_tokens(tokens.tolist())


# Module-level singleton, constructed at import time. NOTE(review): this
# eagerly loads the vocab/model files from disk on import — consider lazy
# construction if import-time cost matters.
tokenizer = PanguTokenizer()
