import numpy as np
import abc
from typing import List, Union

from backend.experiment.framework.util import ListProgressVisibleMultiprocessing


class BaseTokenizer(metaclass=abc.ABCMeta):
    """Abstract tokenizer interface.

    Holds three special tokens — sample start, paragraph separator and
    unknown — each normalized to an ``np.ndarray`` at construction time
    and exposed through read-only properties.  Concrete subclasses must
    implement ``tokenize`` and ``token_texts``.
    """

    def __init__(
            self, sample_start_token: Union[int, list, np.ndarray],
            paragraph_sep_token: Union[int, list, np.ndarray],
            unknown_token: Union[int, list, np.ndarray]):
        # Normalize every token spec to an ndarray exactly once, up front.
        self.__start = np.array(sample_start_token)
        self.__sep = np.array(paragraph_sep_token)
        self.__unk = np.array(unknown_token)

    @property
    def sample_start_token(self) -> np.ndarray:
        """The sample-start special token, as an ndarray."""
        return self.__start

    @property
    def paragraph_sep_token(self) -> np.ndarray:
        """The paragraph-separator special token, as an ndarray."""
        return self.__sep

    @property
    def unknown_token(self) -> np.ndarray:
        """The unknown special token, as an ndarray."""
        return self.__unk

    @abc.abstractmethod
    def tokenize(self, text: str) -> np.ndarray:
        """Convert *text* into an array of token ids."""

    @abc.abstractmethod
    def token_texts(self, tokens: np.ndarray) -> str:
        """Convert an array of token ids back into text."""


class SimpleSpecialTokenizer(BaseTokenizer):
    """Wraps an existing tokenizer so that special markers such as
    [SEP]/[CLS]/[UNK]/[MASK] — which the wrapped tokenizer cannot
    recognize — are mapped directly to their configured ids.

    Each known special token in the input is first rewritten to a
    guarded placeholder ``||[[<id>]]||``; the text is then split on
    ``||`` so each placeholder becomes its own segment, placeholders
    are parsed back to raw ids, and every other segment goes through
    the wrapped tokenizer.
    """
    split_tag = '||'
    special_token_begin = '[['
    special_token_end = ']]'

    def __init__(
            self,
            sample_start_token: Union[int, list, np.ndarray],
            paragraph_sep_token: Union[int, list, np.ndarray],
            unknown_token: Union[int, list, np.ndarray],
            tokenizer,
            special_tokens_id: dict
    ):
        """
        :param tokenizer: the wrapped (original) tokenizer
        :param special_tokens_id: mapping from special-token text to its id
        """
        super().__init__(sample_start_token, paragraph_sep_token, unknown_token)
        self.__tokenizer = tokenizer
        self.__special_tokens = special_tokens_id

    def tokenize(self, text: str) -> np.ndarray:
        # Replace every known special token with ||[[id]]|| so it
        # survives the split below as a standalone segment.
        for token, token_id in self.__special_tokens.items():
            text = text.replace(
                token,
                f'{self.split_tag}'
                f'{self.special_token_begin}{token_id}{self.special_token_end}'
                f'{self.split_tag}'
            )
        # Drop empty segments produced by adjacent or edge markers.
        segments = [seg for seg in text.split(self.split_tag) if seg]
        tokens_id = []
        begin, end = self.special_token_begin, self.special_token_end
        for seg in segments:
            # Fix: compare/slice using the class-level markers instead of
            # hard-coded '[[' / ']]' with a fixed width of 2, so a subclass
            # overriding the markers keeps encoding and decoding consistent.
            if seg.startswith(begin) and seg.endswith(end):
                tokens_id.append(int(seg[len(begin):len(seg) - len(end)]))
            else:
                tokens_id += self.__tokenizer.convert_tokens_to_ids(
                    self.__tokenizer.tokenize(seg)
                )
        return np.array(tokens_id)

    def token_texts(self, tokens: np.ndarray) -> str:
        """Decode an array of token ids back into a single string."""
        return ''.join(self.__tokenizer.convert_ids_to_tokens(list(tokens)))


class MultiprocessingTokenizer:
    """Runs a ``BaseTokenizer`` over lists of texts in parallel
    worker processes via ``ListProgressVisibleMultiprocessing``."""

    def __init__(self, tokenizer: BaseTokenizer,
                 processes: int = 8):
        self.__tokenizer = tokenizer
        self.__processes = processes

    def tokenize(self, texts: List[str], batch: int, verbose: bool = False) -> \
            List[np.ndarray]:
        """Tokenize *texts* across worker processes.

        :param texts: input strings to tokenize
        :param batch: batch size handed to the multiprocessing helper
        :param verbose: whether to display progress
        :return: one token-id array per input string
        """
        pool = ListProgressVisibleMultiprocessing(
            self.__processes, self._process_single)
        return pool.process(texts, verbose=verbose, batch=batch)

    def _process_single(self, text: str) -> np.ndarray:
        # Worker entry point: delegate one text to the wrapped tokenizer.
        return self.__tokenizer.tokenize(text)
