
TOKEN_PAD = '[PAD]'      # padding token
TOKEN_UNK = '[UNK]'      # out-of-vocabulary token
TOKEN_START = '[GO]'     # start-of-sentence token
TOKEN_END = '[EOS]'      # end-of-sentence token
TOKEN_MASK = '[MASK]'    # mask token (reserved; not used by ChartTokenizer itself)


class ChartTokenizer(object):
    """Character-level tokenizer.

    Splits text into individual characters, maps them to integer ids via a
    fixed vocabulary, and handles the special [GO]/[EOS]/[UNK]/[PAD] markers
    for encoding and decoding.
    """

    def __init__(self,
                 token_dict,
                 token_start=TOKEN_START,
                 token_end=TOKEN_END,
                 token_unk=TOKEN_UNK,
                 token_pad=TOKEN_PAD):
        """Character-level token encoder.

        :param token_dict: vocabulary mapping {char: index, ...}
        :param token_start: start-of-sentence marker, default '[GO]'
        :param token_end: end-of-sentence marker, default '[EOS]'
        :param token_unk: out-of-vocabulary marker, default '[UNK]'
        :param token_pad: padding marker, default '[PAD]'
        """
        self._token_dict = token_dict
        # Inverse mapping {index: char}, used by decode().
        self._token_dict_inv = {v: k for k, v in token_dict.items()}
        self._token_start = token_start
        self._token_end = token_end
        self._token_unk = token_unk
        self._token_pad = token_pad

        # Fall back to the conventional ids 0-3 when a special marker is
        # absent from the vocabulary.
        self._pad_index = self._token_dict.get(self._token_pad, 0)
        self._unk_index = self._token_dict.get(self._token_unk, 1)
        self._start_index = self._token_dict.get(self._token_start, 2)
        self._end_index = self._token_dict.get(self._token_end, 3)

    @staticmethod
    def _truncate(tokens, max_len=None, start=True, end=True):
        """Truncate ``tokens`` in place so that, together with the optional
        start/end markers, the sequence fits within ``max_len``.

        :param tokens: list of tokens, mutated in place
        :param max_len: target total length; None means no truncation
        :param start: whether a start marker will be prepended later
        :param end: whether an end marker will be appended later
        """
        if max_len is None:
            return
        n = (1 if start else 0) + (1 if end else 0)
        # Clamp at 0: a negative slice start (max_len < n) would delete from
        # the wrong end of the list instead of clearing it entirely.
        del tokens[max(0, max_len - n):]

    def _convert_tokens_to_ids(self, tokens):
        """Map tokens to ids, substituting the UNK id for unknown tokens."""
        return [self._token_dict.get(token, self._unk_index) for token in tokens]

    def tokenize(self, text):
        """Split text into character tokens.

        Example: "机器学习" -> ['机', '器', '学', '习']
        """
        tokens = self._tokenize(text)
        return tokens

    def encode(self, text, max_len=None, start=True, end=True):
        """Encode text into a list of token ids.

        :param text: input string
        :param max_len: if given, truncate and pad the result to this length
        :param start: prepend the start-of-sentence id
        :param end: append the end-of-sentence id
        :return: list of int ids
        """
        tokens = self._tokenize(text)
        self._truncate(tokens, max_len, start, end)
        token_ids = self._convert_tokens_to_ids(tokens)
        if start:
            token_ids = [self._start_index] + token_ids
        if end:
            token_ids = token_ids + [self._end_index]
        if max_len is not None:
            # Pad up to max_len; a non-positive pad_len leaves the list as-is.
            pad_len = max_len - len(token_ids)
            token_ids += [self._pad_index] * pad_len
        return token_ids

    def decode(self, ids, cut_end=True):
        """Decode ids back into tokens.

        :param ids: list of int ids
        :param cut_end: bool, drop the end marker and everything after it
        :return: list of tokens
        """
        ids_new = []
        if cut_end:
            for i in ids:
                if i == self._end_index:
                    break
                ids_new.append(i)
        else:
            ids_new = ids
        tokens = [self._token_dict_inv.get(i, self._token_unk) for i in ids_new]
        return tokens

    def _tokenize(self, text):
        """Split text into tokens (simplified handling).

        Every character becomes its own candidate word (whitespace is
        discarded), then each word goes through word-piece matching.
        """
        spaced = ''
        for ch in text:
            spaced += ch + ' '
        tokens = []
        for word in spaced.strip().split():
            tokens += self._word_piece_tokenize(word)
        return tokens

    def _word_piece_tokenize(self, word):
        """Greedy longest-match-first word-piece split.

        Non-initial pieces are prefixed with '##'. When no sub-piece matches,
        the single character is emitted as-is (it maps to UNK later).
        """
        if word in self._token_dict:
            return [word]
        tokens = []
        start, stop = 0, 0
        while start < len(word):
            stop = len(word)
            while stop > start:
                sub = word[start:stop]
                if start > 0:
                    sub = '##' + sub
                if sub in self._token_dict:
                    break
                stop -= 1
            if start == stop:
                # No match at all: keep the single character and advance.
                stop += 1
            tokens.append(sub)
            start = stop
        return tokens

