from transformers.tokenization_utils import PreTrainedTokenizer

import torch
import sentencepiece
import jieba


class GPTPanguTokenizer(PreTrainedTokenizer):
    """Tokenizer for PanGu-Alpha: jieba word segmentation followed by a
    SentencePiece model.

    Spaces and newlines in the input are mapped to the placeholder glyphs
    \u2582/\u2583 before SentencePiece encoding, and mapped back in
    ``decode`` — otherwise SentencePiece would swallow the original
    whitespace.

    Ref: https://git.openi.org.cn/PCL-Platform.Intelligence/PanGu-Alpha/src/branch/master/tokenization_jieba.py
    """

    vocab_files_names = {
        "model_file": "vocab.model"
    }

    def __init__(
            self,
            model_file,
            eos_token="<eot>",
            pad_token="<pad>",
            unk_token="<unk>",
            **kwargs
    ):
        # Load the SentencePiece model *before* super().__init__ — the base
        # class may call vocab-dependent methods (e.g. get_vocab) during
        # initialization.
        self.sp = sentencepiece.SentencePieceProcessor()
        self.sp.Load(model_file=model_file)
        # One-pass char mapping: " " -> \u2582, "\n" -> \u2583.
        self.translator = str.maketrans(" \n", "\u2582\u2583")

        super().__init__(
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            **kwargs
        )

        # special token ids
        # NOTE(review): newer transformers versions expose eos_token_id as a
        # read-only property; this assignment assumes an older base class —
        # confirm against the pinned transformers version.
        self.eos_token_id = self.sp.piece_to_id(self.eos_token)

    def _preprocess(self, text):
        """Segment *text* with jieba and apply the whitespace mapping."""
        seg_list = [x.translate(self.translator) for x in jieba.cut(text, cut_all=False)]
        return " ".join(seg_list)

    def tokenize(self, text, **kwargs):
        """Tokenize a string directly to SentencePiece *ids* (not pieces)."""
        return self.sp.encode(self._preprocess(text))

    def convert_tokens_to_ids(self, tokens):
        # Identity pass-through: tokenize() already returns ids, so there is
        # nothing left to convert. Do not "fix" this without changing
        # tokenize() accordingly.
        return tokens

    def convert_ids_to_tokens(self, ids):
        # Deliberately returns the decoded *string*, not a list of piece
        # strings — downstream code relies on this shortcut.
        return self.decode(ids)

    def decode(self, tokens, **kwargs):
        """Decode ids back to text, restoring spaces and newlines.

        Accepts a list of ids or a torch.Tensor of ids. Extra kwargs
        (e.g. skip_special_tokens) are accepted but ignored.
        """
        if isinstance(tokens, torch.Tensor):
            tokens = tokens.tolist()

        text = self.sp.decode(tokens)
        # Drop SentencePiece's inter-word spaces, then restore the original
        # whitespace from the placeholder glyphs.
        text = text.replace(' ', '').replace('\u2582', ' ').replace('\u2583', '\n')
        return text

    def get_vocab(self):
        """Returns vocab as a dict mapping piece string -> id."""
        vocab = {self.sp.id_to_piece(idx): idx for idx in range(self.sp.get_piece_size())}
        vocab.update(self.get_added_vocab())
        return vocab

    def get_added_vocab(self):
        """Returns added tokens dict (none for this tokenizer)."""
        return {}

    def _tokenize(self, text, **kwargs):
        """Returns the SentencePiece *pieces* for *text* (strings, not ids)."""
        return self.sp.encode_as_pieces(self._preprocess(text))

    @property
    def vocab_size(self):
        """Returns the size of the SentencePiece vocabulary."""
        return self.sp.get_piece_size()

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Returns a list of 0s and 1s, where 1 marks the eos special token."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1 if token in [self.eos_token_id] else 0 for token in token_ids_0]
        return [1 if token in [self.eos_token_id] else 0 for token in token_ids_0 + token_ids_1]
