import os
import json
from transformers import BertTokenizer
from fish_tool import logs


class Vocab:
    """Vocabulary wrapper around a BERT tokenizer plus a character-translation map.

    Loads ``vocab.txt`` (BERT wordpiece vocabulary) and ``trans_char.json``
    (a char -> char substitution map applied before tokenization) from *folder*,
    and re-exports the tokenizer attributes that callers use directly.
    """

    def __init__(self, folder):
        """Load tokenizer vocabulary and char-translation table from *folder*.

        Args:
            folder: directory containing ``vocab.txt`` and ``trans_char.json``.
        """
        vocab_path = os.path.join(folder, 'vocab.txt')
        trans_char_path = os.path.join(folder, 'trans_char.json')
        self.tokenizer = BertTokenizer(vocab_path)
        # Use a context manager so the file handle is closed promptly
        # (the original `json.load(open(...))` leaked the handle).
        with open(trans_char_path, encoding='utf8') as f:
            self.trans_char = json.load(f)

        # Re-export frequently used tokenizer attributes/methods so callers
        # can treat this Vocab as a drop-in tokenizer facade.
        self.pad_token_id = self.tokenizer.pad_token_id
        self.mask_token = self.tokenizer.mask_token
        self.sep_token_id = self.tokenizer.sep_token_id
        self.get_special_tokens_mask = self.tokenizer.get_special_tokens_mask
        self.convert_tokens_to_ids = self.tokenizer.convert_tokens_to_ids

    def save_vocabulary(self, model_save_path):
        """Persist both the tokenizer vocabulary and the char-translation map.

        Args:
            model_save_path: directory to write ``vocab.txt`` and
                ``trans_char.json`` into.
        """
        self.tokenizer.save_vocabulary(model_save_path)
        trans_char_path = os.path.join(model_save_path, 'trans_char.json')
        with open(trans_char_path, 'w', encoding='utf8') as f:
            json.dump(self.trans_char, f, ensure_ascii=False, indent=2)

    def __len__(self):
        """Return the tokenizer vocabulary size."""
        return len(self.tokenizer)

    def encode_plus(self, words, add_special_tokens=True, return_token_type_ids=True, return_attention_mask=True):
        """Encode *words* after applying the char-translation map.

        Adds [CLS]=101 at the beginning and [SEP]=102 at the end
        (when ``add_special_tokens`` is True).

        Args:
            words: sequence of tokens/characters to encode.

        Returns:
            The dict produced by ``BertTokenizer.encode_plus``.
        """
        # Substitute characters per trans_char; unknown chars pass through.
        words = [self.trans_char.get(w, w) for w in words]
        return self.tokenizer.encode_plus(words, add_special_tokens=add_special_tokens, return_token_type_ids=return_token_type_ids,
                                          return_attention_mask=return_attention_mask)

    def id_to_token(self, index):
        """Map a token id back to its token string, falling back to [UNK]."""
        return self.tokenizer.ids_to_tokens.get(index, self.tokenizer.unk_token)

    def trans_id_to_token(self, index):
        """Like :meth:`id_to_token`, but strips surrounding brackets from
        special tokens, e.g. ``[SEP]`` -> ``SEP``."""
        t = self.id_to_token(index)
        # startswith/endswith instead of t[0]/t[-1]: identical for non-empty
        # tokens, but avoids IndexError on an empty string.
        if t.startswith('[') and t.endswith(']'):
            t = t[1:-1]
        return t
