import os
import json
import torch
import numpy as np
class AutoTokenizer():
    def __init__(self, len=-1, data_path:str='data'):
        '''
        Simple character-level tokenizer backed by a vocabulary file and a
        special-token map.

        len:        maximum sequence length including the [cls] and [esp]
                    markers (padding counts toward it); -1 disables both
                    truncation and padding
        data_path:  directory that must contain special_token_map.json and
                    vocab.json in the expected format
        '''
        # token -> id and id -> token lookup tables
        self.to_id = {}
        self.to_token = {}
        self.len = len

        # Load the special-token map and the vocabulary.
        special_token_map_dir = os.path.join(data_path, 'special_token_map.json')
        vocab_dir = os.path.join(data_path, 'vocab.json')
        with open(special_token_map_dir, 'r', encoding='utf-8') as file:
            self.special = json.load(file)
        with open(vocab_dir, 'r', encoding='utf-8') as file:
            vocab = json.load(file)

        # Build both directions of the mapping; dict lookups are O(1).
        # Special tokens are inserted last, so they win on any id collision.
        for token, idx in vocab.items():
            self.to_id[token] = idx
            self.to_token[idx] = token
        for token, idx in self.special.items():
            self.to_id[token] = idx
            self.to_token[idx] = token

    def convert_tokens_to_ids(self, tokens:str):
        '''
        Encode a sentence into ids: [cls] + chars + [pad]... + [esp].

        Returns (ids, true_len) where true_len is the number of
        non-padding positions: the characters plus the two markers.
        '''
        # Truncate so the characters plus [cls]/[esp] fit within self.len.
        if self.len != -1:
            tokens = tokens[:self.len - 2]

        # Start with the beginning-of-sequence marker.
        result = [self.special['[cls]']]

        # Map each character; characters missing from the vocab become [unk].
        for char in tokens:
            result.append(self.to_id.get(char, self.special['[unk]']))

        # Pad up to the fixed length, then close with the end marker.
        # NOTE(review): padding is inserted *before* [esp]; conventionally
        # padding goes after the end token — confirm downstream expectations.
        if self.len != -1:
            result.extend([self.special['[pad]']] * (self.len - len(tokens) - 2))
        result.append(self.special['[esp]'])

        # BUG FIX: the original returned min(len(result+2), self.len), which
        # raises TypeError (list + int) on every call, and with self.len == -1
        # the min would have produced -1. The intended value is the unpadded
        # length: characters plus the [cls] and [esp] markers. After the
        # truncation above this never exceeds self.len.
        return result, len(tokens) + 2

    def convert_ids_to_tokens(self, ids_list:list):
        '''Decode a list of ids back into a string of tokens.

        Raises KeyError if an id is not in the vocabulary or special map.
        '''
        return ''.join(self.to_token[i] for i in ids_list)

    def __call__(self, tokens:str):
        '''Alias for convert_tokens_to_ids.'''
        return self.convert_tokens_to_ids(tokens)
    
if __name__=='__main__':
    # Round-trip demo: encode a mixed CJK/ASCII/emoji sentence, then decode it.
    sentence="你好， 世界 hello world 🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱🐱"
    tokenizer=AutoTokenizer(len=30)
    # BUG FIX: the tokenizer returns (ids, true_len); the original passed the
    # whole tuple to convert_ids_to_tokens (KeyError on the inner list) and
    # printed len() of the 2-tuple instead of the id sequence length.
    tokens, true_len = tokenizer(sentence)
    new_sentence=tokenizer.convert_ids_to_tokens(tokens)
    print(new_sentence)
    print(tokens)
    print(len(tokens), true_len)
            
        