import json
# On-disk locations for the tokenizer artifacts.
# NOTE(review): these are Windows-style backslash paths, so the script is
# not portable to POSIX systems as written — confirm the target platform.
VOCAB_PATH=r'data\vocab.json'  # char -> token-id mapping (JSON object)
SPECIAL_TOKEN_MAP_PATH=r'data\special_token_map.json'  # predefined tokens; only its length is used (as an id offset)
CONFIG_PATH=r'data\config.json'  # vocab / special-token size summary

def add_token(m: dict, data: str) -> None:
    """Tally each character of *data* into the count map *m*, in place.

    Args:
        m: mapping of character -> running occurrence count; mutated in place.
           (Fixed annotation: the original declared ``m: map``, but ``map``
           is the builtin iterator class, not a mapping type.)
        data: text whose individual characters are counted.
    """
    for char in data:
        # dict.get with a default replaces the manual membership check.
        m[char] = m.get(char, 0) + 1
            
def load_vocab(m: dict) -> None:
    """Merge the saved vocabulary at VOCAB_PATH into *m*, in place.

    Bug fix: the original did ``m = json.load(file)``, which only rebinds
    the local name ``m`` — the caller's dictionary was never modified and
    the previously saved vocabulary was silently discarded. ``dict.update``
    mutates the caller's dict so the loaded entries are actually visible.

    Args:
        m: dictionary to receive the loaded vocabulary entries; mutated
           in place. (Annotation fixed from the incorrect ``m: map``.)
    """
    with open(VOCAB_PATH, 'r', encoding='utf-8') as file:
        m.update(json.load(file))

def main():
    """Rebuild the character vocabulary file and its size config.

    Reads the corpus text file character by character, assigns every
    distinct character a sequential token id offset by the number of
    special tokens (assumed to occupy ids 0..len_special_token-1 —
    TODO confirm against the special-token map file), then writes the
    vocab JSON and a small size-summary config JSON.
    """
    my_map={}
    # Load the previously saved vocabulary so its keys are retained.
    # NOTE(review): verify that load_vocab actually populates my_map —
    # if it rebinds its parameter locally, the previous vocabulary is
    # silently dropped and only the current corpus contributes keys.
    load_vocab(my_map)
    
    # Tally character occurrences from the corpus. The counts themselves
    # are never used below — only the key set matters, because every key
    # is reassigned a sequential id in the enumerate loop further down.
    with open(r'data\test.txt', 'r', encoding='utf-8')as file:
        for line in file:
            add_token(my_map, line)
    print(f"文档中共有不同的token {len(my_map)} 个")
    
    # Count the predefined special tokens; their number becomes the
    # starting offset for document-character token ids.
    with open(SPECIAL_TOKEN_MAP_PATH, 'r', encoding='utf-8')as file:
        special_token=json.load(file)
    len_special_token=len(special_token)
    
    # Replace each character's value with its token id, starting right
    # after the special-token id range. Insertion order determines ids.
    for idx, key in enumerate(my_map):
        my_map[key]=idx+len_special_token
        
    # Persist the vocabulary; ensure_ascii=False keeps non-ASCII
    # characters human-readable in the JSON file.
    with open(VOCAB_PATH, 'w', encoding='utf-8') as file:
        json.dump(my_map, file, indent=4, ensure_ascii=False)
        
    # Persist size metadata for downstream consumers: total vocab size
    # (special tokens + document characters) and the special-token count.
    with open(CONFIG_PATH, 'w', encoding='utf-8') as file:
        content={
            'vocab size':len_special_token+len(my_map),
            'special token size':len_special_token
        }
        json.dump(content, file, indent=4, ensure_ascii=False)
    

# Script entry point: run the vocabulary build only when executed
# directly, not when imported as a module.
if __name__ == '__main__':
    main()