# %% 测试huggingface transformer并增加词表
import json
import os
from collections import Counter

from tqdm import tqdm
from transformers import BertTokenizer, BertModel

# %%
# Load the pretrained Chinese RoBERTa tokenizer and model from a local checkpoint.
PRETRAINED_DIR = "./chinese-roberta-wwm-ext"
tokenizer = BertTokenizer.from_pretrained(PRETRAINED_DIR)
model = BertModel.from_pretrained(PRETRAINED_DIR)
# %% Load the competition data so we can collect every token and its frequency.
filename = "./preprocess_data/train.json"
with open(filename, encoding="utf-8") as fp:
    data = json.load(fp)
# %% Count per-character frequencies over every sample's texts plus its name.
# Quote marks and spaces are excluded from the count.
SKIP_CHARS = {"“", "”", " "}
word2count = Counter()
for v in tqdm(data.values()):
    # Join texts and name into one string WITHOUT mutating v["texts"]:
    # the original `texts.append(v["name"])` appended the name into the
    # loaded `data` dict in place, corrupting it for any later reuse.
    combined = "".join(v["texts"]) + v["name"]
    word2count.update(ch for ch in combined if ch not in SKIP_CHARS)
# %% Drop characters whose frequency falls below the threshold.
threshold = 5
word2count = {
    char: freq
    for char, freq in word2count.items()
    if freq >= threshold
}
# %% Count how many characters were not already in the tokenizer's vocabulary.
# add_tokens returns the number of tokens actually added (0 if already present),
# so a truthy result means this character is new to the vocab.
count = 0
for word in tqdm(word2count):
    added = tokenizer.add_tokens(word)
    if not added:
        continue
    count += 1
    print(word, word2count[word])
print(f"新增token数量: {count}")
# %% Resize the embedding matrix to the expanded vocabulary and persist both
# the model and the tokenizer so they stay in sync.
print(len(tokenizer))
model.resize_token_embeddings(len(tokenizer))
folder = "./models/roberta-pretrained-with-expand-vocab"
# exist_ok=True replaces the racy exists()+makedirs() check (TOCTOU) and
# creates intermediate directories in one call.
os.makedirs(folder, exist_ok=True)
model.save_pretrained(folder)
tokenizer.save_pretrained(folder)
