from transformers import AutoTokenizer
from collections import Counter


def func1():
  """Count token frequencies over sample texts and save an extended tokenizer.

  Loads the pretrained tokenizer once, tokenizes each sample text, counts
  token frequencies, adds every observed token to the tokenizer's vocabulary,
  and writes the extended tokenizer to ``new_tokenizer_path``.
  """
  # Sample data
  texts = ["这是一个示例句子", "另一个示例句子"]

  # Load the pretrained tokenizer ONCE. The original code called
  # from_pretrained inside the loop (reloading the tokenizer for every text)
  # and then loaded a second copy to add tokens — both are hoisted here.
  tokenizer = AutoTokenizer.from_pretrained("../model/ernie-tune-chn")

  # Tokenize each text and collect all tokens
  all_tokens = []
  for text in texts:
    all_tokens.extend(tokenizer.tokenize(text))

  # Count token frequencies
  counter = Counter(all_tokens)

  # Extend the vocabulary with the observed tokens; add_tokens skips tokens
  # already present, so reusing the same instance is safe.
  tokenizer.add_tokens(list(counter.keys()))

  # Save the extended tokenizer
  tokenizer.save_pretrained("new_tokenizer_path")

def fun2():
  """Load the original vocabulary and write it back out as ``vocab.txt``.

  NOTE(review): recent ``transformers`` releases expect ``save_vocabulary``
  to receive a save *directory* rather than a file name — confirm against
  the pinned transformers version before relying on this.
  """
  from transformers import BertTokenizer

  # Load the original vocabulary (re-mapping indices via the tokenizer)
  bert_tok = BertTokenizer.from_pretrained("../model/ernie-tune-chn")
  # Persist the vocabulary to disk
  bert_tok.save_vocabulary("vocab.txt")

if __name__ == '__main__':
  # Script entry point: re-save the vocabulary file.
  fun2()