import os

from transformers import BertTokenizer

# Demo: batch-encode two Chinese sentences with a local bert-base-chinese
# tokenizer, inspect every returned field, then decode back to text.

# Allow overriding the machine-specific snapshot path via BERT_MODEL_DIR,
# falling back to the original local default for backward compatibility.
model_dir = os.environ.get(
    "BERT_MODEL_DIR",
    "/Users/wupeng/pythonProjects/huggingface/models/bert-base-chinese/models--bert-base-chinese/snapshots/c30a6ed22ab4564dc1e3b2ecbf6e766b0611a33f",
)

# Sample sentences to encode (kept verbatim — they are runtime data).
sents = [
    "你好，我是一个程序员，",
    "我目前掌握的技术栈有很多，主要的开发语言是Java，"
]

# Batch encoding: encode both sentences in a single call.
tokenizer = BertTokenizer.from_pretrained(model_dir)
out = tokenizer.batch_encode_plus(batch_text_or_text_pairs=sents,
                                  # Insert [CLS]/[SEP] around each sequence.
                                  add_special_tokens=True,
                                  # Truncate anything longer than max_length...
                                  truncation=True,
                                  max_length=15,
                                  # ...and pad shorter sequences up to it.
                                  padding="max_length",
                                  # Plain Python lists, not framework tensors.
                                  return_tensors=None,
                                  return_attention_mask=True,
                                  return_token_type_ids=True,
                                  return_special_tokens_mask=True,
                                  # Also report each sequence's token count.
                                  return_length=True)

# Print every field of the batch encoding. With max_length=15 and
# padding="max_length", each per-sentence list is exactly 15 entries:
# [CLS] <tokens> [SEP] followed by [PAD] ids, with attention_mask set to 0
# over the padding positions. (A previous stale example here showed
# length-6 output from an older max_length setting.)
for key, value in out.items():
    print(key, ":", value)

# Decode the ids back to text to sanity-check the round trip; padding and
# special tokens appear in the decoded string ([CLS]/[SEP]/[PAD]).
print(tokenizer.decode(out["input_ids"][0]))
print(tokenizer.decode(out["input_ids"][1]))
