"""Load a pretrained BERT-style tokenizer/config/model and demo tokenization.

NOTE(review): the checkpoint name refers to DistilBERT, but the Bert* classes
are used to load it. DistilBERT is a different architecture (no
token_type_ids, different layer names), so BertModel.from_pretrained on
DistilBERT weights will warn about missing/unexpected keys — confirm whether
the DistilBert* classes (DistilBertTokenizer/DistilBertModel) were intended.
"""
import numpy as np
import torch
from transformers import BertTokenizer, BertConfig, BertForMaskedLM, BertForNextSentencePrediction
from transformers import BertModel

# Hugging Face hub ids use hyphens, not underscores: 'distilbert_base_uncased'
# does not exist on the hub and from_pretrained would raise a
# repository-not-found error.
model_name = 'distilbert-base-uncased'
# Local directory holding the downloaded weights.
# NOTE(review): spelled "distillbert" (double l) on disk while the hub id uses
# "distilbert" — kept as-is to match the actual folder; verify the path exists.
MODEL_PATH = 'D:/Desktop/信息检索/Bert模型代码实践/distillbert_base_uncased/'

# a. Build the tokenizer from the pretrained vocabulary.
tokenizer = BertTokenizer.from_pretrained(model_name)
# b. Load the model configuration.
model_config = BertConfig.from_pretrained(model_name)
# Modify the config so forward passes also return every layer's hidden
# states and attention maps (not just the final output).
model_config.output_hidden_states = True
model_config.output_attentions = True
# Load the model weights from the local path using the modified config.
bert_model = BertModel.from_pretrained(MODEL_PATH, config=model_config)

# encode() wraps the ids in [CLS] ... [SEP] (101 ... 102).
print(tokenizer.encode('吾儿莫慌'))   # [101, 1434, 1036, 5811, 2707, 102]

# encode_plus() on a sentence pair returns input_ids, token_type_ids and
# attention_mask; token_type_ids marks the second sentence's tokens with 1.
sen_code = tokenizer.encode_plus('这个故事没有终点', "正如星空没有彼岸")
# print(sen_code)
# Expected output shape:
#  {'input_ids': [101, 6821, 702, 3125, 752, 3766, 3300, 5303, 4157, 102, 3633, 1963, 3215, 4958, 3766, 3300, 2516, 2279, 102],
#  'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
#  'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}