
import torch
import torch.nn as nn
from transformers import AutoModel,AutoTokenizer
# huawei-noah/TinyBERT_4L_zh
# model_path = "../tiny_bert_model"
# model_name = "bert-base-chinese"
model_name = r'C:\Users\peter\.cache\huggingface\hub\models--bert-base-chinese\snapshots\8d2a91f91cc38c96bb8b4556ba70c392f8d5ee55'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)
# from transformers import BertTokenizer,BertModel,BertConfig
# model_name = 'bert-base-uncased'
# config = BertConfig.from_pretrained(model_name)	# 这个方法会自动从官方的s3数据库下载模型配置、参数等信息（代码中已配置好位置）
# tokenizer = BertTokenizer.from_pretrained(model_name)		 # 这个方法会自动从官方的s3数据库读取文件下的vocab.txt文件
# model = BertModel.from_pretrained(model_name)		# 这个方法会自动从官方的s3数据库下载模型信息

def get_bert_encode_for_single(text):
    """Encode a single text with the module-level bert-base-chinese model.

    :param text: input string to encode
    :return: the model's last hidden state, a tensor of shape
             (1, num_tokens, hidden_size); the special tokens
             ([CLS]/[SEP]) are excluded from the encoded sequence
    """
    # Tokenize to vocabulary ids without [CLS]/[SEP]. This states the intent
    # directly instead of encoding with specials and slicing them off ([1:-1]).
    indexed_tokens = tokenizer.encode(text, add_special_tokens=False)
    # Add a batch dimension: shape (1, num_tokens).
    tokens_tensor = torch.LongTensor([indexed_tokens])
    # Inference only — disable gradient tracking.
    with torch.no_grad():
        output = model(tokens_tensor)
    return output.last_hidden_state


if __name__ == '__main__':
    # Manual smoke test: encode a short greeting and display the tensor.
    sample = "你好啊"
    encoded = get_bert_encode_for_single(sample)
    print('text编码:', encoded)