import torch
from transformers import BertTokenizer, BertModel

# Demo: tokenize a small batch of Chinese sentences with a local
# bert-base-chinese checkpoint and inspect the encoder's output shapes.
texts = [
    '你好呀',
    '我不好啊，good2013'
]

bert_tokenizer = BertTokenizer.from_pretrained('./bert-base-chinese')

# Tokenize the whole batch in one call: the tokenizer pads/truncates every
# sentence to max_length and returns already-stacked tensors, replacing the
# previous per-sentence encode_plus loop + torch.cat.
encoded = bert_tokenizer(
    texts,
    max_length=10,
    padding='max_length',
    truncation=True,
    return_tensors='pt',
)
# input_ids: token ids per sentence; attention_mask: 1 on real tokens,
# 0 on padding positions.
batch_input_ids = encoded['input_ids']
batch_att_mask = encoded['attention_mask']

# Load the BERT encoder (from_pretrained returns it in eval mode).
bert_model = BertModel.from_pretrained('./bert-base-chinese')

# Inference only — disable autograd to save memory and compute.
with torch.no_grad():
    outputs = bert_model(input_ids=batch_input_ids, attention_mask=batch_att_mask)
    # Per-token hidden states (batch, seq_len, hidden) — suitable for sequence labeling.
    print('last_hidden_state', outputs.last_hidden_state.shape)
    # Pooled [CLS] representation (batch, hidden) — suitable for classification.
    print('pooled_output', outputs.pooler_output.shape)
