from transformers import BertTokenizer, BertModel
import torch

# Path to the local bert-base-chinese checkpoint. A raw string keeps every
# backslash literal: the original literal mixed '\\' and bare '\'
# ('D:\\work\program\\...') and only parsed correctly because '\p' happens
# not to be a recognized Python escape sequence.
MODEL_PATH = r'D:\work\program\pytorch_models\bert-base-chinese'

# Load the tokenizer and model once at startup.
# from_pretrained already returns the model in eval mode (dropout disabled).
tokenizer = BertTokenizer.from_pretrained(MODEL_PATH)
model = BertModel.from_pretrained(MODEL_PATH)

# The two sentences to compare.
sentence1 = "北京是中国的首都。"
sentence2 = "中国首都是北京。"

# Encode each sentence into model-ready tensors (each a batch of one).
inputs1 = tokenizer(sentence1, return_tensors='pt', padding=True,
                    truncation=True, max_length=512)
inputs2 = tokenizer(sentence2, return_tensors='pt', padding=True,
                    truncation=True, max_length=512)

# Run both forward passes without tracking gradients (inference only).
with torch.no_grad():
    outputs1 = model(**inputs1)
    outputs2 = model(**inputs2)

# Pooled [CLS] representation of each sentence, shape (1, hidden_size).
# NOTE(review): pooler_output is trained for next-sentence prediction and is
# often a weak sentence embedding; mean-pooling last_hidden_state weighted by
# the attention mask usually gives better similarity scores — worth confirming
# before relying on these numbers.
vector1 = outputs1.pooler_output
vector2 = outputs2.pooler_output

# Cosine similarity along the hidden dimension -> tensor of shape (1,).
# Renamed from 'cosine_similarity' to avoid shadowing the function's own name.
similarity = torch.nn.functional.cosine_similarity(vector1, vector2)
print(similarity.item())