from transformers import BertTokenizer, BertModel
import torch
from torch.nn.functional import cosine_similarity

# Instantiate the BERT tokenizer and encoder from a local bert-base-uncased
# checkpoint directory.
# NOTE(review): hard-coded Windows path — assumes the checkpoint was already
# downloaded to D:/wk/bert-base-uncased; confirm before running elsewhere.
tokenizer = BertTokenizer.from_pretrained('D:/wk/bert-base-uncased')
model = BertModel.from_pretrained('D:/wk/bert-base-uncased')
# Prepare the batch of sentences to embed; index 0 ("Smartphone X") is the
# query the rest will be ranked against. Duplicates are intentional.
sentences = [
    "Smartphone X",
    "DellXPSLaptop",
    "green tea",
    "green tea",
    "Samsung Galaxy S22",
    "Sony PlayStation 5",
    "Samsung Galaxy S22",
    "Nintendo Switch",
    "Samsung Galaxy S22",
    "healthy snacks",
    "nutrition scale",
    "Samsung Galaxy S22",
    "healthy snacks",
    "iPhone 14",
    "dhic",
    "dhic",
    "dhic",
    "healthy snacks",
    "supplements",
    "camping tent",
    "Sony WH-1000XM4 Wireless Noise Cancelling Headphones",
    "Apple AirPods Pro",
    "camping tent",
    "e-commerce trends 2024",
    "e-commerce trends 2024",
    "women's multivitamin",
    "women's multivitamins",
    "topical antibiotics for bacterial vaginosis",
    "iPhone 14",
    "Samsung Galaxy S22",
]
print(sentences)

# Encode all sentences as one padded, truncated batch of token ids.
inputs = tokenizer(sentences, return_tensors="pt", padding=True, truncation=True)
print(inputs)

# Inference only: disable autograd so the forward pass does not build a
# gradient graph (saves memory and time; nothing here backpropagates).
with torch.no_grad():
    outputs = model(**inputs)
print(outputs)

# Use each sequence's [CLS] token (position 0) final hidden state as the
# sentence embedding: shape (num_sentences, hidden_size).
sentence_embeddings = outputs.last_hidden_state[:, 0, :]
print(sentence_embeddings)

# Compute cosine similarity between "Smartphone X" (index 0) and every
# sentence embedding (including itself).
smartphone_x_embedding = sentence_embeddings[0]
cosine_sims = cosine_similarity(smartphone_x_embedding.unsqueeze(0), sentence_embeddings, dim=-1)

# Flatten to a 1-D score vector and sort indices highest-similarity first.
# argsort(descending=True) replaces the ascending-sort-then-reverse idiom.
similarity_scores = cosine_sims.squeeze()
print(similarity_scores)
sorted_indices = similarity_scores.argsort(descending=True).tolist()

# Print the sentences most similar to "Smartphone X", skipping the query
# itself (index 0 and any duplicate of its text). .item() prints the plain
# float instead of a tensor repr.
for i in sorted_indices:
    if i != 0 and sentences[i] != "Smartphone X":
        print(f"Sentence: {sentences[i]}, Similarity: {similarity_scores[i].item()}")