from transformers import BertTokenizer, BertModel
import torch
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np

# Load the pretrained BERT model and tokenizer from a local checkpoint directory.
# NOTE(review): '../similarity/bert-base-uncased' is a relative path — this script
# must be run from a working directory where that path resolves; verify deployment layout.
tokenizer = BertTokenizer.from_pretrained('../similarity/bert-base-uncased')
model = BertModel.from_pretrained('../similarity/bert-base-uncased')

def get_embedding(text):
    """Return the BERT [CLS] embedding of *text* as a (1, hidden_size) numpy array."""
    # Tokenize into model-ready PyTorch tensors (padded / truncated to the model limit).
    encoded = tokenizer(text, return_tensors='pt', padding=True, truncation=True)
    # Inference only — disable gradient tracking for the forward pass.
    with torch.no_grad():
        output = model(**encoded)
    # Position 0 of the last hidden state is the [CLS] token; use it as the sentence vector.
    return output.last_hidden_state[:, 0, :].numpy()

# Example document collection to rank against the query.
documents = [
    "I love spending time in the park on weekends.",
    "The scenery there is beautiful and the air is fresh.",
    "Recently, the weather has not been very good."
]

# User query.
query = "I enjoy walking in the park."

# Embed the query and every document. get_embedding already returns a
# 2-D (1, hidden_size) array, so the previous per-array reshape was a no-op
# and has been dropped; vstack produces the (n_docs, hidden_size) matrix.
query_embedding = get_embedding(query)
document_embeddings_matrix = np.vstack([get_embedding(doc) for doc in documents])

# Cosine similarity of the query against all documents: shape (1, n_docs).
similarities = cosine_similarity(query_embedding, document_embeddings_matrix)

# Pair each document index with its score (enumerate replaces the manual
# zip(range(len(...)), ...) idiom) and sort by score, highest first.
sorted_documents = sorted(enumerate(similarities[0]), key=lambda x: x[1], reverse=True)

# Print the ranked documents with their similarity scores.
for idx, score in sorted_documents:
    print(f"Document: {documents[idx]}")
    print(f"Cosine Similarity: {score:.4f}\n")