import numpy as np
import ollama
from sklearn.metrics.pairwise import cosine_similarity


# Helper for generating text embedding vectors via Ollama
def get_embedding(model, text):
    """Return the embedding vector for *text* using the given Ollama model.

    On any failure (request error or unexpected response shape) the error
    is printed and None is returned, so callers can skip bad entries.
    """
    try:
        # Subscript stays inside the try: a missing 'embedding' key is
        # treated the same as a request failure.
        return ollama.embeddings(model=model, prompt=text)['embedding']
    except Exception as e:
        print(f"生成嵌入向量时出错: {e}")
        return None


# Text library to search over (runtime data, kept as-is).
text_library = [
    "天空是蓝色的因为瑞利散射",
    "海洋是蓝色的因为反射",
    "森林是绿色的因为叶绿素"
]

# Name of the embedding model served by the local Ollama instance.
model_name = 'llama3.2:latest'

# Embed the library, keeping each text PAIRED with its vector.
# BUG FIX: the original filtered failed embeddings out of a separate
# embeddings list, which desynchronized indices from text_library — after
# any failure, argmax pointed at the wrong text. Pairing prevents that.
embedded_pairs = [
    (text, vec)
    for text in text_library
    if (vec := get_embedding(model_name, text)) is not None
]

# Query text and its embedding.
query_text = '为什么天空是蓝色的？'
query_embedding = get_embedding(model_name, query_text)

if query_embedding is None:
    print("无法生成查询文本的嵌入向量，请检查模型或输入文本。")
elif not embedded_pairs:
    # BUG FIX: if every library embedding failed, np.argmax([]) would raise.
    print("无法生成文本库的嵌入向量，请检查模型或输入文本。")
else:
    # Reshape to (1, n_features) as required by cosine_similarity.
    query_vec = np.array(query_embedding).reshape(1, -1)

    # Cosine similarity between the query and each library embedding.
    similarities = [
        cosine_similarity(query_vec, np.array(vec).reshape(1, -1))[0][0]
        for _, vec in embedded_pairs
    ]

    # Report the most similar library text (index is into embedded_pairs,
    # so it stays consistent even when some embeddings failed).
    most_similar_index = int(np.argmax(similarities))
    print(f"查询文本: '{query_text}'")
    print(f"最相似的文本: '{embedded_pairs[most_similar_index][0]}'")
    print(f"相似度: {similarities[most_similar_index]:.4f}")
