from sentence_transformers import SentenceTransformer, util
from search import WebSearcher

# Initialize the embedding model (recommended to run locally; this is an
# open-source Hugging Face model). Loaded once at import time — the download
# happens on first use and the model is then cached.
embedder = SentenceTransformer("all-MiniLM-L6-v2")  # small, efficient model

def rank_search_results(query, results, limit=3):
    """
    Rank retrieved documents by semantic similarity to the query and
    concatenate the most relevant ones, so the context better matches
    the user's intent.

    :param query: the user's question / search query string
    :param results: list of scraped documents, each a dict shaped like
                    {'title': '', 'link': '', 'snippet': '', 'content': ''}
    :param limit: maximum number of top-ranked documents to keep (default 3)
    :return: the contents of the top ``limit`` documents, sorted by
             descending cosine similarity and joined with newlines;
             "" when there is nothing to rank
    """
    # Drop entries with missing or empty 'content': embedding empty strings
    # wastes an encode pass and would inject blank lines into the output.
    contents = [c for c in (r.get('content', '') for r in results) if c]
    if not contents:
        return ""

    # Encode the query and the documents into the shared embedding space.
    query_embedding = embedder.encode(query, convert_to_tensor=True)
    result_embeddings = embedder.encode(contents, convert_to_tensor=True)

    # Cosine similarity between the query and every document.
    similarities = util.cos_sim(query_embedding, result_embeddings)[0]

    # Sort by similarity, descending. Slicing already clamps when
    # limit > len(ranked), so no explicit min() is needed.
    ranked = sorted(zip(contents, similarities.tolist()),
                    key=lambda pair: pair[1], reverse=True)
    return "\n".join(content for content, _score in ranked[:limit])


# Example usage:
# query = '开源的多模态AI大模型有哪些'

# searcher = WebSearcher()
# response = searcher.search_web(query=query, num_results=5)
# results = response.get('results', [])

# print(f"Ranked content:\n\n{rank_search_results(query, results)}")
