from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity


def query(query_text, documents, top_n=10, stop_words=r"stop_word.txt"):
    """Rank *documents* by TF-IDF cosine similarity to *query_text*.

    Args:
        query_text: The query string.
        documents: List of document strings to search; the TF-IDF
            vocabulary is fit on these.
        top_n: Number of top-scoring documents to return.
        stop_words: Path to a UTF-8 text file with one stop word per line.

    Returns:
        A list of up to ``top_n`` ``(document, similarity)`` tuples,
        ordered from most to least similar.
    """
    # Use a context manager so the stop-word file is always closed.
    with open(stop_words, 'r', encoding='utf-8') as f:
        userstopwords = [line.strip() for line in f]

    # Fit the TF-IDF vectorizer on the document collection.
    vectorizer = TfidfVectorizer(stop_words=userstopwords)
    tfidf_matrix = vectorizer.fit_transform(documents)

    # Project the query text into the same TF-IDF space.
    query_vector = vectorizer.transform([query_text])

    # Cosine similarity between the query vector and every document vector.
    cosine_similarities = cosine_similarity(query_vector, tfidf_matrix).flatten()

    # Indices of the top-N most similar documents, best first.
    # NOTE: sort descending *then* slice — the old `[-top_n:]` form
    # returned every document when top_n == 0 (slice [-0:] is [0:]).
    related_docs_indices = cosine_similarities.argsort()[::-1][:top_n]

    # Pair each selected document with its similarity score.
    related_docs = [(documents[i], cosine_similarities[i]) for i in related_docs_indices]
    return related_docs


def query_by_field_id(query_text, documents, field, top_n=200, stop_words=r"stop_word.txt"):
    """Rank dict-shaped *documents* by TF-IDF similarity of one text field.

    Args:
        query_text: The query string.
        documents: List of dicts. Each dict must contain the keys
            ``field``, ``'id'`` and ``'origin_content'``.
        field: Name of the text field to index. If ``None``, falls back
            to :func:`query`, treating *documents* as plain strings.
        top_n: Number of top-scoring documents to return.
        stop_words: Path to a UTF-8 text file with one stop word per line.

    Returns:
        A list of up to ``top_n`` dicts with keys ``'id'``, ``'content'``
        (the indexed field's text) and ``'origin_content'``, ordered from
        most to least similar.
    """
    if field is None:
        return query(query_text, documents, top_n, stop_words)

    # Extract the text of the requested field from every document.
    contents = [doc[field] for doc in documents]

    # Use a context manager so the stop-word file is always closed.
    with open(stop_words, 'r', encoding='utf-8') as f:
        userstopwords = [line.strip() for line in f]

    # Fit the TF-IDF vectorizer on the extracted field texts.
    vectorizer = TfidfVectorizer(stop_words=userstopwords)
    tfidf_matrix = vectorizer.fit_transform(contents)

    # Project the query text into the same TF-IDF space.
    query_vector = vectorizer.transform([query_text])

    # Cosine similarity between the query vector and every document vector.
    cosine_similarities = cosine_similarity(query_vector, tfidf_matrix).flatten()

    # Indices of the top-N most similar documents, best first.
    # NOTE: sort descending *then* slice — the old `[-top_n:]` form
    # returned every document when top_n == 0 (slice [-0:] is [0:]).
    related_docs_indices = cosine_similarities.argsort()[::-1][:top_n]

    # Build the result records from the original document dicts.
    related_docs = [
        {'id': documents[i]['id'], 'content': documents[i][field], 'origin_content': documents[i]['origin_content']}
        for i in related_docs_indices]
    return related_docs
