from collections import Counter
import math


def average_doc_length(documents):
    """Return the mean token count over *documents*.

    Args:
        documents: iterable of tokenized documents (each a sequence of terms).

    Returns:
        float: average document length, or 0.0 when *documents* is empty
        (the original raised ZeroDivisionError in that case).
    """
    num_docs = len(documents)
    # Guard the empty collection: there is no meaningful average, use 0.0.
    if num_docs == 0:
        return 0.0
    total_length = sum(len(doc) for doc in documents)
    return total_length / num_docs


def calculate_idf(term, documents):
    """Compute the BM25 (Lucene-style, always non-negative) IDF of *term*.

    Args:
        term: the query term to score.
        documents: iterable of tokenized documents.

    Returns:
        float: ``log(1 + (N - df + 0.5) / (df + 0.5))`` where ``N`` is the
        number of documents and ``df`` the count of documents containing
        *term*.
    """
    total_docs = len(documents)
    # Document frequency: how many documents mention the term at least once.
    containing = len([doc for doc in documents if term in doc])
    return math.log((total_docs - containing + 0.5) / (containing + 0.5) + 1)


def bm25_score(query_terms, document, documents, k1=1.5, b=0.75):
    """Score *document* against *query_terms* with Okapi BM25.

    Args:
        query_terms: tokenized query (list of terms).
        document: tokenized document to score (list of terms).
        documents: the full corpus used for IDF and average-length stats.
        k1: term-frequency saturation parameter (default 1.5).
        b: length-normalization strength in [0, 1] (default 0.75).

    Returns:
        float: sum of per-term ``idf * tf`` BM25 contributions; 0.0 when no
        query term occurs in the document.
    """
    # Corpus statistics are loop-invariant: compute them once up front.
    avgdl = average_doc_length(documents)
    doc_len = len(document)
    term_freq = Counter(document)
    score = 0.0
    for term in query_terms:
        f = term_freq[term]
        # An absent term contributes exactly 0 (tf numerator is f * (k1+1)),
        # so skip it and avoid the O(corpus) scan inside calculate_idf.
        if f == 0:
            continue
        idf = calculate_idf(term, documents)
        # Length normalization; when avgdl is 0 (degenerate corpus) fall back
        # to no normalization instead of dividing by zero.
        norm = (1 - b + b * doc_len / avgdl) if avgdl != 0 else 1.0
        denom = f + k1 * norm
        tf = (f * (k1 + 1)) / denom if denom != 0 else 0
        score += idf * tf
    return score


if __name__ == "__main__":
    # Sample corpus: each document is a pre-tokenized list of terms.
    documents = [
        ["人工智能", "计算机科学", "分支", "创建", "系统"],
        ["人工智能", "人工智能", "子领域", "计算机", "学习"],
        ["深度学习", "机器学习", "分支", "神经网络", "学习"],
        ["自然语言处理", "人工智能", "应用", "计算机", "语言"],
        ["计算机视觉", "人工智能", "分支", "计算机", "视觉"],
    ]
    # Query terms to rank the corpus against.
    query = ["人工智能", "学习"]

    # Score every document and report its 1-based index.
    for idx, candidate in enumerate(documents, start=1):
        bm25 = bm25_score(query, candidate, documents)
        print(f"文档{idx}的BM25分数为{bm25}")
