
# encoding=utf-8
import jieba

strs=["BM25（Best Matching 25）是一种经典的信息检索算法，是基于Okapi TF-IDF算法的改进版本，旨在解决Okapi TF-IDF算法的一些不足之处。其被广泛应用于信息检索领域的排名函数，用于估计文档D与用户查询Q之间的相关性。它是一种基于概率检索框架的改进，特别是在处理长文档和短查询时表现出色。BM25的核心思想是基于词频(TF)和逆文档频率(IDF)来,同时还引入了文档的长度信息来计算文档D和查询Q之间的相关性。目前被广泛运用的搜索引擎ES就内置了BM25算法进行全文检索。","乒乓球拍卖完了","中国科学技术大学","我在北京大学打兵乓球","篮球也不错","杭州西湖美女很多"]
stop_words = set()
with open('./stopwords-master/cn_stopwords.txt', 'r', encoding='utf-8') as f:
    for line in f:
        stop_words.add(line.strip())


def word_weight(doc:list[str],query:str):
    return_value = {}
    doc_num = len(doc)
    doc_word_occurrence = {}
    for item in doc:
        seg_list = jieba.cut(item)
        seg_list = set(seg_list)
        for word in seg_list:
            if word in stop_words:
                continue
            if word in doc_word_occurrence:
                doc_word_occurrence[word] += 1
            else:
                doc_word_occurrence[word] = 1
    query_word_list = jieba.cut(query)
    query_word_list = set(query_word_list)
    for word in query_word_list:
        if word in stop_words:
            continue
        w = doc_word_occurrence.get(word)
        if w is None:
            w = 0
        molecule = doc_num - w + 0.5
        denominator = w + 0.5
        import math
        idf = math.log10(molecule / denominator)
        return_value[word] = idf
        # print(word, molecule, denominator, idf)
    return return_value

def doc_word_score(doc:list[str],query:str):
    return_value = {}
    doc_word_occurrence = {}
    doc_len = {}
    doc_len_num = 0
    for index in range(len(doc)):
        seg_list = jieba.cut(doc[index])
        dwo = {}
        zero = 0
        for item in seg_list:
            zero += 1
            if item in stop_words:
                continue
            w = dwo.get(item)
            if w is None:
                dwo[item] = 1
            else:
                dwo[item] += 1
        doc_word_occurrence[index] = dwo
        doc_len[index] = zero
        doc_len_num += zero
    # print(doc_word_occurrence)
    lave =doc_len_num / len(doc)
    query_word_list = jieba.cut(query)
    for word in query_word_list:
        if word in stop_words:
            continue
        return_value[word] = []
        for ind in range(len(doc_word_occurrence)):
            w = doc_word_occurrence[ind].get(word)
            if w is None:
                w = 0
            molecule = (1.2 + 1) * w
            denominator = 1.2 * (1-0.75 + 0.75 * (doc_len[ind] / lave)) + 1.2
            score = molecule  / denominator
            return_value[word].append(score)
            # print(ind,word,score)
    return return_value



def bm25_top(doc:list[str],query:str,top_k:int):
    w1 = word_weight(doc, query)
    w2 = doc_word_score(doc, query)
    return_value = {}
    for i in range(len(strs)):
        score = 0
        for key, value in w1.items():
            score += w2[key][i] * value
        return_value[i] = score
    return_value = dict(sorted(return_value.items(),key=lambda item: item[1],reverse=True))

    return return_value







if __name__ == '__main__':
    print(bm25_top(strs, "杭州有哪些好玩的",3))

    # for key,score_list in w2.items():
    #     score = 0
    #     for value in score_list:
    #         score += value * w1[key]
    #     print(key,score)

