
import math
import pickle
import json

def pre_pro(iftitle):
    """Load per-document lengths and compute corpus statistics for BM25.

    Args:
        iftitle: 1 to use the title-length dataset, any other value to use
            the full-document-length dataset.

    Returns:
        Tuple ``(avgdl, N, doc_length)`` where ``doc_length`` maps each
        document id to its length, ``N`` is the number of documents and
        ``avgdl`` is the average document length as a float.
    """
    # Choose the dataset: title lengths vs. full-document lengths.
    if iftitle == 1:
        path2 = "data/title_length"
    else:
        path2 = "data/file_length"
    # NOTE(review): pickle.load on a data file — assumed trusted; never
    # unpickle untrusted input.
    with open(path2, 'rb') as f2:
        doc_length = pickle.load(f2)

    N = len(doc_length)
    # Average document length; true division keeps this a float.
    avgdl = sum(doc_length.values()) / N
    return avgdl, N, doc_length

# If none of the query words occurs in a document, that document is skipped
# entirely, so BM25 relevance is only computed for candidate documents.
def pre(query, iftitle):
    """Collect ids of documents that contain at least one query word.

    Args:
        query: iterable (typically a list) of query words.
        iftitle: 1 to use the title inverted index, otherwise the
            full-content inverted index.

    Returns:
        List of document ids, in index iteration order, that contain at
        least one word of the query.
    """
    if iftitle == 1:
        # Inverted index built over article titles.
        with open("out/invertIndex_title.json", "r", encoding="UTF-8") as load_f:
            index = json.load(load_f)
    else:
        # Inverted index built over article bodies.
        with open("out/invertIndex.json", "r", encoding="UTF-8") as load_f:
            index = json.load(load_f)

    doc_list = []
    seen = set()  # O(1) membership test instead of scanning doc_list each time
    for word, postings in index.items():
        if word in query:
            # Postings map doc id -> positions; only the doc ids matter here.
            for doc_id in postings:
                if doc_id not in seen:
                    seen.add(doc_id)
                    doc_list.append(doc_id)

    return doc_list

    #print(len(doc_right))

# Compute tf / df / idf and the BM25 score of the query against each document.
def idf(index, query, doc_right, N, doc_length, avgdl):
    """Score each candidate document against the query with Okapi BM25.

    Args:
        index: inverted index, word -> {doc id -> list of positions}.
        query: list of query words.
        doc_right: candidate document ids to score.
        N: total number of documents in the corpus.
        doc_length: mapping of doc id -> document length.
        avgdl: average document length over the corpus.

    Returns:
        Dict mapping each id in ``doc_right`` to its BM25 score (0 when the
        document shares no word with the query).
    """
    f = {}        # word -> {doc id: term frequency in that doc}
    df = {}       # word -> number of documents containing the word
    idf_map = {}  # word -> BM25 idf weight
    for word, postings in index.items():
        doc_count = len(postings)  # renamed from `sum` to avoid shadowing the builtin
        # A posting value is the list of positions; its length is the tf.
        f[word] = {doc_id: len(positions) for doc_id, positions in postings.items()}
        df[word] = doc_count
        idf_map[word] = math.log(N - doc_count + 0.5) - math.log(doc_count + 0.5)

    # Standard BM25 hyper-parameters.
    k1 = 1
    b = 0.75

    doc_score = {}
    for doc in doc_right:
        score = 0
        for word in query:
            if word not in df:
                continue  # query word never appears in the corpus
            tf = f[word].get(doc)
            if tf is None:
                continue  # this document does not contain the word
            # BM25 term contribution with document-length normalization.
            score += idf_map[word] * (tf * (k1 + 1)) / (
                tf + k1 * (1 - b + b * doc_length[doc] / avgdl))
        doc_score[doc] = score
    return doc_score

def score_sort(doc_score, search_sort):
    """Order scored documents either by timestamp or by relevance.

    Args:
        doc_score: mapping of document id -> BM25 relevance score.
        search_sort: UI sort option; '按时间降序排序' / '按时间升序排序'
            sort by document timestamp, anything else sorts by relevance
            score, highest first.

    Returns:
        Time-based sorting: a dict of doc id -> score in sorted order.
        Relevance sorting: a list of (doc id, score) tuples, highest score
        first.  Callers normalize both shapes with ``dict(...)``.
    """
    if search_sort == '按时间降序排序' or search_sort == '按时间升序排序':
        # Sort by publication time: load each document's metadata to read it.
        doc_time_dic = {}
        path = "data/data_clean_key_cut/"
        for doc in doc_score:  # only the keys are needed here
            # NOTE(review): pickle.load on data files — assumed trusted.
            with open(path + str(doc), 'rb') as f1:
                doc_detail = pickle.load(f1)
            doc_time_dic[doc] = doc_detail['time']

        descending = (search_sort == '按时间降序排序')
        doc_time_sorted = sorted(doc_time_dic.items(),
                                 key=lambda kv: (kv[1], kv[0]),
                                 reverse=descending)
        # Rebuild in time order, carrying the relevance score along.
        doc_sorted = {doc: doc_score[doc] for doc, _ in doc_time_sorted}
    else:
        # Default: sort by relevance score, best first; ties break on doc id.
        doc_sorted = sorted(doc_score.items(),
                            key=lambda kv: (kv[1], kv[0]),
                            reverse=True)

    return doc_sorted

def bm25(query, iftitle, doc_list, search_sort):
    """Rank candidate documents against a query with BM25.

    Args:
        query: list of query words.
        iftitle: 1 to rank against article titles, otherwise full content.
        doc_list: candidate document ids (e.g. produced by ``pre``).
        search_sort: sort-mode string forwarded to ``score_sort``.

    Returns:
        Dict mapping document id -> relevance score, in sorted order.
    """
    # Load the inverted index matching the chosen dataset.
    if iftitle == 1:
        # Inverted index built over article titles.
        with open("out/invertIndex_title.json", "r", encoding="UTF-8") as load_f:
            index = json.load(load_f)
    else:
        # Inverted index built over article bodies.
        with open("out/invertIndex.json", "r", encoding="UTF-8") as load_f:
            index = json.load(load_f)

    avgdl, N, doc_length = pre_pro(iftitle)
    doc_score = idf(index, query, doc_list, N, doc_length, avgdl)

    # score_sort returns either a dict (time sort) or a list of pairs
    # (relevance sort); dict() normalizes both into the same shape.
    doc_sorted = score_sort(doc_score, str(search_sort))
    return dict(doc_sorted)

'''
query="鲸鱼"
query=["鲸鱼"]
pre(query,0)
'''