# coding=utf-8
import json
import sys
import jieba
import re
import math
import heapq
from pymongo import MongoClient

# processQuery(query, stop_words): turn a raw query string into a set of
# search terms.
def processQuery(query,stop_words):
    """Normalize *query* into a set of search words.

    Strips punctuation (the pattern mixes ASCII and full-width CJK marks),
    segments the text with jieba's search-mode tokenizer, and drops stop
    words.  Returns a set of unicode terms.

    query      -- raw query string entered by the user
    stop_words -- iterable of stop words to exclude from the result
    """
    # Strip punctuation.  .decode("utf8") keeps the pattern unicode under
    # Python 2, matching the unicode tokens jieba produces.
    query = re.sub("[\s+\.\!\/_,$%^*\-(+\"\']+|[+——\-《》（）‘’”“：；！，。？?、~@#￥%……&*]+".decode("utf8"),
                              "".decode("utf8"), query)
    # Hoist the stop words into a set once, so each membership test is O(1)
    # instead of a linear scan of the list per token.
    stop_set = set(stop_words)
    filtered = [w for w in jieba.cut_for_search(query) if w not in stop_set]
    # TODO: wildcard queries (u'*') were recognized but never implemented in
    # the original (empty `pass` branch); the permuterm index loaded by the
    # driver suggests this was planned.
    return set(filtered)


# tf_idf(tf, df, N): standard tf-idf weight of one term in one document.
def tf_idf(tf,df,N):
    """Return the tf-idf weight ``(1 + log10(tf)) * log10(N / df)``.

    tf -- raw term frequency of the word in the document (>= 1)
    df -- document frequency of the word in the collection (>= 1)
    N  -- total number of documents in the collection
    """
    # float(N) guards against Python 2 integer division: with int arguments
    # N/df truncates (e.g. 10/3 -> 3), silently understating the idf term.
    return (1 + math.log10(tf)) * math.log10(float(N) / df)

# fastCosScore(): score documents against the query terms and return the
# top K (newsID, score) pairs, best first.
def fastCosScore(K,query_wordset,documentLengths,InvertedIndex,word_df):
    """Rank documents for *query_wordset* by length-normalized tf-idf.

    K               -- number of results to return
    query_wordset   -- iterable of query terms
    documentLengths -- {newsID: document length} used for normalization
    InvertedIndex   -- {word: {newsID: term frequency}}
    word_df         -- {word: document frequency}

    Returns a list of up to K (newsID, score) tuples in descending score
    order; empty when no query term appears in the index.
    """
    Scores = {}
    N = len(documentLengths)
    for w in query_wordset:
        # Skip terms the index has never seen.
        if w not in InvertedIndex or w not in word_df:
            continue
        postings = InvertedIndex[w]
        w_df = word_df[w]
        for newsID in postings:
            # dict.get collapses the original's duplicated new-key /
            # existing-key branches, which both performed the same addition.
            Scores[newsID] = Scores.get(newsID, 0) + tf_idf(postings[newsID], w_df, N)
    # Cosine-style normalization by document length.
    # NOTE(review): assumes every posting's newsID is present (and nonzero)
    # in documentLengths -- confirm against the index builder.
    for newsID in Scores:
        Scores[newsID] /= documentLengths[newsID]
    # heapq.nlargest is O(n log K) -- cheaper than a full sort for small K.
    return heapq.nlargest(K, Scores.items(), key=lambda x: x[1])

#
# if __name__ == '__main__':
#     # 1.将索引等信息读到内存中
#     with open(u"./IndexData/InvertedIndex.json", 'r') as f:
#         InvertedIndex = json.load(f)
#     with open(u"./IndexData/word_df.json", 'r') as f:
#         word_df = json.load(f)
#     with open(u"./IndexData/permutermIndex.json", 'r') as f:
#         permIndex = json.load(f)
#     with open(u"./IndexData/documentVectors.json", 'r') as f:
#         documentVectors = json.load(f)
#     with open(u"./IndexData/documentLengths.json", 'r') as f:
#         documentLengths = json.load(f)
#     with open(u"./IndexData/stop_words.txt", 'r') as f:
#         gbk_stop_words = f.readlines()
#         stop_words = [x.strip().decode('gbk') for x in gbk_stop_words]
#     K = 20
#     # 2.连接MongoDB数据库
#     client = MongoClient('localhost', 27017)
#     news_db = client.news
#     wangyi_news = news_db.wangyi_news
#
#     while True:
#         query = raw_input("please input your query: ").decode(sys.stdin.encoding)
#         query_wordset = processQuery(query,stop_words)
#         topK_result = fastCosScore(K,query_wordset,documentLengths,InvertedIndex,word_df)
#         if len(topK_result)==0:
#             print 'Sorry,no revelant news...'
#         else:
#             findConditions = []
#             for w in topK_result:
#                 temp = {}
#                 temp['news_id'] = w[0]
#                 findConditions.append(temp)
#             topK_news = wangyi_news.find({'$or':findConditions},{'_id':-1,'news_title':1})
#             for i in range(topK_news.count()):
#                 print i+1,topK_news[i]['news_title']