# coding=utf-8
import jieba
from pymongo import MongoClient
import re
import json
import math

# segment: tokenize one news document (strip punctuation and stop words).
def segment(news,stop_words):
    """Return the term list for one news document.

    news: dict with 'news_title', 'news_content' (iterable of text
          fragments) and 'news_keyword' (list) -- assumed from callers,
          confirm against the MongoDB schema.
    stop_words: iterable of unicode stop words.
    """
    news_all_content = news['news_title']+"".join(news['news_content'])
    # Strip ASCII and full-width punctuation; the pattern is decoded to
    # unicode so it matches the unicode document text (Python 2).
    news_all_content = re.sub("[\s+\.\!\/_,$%^*\-(+\"\']+|[+——\-《》（）‘’”“：；！，。？?、~@#￥%……&*]+".decode("utf8"),
                              "".decode("utf8"),news_all_content)
    # A set gives O(1) stop-word membership tests instead of scanning a list
    # once per token.
    stop_set = set(stop_words)
    tokens = [w for w in jieba.cut_for_search(news_all_content) if w not in stop_set]
    # Keywords are prepended so they are always part of the document's terms.
    return news['news_keyword'] + tokens

# tf_idf: tf-idf weight of a term in a document.
def tf_idf(tf,df,N):
    """Return (1 + log10(tf)) * log10(N / df).

    tf: term frequency in the document (>= 1, or math.log10 raises).
    df: document frequency of the term (>= 1).
    N:  total number of documents.

    N is coerced to float before the division: under Python 2 `N/df` with two
    ints is *integer* division, which truncated the idf ratio (e.g. N=10,
    df=3 gave log10(3) instead of log10(10/3)).
    """
    return (1.0+math.log10(tf))*math.log10(float(N)/df)

# updateWordDF: bump the document frequency of every distinct term in a doc.
def updateWordDF(seg_list,word_df):
    """Increment word_df[term] once per distinct term in seg_list.

    seg_list: term list of one document (may contain duplicates).
    word_df:  mutable dict term -> document frequency; updated in place.
    """
    # set() deduplicates so each term counts this document only once.
    for term in set(seg_list):
        word_df[term] = word_df.get(term, 0) + 1


# updateSegLists: record the term-frequency vector of one document.
def updateSegLists(seg_lists,seg_list,newsID):
    """Store {term: count} for this document under seg_lists[newsID].

    seg_lists: mutable dict newsID -> term-frequency dict; updated in place.
    seg_list:  term list of the document (duplicates are counted).
    newsID:    key identifying the document.
    """
    freq = {}
    for term in seg_list:
        freq[term] = freq.get(term, 0) + 1
    seg_lists[newsID] = freq


# 函数SPIMI_invert：对一个文件建立倒排索引，并将其加到总的倒排索引里。
def SPIMI_invert(one_news,stop_words,dictionary,word_df,invertedIndex,seg_lists):
    print one_news['news_id'],one_news['news_title']
    seg_list = segment(one_news,stop_words)

    # 0.更新seg_lists
    updateSegLists(seg_lists,seg_list,one_news['news_id'])

    # 1.更新word_df
    updateWordDF(seg_list,word_df)

    # 2.写成(term,newsID) List形式
    newsID = one_news['news_id']
    word_newsID = [(w, newsID) for w in seg_list]

    # 3.将这一篇文档加到倒排索引表中
    for w in word_newsID:
        if w[0] not in dictionary:
            dictionary.add(w[0])
            invertedIndex[w[0]] = {w[1]: 1}
        else:
            if w[1] not in invertedIndex[w[0]]:
                invertedIndex[w[0]][w[1]] = 1
            else:
                invertedIndex[w[0]][w[1]] += 1


# buildInvertedIndex: (incrementally) build the inverted index over all docs.
def buildInvertedIndex(all_news,begin_index,this_time_nums):
    """Index all_news[begin_index : begin_index+this_time_nums], merging with
    the JSON state persisted by earlier runs, then save the state again.

    all_news:       indexable sequence/cursor of news documents.
    begin_index:    number of documents already processed by previous runs.
    this_time_nums: how many documents to process in this run.
    """
    # 1. load the stop-word list (source file is GBK encoded); a set gives
    #    O(1) membership tests during segmentation.
    with open(u"./IndexData/stop_words.txt", 'r') as f:
        stop_words = set(x.strip().decode('gbk') for x in f.readlines())
    # 2. resume word_df and the term dictionary from a previous run
    word_df = {}
    if begin_index != 0:
        with open(u"./IndexData/word_df.json", 'r') as f:
            word_df = json.load(f)
    dictionary = set(word_df.keys())
    # 3. resume the inverted index
    invertedIndex = {}
    if begin_index != 0:
        with open(u"./IndexData/invertedIndex.json", 'r') as f:
            invertedIndex = json.load(f)

    seg_lists = {}
    # 4. index this batch of documents
    for i in range(this_time_nums):
        SPIMI_invert(all_news[begin_index + i],stop_words,dictionary,word_df,invertedIndex,seg_lists)
    # 5. persist the updated state
    with open(u"./IndexData/word_df.json", 'w') as f:
        json.dump(word_df, f)
    with open(u"./IndexData/invertedIndex.json", 'w') as f:
        json.dump(invertedIndex, f)
    old_seg_lists = {}
    if begin_index != 0:
        with open(u"./IndexData/seg_lists.json", 'r') as f:
            old_seg_lists = json.load(f)
    with open(u"./IndexData/seg_lists.json", 'w') as f:
        # merge so that freshly computed vectors win if a news_id repeats
        old_seg_lists.update(seg_lists)
        json.dump(old_seg_lists,f)


# buildDocumentVectors: build top-k tf-idf document vectors and store the
# semantic length of every document.
def buildDocumentVectors(all_news,k,begin_index,this_time_nums):
    """Recompute tf-idf vectors for every document processed so far and
    persist documentVectors.json (top-k terms per doc) and
    documentLengths.json (Euclidean norm of the full tf-idf vector).

    tf-idf is recomputed for *all* documents, not just this batch, because
    df and N change whenever new documents are indexed.
    """
    with open(u"./IndexData/seg_lists.json", 'r') as f:
        segVectors = json.load(f)
    with open(u"./IndexData/word_df.json", 'r') as f:
        word_df = json.load(f)
    documentLengths = {}
    documentVectors = {}
    N = all_news.count()
    for one in all_news[:begin_index+this_time_nums]:
        newsID = one['news_id']
        tf_vector = segVectors[newsID]
        tfidf_vector = {}
        for w in tf_vector:
            tfidf_vector[w] = tf_idf(tf_vector[w],word_df[w],N)
        values = tfidf_vector.values()
        # semantic length = Euclidean norm of the full tf-idf vector
        documentLengths[newsID] = sum(map(lambda x:x*x,values))**0.5
        # keep only the k highest-weighted terms as the document vector
        documentVectors[newsID] = dict(sorted(tfidf_vector.items(),key = lambda d:d[1],reverse = True)[:k])
    # Merge with previously stored data so the freshly recomputed values win.
    # (The old code did documentVectors.update(dV), which overwrote the new
    # tf-idf values with stale ones computed under the previous df/N.)
    dV = {}
    if begin_index!=0:
        with open(u"./IndexData/documentVectors.json", 'r') as f:
            dV = json.load(f)
    dV.update(documentVectors)
    with open(u"./IndexData/documentVectors.json", 'w') as f:
        json.dump(dV,f)
    dL = {}
    if begin_index != 0:
        with open(u"./IndexData/documentLengths.json", 'r') as f:
            dL = json.load(f)
    dL.update(documentLengths)
    with open(u"./IndexData/documentLengths.json", 'w') as f:
        json.dump(dL, f)


# buildPermutermIndex: build the permuterm-style index from the vocabulary.
def buildPermutermIndex():
    """Map every '$'-augmented form of each vocabulary word (the '$' inserted
    at each possible position) back to the word itself, and persist the
    mapping to permutermIndex.json.
    """
    with open(u"./IndexData/word_df.json", 'r') as f:
        vocabulary = json.load(f).keys()
    # one key per ('$' position, word) pair; later entries overwrite earlier
    # ones exactly as the original insertion loop did
    permutermIndex = dict(
        (term[:cut] + '$' + term[cut:], term)
        for term in vocabulary
        for cut in range(len(term) + 1)
    )
    with open(u"./IndexData/permutermIndex.json",'w') as f:
        json.dump(permutermIndex, f)



if __name__ == '__main__':
    # 1. connect to MongoDB
    client = MongoClient('localhost', 27017)
    #   the `news` database
    news_db = client.news
    #   the `wangyi_news` collection inside it
    wangyi_news = news_db.wangyi_news
    #   fetch news_id, news_keyword, news_title and news_content of every
    #   document, sorted by news_id ascending
    all_news = wangyi_news.find({},{"_id": 0, "news_id": 1, "news_keyword": 1, "news_title": 1, "news_content": 1}).sort("news_id",1)

    # Incremental-run bookkeeping: documents before begin_index were already
    # processed by a previous run.
    f = open(u"./IndexData/Begin_index.txt",'r')
    begin_index = int(f.readline())
    f.close()
    total = all_news.count()
    this_time_nums = total-begin_index
    # this_time_nums = 10
    print begin_index,total

    # 2. build the inverted index
    buildInvertedIndex(all_news,begin_index,this_time_nums)
    # 3. build the document vectors
    k = 20
    buildDocumentVectors(all_news,k,begin_index,this_time_nums)

    # Persist how many documents have been processed so the next run can
    # resume from where this one stopped.
    f = open(u"./IndexData/Begin_index.txt",'w')
    f.write(str(begin_index+this_time_nums))
    f.close()

    # 4. build the permuterm index
    buildPermutermIndex()