#! -*- coding:utf-8 -*-

import pymongo
import codecs,sys
from pymongo import MongoClient
import jieba
from gensim import corpora, models, similarities
import nltk
import jieba.analyse
from nltk.tokenize import word_tokenize
from pprint import pprint  # pretty-printer

# Python 2 only: re-expose setdefaultencoding and force UTF-8 so implicit
# str/unicode coercions on mixed Chinese/English text do not raise
# UnicodeDecodeError. (Not available / not needed on Python 3.)
reload(sys)
sys.setdefaultencoding('utf-8')

# Output directory prefix for the persisted model files (dict/lsi/index/uuids).
kickpath="" #"/root/python/"

# Module-level accumulators shared between the __main__ loop and training.
# NOTE(review): dics/dits/labels/count/mydoclist/questions appear unused in
# this file — possibly leftovers or used by code outside this view.
dics=[]
dits={}
labels={}
count=1
mydoclist =[]
courses=[]      # raw document texts pulled from MongoDB
questions=[]
uuids=[]        # uuid of each document, parallel to `courses`


"""
    pre_process text line
"""
def pre_process_cn(courses, low_freq_filter = True):
    """
     简化的 中文+英文 预处理
        1.去掉停用词
        2.去掉标点符号
        3.处理为词干
        4.去掉低频词

    """
    texts_tokenized = []
    for document in courses:
        #document = str(document).decode("utf-8")
        texts_tokenized_tmp = []
        print "document:",document
        for word in word_tokenize(document):
            texts_tokenized_tmp += jieba.analyse.extract_tags(word,10)
        texts_tokenized.append(texts_tokenized_tmp)

    texts_filtered_stopwords = texts_tokenized

    #去除标点符号
    english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%']
    texts_filtered = [[word for word in document if not word in english_punctuations] for document in texts_filtered_stopwords]

    #词干化
    from nltk.stem.lancaster import LancasterStemmer
    st = LancasterStemmer()
    texts_stemmed = [[st.stem(word) for word in docment] for docment in texts_filtered]

    #去除过低频词
    if low_freq_filter:
        all_stems = sum(texts_stemmed, [])
        stems_once = set(stem for stem in set(all_stems) if all_stems.count(stem) == 1)
        texts = [[stem for stem in text if stem not in stems_once] for text in texts_stemmed]
    else:
        texts = texts_stemmed
    pprint(texts)
    return texts


def jieba_preprocess_cn(courses, low_freq_filter = True):
    #jieba.analyse.set_stop_words("../extra_dict/stop_words.txt")
    #jieba.analyse.set_idf_path("../extra_dict/idf.txt.big");

    texts_tokenized = []
    print "len:",len(courses)
    for document in courses:
        texts_tokenized_tmp = []
        #print "document:",document
        words= jieba.cut(document,cut_all=True)
        #print 'cut document:',','.join(list(words))
        tages= jieba.analyse.extract_tags(document,500)
        #print 'extract_tags:',','.join(list(tages))
        #texts_tokenized.append(texts_tokenized_tmp)
        texts_tokenized.append(tages)

    texts_filtered_stopwords = texts_tokenized
    pprint(texts_filtered_stopwords)

    #去除标点符号
    english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%']
    texts_filtered = [[word for word in document if not word in english_punctuations] for document in texts_filtered_stopwords]

    #去除过低频词
    if low_freq_filter:
        # remove words that appear only once
        from collections import defaultdict
        frequency = defaultdict(int)
        for text in texts_filtered:
            for token in text:
                frequency[token] += 1
        texts = [[token for token in text if frequency[token] > 1]     for text in texts_filtered]
        #stems_once = set(stem for stem in set(all_stems) if all_stems.count(stem) == 1)
        #texts = [[stem for stem in text if stem not in stems_once] for text in texts_filtered]
    else:
        texts = texts_filtered
    pprint(texts)
    return texts

def train_by_lsi(lib_texts):
    """
    Train an LSI similarity index over a tokenized corpus.

    lib_texts -- list of token lists, one per document
    Returns (index, dictionary, lsi); all three are also persisted
    under `kickpath` as kick.index / kick.dict / kick.lsi.
    """
    # Uncomment to watch gensim's training progress:
    #import logging
    #logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

    dictionary = corpora.Dictionary(lib_texts)
    #print(dictionary.token2id)

    # doc2bow() maps each document to a bag of (word_id, word_frequency) pairs.
    corpus = [dictionary.doc2bow(text) for text in lib_texts]
    tfidf = models.TfidfModel(corpus)
    corpus_tfidf = tfidf[corpus]

    # num_topics is left at gensim's default (an ad-hoc 10 was tried before). #, num_topics=10)
    lsi = models.LsiModel(corpus_tfidf, id2word=dictionary)
    # `index` is a gensim.similarities.docsim.MatrixSimilarity instance.
    index = similarities.MatrixSimilarity(lsi[corpus])

    # Persist every artifact needed to answer similarity queries later.
    for artifact, suffix in ((dictionary, "kick.dict"), (lsi, "kick.lsi"), (index, "kick.index")):
        artifact.save(kickpath + suffix)
    return (index, dictionary, lsi)


if __name__ == '__main__':
    conn = MongoClient("okzor.com", 27017)
    db = conn.health
    db.authenticate("hou", "hou@123")
    content = db.kickchufang.find({'doctorId':'huanghuang'})
    index=0
    for i in content:
        line = str(i['desc'])  #.decode("utf-8") #.encode("GB18030"))
        #print "line:",line
        uuid = i['uuid']
        uuids.append(uuid)
        #print uuid, line
        courses.append(line)
        print str(index)
        index=index+1
        #if (index>10):
        #    break

    man_file = open(kickpath+"kick.uuids", 'w')
    print(uuids, man_file)
    man_file.close()
    courses_name = courses

    # 库建立完成 -- 这部分可能数据很大，可以预先处理好，存储起来
    lib_texts = jieba_preprocess_cn(courses)
    (index, dictionary, lsi) = train_by_lsi(lib_texts)