# -*- coding:utf-8 -*-
"""
    lda特征提取
"""
from __future__ import division
from dao.MySqlDAL import MySqlDAL
import util.word_seg_util as wordsutil
import json
from gensim import corpora, models, similarities
__author__ = 'shudongma.msd(风骐)'


'''
Layout of the `keywords` value written back to the table:
{
    mood_type: 'Positive',  # Positive 1, Neutral 0, Negative -1
    topK: [],               # list of extracted keywords
    stocks: []              # stocks mentioned in the article
}
'''
sqlUtil = MySqlDAL()

# 训练模型
def lda_train(table_name):
    res = sqlUtil.get_dimensions_rows('select id,title,content,keywords,type,date,click_num from '+table_name)
    train_set = []
    for row in res:
        cutted_topK,cutted_stocks = wordsutil.cut_bag(row['content'])
        if len(cutted_topK) > 0:
            train_set.append(cutted_topK)

    dictionary = corpora.Dictionary(train_set)

    corpus = [dictionary.doc2bow(text) for text in train_set]
    tfidf = models.TfidfModel(corpus)
    corpus_tfidf = tfidf[corpus]
    lda = models.LdaModel(corpus_tfidf, id2word = dictionary, num_topics = 3)
    corpus_lda = lda[corpus_tfidf]
    print lda.print_topic(0)
    print lda.print_topic(1)
    print lda.print_topic(2)
    return lda



# 利用lda划分极性
def lda_seg(table_name,lda,dictionary,tfidf,corpus_tfidf):
    res = sqlUtil.get_dimensions_rows('select id,title,content,keywords,type,date,click_num from '+table_name+" where keywords is null")

    data_source = []
    for row in res:
        tmp_rec = dict()
        cutted_topK,cutted_stocks = wordsutil.cut_bag(row['content'])
        # print row['content']

        # 利用lda进行处理 topK
        # dic = corpora.Dictionary(train_set)
        # corpus = [dic.doc2bow(text) for text in train_set]
        # tfidf = models.TfidfModel(corpus)
        # corpus_tfidf = tfidf[corpus]
        # attitude = lda.get_topic()

        vec_bow = dictionary.doc2bow(cutted_topK)
        vec_tfidf = tfidf[vec_bow]
        index = similarities.MatrixSimilarity(corpus_tfidf)
        sims = index[vec_tfidf]
        similarity = list(sims)


        tmp_rec['topK'] = cutted_topK
        tmp_rec['stocks'] = cutted_stocks
        # 利用积极和消极词汇出现的频率来判断正负态度
        tmp_rec['mood_type'] = attitude
        # print wordsutil.estAttitude(tmp_rec['topK']),
        data_source.append((json.dumps(tmp_rec),row['id']))

    # 写入数据库data_keys=[] ,data_source=[(修改值1),(2),(3)...(过滤条件1),(2)...] ,filter_collection_key=[key1,key2,...]
    print sqlUtil.update_many_batch(['keywords'],data_source,table_name,['id'])

lda_train('tb_stock_news')


