# -*- coding:utf-8 -*-
"""
    分词,更新到数据库
"""
from __future__ import division
from dao.MySqlDAL import MySqlDAL
#import util.word_seg_util as wordsutil
import json
#import record_cluster
__author__ = 'shudongma.msd(风骐)'


'''
写回的keywords设计
{
    mood_type:'Positive',    # Positive 1 Neutral 0 Negative -1
    topK:[]    ,# 关键字列表
    stocks[]  # 涉及的股票
}
'''
# NOTE: the triple-quoted block above documents the JSON schema written
# back into the `keywords` column of each row:
#   mood_type : attitude label, 1 positive / 0 neutral / -1 negative
#   topK      : list of extracted keywords
#   stocks    : stock codes mentioned in the article
# Shared DAL instance used by every function in this module
# (presumably holds one pooled MySQL connection — TODO confirm in MySqlDAL).
sqlUtil = MySqlDAL()

# 利用积极和消极词汇出现的频率来判断正负态度
def word_seg(table_name):
    res = sqlUtil.get_dimensions_rows('select id,title,content,keywords,type,date,click_num from '+table_name +" where keywords is null")
    # res = sqlUtil.get_dimensions_rows('select id,title,content,keywords,type,date,click_num from '+_table)

    data_source = []
    for row in res:
        tmp_rec = dict()
        cutted_topK,cutted_stocks = wordsutil.cut_bag(row['content'])
        # print row['content']
        tmp_rec['topK'] = cutted_topK
        tmp_rec['stocks'] = cutted_stocks
        # 利用积极和消极词汇出现的频率来判断正负态度
        tmp_rec['mood_type'] = wordsutil.estAttitude(tmp_rec['topK'])
        # print wordsutil.estAttitude(tmp_rec['topK']),
        data_source.append((json.dumps(tmp_rec),row['id']))

    # 写入数据库data_keys=[] ,data_source=[(修改值1),(2),(3)...(过滤条件1),(2)...] ,filter_collection_key=[key1,key2,...]
    print sqlUtil.update_many_batch(['keywords'],data_source,table_name,['id'])


# 利用KMeans聚类来判断涨跌的态度
def kmeans_seg(table_name):
    res = sqlUtil.get_dimensions_rows('select id,title,content,keywords,type,date,click_num from '+table_name +" where keywords is null")
    # res = sqlUtil.get_dimensions_rows('select id,title,content,keywords,type,date,click_num from '+_table)

    cluster_data_list = []
    stocks_record_list = []
    for row in res:
        tmp_rec = dict()
        cutted_topK,cutted_stocks = wordsutil.cut_bag(row['content'])
        # print row['content']
        tmp_rec['topK'] = cutted_topK
        tmp_rec['stocks'] = cutted_stocks
        cluster_data_list.append(tmp_rec['topK'])
        tmp_rec['id'] = row['id']
        tmp_rec['content'] = row['content']
        stocks_record_list.append(tmp_rec)

    clustered = record_cluster.kMean_cluster(cluster_data_list)
    data_source = []
    for i in xrange(len(stocks_record_list)):
        tmp_rec = stocks_record_list[i]
        tmp_rec['mood_type'] = int(clustered[i])-1
        # if clustered[i] == 0:
        #     tmp_rec['mood_type'] = 'Negative'
        # elif clustered[i] == 1:
        #     tmp_rec['mood_type'] = 'Neutral'
        # elif clustered[i] == 2:
        #     tmp_rec['mood_type'] = 'Positive'
        tmp_id = tmp_rec['id']
        del(tmp_rec['id'])
        data_source.append((json.dumps(tmp_rec),tmp_id))
        print clustered[i]
        print tmp_rec['content']
        print '*'*10

    # print sqlUtil.update_many_batch(['keywords'],data_source,table_name,['id'])


def paper_mood_count():
    res = sqlUtil.get_dimensions_rows('select keywords from tb_stock_debate')
    p = 0
    n = 0
    m = 0
    for row in res:
        mood = json.loads(row["keywords"])
        if mood['mood_type']==1:
            p += 1
        elif mood['mood_type']==-1:
            n += 1
        elif mood['mood_type']==0:
            m += 1

    res = sqlUtil.get_dimensions_rows('select keywords from tb_stock_news')
    for row in res:
        mood = json.loads(row["keywords"])
        if mood['mood_type']==1:
            p += 1
        elif mood['mood_type']==-1:
            n += 1
        elif mood['mood_type']==0:
            m += 1

    print "pos:",p
    print "neg:",n
    print "mid:",m

# One-off seeding/clustering runs, kept for reference:
# kmeans_seg('tb_stock_news')
# word_seg('tb_stock_debate')
# word_seg('tb_stock_news')


# Guarded entry point: previously paper_mood_count() ran unconditionally
# at import time, which made this module unusable as a library.
if __name__ == '__main__':
    paper_mood_count()