#!/usr/bin/env python
# -*- coding:utf-8 -*-

import jieba
from sklearn import metrics
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline

import pymongo
import jieba.analyse
from wordcloud import WordCloud

# MongoDB connection used by both analysis paths below.
# Scraped Meituan restaurant data lives in database `MeituanComment`,
# collection `comments`; each document holds a restaurant's metadata plus
# its comment strings. Connection parameters are fixed to a local instance.
client = pymongo.MongoClient(host='localhost', port=27017)
db = client.MeituanComment
collection = db.comments


def cloud():
    """Print basic info for every restaurant that has comments and render
    a word-cloud image for each via :func:`create_cloud`.

    Restaurants with an empty ``comments`` list are skipped.
    """
    for restaurant in collection.find():
        # Nothing to visualize for restaurants without comments.
        if not restaurant['comments']:
            continue
        print('Name: ' + restaurant['name'])
        print('Average Score: ' + restaurant['avgScore'])
        print('URL: ' + 'https://www.meituan.com/meishi/%s/\n' % restaurant['poiId'])
        create_cloud(restaurant)


def create_cloud(info):
    """Render a word-cloud image for one restaurant document and save it
    to ``./wordcloud/<name>.jpg``.

    Args:
        info: Mongo document with at least ``name`` (str), ``comments``
            (list of str) and ``tags`` (list of str) keys.
    """
    jieba.analyse.set_stop_words("CNstopwords.txt")
    corpus = ' '.join(info['comments'])
    # Top keywords by TF-IDF; withWeight=True yields (word, weight) pairs.
    keywords = jieba.analyse.extract_tags(corpus, topK=100, withWeight=True)
    # NOTE(review): the original seeded the text with only the FIRST
    # character of the name -- kept for compatibility, but confirm whether
    # the full name was intended.
    seed = info['name'][0]
    # extract_tags is capped at topK=100, so the 200 limit is a safety net.
    parts = [seed] + [str(word) for word, _weight in keywords[:200]]
    # Fix: join tags with a separator so the first tag is not glued onto
    # the last keyword (the original concatenated them with no space).
    parts.extend(info['tags'])
    text = ' '.join(parts)
    wc = WordCloud(font_path='word.ttf',
                   background_color='white',
                   max_words=100,
                   ).generate(text)
    wc.to_file("./wordcloud/%s.jpg" % info['name'])


def parse_comment():
    """Train and evaluate a Naive Bayes sentiment classifier on comments.

    Reads every comment string from MongoDB, derives a binary sentiment
    label from the trailing star rating, trains a CountVectorizer +
    MultinomialNB pipeline on an 80/20 split, and prints accuracy,
    confusion-matrix and classification-report metrics (the split is
    unseeded, so numbers vary between runs).
    """
    # Load the HIT stopword list; `with` closes the file deterministically.
    with open('./HITstopwords.txt', encoding='UTF-8') as f:
        stop = [line.strip() for line in f]

    data = {
        'com': [],
        'star': []
    }
    for c in collection.find({}, {"_id": 0, "comments": 1}):
        if c['comments']:
            for co in c['comments']:
                # NOTE(review): each comment string appears to carry its star
                # rating in the last character and 3 trailing rating chars
                # overall -- confirm against the scraper's output format.
                data['star'].append(int(co[-1]) * 10)
                data['com'].append(co[:-3])

    df = pd.DataFrame(data)
    df = df.dropna()                # drop any row containing NaN
    df = df.reset_index(drop=True)  # re-index without keeping the old index

    df1 = pd.DataFrame(df, columns=['com', 'star'])
    # 1 = positive review (>= 3 stars, i.e. star >= 30), 0 = negative.
    df1['sentiment'] = df.star.apply(lambda s: 1 if s >= 30 else 0)

    # .copy() so the 'cut_comment' assignment below modifies a real frame
    # instead of a slice view (avoids SettingWithCopyWarning / silent no-op).
    x = df1[['com']].copy()
    y = df1.sentiment

    def cut_word(text):
        # Tokenize Chinese text into space-separated words for the vectorizer.
        return " ".join(jieba.cut(text))
    x["cut_comment"] = x["com"].apply(cut_word)

    # 80/20 train/test split.
    X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)

    # Drop terms that are too common (> 80% of documents) or too rare
    # (< 3 documents); the token pattern excludes digit-led tokens.
    max_df = 0.8
    min_df = 3
    vect = CountVectorizer(max_df=max_df, min_df=min_df,
                           token_pattern=r'(?u)\b[^\d\W]\w+\b',
                           stop_words=frozenset(stop))

    #### build and train the model ####
    nb = MultinomialNB()
    pipe = make_pipeline(vect, nb)  # vectorize then classify in one step
    pipe.fit(X_train.cut_comment, y_train)

    #### evaluate on the held-out test set ####
    y_pred = pipe.predict(X_test.cut_comment)
    print("准确度：")
    print(metrics.accuracy_score(y_test, y_pred))
    print("混淆矩阵：")                              # rows: true class, cols: predicted
    print(metrics.confusion_matrix(y_test, y_pred))

    #### predict over the full data set (note: includes training rows) ####
    y_pred_all = pipe.predict(x['cut_comment'])
    print("对于整个样本的预测正确率：")
    print(metrics.accuracy_score(y, y_pred_all))

    print("分类报告：")
    target_names = ['差评', '好评']
    print(metrics.classification_report(y, y_pred_all, target_names=target_names))
    # precision: share of predicted positives that are truly positive
    # recall:    share of true positives that were predicted positive
    # f1:        harmonic mean of precision and recall



if __name__ == '__main__':
    # Stage banners (printed byte-for-byte as in the original script).
    banner_analysis = "***********************  analysis  *************************"
    banner_cloud = "******************  create word-cloud **********************"
    banner_finish = "***********************  FINISH  ***************************"

    print(banner_analysis)
    parse_comment()          # train + evaluate the sentiment classifier
    print(banner_cloud)
    # cloud()                # word-cloud generation, disabled by default
    print(banner_finish)
