import os.path
import sys

# Make the project package (three directories up from this file) importable
# when the script is executed directly rather than installed as a package.
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

import click
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split

from dp_analysis.analysis.base import AnalyseBase
from dp_analysis.db.connection import Mysql
from dp_analysis.utils.data_search import (
    search_region,
    search_cuisine
)
from dp_analysis.utils.decorate import decorate


class EmotionAnalysis(AnalyseBase):
    """Sentiment analysis of user review comments.

    Pipeline: query (shop_id, star_level, comment) rows from MySQL,
    hold out a small sample for a prediction demo, convert star ratings
    to binary sentiment labels, vectorize comments with TF-IDF, train a
    Multinomial Naive Bayes classifier, then print the test accuracy and
    per-comment predicted sentiment scores for the held-out sample.
    """

    def __init__(self):
        super().__init__()

        # Number of rows held out for the prediction demo; these rows are
        # excluded from both the training and the test split.
        self.predict_number = 50

    @decorate(obj_name="EmotionAnalysis")
    def analyse(self, db, region, cuisine, limit):
        """Run the full sentiment-analysis pipeline.

        :param db: index selecting which MySQL database to use.
        :param region: comma-joined, repr()-quoted region names, ready to
            be interpolated into a SQL ``IN (...)`` clause.
        :param cuisine: comma-joined, repr()-quoted cuisine names, same
            format as ``region``.
        :param limit: maximum number of comment rows to fetch.
        """
        with Mysql("dp", db) as mysql:
            # Open a connection and close it as soon as the query is done.
            # (The original version leaked this connection.)
            connection = mysql.engine.connect()
            try:
                result_list = self.query(connection, region, cuisine, limit)
            finally:
                connection.close()

            # Reserve the first self.predict_number rows for the prediction
            # demo and keep the remainder for training/testing.
            use_predict_list = result_list[:self.predict_number]
            result_list = result_list[self.predict_number:]

            # Split rows into features (comment text), binary targets
            # (derived from star ratings) and shop ids.
            feature_list, target_list, shop_id_list = self.mapping_data(result_list)

            # 75/25 train/test split; a fixed random_state keeps runs
            # reproducible.
            train_feature, test_feature, train_target, test_target = train_test_split(
                feature_list, target_list, random_state=3, test_size=0.25)

            # Load stop words (populates self.stopwords_set on the base class).
            self.read_stopwords()

            # Fit the TF-IDF vectorizer on the training comments only.
            tv = self.tf_idf(train_feature)
            # Train the Naive Bayes classifier.
            classifier = self.naive_bayes(tv, train_feature, train_target)
            # Report test accuracy and demo predictions.
            self.data_predict(tv, classifier, test_feature, test_target, use_predict_list)

    @staticmethod
    def mapping_data(result_list):
        """Split raw rows into feature, target and shop-id lists.

        :param result_list: iterable of (shop_id, star_level, comment) rows.
        :returns: (feature_list, target_list, shop_id_list) where features
            are space-joined jieba tokens and targets are 0/1 labels.
        """
        def _binarize(star_level):
            # Star ratings 1-2 -> negative (0), 4-5 -> positive (1),
            # turning the problem into binary classification. Rows with
            # star_level == 3 are already excluded by the SQL query.
            return 0 if star_level < 3 else 1

        # Tokenize each comment with jieba and join tokens with spaces --
        # the whitespace-separated format TfidfVectorizer expects.
        feature_list = [" ".join(jieba.cut(row[-1])) for row in result_list]
        # Binary sentiment target derived from the star rating.
        target_list = [_binarize(row[1]) for row in result_list]
        # Shop ids, kept for callers that need to trace rows back to shops.
        shop_id_list = [row[0] for row in result_list]
        return feature_list, target_list, shop_id_list

    def tf_idf(self, train_feature):
        """Fit a TF-IDF vectorizer on the training comments.

        :param train_feature: list of space-joined token strings.
        :returns: the fitted ``TfidfVectorizer``.
        """
        # Recent scikit-learn versions require stop_words to be a list;
        # passing a set triggers a warning, so convert explicitly.
        tv = TfidfVectorizer(stop_words=list(self.stopwords_set),
                             max_features=3000, ngram_range=(1, 2))
        # Learn vocabulary and IDF weights from the training corpus.
        tv.fit(train_feature)
        return tv

    @staticmethod
    def naive_bayes(tv, train_feature, train_target):
        """Train a Multinomial Naive Bayes classifier.

        :param tv: fitted TF-IDF vectorizer.
        :param train_feature: training comment strings.
        :param train_target: binary sentiment labels.
        :returns: the fitted ``MultinomialNB`` classifier.
        """
        classifier = MultinomialNB()
        # Transform text into the sparse TF-IDF matrix the model consumes.
        classifier.fit(tv.transform(train_feature), train_target)
        return classifier

    @staticmethod
    def query(connection, region, cuisine, limit):
        """Fetch (shop_id, star_level, comment) rows for the given filters.

        Neutral (star_level == 3) reviews are excluded so the task stays
        strictly binary.

        NOTE(review): region/cuisine/limit are interpolated directly into
        the SQL string. The values are repr()-quoted by the CLI layer, but
        this is not real SQL escaping -- bind parameters would be safer if
        this code ever receives untrusted input.
        """
        return connection.execute(f"SELECT a.shop_id, b.star_level, b.`comment` "
                                  f"FROM shop_info AS a, shop_comment AS b "
                                  f"WHERE a.shop_id = b.shop_id "
                                  f"AND b.star_level != 3 "
                                  f"AND a.region IN ({region}) "
                                  f"AND a.category IN ({cuisine}) "
                                  f"LIMIT {limit};").fetchall()

    def data_predict(self, tv, classifier, test_feature, test_target, use_predict_list):
        """Print test accuracy and demo predictions for held-out comments.

        :param tv: fitted TF-IDF vectorizer.
        :param classifier: fitted MultinomialNB classifier.
        :param test_feature: test-split comment strings.
        :param test_target: test-split binary labels.
        :param use_predict_list: raw (shop_id, star_level, comment) rows
            reserved for the prediction demo.
        """
        # Overall accuracy on the held-out test split.
        print(f"预测整体精确率如下：{self.output(classifier.score(tv.transform(test_feature), test_target))}\n")

        for (shop_id, star_level, comment) in use_predict_list:
            # Tokenize the single comment, vectorize it, and take the
            # probability of the positive class. predict_proba returns a
            # (1, 2) array; index [0, 1] yields the scalar directly --
            # float() on a size-1 array is deprecated in NumPy >= 1.25.
            predict_score = classifier.predict_proba(
                tv.transform([" ".join(jieba.cut(comment))]))[0, 1]
            print(f"comment={self.output(comment)}, "
                  f"actual_score={self.output(star_level)}, "
                  f"predict_score={self.output('%.02f' % predict_score)}\n")


@click.command()
@click.option("--db", default=0, help="请选择使用的数据库")
@click.option("--region", default="all", help="12大地区通过helpers.py获取\n若指定多个以逗号分割")
@click.option("--cuisine", default="all", help="默认所有菜系，各地区所有菜系通过helpers.py获取\n若指定多个以逗号分割")
@click.option("--limit", default=10000, help="请选择使用的评论数量")
def main(db, region, cuisine, limit):
    """CLI entry point: resolve region/cuisine filters and run the analysis.

    ``region`` and ``cuisine`` accept "all" (resolved via the lookup
    helpers) or a comma-separated list of names.
    """
    if region == "all":
        region = search_region(db)
    else:
        # set() deduplicates a user-supplied comma-separated list.
        region = set(region.split(','))
    if not region:
        raise Exception("地区输入有误，请检查输入！")

    if cuisine == "all":
        # Union of the cuisines available in every selected region.
        cuisine = set()
        for reg in region:
            cuisine |= search_cuisine(db, reg)
    else:
        cuisine = set(cuisine.split(','))
    if not cuisine:
        raise Exception("菜系输入有误，请检查输入！")

    # repr()-quote each value and comma-join so the strings can be dropped
    # straight into a SQL ``IN (...)`` clause by EmotionAnalysis.query.
    region = ','.join(f"{reg!r}" for reg in region)
    cuisine = ','.join(f"{cui!r}" for cui in cuisine)

    emotion_analysis = EmotionAnalysis()
    emotion_analysis.analyse(db, region, cuisine, limit)


if __name__ == '__main__':
    # Example invocation:
    # python emotion_analysis.py --db 0 --region 北京 --limit 10000
    main()