# -*- coding: utf-8 -*-
__author__ = 'Jinkey'

import os
import jieba  # 分词引擎
from sklearn.feature_extraction.text import TfidfVectorizer  # 文档向量化
from sklearn.naive_bayes import MultinomialNB  # 贝叶斯分类器
from sklearn.pipeline import Pipeline  # 分析管道（组合向量化和分类器）
from sklearn.cross_validation import ShuffleSplit  # 交叉验证评估分类结果
from sklearn.metrics import precision_recall_curve, roc_curve, auc  # 结果评估器
from matplotlib import pylab  # 评估结果绘图工具
import numpy as np


def preproccess_data():
    """Merge the raw review files into one labelled data file.

    The corpus is 4,000 JD.com product reviews downloaded from Datatang:
    2,000 positive and 2,000 negative, one review per .txt file. Each
    review is appended to sentiment_data.txt as one line of the form
    "<pos|neg>\t<utf-8 comment>".
    """
    _append_labelled_reviews("data/emotion/chinese/pos", "pos")
    _append_labelled_reviews("data/emotion/chinese/neg", "neg")


def _append_labelled_reviews(src_dir, label):
    # Append every review file in src_dir to sentiment_data.txt,
    # prefixed with the given sentiment label ("pos" or "neg").
    out_path = "data/emotion/chinese/sentiment_data.txt"
    for each_txt in os.listdir(src_dir):
        with open(os.path.join(src_dir, each_txt), "r") as f:
            txt = ""
            for line in f:
                # The source files are GBK-encoded; transcode to UTF-8
                # so the merged file displays correctly (e.g. in PyCharm).
                pre_txt = line.decode('GBK')
                txt = txt + pre_txt.encode('utf-8').strip()
            # Open in append mode so successive files accumulate.
            with open(out_path, "a") as data:
                data.write(label + "\t" + txt + "\n")


def setup_stopwords():
    """Load the (downloaded) stop-word list and return it as a set."""
    stopwords = set()
    with open('data/stopwords.txt', 'r') as f:
        for word in f:
            stopwords.add(word.strip())
    return stopwords


def load_comment_from_e_commerce():
    """Return (sentiments, comments) as parallel numpy arrays.

    On the first run this parses sentiment_data.txt (one
    "label\tcomment" line per review) and caches both arrays as .npy
    files; subsequent runs load the cached arrays directly.
    """
    # Paths for the cached label / comment arrays.
    sentiments_path = "data/emotion/chinese/sentiments.npy"
    comments_path = "data/emotion/chinese/comments.npy"

    # Reuse the cache only when BOTH files exist. The original checked
    # only sentiments.npy, which crashed if the second save had never
    # completed (partially written cache).
    if os.path.exists(sentiments_path) and os.path.exists(comments_path):
        return np.load(sentiments_path), np.load(comments_path)

    sentiments = []
    comments = []
    with open("data/emotion/chinese/sentiment_data.txt", "r") as data:
        for line in data:
            parts = line.strip().split("\t")
            if len(parts) < 2:
                # Skip blank/malformed lines instead of raising
                # IndexError on the missing comment field.
                continue
            sentiments.append(parts[0])
            comments.append(parts[1])

    # Convert to numpy arrays and cache them for the next run.
    sentiments = np.asarray(sentiments)
    comments = np.asarray(comments)
    np.save(sentiments_path, sentiments)
    np.save(comments_path, comments)

    return sentiments, comments


def jieba_comments_vectorizer():
    """Build a TF-IDF vectorizer that segments Chinese text with jieba."""
    def tokenize(text):
        # Full-mode segmentation: emit every word jieba can recognise.
        return jieba.cut(text, cut_all=True)

    return TfidfVectorizer(
        min_df=3,
        stop_words=setup_stopwords(),
        tokenizer=tokenize,
        binary=False,
    )


def create_classifier_pipeline():
    """Chain the jieba TF-IDF vectorizer with a multinomial Naive Bayes
    classifier.

    The step names ('vect', 'clf') are only labels for the pipeline;
    they have no effect on the model itself.
    """
    steps = [
        ('vect', jieba_comments_vectorizer()),
        ('clf', MultinomialNB()),
    ]
    return Pipeline(steps)


def train_model(classifier_pipeline, X, Y, name="NB ngram", plot=False):

    scores = []
    pr_scores = []
    precisions, recalls, thresholds = [], [], []
    false_positive_rates, true_positive_rates, roc_thresholds = [], [], []
    train_errors = []
    test_errors = []

    cross_validation = ShuffleSplit(
        n=len(X), n_iter=10, test_size=0.3, random_state=0)

    for train, test in cross_validation:
        print train, test
        X_train, y_train = X[train], Y[train]
        X_test, y_test = X[test], Y[test]

        classifier = classifier_pipeline()
        classifier.fit(X_train, y_train)

        train_score = classifier.score(X_train, y_train)
        test_score = classifier.score(X_test, y_test)
        print train_score, test_score
        train_errors.append(1 - train_score)
        test_errors.append(1 - test_score)

        scores.append(test_score)

        proba = classifier.predict_proba(X_test)

        false_positive_rate, true_positive_rate, roc_threshold = roc_curve(y_test, proba[:, 1])
        precision, recall, pr_thresholds = precision_recall_curve(y_test, proba[:, 1])
        print roc_threshold,pr_thresholds
        pr_scores.append(auc(recall, precision))
        precisions.append(precision)
        recalls.append(recall)
        thresholds.append(roc_threshold)

        false_positive_rates.append(false_positive_rate)
        true_positive_rates.append(true_positive_rate)
        roc_thresholds.append


    if plot:
        scores_to_sort = pr_scores
        median = np.argsort(scores_to_sort)[len(scores_to_sort) / 2]
        plot_pr(pr_scores[median], name, "01", precisions[median],
                recalls[median], label=name)
        plot_pr(pr_scores[median], name, "02", true_positive_rates[median],
                false_positive_rates[median], label=name)

        summary = (np.mean(scores), np.std(scores),
                   np.mean(pr_scores), np.std(pr_scores))
        print "%.3f\t%.3f\t%.3f\t%.3f\t" % summary

    return np.mean(train_errors), np.mean(test_errors)


def binary_sentiments(sentiments):
    """Convert text sentiment labels to a numeric target array.

    Classifiers cannot work with the string tags "pos"/"neg", so map
    "neg" -> 1.0 and anything else (i.e. "pos") -> 0.0.
    """
    return (sentiments == "neg").astype(np.float64)


def plot_pr(auc_score, name, phase, precision, recall, label=None):
    """Save a filled precision/recall curve to pr_<name>_<phase>.png.

    auc_score -- area under the curve, shown in the title.
    name/phase -- build the output file name (spaces become underscores).
    precision/recall -- curve coordinates (y and x respectively).
    """
    # BUG FIX: the original called clf() before figure(), which spawned
    # an extra implicit figure on every call; create the figure first,
    # then clear it.
    pylab.figure(num=None, figsize=(5, 4))
    pylab.clf()
    pylab.grid(True)
    pylab.fill_between(recall, precision, color="#249F4B", alpha=0.7)
    pylab.plot(recall, precision, color="#249F4B", lw=1)
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Recall', color="#249F4B")
    pylab.ylabel('Precision', color="#249F4B")
    pylab.title('P/R curve (AUC=%0.2f) / %s' % (auc_score, label), color="#249F4B")
    filename = name.replace(" ", "_")
    # os.path.join with a single argument was a no-op; build the name directly.
    pylab.savefig("pr_%s_%s.png" % (filename, phase), bbox_inches="tight")
    # Close the figure so repeated calls don't accumulate open figures.
    pylab.close()



if __name__ == "__main__":
    # Run the full workflow only when executed as a script, so the
    # module can be imported without triggering training.
    sentiments, comments = load_comment_from_e_commerce()
    sentiments = binary_sentiments(sentiments)
    train_model(classifier_pipeline=create_classifier_pipeline,
                X=comments, Y=sentiments,
                name="sentiment_analysis_chinese", plot=True)





