#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pandas as pd
import numpy as np
from collections import Counter
from math import sqrt
import random
import json
import os
import csv
import time
import datetime
import gensim
import tensorflow as tf
from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score
import matplotlib.pyplot as plt


# Training hyper-parameters for the network
class TrainSetting(object):
    """Training-loop settings, read elsewhere as config.training.*"""
    epoches = 10  # number of passes over the training data (misspelling kept: accessed as config.training.epoches)
    evaluateEvery = 50  # run an evaluation pass every N global steps
    checkpointEvery = 100  # save a checkpoint every N global steps
    learningRate = 0.01  # Adam learning rate


class ModelSetting(object):
    """TextCNN architecture hyper-parameters, read elsewhere as config.model.*"""
    # Word-vector dimensionality
    embeddingSize = 200
    numFilters = 128  # number of convolution filters per filter size
    filterSizes = [2, 3, 4, 5]  # size-2 kernel added on top of the paper's 3/4/5; single conv layer only
    dropoutKeepProb = 0.5  # keep probability during training (1.0 is fed at eval time)
    l2RegLambda = 0.01  # weight of the L2 penalty on the output layer


class Config(object):
    """Top-level configuration bundling data, training and model settings."""
    sequenceLength = 10  # fixed input length: mean or max of all sequence lengths
    batchSize = 250
    # labels are int 0/1
    dataSource = "train_procedure.csv"
    stopWordSource = "stop1.txt"  # stop-word list file
    numClasses = 2
    rate = 0.7  # fraction of the data used for training
    training = TrainSetting()
    model = ModelSetting()


## 2 Data preprocessing
class DataProcess(object):
    """Load the raw CSV dataset, build the vocabulary and embedding matrix
    from a pre-trained gensim word2vec model, and produce fixed-length,
    index-encoded train/eval arrays.

    Call gen_data() once; results are exposed via trainReviews, trainLabels,
    evalReviews, evalLabels and wordEmbedding.
    """

    def __init__(self, config):
        self._dataSource = config.dataSource
        self._stopWordSource = config.stopWordSource
        # Every review is truncated / zero-padded to this fixed length.
        self._sequenceLength = config.sequenceLength
        self._embeddingSize = config.model.embeddingSize
        self._batchSize = config.batchSize
        self._rate = config.rate
        self._stopWordDict = {}
        self.trainReviews = []
        self.trainLabels = []
        self.evalReviews = []
        self.evalLabels = []
        self.wordEmbedding = None
        self._wordToIndex = {}
        self._indexToWord = {}

    def _readData(self, filePath):
        """Read the (label, text) CSV; return tokenized reviews and labels."""
        df = pd.read_csv(filePath, names=['sentiment', 'review'])
        labels = df["sentiment"].tolist()
        review = df["review"].tolist()
        # Reviews are whitespace-pre-tokenized strings.
        reviews = [line.strip().split() for line in review]
        return reviews, labels

    def _reviewProcess(self, review, sequenceLength, wordToIndex):
        """Encode one tokenized review as a fixed-length index vector.

        Words beyond sequenceLength are dropped; short reviews stay padded
        with 0 (the "pad" index); unknown words map to wordToIndex["UNK"].
        """
        reviewVec = np.zeros(sequenceLength)
        sequenceLen = min(len(review), sequenceLength)
        for i in range(sequenceLen):
            reviewVec[i] = wordToIndex.get(review[i], wordToIndex["UNK"])
        return reviewVec

    def _genTrainEvalData(self, x, y, rate):
        """Index-encode all reviews and split them into train/eval arrays."""
        reviews = []
        labels = []
        # Convert each review's words to vocabulary indices.
        for i in range(len(x)):
            reviewVec = self._reviewProcess(
                x[i], self._sequenceLength, self._wordToIndex)
            reviews.append(reviewVec)
            labels.append([y[i]])
        # Split: first `rate` fraction for training, the remainder for eval.
        trainIndex = int(len(x) * rate)
        trainReviews = np.asarray(reviews[:trainIndex], dtype="int64")
        trainLabels = np.array(labels[:trainIndex], dtype="float32")
        evalReviews = np.asarray(reviews[trainIndex:], dtype="int64")
        evalLabels = np.array(labels[trainIndex:], dtype="float32")
        return trainReviews, trainLabels, evalReviews, evalLabels

    def _getWordEmbedding(self, words):
        """Look up each word's vector in the pre-trained word2vec model.

        Returns (vocab, embedding) with "pad" (zero vector) and "UNK"
        (random vector) prepended; words missing from the model are skipped.
        """
        wordVec = gensim.models.Word2Vec.load('data/vecmodel')
        vocab = ["pad", "UNK"]
        wordEmbedding = [np.zeros(self._embeddingSize),
                         np.random.randn(self._embeddingSize)]
        for word in words:
            try:
                vector = wordVec.wv[word]
            # Fix: was a bare `except:`, which also hid unrelated errors;
            # gensim raises KeyError for out-of-vocabulary words.
            except KeyError:
                print(word + "词向量中不存在")
                continue
            vocab.append(word)
            wordEmbedding.append(vector)
        return vocab, np.array(wordEmbedding)

    def _readStopWord(self, stopWordPath):
        """Load the stop-word list into a word -> index dict for O(1) lookup."""
        # Explicit encoding: the list contains non-ASCII (Chinese) words,
        # so relying on the platform default encoding was fragile.
        with open(stopWordPath, "r", encoding="utf-8") as f:
            stopWordList = f.read().split()
            # Fix: store under the attribute declared in __init__
            # (previously written to self.stopWordDict, leaving
            # self._stopWordDict forever empty).
            self._stopWordDict = dict(
                zip(stopWordList, range(len(stopWordList))))

    def _genVocabulary(self, reviews):
        """Build the vocabulary, embedding matrix and word<->index mappings."""
        allWords = [word for review in reviews for word in review]
        # Drop stop words.
        subWords = [word for word in allWords if word not in self._stopWordDict]
        # Count word frequencies and sort descending.
        wordCount = Counter(subWords)
        sortWordCount = sorted(
            wordCount.items(), key=lambda x: x[1], reverse=True)
        # Drop rare words (frequency < 5).
        words = [item[0] for item in sortWordCount if item[1] >= 5]

        vocab, wordEmbedding = self._getWordEmbedding(words)
        self.wordEmbedding = wordEmbedding

        self._wordToIndex = dict(zip(vocab, range(len(vocab))))
        self._indexToWord = dict(zip(range(len(vocab)), vocab))

        # Persist the mappings as JSON for later inference.
        with open("data/wordToIndex.json", "w", encoding="utf-8") as f:
            json.dump(self._wordToIndex, f)

        with open("data/indexToWord.json", "w", encoding="utf-8") as f:
            json.dump(self._indexToWord, f)

    def gen_data(self):
        """Run the full pipeline and populate the public attributes."""
        # Stop words first: the vocabulary build depends on them.
        self._readStopWord(self._stopWordSource)
        # Load the raw dataset.
        reviews, labels = self._readData(self._dataSource)
        # Build vocabulary / embedding matrix.
        self._genVocabulary(reviews)
        # Encode and split into train / eval sets.
        trainReviews, trainLabels, evalReviews, evalLabels = self._genTrainEvalData(
            reviews, labels, self._rate)
        self.trainReviews = trainReviews
        self.trainLabels = trainLabels
        self.evalReviews = evalReviews
        self.evalLabels = evalLabels


# Mini-batch generator
def nextBatch(x, y, batchSize, seed=1):
    """Shuffle (x, y) in unison and yield consecutive mini-batches.

    Fix: the original called np.random.seed(1) on every invocation, which
    (a) made every epoch's "shuffle" the identical permutation and
    (b) clobbered NumPy's global RNG for the whole program.  A local
    RandomState keeps the permutation byte-for-byte identical (the legacy
    global RNG is a RandomState) without the global side effect; `seed`
    defaults to the original hard-coded 1 for backward compatibility.
    Trailing examples that do not fill a whole batch are dropped.
    """
    rng = np.random.RandomState(seed)
    perm = np.arange(len(x))
    rng.shuffle(perm)
    x = x[perm]
    y = y[perm]
    numBatches = len(x) // batchSize
    for i in range(numBatches):
        start = i * batchSize
        end = start + batchSize
        yield (np.array(x[start:end], dtype="int64"),
               np.array(y[start:end], dtype="float32"))


# Build the network model
class TextCNN(object):
    """TextCNN binary classifier (TensorFlow 1.x graph mode).

    Pipeline: embedding lookup of pre-trained word vectors -> one
    conv + max-pool branch per filter size -> concat -> dropout ->
    single-logit dense layer.  `predictions` holds the raw logit and
    `binaryPreds` the 0/1 decision (logit >= 0, i.e. sigmoid >= 0.5).
    """

    def __init__(self, config, wordEmbedding):

        # Model input placeholders
        self.inputX = tf.placeholder(
            tf.int32, [None, config.sequenceLength], name="inputX")
        self.inputY = tf.placeholder(tf.float32, [None, 1], name="inputY")
        # Dropout keep-probability (fed 1.0 at eval time to disable dropout)
        self.dropoutKeepProb = tf.placeholder(
            tf.float32, name="dropoutKeepProb")
        # Accumulator for the L2 regularization term
        l2Loss = tf.constant(0.0)

        # Word embedding layer
        with tf.name_scope("embedding"):

            # Initialize the embedding matrix from the pre-trained vectors
            self.W = tf.Variable(
                tf.cast(wordEmbedding, dtype=tf.float32, name="word2vec"), name="W")
            # Map word indices to vectors: [batch_size, sequence_length, embedding_size]
            self.embeddedWords = tf.nn.embedding_lookup(self.W, self.inputX)
            # conv2d needs 4-D [batch, width, height, channel]; add a channel dim
            self.embeddedWordsExpanded = tf.expand_dims(self.embeddedWords, -1)

        # Convolution + max-pooling branches
        pooledOutputs = []
        # One single-layer conv branch per size in config.model.filterSizes
        # (here [2, 3, 4, 5]); the branches are fused by the concat below.
        for i, filterSize in enumerate(config.model.filterSizes):
            with tf.name_scope("conv-maxpool-%s" % filterSize):
                # Kernel is filterSize x embeddingSize with numFilters output channels
                # Initialize the weight matrix and bias
                filterShape = [
                    filterSize, config.model.embeddingSize, 1, config.model.numFilters]
                W = tf.Variable(tf.truncated_normal(
                    filterShape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(
                    0.1, shape=[config.model.numFilters]), name="b")
                conv = tf.nn.conv2d(
                    self.embeddedWordsExpanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")

                # ReLU activation
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")

                # Max-pool over the whole valid feature map (one value per filter)
                pooled = tf.nn.max_pool(h,
                    # ksize shape: [batch, height, width, channels]
                    ksize=[1, config.sequenceLength - filterSize + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooledOutputs.append(pooled)  # collect every branch's pooled output
        # Total feature length after concatenating all branches
        numFiltersTotal = config.model.numFilters * \
            len(config.model.filterSizes)
        # Pooled tensors keep their rank; concatenate on the channel axis
        self.hPool = tf.concat(pooledOutputs, 3)
        # Flatten to 2-D for the fully connected layer
        self.hPoolFlat = tf.reshape(self.hPool, [-1, numFiltersTotal])
        # dropout
        with tf.name_scope("dropout"):
            self.hDrop = tf.nn.dropout(self.hPoolFlat, self.dropoutKeepProb)
        # Fully connected output layer (single logit)
        with tf.name_scope("output"):
            outputW = tf.get_variable(
                "outputW",
                shape=[numFiltersTotal, 1],
                initializer=tf.contrib.layers.xavier_initializer())
            outputB = tf.Variable(tf.constant(0.1, shape=[1]), name="outputB")
            l2Loss += tf.nn.l2_loss(outputW)
            l2Loss += tf.nn.l2_loss(outputB)
            self.predictions = tf.nn.xw_plus_b(
                self.hDrop, outputW, outputB, name="predictions")
            self.binaryPreds = tf.cast(tf.greater_equal(
                self.predictions, 0.0), tf.float32, name="binaryPreds")
        # Sigmoid cross-entropy loss plus the L2 penalty
        with tf.name_scope("loss"):
            losses = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.predictions, labels=self.inputY)
            self.loss = tf.reduce_mean(
                losses) + config.model.l2RegLambda * l2Loss


# Evaluation helpers
def mean(item):
    """Arithmetic mean of a non-empty sequence of numbers."""
    total = 0
    for value in item:
        total += value
    return total / len(item)

def genMetrics(trueY, predY, binaryPredY):
    """Return (accuracy, auc, precision, recall), each rounded to 4 decimals.

    predY holds raw scores/probabilities (used only for the AUC);
    binaryPredY holds the thresholded 0/1 labels (used for the rest).
    """
    scores = (
        accuracy_score(trueY, binaryPredY),
        roc_auc_score(trueY, predY),
        precision_score(trueY, binaryPredY),
        recall_score(trueY, binaryPredY),
    )
    return tuple(round(score, 4) for score in scores)


# Visualize the four evaluation metrics (acc / recall / precision / auc)
def save_rate_figure(x_limit, output_file, acc_vi, recall_vi, precision_vi, auc_vi):
    """Plot the four metric curves in a 2x2 grid and save to output_file.

    Fix 1: the parameter order now matches both call sites (and mirrors
    save_f1score_figure), which pass output_file as the SECOND positional
    argument; the old signature took it last, so the filename landed in
    acc_vi and every metric list was shifted by one position.
    Fix 2: the annotation text concatenated a str with the float rates_h[i],
    which raised TypeError; the value is now formatted via str().
    """
    plt.figure(figsize=(16, 12))
    rates = ['acc', 'recall', 'precision', 'auc']
    rates_list = [acc_vi, recall_vi, precision_vi, auc_vi]
    rates_h = [0.9, 0.95, 0.9, 0.95]  # per-metric reference line heights
    for i, rate in enumerate(rates):
        plt.subplot(2, 2, i + 1)
        plt.plot(list(range(len(rates_list[i]))), rates_list[i],
                 label="$" + rate + "$", color="red", linewidth=2)
        plt.xlabel("Batch")
        plt.ylabel("Num of " + rate)
        plt.title(rate + "_rate")
        plt.xlim(0, x_limit)
        plt.ylim(0.3, 1.0)
        plt.hlines(rates_h[i], 0, x_limit, colors="g", linestyles="dashed")
        plt.annotate(r'$' + rate + '= ' + str(rates_h[i]) + '$',
                     xy=(x_limit / 2, rates_h[i]), xytext=(+30, -60),
                     textcoords='offset points', fontsize=16,
                     arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=.01'))
    plt.savefig(output_file)
    plt.close()  # release the figure so repeated calls do not accumulate memory


# F1-score visualization
def save_f1score_figure(x_limit, output_file, precision_vi, recall_vi):
    """Compute the F1 curve (harmonic mean of precision and recall) from the
    recorded per-batch values, plot it, and save the figure to output_file.
    The first x_limit//40 points are skipped before computing F1."""
    skip = x_limit // 40
    precision_arr = np.array(precision_vi[skip:])
    recall_arr = np.array(recall_vi[skip:])
    f1_score = 2. / (1. / precision_arr + 1. / recall_arr)

    plt.figure(figsize=(8, 6))
    xs = list(range(len(f1_score)))
    plt.plot(xs, f1_score, label="$f1_score$", color="red", linewidth=2)
    plt.xlabel("Batch")
    plt.ylabel("Num of f1_score")
    plt.title("f1_score")
    plt.xlim(0, x_limit)
    plt.ylim(0.3, 1.0)
    plt.hlines(0.95, 0, x_limit, colors="g", linestyles="dashed")
    # Dashed reference line at 0.95 with an annotated arrow
    plt.annotate(r'$F1score= 0.95$', xy=(x_limit / 2, 0.95), xytext=(+30, -60),
                 textcoords='offset points', fontsize=16,
                 arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=.01'))
    plt.savefig(output_file)


## Train the model
if __name__ == '__main__':
    # Load configuration
    config = Config()

    # Data preprocessing
    data = DataProcess(config)
    data.gen_data()

    # Train / eval splits and the pre-trained embedding matrix
    trainReviews = data.trainReviews
    trainLabels = data.trainLabels
    evalReviews = data.evalReviews
    evalLabels = data.evalLabels
    wordEmbedding = data.wordEmbedding

    # Build the computation graph
    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(
            allow_soft_placement=True, log_device_placement=False)
        session_conf.gpu_options.allow_growth = True
        session_conf.gpu_options.per_process_gpu_memory_fraction = 0.7  # cap per-process GPU memory usage

        sess = tf.Session(config=session_conf)
        with sess.as_default():
            cnn = TextCNN(config, wordEmbedding)

            # Non-trainable global step counter, incremented by the train op
            globalStep = tf.Variable(0, name="globalStep", trainable=False)
            # Optimizer with the configured learning rate
            optimizer = tf.train.AdamOptimizer(config.training.learningRate)
            # Compute gradients, yielding (gradient, variable) pairs
            gradsAndVars = optimizer.compute_gradients(cnn.loss)
            # Apply the gradients to the variables to build the train op
            trainOp = optimizer.apply_gradients(
                gradsAndVars, global_step=globalStep)

            # -------------for tensorboard----------------
            # NOTE(review): gradSummaries is never appended to — dead variable;
            # the histogram/scalar calls below register summaries as a side effect.
            gradSummaries = []
            for g, v in gradsAndVars:
                if g is not None:
                    tf.summary.histogram("{}/grad/hist".format(v.name), g)
                    tf.summary.scalar(
                        "{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))

            outDir = os.path.abspath(os.path.join(os.path.curdir, "summarys"))
            lossSummary = tf.summary.scalar("loss", cnn.loss)
            summaryOp = tf.summary.merge_all()

            trainSummaryDir = os.path.join(outDir, "train")
            trainSummaryWriter = tf.summary.FileWriter(trainSummaryDir, sess.graph)
            evalSummaryDir = os.path.join(outDir, "eval")
            evalSummaryWriter = tf.summary.FileWriter(evalSummaryDir, sess.graph)

            # ----------------checkpointing----------------
            # Saver over all graph variables; keeps the 5 most recent checkpoints
            saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)

            # ----------------SavedModel export------------
            builder = tf.saved_model.builder.SavedModelBuilder("data/savedModel")
            sess.run(tf.global_variables_initializer())

            def trainStep(batchX, batchY):
                # One optimization step on a training batch (dropout enabled).
                feed_dict = {
                    cnn.inputX: batchX,
                    cnn.inputY: batchY,
                    cnn.dropoutKeepProb: config.model.dropoutKeepProb
                }
                _, summary, step, loss, predictions, binaryPreds = sess.run(
                    [trainOp, summaryOp, globalStep, cnn.loss,
                        cnn.predictions, cnn.binaryPreds],
                    feed_dict)
                acc, auc, precision, recall = genMetrics(
                    batchY, predictions, binaryPreds)
                print("step: {}, loss: {:.4f}, acc: {:.4f}, auc: {:.4f}, precision: {:.4f}, recall: {:.4f}".format(step, loss, acc,
                                                                                                                auc, precision, recall))
                # Record per-batch training metrics for the plots at the bottom
                loss_tr_vi.append(loss)
                acc_tr_vi.append(acc)
                auc_tr_vi.append(auc)
                precision_tr_vi.append(precision)
                recall_tr_vi.append(recall)
                trainSummaryWriter.add_summary(summary, step)

            def devStep(batchX, batchY):
                # One evaluation pass on a batch (dropout disabled: keep prob 1.0).
                feed_dict = {
                    cnn.inputX: batchX,
                    cnn.inputY: batchY,
                    cnn.dropoutKeepProb: 1.0
                }
                summary, step, loss, predictions, binaryPreds = sess.run(
                    [summaryOp, globalStep, cnn.loss, cnn.predictions, cnn.binaryPreds],
                    feed_dict)
                acc, auc, precision, recall = genMetrics(
                    batchY, predictions, binaryPreds)
                evalSummaryWriter.add_summary(summary, step)
                return loss, acc, auc, precision, recall

            # Metric histories used by the visualization functions below
            loss_tr_vi = []
            acc_tr_vi = []
            auc_tr_vi = []
            precision_tr_vi = []
            recall_tr_vi = []
            loss_vi = []
            acc_vi = []
            auc_vi = []
            precision_vi = []
            recall_vi = []

            for i in range(config.training.epoches):
                # Train for one epoch
                print("Start training!")
                for batchTrain in nextBatch(trainReviews, trainLabels, config.batchSize):
                    trainStep(batchTrain[0], batchTrain[1])

                    currentStep = tf.train.global_step(sess, globalStep)
                    if currentStep % config.training.evaluateEvery == 0:
                        # Periodic evaluation on the held-out set
                        print("\nTesing:")
                        losses = []
                        accs = []
                        aucs = []
                        precisions = []
                        recalls = []

                        for batchEval in nextBatch(evalReviews, evalLabels, config.batchSize):
                            loss, acc, auc, precision, recall = devStep(
                                batchEval[0], batchEval[1])
                            losses.append(loss)
                            accs.append(acc)
                            aucs.append(auc)
                            precisions.append(precision)
                            recalls.append(recall)
                        print("step: {}, loss: {:.4f}, acc: {:.4f}, auc: {:.4f}, precision: {:.4f}, recall: {:.4f}".format(currentStep, mean(losses),
                                                                                                                        mean(accs), mean(aucs), mean(precisions), mean(recalls)))
                        print("One Test OVER!")
                        loss_vi.append(mean(losses))
                        acc_vi.append(mean(accs))
                        auc_vi.append(mean(aucs))
                        precision_vi.append(mean(precisions))
                        recall_vi.append(mean(recalls))
                    if currentStep % config.training.checkpointEvery == 0:
                        # Save a checkpoint
                        path = saver.save(sess, "data/my-model",
                                        global_step=currentStep)
                        print("Saved my checkpoint to {}\n".format(path))

            # Export the trained graph in SavedModel format for serving
            inputs = {"inputX": tf.saved_model.utils.build_tensor_info(cnn.inputX),
                    "keepProb": tf.saved_model.utils.build_tensor_info(cnn.dropoutKeepProb)}

            outputs = {
                "binaryPreds": tf.saved_model.utils.build_tensor_info(cnn.binaryPreds)}

            prediction_signature = tf.saved_model.signature_def_utils.build_signature_def(inputs=inputs, outputs=outputs,
                                                                                        method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
            legacy_init_op = tf.group(
                tf.tables_initializer(), name="legacy_init_op")
            builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING],
                                                signature_def_map={"predict": prediction_signature}, legacy_init_op=legacy_init_op)
            builder.save()

    ## Visualization
    # Training-set curves
    save_rate_figure(410, 'output1.png', acc_tr_vi,
                     recall_tr_vi, precision_tr_vi, auc_tr_vi)
    save_f1score_figure(410, 'output2.png', precision_tr_vi, recall_tr_vi)

    # Eval-set curves
    save_rate_figure(17, 'output3.png', acc_vi,
                     recall_vi, precision_vi, auc_vi)
    save_f1score_figure(17, 'output4.png', precision_vi, recall_vi)
