# Model testing script: trains an MLP classifier and generates an evaluation report

import tensorflow as tf
import scipy.io as scio
import numpy as np
from sklearn.metrics import roc_auc_score, roc_curve, auc
from sklearn.model_selection import train_test_split
import scipy.io as scio
import matplotlib.pyplot as plt
import os
import datetime
from pyecharts import Sankey
import csv


# Load a MATLAB .mat file
def readMat(matPath):
    """Load a MATLAB .mat file and return its dict of variables."""
    matContents = scio.loadmat(matPath)
    return matContents


# Create a folder (including parents) if it does not exist
def mkdir(path):
    """Create *path* and any missing parent directories; no-op if it exists.

    exist_ok=True makes the call idempotent and race-free, replacing the
    old check-then-create pattern (which could raise if another process
    created the folder between the check and the makedirs call).
    """
    os.makedirs(path, exist_ok=True)


# Undersample the negative class
def reSample(trainX, trainY, randProb):
    """Undersample negatives: keep every positive sample (label >= 0.5),
    keep each negative sample with probability randProb.

    Returns the kept (features, labels) as two parallel lists.
    """
    keptX, keptY = [], []
    for features, label in zip(trainX, trainY):
        # Positives short-circuit, so np.random.rand() is only drawn for
        # negatives — same random-call sequence as the original loop.
        if label >= 0.5 or np.random.rand() < randProb:
            keptX.append(features)
            keptY.append(label)
    return keptX, keptY

# ---- Configuration (every name here is read by the top-level code below) ----
matPath = '../dataSet20180417.mat'  # path of the .mat file holding the labels
csvPath = 'TFIDF(without header).csv'  # path of the TF-IDF feature CSV (no header row)
networkSize = [5, 5]  # MLP hidden-layer widths
batch_size = 200  # mini-batch size for gradient descent
epochMax = 5000  # number of training iterations
resampleProb = 0.25  # probability of keeping each negative sample (see reSample)


# Create a time-stamped folder that holds this run's artifacts
startTime = datetime.datetime.now()  # wall-clock start, reported in the summary
nowTime = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
file = "result/MS%s_%s" % (nowTime, networkSize)
print("创建文件夹%s" % file)
mkdir(file)

# ---- Load the data set ----
# Labels come from the .mat file; features come from the CSV, one row per sample.
dataSet = readMat(matPath)
labels = np.array(dataSet['labels'])
chara = []
# BUG FIX: use a context manager so the CSV handle is closed
# (the original `open(csvPath)` was never closed).
with open(csvPath) as featureFile:
    for row in csv.reader(featureFile):
        # NOTE(review): csv.reader yields strings; they are only coerced to
        # float32 later when fed to the TensorFlow placeholder — verify the
        # CSV is purely numeric.
        chara.append(row)
print("数据集读取完成")

# 80/20 train/test split with a fixed seed for reproducibility
trainX, testX, trainY, testY = train_test_split(chara, labels, test_size=0.2, random_state=42)
sizeX = len(trainX[0])  # number of input features
print('数据集划分完成')

# Undersample the negative class (see reSample)
trainX, trainY = reSample(trainX, trainY, resampleProb)

# Number of whole batches per sweep of the (resampled) training set.
# NOTE(review): n_batch is computed but never used below.
n_batch = len(trainX) // batch_size

# ---- Network construction (TensorFlow 1.x graph) ----
# Placeholders for the input features and the 0/1 labels
x = tf.placeholder(tf.float32, shape=[None, sizeX])
y = tf.placeholder(tf.float32, shape=[None, 1])
# NOTE(review): `predy` is never fed or read anywhere in this script;
# kept only so the module-level name still exists.
predy = tf.placeholder(tf.float32, shape=[None, 1])

# Per-layer containers: W/b are weights and biases, La the pre-activation
# (logit) tensors, L the sigmoid activations. The final elements are the
# output layer's tensors (used below as LItem / L[-1] / La).
W = []
b = []
La = []
L = []

# Hidden layers: sizeX -> networkSize[0] -> ... -> networkSize[-1].
# FIX: build the matmul once and reuse it inside the sigmoid — the original
# constructed each matmul twice (once for La*, once inside tf.nn.sigmoid),
# duplicating graph nodes. The simple for-loop replaces the original
# `while True` with its layderIndex-2 index arithmetic.
prevSize = sizeX
prevOut = x
for layerSize in networkSize:
    WItem = tf.Variable(tf.truncated_normal([prevSize, layerSize], stddev=0.1))
    bItem = tf.Variable(tf.zeros([layerSize]) + 0.1)
    LaItem = tf.matmul(prevOut, WItem) + bItem
    LItem = tf.nn.sigmoid(LaItem)
    W.append(WItem)
    b.append(bItem)
    La.append(LaItem)
    L.append(LItem)
    prevSize, prevOut = layerSize, LItem

# Final fully-connected layer producing a single sigmoid score per sample
WItem = tf.Variable(tf.truncated_normal([networkSize[-1], 1], stddev=0.1))
bItem = tf.Variable(tf.zeros([1]) + 0.1)
LaItem = tf.matmul(L[-1], WItem) + bItem
LItem = tf.nn.sigmoid(LaItem)
W.append(WItem)
b.append(bItem)
La.append(LaItem)
L.append(LItem)

# Mean-squared-error loss between the sigmoid score and the 0/1 label
Loss = tf.reduce_mean(tf.square(y - L[-1]))
train_step = tf.train.AdamOptimizer(1e-2).minimize(Loss)
# Hard 0/1 prediction: round the score and compare with 1
Lend = tf.cast(tf.equal(tf.round(L[-1]), 1), dtype=tf.float32)

# Per-epoch loss histories for the learning-curve plot
trainLossList = []
testLossList = []
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('深度学习网络加载完成，开始训练')
    print("网络结构：%s " % (networkSize))
    for epoch in range(epochMax + 1):
        # Mini-batch gradient descent; the batch window wraps around the
        # (resampled) training set as epoch grows.
        start = (epoch * batch_size) % len(trainX)
        end = min(start + batch_size, len(trainX))
        sess.run(train_step, feed_dict={x: trainX[start:end], y: trainY[start:end]})

        # Track full-set train/test losses for the learning curves below
        trainLoss = sess.run(Loss, feed_dict={x: trainX, y: trainY})
        trainLossList.append(trainLoss)
        validateLoss = sess.run(Loss, feed_dict={x: testX, y: testY})
        testLossList.append(validateLoss)
        if epoch % 100 == 0:
            print("正处于第 %s 代，训练误差：%s，测试误差：%s" % (epoch, trainLoss, validateLoss))

    endTime = datetime.datetime.now()  # end of the timed section
    print("正在生成结果报告")

    # Evaluation: continuous scores, hard 0/1 predictions, confusion matrix
    predYScore = sess.run(LItem, feed_dict={x: testX, y: testY})
    predY = sess.run(Lend, feed_dict={x: testX, y: testY})
    confusion_matric_Train = tf.confusion_matrix(labels=testY, predictions=predY.T[0])
    confusion_matric = sess.run(confusion_matric_Train)
    TP = confusion_matric[1][1]
    TN = confusion_matric[0][0]
    FN = confusion_matric[1][0]
    FP = confusion_matric[0][1]

    # Summary metrics
    accuracy = (TP + TN) / (TP + TN + FN + FP)
    Specitivity = TN / (TN + FP)
    Sensitivity = TP / (TP + FN)
    fpr, tpr, thresholds = roc_curve(testY, predYScore.T[0])
    AUC = auc(fpr, tpr)

    # ROC curve plot
    plt.plot(fpr, tpr, lw=1, label='ROC(area = %0.2f)' % AUC)
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.savefig("%s/ROC.png" % (file))
    plt.close('all')

    # Train/test loss curves.
    # BUG FIX: the title must be set BEFORE savefig(); the original called
    # plt.title() after plt.savefig(), so the saved image had no title.
    plt.plot(range(len(trainLossList)), trainLossList, label='Train Lost')
    plt.plot(range(len(testLossList)), testLossList, label='Test Lost')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend(('Train Lost', 'Test Lost'))
    plt.title("Loss Process: %s" % (networkSize))
    plt.savefig("%s/loss.png" % (file))
    plt.close('all')

    # Sankey diagram visualizing the confusion matrix flows
    nodes = [{"name": "Real Positive", "normal": {"color": "#4B0082"}},
             {"name": "Real Negative", "normal": {"color": "#0000FF"}},
             {"name": "Predicted Positive", "normal": {"color": "#FFA500"}},
             {"name": "Predicted Negative", "normal": {"color": "#8B4513"}}]
    links = [{"source": "Real Positive", "target": "Predicted Positive", "value": TP},
             {"source": "Real Positive", "target": "Predicted Negative", "value": FN},
             {"source": "Real Negative", "target": "Predicted Positive", "value": FP},
             {"source": "Real Negative", "target": "Predicted Negative", "value": TN},
             ]
    sankey = Sankey("桑基图示例", width=1200, height=600)
    sankey.add("sankey", nodes, links, line_opacity=0.2,
               line_curve=0.5, line_color='black',
               is_label_show=True, label_pos='right')
    sankey.render("%s/Sankey.html" % (file))

    # Plain-text summary report
    reportPath = "%s/SummaryReport.txt" % (file)
    with open(reportPath, "w") as f:
        f.write("confusionMatric:\n")
        f.write(str(confusion_matric))
        f.write("\nAccuracy : %s" % accuracy)
        f.write("\nSpecitivity : %s" % Specitivity)
        f.write("\nSensitivity : %s" % Sensitivity)
        f.write("\nAUC : %s" % AUC)
        f.write("\nTime Consuming: %s" % (endTime - startTime))

    print("报告已生成，请在%s文件夹下查看" % file)

    print("生成每层结果")
    # Dump every layer's pre-activation output for the test set to a CSV,
    # prefixed with a sample-id column.
    Layer_output = []
    Layer_output_Append = np.array(range(len(testX))).reshape((len(testX), 1))
    for layer in La:
        layerOutput = sess.run(layer, feed_dict={x: testX, y: testY})
        Layer_output.append(layerOutput)
        Layer_output_Append = np.hstack((Layer_output_Append, layerOutput))
    # BUG FIX: the first data column is the id, so there are
    # len(Layer_output_Append[0]) - 1 feature columns; the original header
    # had one "feature_*" cell too many.
    fileHeader = ['id']
    for i in range(len(Layer_output_Append[0]) - 1):
        fileHeader.append("feature_%s" % str(i))
    # Renamed from csvPath so the input-CSV config variable is not shadowed;
    # the `with` block guarantees the file is closed even if a write fails.
    layerCsvPath = "%s/LayerOutput.csv" % (file)
    with open(layerCsvPath, 'w') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(fileHeader)
        for i in range(len(Layer_output_Append)):
            writer.writerow(Layer_output_Append[i])
    print("每层结果生成完成")
    print("程序结束，用时%s" % (endTime - startTime))
