# 专门用于模型调参
import tensorflow as tf
import scipy.io as scio
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score, roc_curve, auc
from sklearn.model_selection import train_test_split
import scipy.io as scio
import matplotlib.pyplot as plt
import os
import datetime
import csv


# 读取mat文件
def readMat(matPath):
    """Load a MATLAB .mat file and return its variable dict (scipy.io.loadmat)."""
    mat_contents = scio.loadmat(matPath)
    return mat_contents


# 新建文件夹
def mkdir(path):
    """Ensure *path* exists as a directory, creating parents as needed.

    os.makedirs(..., exist_ok=True) replaces the original
    check-then-create branch: it is a no-op when the directory already
    exists and avoids the race between the exists() check and the
    creation.  Returns None, like the original.
    """
    os.makedirs(path, exist_ok=True)


# 重采样
def reSample(trainX, trainY, randProb):
    """Random undersampling of the negative class.

    Every positive sample (label >= 0.5) is always kept; a negative
    sample survives only with probability *randProb*.  Returns the
    filtered (features, labels) lists.
    """
    keptX = []
    keptY = []
    for features, label in zip(trainX, trainY):
        # Short-circuit keeps the RNG draw count identical to the
        # original loop: rand() is consumed only for negative samples.
        if label >= 0.5 or np.random.rand() < randProb:
            keptX.append(features)
            keptY.append(label)
    return keptX, keptY


# Hyper-parameter settings:
KFoldNum = 10  # number of cross-validation folds
matPath = '../dataSet20180417.mat'  # path of the .mat file holding the labels
csvPath = 'TFIDF(without header).csv' # path of the CSV file holding the TF-IDF features
networkSize = [10, 10]  # MLP layer widths, one entry per hidden layer
batch_size = 500  # mini-batch size for gradient descent
epochMax = 200  # number of training epochs per fold
resampleProb = 0.25  # keep-probability for negative samples during undersampling

# Create the folder that will hold this run's training reports.
startTime = datetime.datetime.now()
# Derive the folder timestamp from startTime instead of a second
# datetime.now() call, so the folder name matches the recorded start
# time exactly.
nowTime = startTime.strftime('%Y%m%d_%H%M%S')
file = "result/CV" + nowTime + '_' + str(KFoldNum) + '_' + str(networkSize)
print("创建文件夹%s" % file)
mkdir(file)

# Load the data set: labels from the .mat file, features from the CSV.
dataSet = readMat(matPath)
labels = np.array(dataSet['labels'])
# Read every CSV row as one feature vector.  The with-block guarantees
# the file handle is closed; the original csv.reader(open(...)) left it
# dangling.
with open(csvPath) as csvFile:
    chara = list(csv.reader(csvFile))
print("数据集读取完成")

# Split off 20 % of the samples as the held-out test set; the fixed
# random_state keeps the split reproducible across runs.
trainX, testX, trainY, testY = train_test_split(chara, labels, test_size=0.2, random_state=42)
sizeX=len(trainX[0])  # number of input features per sample
print('数据集划分完成')

# Undersample the negative class of the training split (see reSample).
trainX, trainY = reSample(trainX, trainY, resampleProb)

# Number of whole mini-batches the (resampled) training set contains.
n_batch = len(trainX) // batch_size

# K-fold cross-validation splitter over the (resampled) training data.
kf = KFold(n_splits=KFoldNum)

# Per-fold result accumulators, filled inside the training loop below.
validateStep = 0  # 1-based index of the current fold
confusion_matric = []  # one 2x2 confusion matrix per fold
accuracy = []  # (TP+TN)/total per fold
Specitivity = []  # true-negative rate per fold
Sensitivity = []  # true-positive rate (recall) per fold
AUC = []  # ROC area under curve per fold
# Training loop: one freshly built sigmoid MLP per cross-validation fold.
for trainIndex, validateIndex in kf.split(trainX):
    # Start every fold from an empty graph.  The original kept stacking
    # new variables into the one default graph, so graph size (and
    # memory) grew with every fold.
    tf.reset_default_graph()

    # ---- fold data ------------------------------------------------------
    validateStep = validateStep + 1
    print("开始进行第 %s 次交叉验证" % validateStep)
    txdTemp = np.array(trainX)[trainIndex]
    tydTemp = np.array(trainY)[trainIndex]
    vxdTemp = np.array(trainX)[validateIndex]
    vydTemp = np.array(trainY)[validateIndex]

    # ---- graph inputs ---------------------------------------------------
    # x: feature batch, y: 0/1 target column.  (The original also built an
    # unused `predy` placeholder; it has been dropped.)
    x = tf.placeholder(tf.float32, shape=[None, sizeX])
    y = tf.placeholder(tf.float32, shape=[None, 1])

    # ---- network construction -------------------------------------------
    # Parallel lists, one entry per layer: weights, biases,
    # pre-activations (La) and sigmoid activations (L).
    W = []
    b = []
    La = []
    L = []

    # Input -> first hidden layer.
    W1 = tf.Variable(tf.truncated_normal([sizeX, networkSize[0]], stddev=0.1))
    b1 = tf.Variable(tf.zeros([networkSize[0]]) + 0.1)
    La1 = tf.matmul(x, W1) + b1
    L1 = tf.nn.sigmoid(La1)  # reuse La1 instead of rebuilding the matmul op
    W.append(W1)
    b.append(b1)
    La.append(La1)
    L.append(L1)

    # Remaining hidden layers (no-op when networkSize has a single entry).
    for layerIdx in range(1, len(networkSize)):
        WItem = tf.Variable(
            tf.truncated_normal([networkSize[layerIdx - 1], networkSize[layerIdx]], stddev=0.1))
        bItem = tf.Variable(tf.zeros([networkSize[layerIdx]]) + 0.1)
        LItemDa = tf.matmul(L[-1], WItem) + bItem
        LItem = tf.nn.sigmoid(LItemDa)
        W.append(WItem)
        b.append(bItem)
        La.append(LItemDa)
        L.append(LItem)

    # Output layer: a single sigmoid unit.  Using networkSize[-1] fixes a
    # NameError in the original, which read networkSize[layderIndex - 2]
    # although layderIndex was only defined when len(networkSize) > 1.
    WItem = tf.Variable(tf.truncated_normal([networkSize[-1], 1], stddev=0.1))
    bItem = tf.Variable(tf.zeros([1]) + 0.1)
    LaItem = tf.matmul(L[-1], WItem) + bItem
    LItem = tf.nn.sigmoid(LaItem)
    W.append(WItem)
    b.append(bItem)
    La.append(LaItem)
    L.append(LItem)

    # Mean-squared-error loss on the sigmoid output, Adam optimiser, and a
    # 0/1 prediction tensor obtained by rounding the network output.
    Loss = tf.reduce_mean(tf.square(y - L[-1]))
    train_step = tf.train.AdamOptimizer(1e-2).minimize(Loss)
    Lend = tf.cast(tf.equal(tf.round(L[-1]), 1), dtype=tf.float32)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print('深度学习网络加载完成，开始训练')
        print("网络结构：%s " % (networkSize))
        for epoch in range(epochMax + 1):
            # One mini-batch per epoch; the start index wraps around the
            # training set as epochs advance (original scheme, kept).
            start = (epoch * batch_size) % len(txdTemp)
            end = min(start + batch_size, len(txdTemp))
            sess.run(train_step, feed_dict={x: txdTemp[start:end], y: tydTemp[start:end]})

            # Report full-set losses every 10 epochs.
            if epoch % 10 == 0:
                trainLoss = sess.run(Loss, feed_dict={x: txdTemp, y: tydTemp})
                validateLoss = sess.run(Loss, feed_dict={x: vxdTemp, y: vydTemp})
                print("交叉验证第 %s 次，正处于第 %s 代，训练误差：%s，测试误差：%s" % (validateStep, epoch, trainLoss, validateLoss))

        print("交叉验证第 %s 次训练完成，正在生成结果报告" % validateStep)
        # ---- fold evaluation --------------------------------------------
        predYScore = sess.run(LItem, feed_dict={x: vxdTemp, y: vydTemp})  # raw sigmoid scores
        predY = sess.run(Lend, feed_dict={x: vxdTemp, y: vydTemp})  # rounded 0/1 labels
        confusion_matric_Train = tf.confusion_matrix(labels=vydTemp, predictions=predY.T[0])
        confusion_matric.append(sess.run(confusion_matric_Train))
        TP = confusion_matric[validateStep - 1][1][1]
        TN = confusion_matric[validateStep - 1][0][0]
        FN = confusion_matric[validateStep - 1][1][0]
        FP = confusion_matric[validateStep - 1][0][1]

        # Derived per-fold metrics.
        accuracy.append((TP + TN) / (TP + TN + FN + FP))
        Specitivity.append(TN / (TN + FP))
        Sensitivity.append(TP / (TP + FN))
        fpr, tpr, thresholds = roc_curve(vydTemp, predYScore.T[0])
        AUC.append(auc(fpr, tpr))

        # ROC curve.  NOTE(review): the pyplot figure is never cleared, so
        # each saved PNG also contains the curves of all previous folds —
        # confirm whether that accumulation is intentional.
        plt.plot(fpr, tpr, lw=1, label='ROC(area = %0.2f)' % AUC[-1])
        plt.xlim([-0.05, 1.05])
        plt.ylim([-0.05, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.savefig("%s/validation%sROC.png" % (file, validateStep))

        # Per-fold text report.
        reportPath = "%s/validation%sReport.txt" % (file, validateStep)

        with open(reportPath, "w") as f:
            f.write("confusionMatric:\n")
            f.write(str(confusion_matric[-1]))
            f.write("\nAccuracy : %s" % accuracy[-1])
            f.write("\nSpecitivity : %s" % Specitivity[-1])
            f.write("\nSensitivity : %s" % Sensitivity[-1])
            f.write("\nAUC : %s" % AUC[-1])

        print("报告已生成，请在%s文件夹下查看" % file)

        # Pre-activation output of every layer on the held-out test set.
        layerOutput = []
        for LaItem in La:
            layerOutput.append(sess.run(LaItem, feed_dict={x: testX, y: testY}))

endTime = datetime.datetime.now()
# Summary report: average of every per-fold metric plus total runtime.
reportPath = "%s/validationReportAll.txt" % (file)
accuracyAvg = np.mean(accuracy)
SpecitivityAvg = np.mean(Specitivity)
SensitivityAvg = np.mean(Sensitivity)
AUCAvg = np.mean(AUC)
summaryLines = [
    "\nAccuracy : AVG: %s, %s" % (accuracyAvg, accuracy),
    "\nSpecitivity : AVG: %s, %s" % (SpecitivityAvg, Specitivity),
    "\nSensitivity : AVG: %s, %s" % (SensitivityAvg, Sensitivity),
    "\nAUC : AVG: %s, %s" % (AUCAvg, AUC),
    "\nTime Consuming: %s" % (endTime - startTime),
]
with open(reportPath, "w") as f:
    f.writelines(summaryLines)

print("程序结束，用时%s" % (endTime - startTime))
