import tensorflow as tf
import scipy.io as scio
import numpy as np
from sklearn.model_selection import train_test_split


# Load a MATLAB .mat file
def readMat(matPath):
    """Load the MATLAB file at *matPath* and return its variables as a dict."""
    contents = scio.loadmat(matPath)
    return contents

# Hyper-parameter settings
networkSize = [1000,500,10]  # hidden-layer widths, first to last
sizeX=10  # number of input features per sample
batch_size = 1000  # mini-batch size (only used to derive n_batch below)
epochMax = 2000  # number of training epochs

# Load the dataset from disk.
matPath = '../dataSet20180403-K10.mat'
dataSet = readMat(matPath)
print('数据集读取完成')

# Unpack the train/test splits from the loaded dict into ndarrays.
test, testX, testY = (np.array(dataSet[key]) for key in ('test', 'testX', 'testY'))
train, trainX, trainY = (np.array(dataSet[key]) for key in ('train', 'trainX', 'trainY'))
print('数据集读取完成')

# Number of mini-batches per epoch (integer division: a trailing partial
# batch is dropped).
n_batch = len(trainX) // batch_size

# NOTE(review): the placeholders formerly declared here were dead code --
# x (declared with 651 features) and y were immediately re-declared below
# with different shapes, and predy was never referenced -- so they have
# been removed.

# Network construction: per-layer parameter/activation collections.
W = []  # weight Variables, one entry per layer
b = []  # bias Variables, one entry per layer
L = []  # layer output tensors, one entry per layer

# Input/label placeholders: x holds sizeX features per sample (was a
# hard-coded 10, which duplicated sizeX and would silently break if
# sizeX changed); y holds one regression target per sample.
x = tf.placeholder(tf.float32, [None, sizeX])
y = tf.placeholder(tf.float32, [None, 1])

# First hidden layer: sizeX inputs -> networkSize[0] units, tanh activation.
firstW = tf.Variable(tf.truncated_normal([sizeX, networkSize[0]], stddev=0.1))
firstB = tf.Variable(tf.zeros([networkSize[0]]) + 0.1)
firstOut = tf.nn.tanh(tf.matmul(x, firstW) + firstB)
# Record the layer so later layers can chain off L[-1].
W.append(firstW)
b.append(firstB)
L.append(firstOut)

# Hidden layers 2..len(networkSize): fully connected with ReLU, each one
# chained off the previous layer's output L[-1].
#
# layderIndex is a 1-based position in networkSize and is read again when
# the output layer is built below, so it must be defined unconditionally.
# (The original guarded this loop with `if len(networkSize) > 1`, leaving
# layderIndex undefined -- a NameError -- for a single-layer network.)
layderIndex = 2
while layderIndex <= len(networkSize):
    WItem = tf.Variable(
        tf.truncated_normal([networkSize[layderIndex - 2], networkSize[layderIndex - 1]], stddev=0.1))
    bItem = tf.Variable(tf.zeros([networkSize[layderIndex - 1]]) + 0.1)
    LItem = tf.nn.relu(tf.matmul(L[-1], WItem) + bItem)
    W.append(WItem)
    b.append(bItem)
    L.append(LItem)
    layderIndex = layderIndex + 1

# Output layer: a single fully connected unit on top of the last hidden
# layer. networkSize[-1] is the last hidden width -- equivalent to the
# original networkSize[layderIndex - 2] but independent of the loop
# counter above, so this line no longer breaks if the loop is refactored.
WItem = tf.Variable(tf.truncated_normal([networkSize[-1], 1], stddev=0.1))
bItem = tf.Variable(tf.zeros([1]) + 0.1)
# NOTE(review): ReLU on a regression output clamps predictions to >= 0 --
# confirm the targets are non-negative, otherwise drop the activation.
LItem = tf.nn.relu(tf.matmul(L[-1], WItem) + bItem)
W.append(WItem)
b.append(bItem)
L.append(LItem)

# Mean-squared-error loss between labels and the network output.
Loss = tf.reduce_mean(tf.square(y - L[-1]))
train_step = tf.train.AdamOptimizer(1e-2).minimize(Loss)
# Binarised prediction: 1.0 where the rounded output equals 1, else 0.0.
Lend = tf.cast(tf.equal(tf.round(L[-1]), 1), dtype = tf.float32)

# Training session: full-batch gradient descent for epochMax + 1 epochs.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('深度学习网络加载完成，开始训练')
    print("网络结构：%s " % (networkSize))
    epoch = 0
    while epoch <= epochMax:
        # One full-batch update, then evaluate the loss and the network
        # output on the post-update parameters (same call order as before:
        # three separate sess.run calls).
        sess.run(train_step, feed_dict={x: trainX, y: trainY})
        loss = sess.run(Loss, feed_dict={x: trainX, y: trainY})
        LendItem = sess.run(LItem, feed_dict={x: trainX, y: trainY})
        print(loss, LendItem)
        epoch += 1
