import tensorflow as tf

# Input / output dimensionality of the network.
INPUT_NODE=3
OUTPUT_NODE=2

# Number of units in each of the two hidden layers.
LAYER1_NODE=100
LAYER2_NODE=100

# Base learning rate, its decay factor, L2 regularization coefficient,
# number of training steps, and the moving-average decay rate.
LEARNING_RATE_BASE=0.5
LEARNING_RATE_DECAY=0.99
REGULARIZATION_RATE=0.0001
TRAINING_STEPS=20000
MOVING_AVERAGE_DECAY=0.99

# Activation function used by the hidden layers.
# NOTE(review): despite its name this applies ReLU, not the logistic
# sigmoid (the sigmoid implementation is the commented-out history).
# The name is kept so existing call sites keep working.
def sigmoid(x):
  """Apply the ReLU activation to tensor *x* (name kept for compatibility)."""
  activated = tf.nn.relu(x)
  return activated

# Target function used to label the synthetic data.
def func(x):
  """Map a length-3 input row to its length-2 target: [x0 + x1, x2]."""
  first, second, third = x[0], x[1], x[2]
  return [first + second, third]

# Input placeholders; None in the batch dimension allows any batch size.
x = tf.placeholder(tf.float32, shape=[None, INPUT_NODE], name="x-input")
y_ = tf.placeholder(tf.float32, shape=[None, OUTPUT_NODE], name="y-input")

# Weights and biases for each layer. Weights are randomly initialized from a
# truncated normal; shapes must chain so each layer's output feeds the next.
w1 = tf.Variable(tf.truncated_normal(shape=[INPUT_NODE, LAYER1_NODE], stddev=0.1))
b1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))

w2 = tf.Variable(tf.truncated_normal(shape=[LAYER1_NODE, LAYER2_NODE], stddev=0.1))
b2 = tf.Variable(tf.constant(0.1, shape=[LAYER2_NODE]))

w3 = tf.Variable(tf.truncated_normal(shape=[LAYER2_NODE, OUTPUT_NODE], stddev=0.1))
b3 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))

# Forward pass.
# First hidden layer: input times weights plus bias, through the activation.
layer1 = sigmoid(tf.matmul(x, w1) + b1)
# Second hidden layer: previous output times weights plus bias, activated.
layer2 = sigmoid(tf.matmul(layer1, w2) + b2)
# Output layer: linear (no activation) — logits fed to the softmax loss below.
y = tf.matmul(layer2, w3) + b3

# Backward-pass bookkeeping.
# global_step counts applied gradient updates; trainable=False keeps it out
# of the optimizer's variable list and out of the moving averages.
global_step = tf.Variable(0, trainable=False)

# Exponential moving average over the trainable variables; passing
# global_step lets TF use a smaller effective decay early in training.
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
# Apply the average to every trainable variable (i.e. everything not
# explicitly marked trainable=False).
variable_averages_op = variable_averages.apply(tf.trainable_variables())

# Forward pass rebuilt with the shadow (averaged) weights, mirroring
# layer1 / layer2 / y above.
layer1_1 = sigmoid(tf.matmul(x, variable_averages.average(w1)) + variable_averages.average(b1))
layer2_1 = sigmoid(tf.matmul(layer1_1, variable_averages.average(w2)) + variable_averages.average(b2))
# BUG FIX: previously the bias was added to w3 *inside* tf.matmul
# (matmul(layer2_1, avg(w3) + avg(b3))), broadcasting b3 into the weight
# matrix. The bias must be added after the matrix product, as in `y` above.
average_y = tf.matmul(layer2_1, variable_averages.average(w3)) + variable_averages.average(b3)

# Loss: softmax cross-entropy against the index of the largest target
# component — tf.argmax(y_, 1) collapses each target row to a class id.
# NOTE(review): func() produces continuous regression targets, not one-hot
# labels, so treating argmax(y_) as a class label is questionable — confirm
# this is the intended training objective.
# (tf.argmax replaces the deprecated tf.arg_max alias.)
cross_entropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels=tf.argmax(y_,1))
# Mean cross-entropy over the batch.
cross_entropy_mean=tf.reduce_mean(cross_entropy)

# L2 regularization on the weight matrices only (biases are not regularized),
# added to the cross-entropy to form the total loss.
regularizer=tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
regularization=regularizer(w1)+regularizer(w2)+regularizer(w3)
loss=cross_entropy_mean+regularization

# Learning rate decayed exponentially as global_step grows (decay period 900).
learning_rate=tf.train.exponential_decay(LEARNING_RATE_BASE,global_step,900,LEARNING_RATE_DECAY)
# Plain SGD; passing global_step makes minimize() increment it per update.
train_step=tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
# Group the gradient step with the moving-average update so one run does both.
train_op=tf.group(train_step,variable_averages_op)

# Accuracy: fraction of rows where the averaged model's argmax matches the
# target's argmax.
correct_prediction=tf.equal(tf.argmax(average_y,1),tf.argmax(y_,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

# Synthetic data holders: inputs are drawn from a standard normal; the *_y
# variables are placeholders-by-variable that get overwritten (tf.assign)
# with func()-generated labels inside the session below.
DATA_TRAIN_COUNT = 10
data_train_x = tf.Variable(tf.random_normal([DATA_TRAIN_COUNT, INPUT_NODE], stddev=1))
data_train_y = tf.Variable(tf.random_normal([DATA_TRAIN_COUNT, OUTPUT_NODE], stddev=1))  

# Test-set counterparts of the variables above.
DATA_TEST_COUNT = 10
data_test_x = tf.Variable(tf.random_normal([DATA_TEST_COUNT, INPUT_NODE], stddev=1))
data_test_y = tf.Variable(tf.random_normal([DATA_TEST_COUNT, OUTPUT_NODE], stddev=1))


# Helper nodes to fetch ONE scalar element of a data matrix per session run:
# B/D hold an [row, col] index pair, C/E gather that element.
B = tf.placeholder(dtype=tf.int32, shape=[2])
C = tf.gather_nd(data_train_x, B)

D = tf.placeholder(dtype=tf.int32, shape=[2])
E = tf.gather_nd(data_test_x, D)

with tf.Session() as sess:
  # Initialize all graph variables (weights, data matrices, global_step).
  tf.global_variables_initializer().run()

  # ---- Generate training data ----
  # PERF FIX: fetch the whole random input matrix with ONE sess.run instead
  # of one gather_nd session run per scalar element (the original issued
  # DATA_TRAIN_COUNT * INPUT_NODE separate sess.run calls).
  train_x_arr = sess.run(data_train_x)
  print(train_x_arr)
  train_x = [list(row) for row in train_x_arr]
  train_y = [func(row) for row in train_x]
  print(train_x)
  print(train_y)
  # Store the generated labels back into the graph variable (only consulted
  # by the debug prints below; training feeds the Python lists directly).
  sess.run(tf.assign(data_train_y, train_y))

  print(sess.run(data_train_x))
  print(sess.run(data_train_y))

  # ---- Generate test data (same single-fetch approach) ----
  test_x_arr = sess.run(data_test_x)
  print(test_x_arr)
  test_x = [list(row) for row in test_x_arr]
  test_y = [func(row) for row in test_x]
  print(test_x)
  print(test_y)
  sess.run(tf.assign(data_test_y, test_y))

  print(sess.run(data_test_x))
  print(sess.run(data_test_y))

  # Feed dicts: the same fixed batch is used for every training step, and a
  # held-out batch for the final accuracy check.
  validate_feed = {x: train_x, y_: train_y}
  test_feed = {x: test_x, y_: test_y}

  for i in range(TRAINING_STEPS):
    # Report validation accuracy (using the moving-average model) every
    # 1000 steps.
    if i % 1000 == 0:
      validate_acc = sess.run(accuracy, feed_dict=validate_feed)
      print("After %d training steps, validation accuracy using average model is %g" %(i, validate_acc))
    # One SGD step + moving-average update on the fixed training batch.
    sess.run(train_op, feed_dict=validate_feed)
  # Final accuracy on the held-out test batch.
  test_acc = sess.run(accuracy, feed_dict=test_feed)
  print("After %d training steps, test accuracy using average model is %g" %(TRAINING_STEPS, test_acc))
