import tensorflow as tf
import numpy as np
# Topic of this section: learning-rate scheduling (exponential decay).

# Trainable scalar weight, initialised to 5.0.
w = tf.Variable(tf.constant(5, dtype=tf.float32))

# Loss to minimise: (w + 1)^2, whose minimum lies at w = -1.
loss = tf.square(w + 1)

# Step counter; trainable=False keeps it out of gradient updates —
# only the optimizer increments it (via minimize(..., global_step=...)).
global_step = tf.Variable(0, trainable=False)

LEARNING_RATE_BASE = 0.1    # initial learning rate
LEARNING_RATE_DECAY = 0.99  # multiplicative decay factor
LEARNING_RATE_STEP = 1      # decay the rate once every this many steps

# Exponentially decaying learning rate:
#   lr = BASE * DECAY ** (global_step / STEP)
# staircase=True truncates the exponent to an integer, so the rate
# drops in discrete jumps instead of following a smooth curve.
learning_rate = tf.train.exponential_decay(
    LEARNING_RATE_BASE,
    global_step,
    LEARNING_RATE_STEP,
    LEARNING_RATE_DECAY,
    staircase=True,
)

# One SGD update per run; passing global_step makes the optimizer
# advance the counter automatically after each step.
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
    loss, global_step=global_step)

with tf.Session() as sess:
    # Initialise all graph variables before training.
    sess.run(tf.global_variables_initializer())
    for i in range(40):
        sess.run(train_step)
        # Fetch everything in a single run() call: one graph execution
        # instead of four, and all values reflect the same state.
        global_step_val, learning_rate_val, w_val, total_loss = sess.run(
            [global_step, learning_rate, w, loss])
        # Report global_step (the number of updates actually applied)
        # instead of the 0-based loop index, which under-counted by one;
        # this also uses the previously fetched-but-unused counter value.
        print("训练 %s 步后，权重 %f 学习率 %f 损失 %f" % (global_step_val, w_val, learning_rate_val, total_loss))
