import tensorflow as tf

# Gradient-descent demo: minimize loss = (w + 1)^2 with respect to w.
# The analytic optimum is w = -1, where the loss is 0.

# Initialize the trainable parameter w to 5.
w = tf.Variable(tf.constant(5, dtype=tf.float32))
lr = 0.2     # learning rate; try 0.01 or 0.001 to observe slower convergence
EPOCHS = 40  # total training iterations (renamed so the loop variable no longer shadows it)

for epoch in range(EPOCHS):  # outer loop: one pass per epoch
    # GradientTape records the ops executed inside the `with` block
    # so they can be differentiated afterwards.
    with tf.GradientTape() as tape:
        loss = tf.square(w + 1)
    # .gradient(target, source): d(loss)/d(w) = 2 * (w + 1)
    grads = tape.gradient(loss, w)
    # In-place update: w -= lr * grads
    w.assign_sub(lr * grads)
    print("After %s epoch,w is %f,loss is %f" % (epoch, w.numpy(), loss))
# With lr = 0.2 this converges quickly; smaller learning rates converge more slowly.

# Goal: find the parameter w = -1 that minimizes the loss.
