import tensorflow as tf
# NaN ("not a number") shows up here when missing values are filled in or when
# the learning rate is too large and the computation overflows float32 range.
# This script also plots the cost-function curve over training iterations.
import matplotlib.pyplot as plt

#
# Test input: predict the target value for these three exam scores.
x_test = [[53., 92., 88.]]

# Training data: three exam scores per sample (features).
x_data = [[73., 80., 75.],
          [93., 88., 93.],
          [89., 91., 90.],
          [96., 98., 100.],
          [73., 66., 70.]]
# Target: the final score for each sample.
y_data = [[152.],
          [185.],
          [180.],
          [196.],
          [142.]]

# NOTE(review): this script uses the TF1 graph API (placeholder / Session /
# GradientDescentOptimizer), whose top-level names were removed in
# TensorFlow 2.  tf.compat.v1 exposes the same symbols on both TF 1.13+ and
# TF 2.x, so route every v1 call through it for forward compatibility.
tf1 = tf.compat.v1
if hasattr(tf1, "disable_eager_execution"):
    tf1.disable_eager_execution()  # graph mode is required under TF 2.x

# Hyperparameters.  The learning rate must be tiny because the inputs are
# not normalized; a larger rate overflows float32 and the cost becomes NaN.
LEARNING_RATE = 1e-7
TRAIN_STEPS = 2001
LOG_EVERY = 100

# Placeholders: any number of rows, 3 features in, 1 target out.
x = tf1.placeholder(dtype=tf.float32, shape=[None, 3])
y = tf1.placeholder(dtype=tf.float32, shape=[None, 1])
# Trainable parameters, randomly initialized.
w = tf.Variable(tf1.random_normal([3, 1]))
b = tf.Variable(tf1.random_normal([1]))
# Linear model prediction: y_ = x @ w + b.
y_ = tf.matmul(x, w) + b
# Mean-squared-error cost.
cost = tf1.reduce_mean(tf1.square(y - y_))
# Plain gradient descent on the cost; the learning rate is the key knob.
train = tf1.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cost)

cost_history = []  # cost value at every step, for the learning curve
i_history = []     # matching step indices
with tf1.Session() as sess:
    # Initialize all variables before training.
    sess.run(tf1.global_variables_initializer())
    for i in range(TRAIN_STEPS):
        _, w_val, b_val, cost_val = sess.run(
            [train, w, b, cost], feed_dict={x: x_data, y: y_data})
        # Record cost vs. iteration so the curve can be plotted afterwards.
        cost_history.append(cost_val)
        i_history.append(i)
        if i % LOG_EVERY == 0:
            print(i, w_val, b_val, cost_val)
    # Predict y for the test input using the trained parameters.
    print(sess.run(y_, feed_dict={x: x_test}))
plt.plot(i_history, cost_history)
plt.show()

