import tensorflow as tf

# Training data for a simple linear regression (y ≈ W*x + b).
# The data is fit exactly by W = -1, b = 1.
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]

# Model parameters, initialized away from the optimum so training has work to do.
W = tf.Variable([0.3], dtype=tf.float32)
b = tf.Variable([-0.3], dtype=tf.float32)

# 损失函数
def loss_fn(x, y):
    """Return the sum-of-squares error of the linear model against targets.

    Predictions are ``W * x + b`` using the module-level variables ``W``
    and ``b``; the loss is the sum of squared residuals over the batch.
    """
    residual = (W * x + b) - y
    return tf.reduce_sum(tf.square(residual))

# Optimizer: plain stochastic gradient descent.
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)

# Training: 1000 full-batch gradient-descent steps.
for _ in range(1000):  # step index itself is unused
    # Record the forward pass so gradients can be computed.
    with tf.GradientTape() as tape:
        loss = loss_fn(x_train, y_train)
    # d(loss)/dW and d(loss)/db, in the same order as the variable list.
    grads = tape.gradient(loss, [W, b])
    optimizer.apply_gradients(zip(grads, [W, b]))

# Report the learned parameters (they should approach W = -1, b = 1).
print(f"W = {W.numpy()}, b = {b.numpy()}")
