import numpy as np

# Toy dataset drawn from the line y ≈ 2x + 1.
X = np.array([[1], [2], [3], [4]], dtype=np.float32)       # inputs, shape (4, 1)
y_true = np.array([[3], [5], [7], [9]], dtype=np.float32)  # targets, shape (4, 1)

# Parameter initialisation (seeded for reproducibility).
np.random.seed(42)
W = np.random.randn(1, 1)  # weight matrix, shape (1, 1)
b = np.zeros(1)            # bias, shape (1,)

# Hyperparameters.
learning_rate = 0.01
epochs = 1000

# Full-batch gradient descent on the MSE loss.
for epoch in range(epochs):
    # Forward pass: (4,1) = (4,1) @ (1,1) + (1,)
    y_pred = X @ W + b

    # Mean squared error over the batch.
    loss = ((y_pred - y_true) ** 2).mean()

    # Backward pass: analytic gradients of the MSE.
    grad_y_pred = 2 * (y_pred - y_true) / X.shape[0]  # dL/dŷ, shape (4, 1)
    grad_W = X.T @ grad_y_pred                        # dL/dW, shape (1, 1)
    grad_b = grad_y_pred.sum()                        # dL/db, scalar

    # Gradient step.
    W -= learning_rate * grad_W
    b -= learning_rate * grad_b

    # Progress report every 100 epochs.
    if not epoch % 100:
        print(f"Epoch {epoch}, Loss: {loss:.4f}, W: {W[0][0]:.4f}, b: {b[0]:.4f}")

# Summary of the fitted line.
print("\n训练结果:")
print(f"真实函数: y = 2x + 1")
print(f"学习到的函数: y = {W[0][0]:.4f}x + {b[0]:.4f}")
print(f"最终损失: {loss:.4f}")

# Predictions on unseen inputs.
x_test = np.array([[5], [6]])
y_test_pred = x_test @ W + b
print("\n测试预测:")
for x, y in zip(x_test, y_test_pred):
    print(f"x={x[0]}, 预测y={y[0]:.4f}")