import numpy as np

# Parameter initialization (seeded for reproducible runs)
np.random.seed(0)
w = np.random.randn(1)  # weight: single draw from the standard normal
b = np.array([0.0])  # bias starts at zero


def linear_regression(X, w, b):
    """Return the linear model prediction ``X @ w + b`` for inputs X."""
    return X @ w + b


def compute_loss(y_true, y_pred):
    """Return the mean squared error between targets and predictions."""
    residual = y_true - y_pred
    return np.mean(residual ** 2)


def gradient_descent(X, y_true, w, b, learning_rate=0.01, num_epochs=100):
    """Fit a linear model by full-batch gradient descent on MSE loss.

    Args:
        X: input matrix of shape (m, n_features).
        y_true: target vector of shape (m,).
        w: initial weights, shape (n_features,). Not modified.
        b: initial bias, shape (1,). Not modified.
        learning_rate: step size for each update.
        num_epochs: number of full passes over the data.

    Returns:
        (w, b): new arrays holding the fitted parameters.

    Prints the MSE loss every 10 epochs.
    """
    # Copy the parameters: the in-place `-=` updates below would otherwise
    # mutate the caller's arrays (a side effect callers do not expect).
    w = np.array(w, dtype=float)
    b = np.array(b, dtype=float)

    m = len(y_true)  # number of samples
    for epoch in range(num_epochs):
        # Forward pass and current loss
        y_pred = linear_regression(X, w, b)
        loss = compute_loss(y_true, y_pred)

        # Gradients of the MSE loss w.r.t. w and b
        residual = y_true - y_pred
        dw = (-2 / m) * np.dot(X.T, residual)
        db = (-2 / m) * np.sum(residual)

        # Parameter update (safe: w and b are local copies)
        w -= learning_rate * dw
        b -= learning_rate * db

        # Report progress every 10 epochs
        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss:.4f}')

    return w, b


# Training data: y = 2x + 1 exactly, for x in 1..5
X = np.arange(1.0, 6.0).reshape(-1, 1)
y_true = 2.0 * np.arange(1.0, 6.0) + 1.0

# Hyperparameters
learning_rate = 0.01
num_epochs = 100

# Fit the model with gradient descent
w, b = gradient_descent(X, y_true, w, b, learning_rate, num_epochs)

print(f'Final parameters: w = {w[0]:.4f}, b = {b[0]:.4f}')
