import numpy as np

# Linear regression model: y_hat = X · w + b.
def linear_regression_model(X, weights, bias):
    """Return predictions of the linear model for feature matrix X."""
    return X @ weights + bias

# Mean-squared-error loss.
def mean_squared_error(y_true, y_pred):
    """Return the mean of the squared residuals between y_true and y_pred."""
    return np.mean(np.square(y_true - y_pred))

# Batch gradient-descent optimizer for linear regression.
def gradient_descent(X, y, weights, bias, learning_rate, iterations):
    """Fit linear-regression parameters by batch gradient descent on MSE.

    Parameters
    ----------
    X : ndarray of shape (m, n)
        Feature matrix.
    y : ndarray of shape (m,)
        Target values.
    weights : ndarray of shape (n,)
        Initial weight vector; mutated in place via ``-=``.
    bias : float
        Initial bias term.
    learning_rate : float
        Step size for each parameter update.
    iterations : int
        Number of gradient steps to take.

    Returns
    -------
    tuple
        The trained ``(weights, bias)``.
    """
    m = X.shape[0]  # number of samples

    for i in range(iterations):
        # Forward pass: predictions with the current parameters.
        y_pred = linear_regression_model(X, weights, bias)

        # Gradients of the MSE loss w.r.t. weights and bias.
        dw = (2 / m) * np.dot(X.T, (y_pred - y))
        db = (2 / m) * np.sum(y_pred - y)

        # Bug fix: removed the stray per-iteration debug `print(dw, db)`,
        # which spammed stdout on every step; the intended monitoring is
        # the periodic loss print below.

        # Update weights and bias (note: `weights -= ...` mutates the
        # caller's array in place).
        weights -= learning_rate * dw
        bias -= learning_rate * db

        # Print the loss periodically to monitor training progress.
        if i % 100 == 0:
            loss = mean_squared_error(y, y_pred)
            print(f"Iteration {i}: Loss: {loss}")

    return weights, bias

# Prepare the data.
# X is the feature matrix, y is the target variable.
X = np.array([[1, 2], [2, 3], [3, 4], [4, 5]])
y = np.array([3, 5, 6, 7])

# Prepend a column of ones so the first weight acts as an intercept.
# NOTE(review): combined with the separate `bias` parameter below, the model
# carries two intercept terms. That is mathematically harmless (their sum is
# learned), but one of the two could be dropped.
X = np.hstack([np.ones((X.shape[0], 1)), X])

# Initialize weights and bias.
weights = np.zeros(X.shape[1])
bias = 0.0

# Gradient-descent hyperparameters.
learning_rate = 0.01
# Bug fix: `iterations = 1` was a debugging leftover — a single gradient step
# cannot train the model, so the printed "trained" parameters were meaningless.
iterations = 1000

# Train the model.
weights, bias = gradient_descent(X, y, weights, bias, learning_rate, iterations)

print(f"Trained weights: {weights}")
print(f"Trained bias: {bias}")