import numpy as np
import matplotlib.pyplot as plt

# Demo data: 100 points with x uniform on [0, 2) and y = 4 + 3x + N(0, 1) noise.
# Seeded so the run is reproducible; rand is drawn before randn, matching the
# generator stream exactly.
np.random.seed(0)
X = np.random.rand(100, 1) * 2
y = 4 + X * 3 + np.random.randn(100, 1)

class LinearRegressionGD:
    """Ordinary least-squares linear regression fitted by batch gradient descent.

    Minimizes the mean-squared error over all samples, updating the full
    parameter vector once per iteration.
    """

    def __init__(self, learning_rate=0.01, n_iters=1000):
        """Store hyperparameters.

        Args:
            learning_rate: Step size for each gradient-descent update.
            n_iters: Number of full-batch gradient-descent iterations.
        """
        self.lr = learning_rate
        self.n_iters = n_iters
        self.weights = None  # shape (n_features,) after fit()
        self.bias = None     # scalar after fit()

    def fit(self, X, y):
        """Fit the model to training data.

        Args:
            X: Feature matrix of shape (n_samples, n_features).
            y: Targets of shape (n_samples,) or (n_samples, 1).
        """
        n_samples, n_features = X.shape

        # Start from the zero model.
        self.weights = np.zeros(n_features)
        self.bias = 0.0

        # Flatten y once, outside the loop — it is loop-invariant and the
        # original recomputed it every iteration. This also accepts column
        # vectors (n_samples, 1) transparently.
        y_flat = y.flatten()

        for _ in range(self.n_iters):
            # Current predictions and residuals (compute residuals once;
            # both gradients reuse them).
            y_pred = np.dot(X, self.weights) + self.bias
            error = y_pred - y_flat

            # Gradients of the MSE loss w.r.t. weights and bias.
            # X.T @ error is already 1-D, so no extra flatten is needed.
            dw = (1 / n_samples) * np.dot(X.T, error)
            db = (1 / n_samples) * np.sum(error)

            # Gradient-descent parameter update.
            self.weights -= self.lr * dw
            self.bias -= self.lr * db

    def predict(self, X):
        """Return predictions for X, shape (n_samples,)."""
        return np.dot(X, self.weights) + self.bias

# Fit the model and predict on the training data.
model = LinearRegressionGD(learning_rate=0.1, n_iters=1000)
model.fit(X, y)
predictions = model.predict(X)

# Plot the result. X is random and unsorted, so sort by X before drawing the
# regression line — otherwise plt.plot connects the points in sample order and
# the "line" zigzags back and forth across the scatter.
order = X.flatten().argsort()
# NOTE(review): the labels below are Chinese; they render as boxes unless a
# CJK-capable font is configured in matplotlib — confirm the environment's font.
plt.scatter(X, y, color='blue', label='实际数据')
plt.plot(X[order], predictions[order], color='red', label='预测结果')
plt.title('梯度下降线性回归')
plt.xlabel('特征')
plt.ylabel('目标')
plt.legend()
plt.show()
