# 全局梯度下降
import numpy as np
import matplotlib.pyplot as plt

# Build a synthetic linear dataset: y = 4 + 3x + noise, 100 samples.
# Seed the global RNG so every run of the demo produces identical data
# (the original was unseeded and therefore non-reproducible).
np.random.seed(42)

# Feature values drawn uniformly from [0, 2).
X = 2 * np.random.rand(100, 1)
# NOTE(review): the noise term is uniform on [0, 1) (np.random.rand), not
# zero-mean Gaussian, so the fitted intercept lands near 4.5 rather than 4.
# np.random.randn may have been intended — kept as-is to preserve behavior.
y = 4 + 3 * X + np.random.rand(100, 1)
# Prepend a column of ones so theta[0] acts as the bias (intercept) term.
X_b = np.c_[np.ones((100, 1)), X]

# Hyperparameters: step size and number of full-batch iterations.
# NOTE(review): the gradient below is unnormalized (no 2/m factor), so this
# learning rate implicitly absorbs the 1/m scaling — tuned for 100 samples.
learning_rate = 0.001
n_iterations = 1000

# Random initial guess for [intercept, slope], shape (2, 1).
theta = np.random.rand(2, 1)

# Fit theta by full-batch gradient descent: each step moves theta against
# the (unnormalized) gradient of the squared-error loss, X_b^T (X_b theta - y).
for _ in range(n_iterations):
    residuals = X_b @ theta - y                 # prediction errors, shape (100, 1)
    theta = theta - learning_rate * (X_b.T @ residuals)
print(theta)
# Fitted values of the model on the training inputs.
y_b = X_b @ theta

# Visualize the result: raw samples as small green dots, then the model's
# fitted line in red on top.
plt.scatter(X, y, c='g', s=5, marker='o')
plt.plot(X, y_b, color='r', linestyle='-', linewidth=1)
plt.xlim(0, 2)
plt.ylim(4, 11)
plt.show()
