# Stochastic Gradient Descent (SGD) — linear regression demo

import numpy as np
import matplotlib.pyplot as plt

# ----- Data generation -----
# Fix the RNG seed so the demo is reproducible run-to-run.
np.random.seed(42)

# Synthetic linear data: y = 4 + 3x + zero-mean Gaussian noise.
# NOTE: the original drew noise with np.random.rand (uniform on [0, 1)),
# whose mean of 0.5 biases the fitted intercept toward 4.5; randn lets the
# fit recover the true intercept of ~4.
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.randn(100, 1)
X_b = np.c_[np.ones((100, 1)), X]  # prepend a bias column of ones

# ----- Hyperparameters -----
n_epochs = 1000
m = X_b.shape[0]  # number of training samples; also SGD updates per epoch
learning_rate = 0.001

# Random initialization of the parameters [intercept, slope]^T.
theta = np.random.rand(2, 1)

# Per-epoch full-dataset MSE, recorded for plotting.
loss_history = []

for _ in range(n_epochs):
    for _ in range(m):
        # Draw one sample uniformly at random (with replacement).
        # The bound is m, the dataset size — the original used the
        # per-epoch update count, which only stayed in range by coincidence.
        i = np.random.randint(m)
        xi = X_b[i:i + 1]  # shape (1, 2): slicing keeps the 2-D shape
        yi = y[i:i + 1]    # shape (1, 1), consistent with xi (no broadcasting)
        # Gradient of the per-sample squared error: 2 * x^T (x.theta - y).
        # (The original omitted the factor 2, which merely rescales the
        # effective learning rate.)
        gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
        theta = theta - learning_rate * gradients
    # Monitor convergence: full-dataset MSE after this epoch.
    loss = np.mean((X_b.dot(theta) - y) ** 2)
    loss_history.append(loss)
# Report the learned parameters and the last recorded training loss.
print(theta)
print(loss)

# Visualize how the training loss evolved across epochs.
plt.plot(loss_history)
axes = plt.gca()
axes.set_xlabel("Epoch")
axes.set_ylabel("Loss")
axes.set_title("Training Loss")
plt.show()
