# Mini-batch gradient descent
# (Unlike stochastic gradient descent, which updates on a single sample per step,
# mini-batch gradient descent updates on a small batch of samples per step.)

import numpy as np
import matplotlib.pyplot as plt

# Synthetic dataset: 100 samples of y = 4 + 3x + noise, with x drawn uniformly from [0, 2).
# NOTE(review): np.random.rand draws noise uniformly from [0, 1) (mean 0.5), so the
# learned intercept will settle near 4.5 rather than 4; np.random.randn would give
# zero-mean Gaussian noise — confirm which was intended.
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.rand(100, 1)
# Prepend a bias column of ones so theta[0] acts as the intercept.
X_c = np.c_[np.ones((100, 1)), X]

# Hyperparameters
rounds = 1000  # number of passes (epochs) over the dataset
n = 100  # number of training samples (must match the dataset size built above)
batch_size = 10  # samples per mini-batch
learning_rate = 0.001  # gradient-descent step size
num_batches = int(n / batch_size)  # mini-batch updates per epoch

# Per-epoch loss values, recorded for the training curve plotted below
loss_history = []

# Initialize theta = [intercept, slope] with uniform random values on [0, 1)
theta = np.random.rand(2, 1)

# Training: mini-batch gradient descent, one full pass over the data per epoch.
for _ in range(rounds):
    # Reshuffle each epoch so every sample lands in exactly one full-size batch.
    # (Previously a random start index in [0, n) was sliced, which produced
    # truncated batches near the end of the array — as small as one sample —
    # and sampled the data non-uniformly.)
    perm = np.random.permutation(n)
    for start in range(0, n, batch_size):
        batch = perm[start:start + batch_size]
        X_b = X_c[batch]
        y_b = y[batch]
        # Gradient of the batch MSE: d/dtheta mean((X_b @ theta - y_b)^2)
        # = (2 / m) * X_b^T (X_b @ theta - y_b). The 2/m factor keeps the
        # update consistent with the MSE loss tracked below.
        gradients = 2 / len(batch) * X_b.T.dot(X_b.dot(theta) - y_b)
        theta = theta - learning_rate * gradients
    # Record the full-dataset MSE once per epoch for the learning curve.
    loss = np.mean((X_c.dot(theta) - y) ** 2)
    loss_history.append(loss)
# Report the fitted parameters and the final full-dataset loss.
print(theta)
print(loss)

# Visualize how the loss evolved over the training epochs.
plt.plot(loss_history)
for labeler, text in ((plt.xlabel, "Epoch"), (plt.ylabel, "Loss"), (plt.title, "Training Loss")):
    labeler(text)
plt.show()
