# 小批量梯度下降法
import numpy as np

# ==================== Data preparation ====================
# Synthetic regression set: 100 samples of y = 4 + 3x + Gaussian noise.
X = np.random.rand(100, 1) * 2
y = 4 + 3 * X + np.random.randn(100, 1)
# Prepend a column of ones so theta[0] acts as the intercept term.
X_b = np.hstack((np.ones((100, 1)), X))

# ==================== Hyperparameter setup ====================
learning_rate = 0.001  # initial step size (replaced by the schedule during training)
n_epochs = 10000       # number of full passes over the data
m = X_b.shape[0]       # number of training samples
batch_size = 10        # samples per mini-batch
# Mini-batches per epoch; ceil so a short final batch is still counted.
num_batches = int(np.ceil(m / batch_size))

# ==================== Model initialization ====================
# Random start for the two parameters [intercept, slope].
theta = np.random.standard_normal((2, 1))

# Hyperparameters of the learning-rate schedule lr(t) = t0 / (t + t1).
t0 = 5
t1 = 500


def learning_schedule(t, t0=5, t1=500):
    """Return the decayed learning rate for global step ``t``.

    Implements lr(t) = t0 / (t + t1): starts at t0/t1 and decays smoothly
    toward zero as training progresses. The defaults match the module-level
    hyperparameters, so existing callers behave exactly as before; exposing
    them as parameters removes the hidden dependence on mutable globals and
    makes the function pure and testable.
    """
    return t0 / (t + t1)


# ==================== Training ====================
for epoch in range(n_epochs):
    # Re-shuffle once per epoch so the mini-batch composition changes
    # between epochs.
    perm = np.random.permutation(m)
    X_b = X_b[perm]
    y = y[perm]

    # Running sum of squared errors for this epoch.
    epoch_loss = 0.0

    for i in range(num_batches):
        # Walk the shuffled data in contiguous slices so every sample is
        # used exactly once per epoch. (The original drew a random start
        # index, which repeats some samples, skips others, and defeats
        # the purpose of the shuffle above.)
        start = i * batch_size
        x_batch = X_b[start:start + batch_size]
        y_batch = y[start:start + batch_size]
        b = len(x_batch)  # actual slice length; the last batch may be short

        # Residuals and the proper MSE gradient: 2/b * X^T (X theta - y).
        # Normalizing by the true batch length keeps the gradient scale
        # independent of batch size.
        error = x_batch.dot(theta) - y_batch
        gradients = (2.0 / b) * x_batch.T.dot(error)

        # Weight by the actual batch length so the epoch average below is
        # exact even when the final batch is smaller than batch_size.
        epoch_loss += b * np.mean(error ** 2)

        # Decay the learning rate with the global *batch* step count
        # (the original used epoch * m + i, mixing samples with batches).
        learning_rate = learning_schedule(epoch * num_batches + i)

        # Gradient-descent parameter update.
        theta = theta - learning_rate * gradients

    # Mean squared error over the whole epoch.
    epoch_loss /= m

    # Progress report every 1000 epochs.
    if epoch % 1000 == 0:
        print(f"Epoch {epoch}: theta={theta.T},  learning_rate={learning_rate:.6f}, loss={epoch_loss:.4f}")

# ==================== Result output ====================
# Print the learned parameters; they should be close to the true values [4, 3]
# used to generate the data above.
print("训练得到的参数:")
print(theta.T)
