import random
import torch
import matplotlib.pyplot as plt

"""
MBGD算法：是GD和SGD的折中，每次使用一个小批量的样本来更新参数，
既保证了训练速度，又能保证收敛的准确性‌。
工作原理：在每次迭代中，从训练集中随机抽取一个小批量样本（mini-batch），
计算这个小批量样本的梯度，并更新模型参数。
"""

def loss_fn_batch(w1, w2, batch_indices, features_a=None, features_b=None):
    """Mean squared error of the linear model ``w1*a + w2*b`` (target 0)
    over one mini-batch.

    Args:
        w1, w2: scalar (shape-[1]) parameter tensors; gradients flow
            through them when ``requires_grad`` is set.
        batch_indices: 1-D tensor or sequence of sample indices forming
            the mini-batch.
        features_a, features_b: optional feature tensors. Default to the
            module-level ``x1``/``x2`` so existing callers are unchanged.

    Returns:
        A scalar tensor: the mean of ``(w1*a[i] + w2*b[i])**2`` over the
        batch (the target is 0, so the residual is the prediction itself).
    """
    a = x1 if features_a is None else features_a
    b = x2 if features_b is None else features_b
    # Vectorized over the whole batch instead of a per-sample Python loop.
    preds = w1 * a[batch_indices] + w2 * b[batch_indices]
    return (preds ** 2).mean()

# ---- 1. Synthetic data ---------------------------------------------------
# Samples are addressed by fixed positional indices so that random
# mini-batches can be drawn from them later.
n_samples = 100
x1 = 2.0 * torch.rand(n_samples)
x2 = 2.0 * torch.rand(n_samples)

# ---- 2. Hyper-parameters and initial weights -----------------------------
lr = 0.05
Epochs = 20
batch_size = 16
# requires_grad=True: autograd computes the gradients, no manual derivation.
w1 = torch.tensor([-1.0], requires_grad=True)
w2 = torch.tensor([1.0], requires_grad=True)

# ---- Contour background for visualizing the descent path -----------------
# NOTE(review): the plotted bowl x^2 + 2*y^2 is an illustrative surface,
# not the exact loss surface of the sampled data — confirm if that matters.
grid_a = torch.linspace(-1, 1, 100)
grid_b = torch.linspace(-1, 1, 100)
mesh_a, mesh_b = torch.meshgrid(grid_a, grid_b, indexing="ij")
loss_grid = mesh_a ** 2 + 2 * mesh_b ** 2
fig = plt.figure()
ax = fig.add_subplot()
ax.contour(mesh_a, mesh_b, loss_grid)
# Collected (w1, w2) snapshots; drawn over the contour after training.
points = []
# 3. MBGD算法的循环训练
for epoch in range(Epochs):
    points.append([w1.item(), w2.item()])
    total_loss = 0
    # 随机打乱数据索引
    # torch.randperm(n) 返回从0到n - 1的整数的随机排列  也就是随机下标索引序列
    indices = torch.randperm(n_samples)
    # 按批次处理
    for bs, i in enumerate(range(0, n_samples, batch_size)):
        batch_indices = indices[i:i+batch_size]
        loss = loss_fn_batch(w1, w2, batch_indices)
        total_loss += loss
        # 反向传播
        loss.backward()
        # 更新参数
        with torch.no_grad():
            w1 -= lr * w1.grad
            w2 -= lr * w2.grad
        # 清空梯度 zero_() 就地置零
        w1.grad.zero_()
        w2.grad.zero_()

    print(f"[{epoch+1}/{Epochs}] Loss: {(total_loss/bs).item():.4f}")

# FIX: the loop records parameters only at the *start* of each epoch, so
# without this the final updated position would be missing from the plot.
points.append([w1.item(), w2.item()])
# Draw the descent trajectory (black dots connected by lines) on the contour.
points = torch.tensor(points)
ax.plot(points[:, 0], points[:, 1], "ko-")
plt.show()

