import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader

# Fix the global RNG seed so the synthetic data and the network's weight
# initialization are repeatable from run to run.
torch.manual_seed(42)

# Hyperparameters
Epochs = 4000
lr = 0.01
_lambda = 0.001  # regularization coefficient; typically small
batch_size = 4


# Yesterday covered MBGD (mini-batch gradient descent) with randomly loaded
# batches; here is torch's API for shuffled mini-batch loading.
class DS(Dataset):
    """Toy regression dataset: y = 0.6 * x + 2 plus unit Gaussian noise."""

    def __init__(self):
        super().__init__()
        # Features: integers 0..9 as a (10, 1) float column vector.
        self.x = torch.arange(10).reshape(-1, 1).float()
        # Targets follow y = w*x + b (w=0.6, b=2) with additive N(0, 1) noise.
        self.y = 0.6 * self.x + 2 + torch.normal(0, 1, (len(self.x), 1))

    def __len__(self):
        return self.x.shape[0]

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]


# Build the dataset and a shuffling mini-batch loader (MBGD).
ds = DS()
dl = DataLoader(ds, batch_size=batch_size, shuffle=True)
print(f"取到数据集下标为0的x和y {ds[0]}")
print(f"随机批量加载的dl的总批次是 {len(dl)}")
# Fix: next(enumerate(dl)) yields the tuple (0, batch) — counter included —
# so the "first batch" print showed the wrong value. next(iter(dl)) yields
# the first batch itself.
print(f"取到第一批的数据是 {next(iter(dl))}")

# Model: a small MLP, 1 -> 100 -> 200 -> 100 -> 1, with ReLU activations
# between the linear layers (none on the output).
_widths = [1, 100, 200, 100, 1]
_layers = []
for _fan_in, _fan_out in zip(_widths[:-1], _widths[1:]):
    _layers.append(nn.Linear(_fan_in, _fan_out))
    _layers.append(nn.ReLU())
model = nn.Sequential(*_layers[:-1])  # drop the trailing ReLU after the output layer

# Loss function and optimizer.
loss_fn = nn.MSELoss()
# weight_decay=_lambda makes SGD apply L2 regularization (weight decay) inside
# the update step, instead of adding a penalty term to the loss by hand.
optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=_lambda)

''' Figure setup: left panel shows the fit, right panel tracks the loss. '''
fig = plt.figure("show fitting and loss", (12, 6))
ax1, ax2 = fig.subplots(1, 2)
# Ground-truth points in red; the model's current prediction as a blue dashed line.
ax1.scatter(ds.x, ds.y, c='r')
line, = ax1.plot(ds.x, model(ds.x).detach(), "o--b")
# History buffers for the loss curve.
loss_ls, epoch_ls = [], []

# Training loop: mini-batch SGD over the DataLoader with periodic live plotting.
# Fix: the original called ax2.plot(...) on every refresh, which stacks a brand
# new Line2D artist each time (slower redraws, growing memory). Create one
# persistent artist up front and update its data instead.
loss_line, = ax2.plot([], [], "r-")
for epoch in range(Epochs):
    total_loss = 0.0
    for x, y in dl:  # enumerate() dropped: the batch index was never used
        optimizer.zero_grad()
        y_pre = model(x)
        loss = loss_fn(y_pre, y)
        ''' Regularization is implemented by modifying the loss function; the
        two common forms are L1 and L2 regularization. '''
        # l1_reg = 0.0
        # for p in model.parameters():
        #     l1_reg += torch.sum(p.abs())
        # loss = loss + _lambda * l1_reg

        # l2_reg = 0.0
        # for p in model.parameters():
        #     l2_reg += torch.sum(p.pow(2))
        # loss = loss + _lambda / 2 * l2_reg

        # Here L2 regularization comes from weight_decay=_lambda in the optimizer.

        total_loss += loss.item()
        loss.backward()
        optimizer.step()
    avg_loss = total_loss / len(dl)
    epoch_ls.append(epoch)
    loss_ls.append(avg_loss)
    if epoch == 0 or (epoch + 1) % 100 == 0:
        print(f"[{epoch + 1}/{Epochs}] Loss:{avg_loss:.4f}")
        ''' Refresh the plots. '''
        # no_grad: the forward pass here is for display only, no graph needed.
        with torch.no_grad():
            line.set_ydata(model(ds.x))

        # Update the single loss artist and rescale the axes to the new data.
        loss_line.set_data(epoch_ls, loss_ls)
        ax2.relim()
        ax2.autoscale_view()
        plt.pause(0.01)


plt.show()
