# -*- coding: utf-8 -*- 
# @Time : 2022/4/4 9:47 
# @Author : zzuxyj 
# @File : 12-nn-optimAndBackward.py


""""
1.反向传播更新参数
2.优化器 SGD
"""

# 定义网络模型
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, CrossEntropyLoss
from torch.utils.data import DataLoader


class Model(nn.Module):
    """CIFAR-10 classifier: three conv/pool stages followed by a two-layer head.

    Expects batches of 3x32x32 images; produces (N, 10) class logits.
    """

    def __init__(self) -> None:
        super().__init__()
        # Spatial flow: 3x32x32 -> 32x32x32 -> 32x16x16 -> 32x16x16
        # -> 32x8x8 -> 64x8x8 -> 64x4x4 -> flatten (64*4*4 = 1024) -> 64 -> 10.
        layers = [
            Conv2d(3, 32, 5, stride=1, padding=2),
            MaxPool2d(2, 2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Run `input` through the sequential stack and return the logits."""
        return self.model(input)

if __name__ == '__main__':
    # NOTE(review): train=True loads the TRAINING split — the original
    # variable name `test_set` was misleading; renamed locally.
    train_set = torchvision.datasets.CIFAR10(
        "../dataset/CIFAR10", download=True, train=True,
        transform=torchvision.transforms.ToTensor())
    dataloader = DataLoader(train_set, batch_size=1)

    loss_fn = CrossEntropyLoss()
    model = Model()
    # Optimizer: plain stochastic gradient descent.
    optim = torch.optim.SGD(model.parameters(), lr=0.01)
    # StepLR: every `step_size` epochs multiply the learning rate by
    # `gamma` (0.1 => divide the LR by 10).
    scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=5, gamma=0.1)

    # Train for 20 epochs.
    for epoch in range(20):
        running_loss = 0.0
        for data in dataloader:
            imgs, target = data
            output = model(imgs)
            result_loss = loss_fn(output, target)
            # Clear gradients left over from the previous step.
            optim.zero_grad()
            # Backpropagate the loss.
            result_loss.backward()
            # Apply the parameter update.
            optim.step()
            # .item() extracts a plain float; the original summed live
            # tensors, which kept every iteration's autograd graph alive
            # for the whole epoch (memory leak).
            running_loss += result_loss.item()
        # Since PyTorch 1.1 the LR scheduler must be stepped AFTER the
        # epoch's optimizer steps; the original stepped it before the
        # inner loop, skipping the initial LR and raising a UserWarning.
        scheduler.step()
        print(running_loss)

"""
PyTorch学习率调整策略通过torch.optim.lr_scheduler接口实现。PyTorch提供的学习率调整策略分为三大类，分别是
    有序调整：等间隔调整(Step)，按需调整学习率(MultiStep)，指数衰减调整(Exponential)和余弦退火CosineAnnealing。StepLR - 有序调整,MultiStepLR - 有序调整
    自适应调整：自适应调整学习率 ReduceLROnPlateau。
    自定义调整：自定义调整学习率 LambdaLR。
"""