# coding:utf-8
# Author : hiicy redldw
# Date : 2019/05/15
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

class Net(nn.Module):
    """LeNet-5-style CNN for single-channel 32x32 inputs, producing 10 raw class scores."""

    def __init__(self):
        super(Net, self).__init__()
        # 1 input channel -> 6 feature maps, 5x5 kernel
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
        # 6 -> 16 feature maps, 5x5 kernel
        self.conv2 = nn.Conv2d(6, 16, 5)
        # an affine operation: y = Wx + b
        # 16 channels * 5 * 5 spatial positions after two conv+pool stages on a 32x32 input
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass.

        Args:
            x: tensor of shape (N, 1, 32, 32).

        Returns:
            Tensor of shape (N, 10) with raw (un-normalized) class scores.
        """
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))  # -> (N, 6, 14, 14)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)       # -> (N, 16, 5, 5)
        # torch.flatten keeps the batch dimension explicit, unlike
        # view(-1, n) which silently re-infers it from the total size.
        x = torch.flatten(x, 1)                          # -> (N, 16*5*5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        """Return the number of elements per sample: the product of all
        dimensions of ``x`` except the batch dimension.

        Kept for backward compatibility with callers; ``forward`` now uses
        ``torch.flatten`` instead.
        """
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
inpt = torch.randn(1, 1, 32, 32)  # dummy batch: 1 sample, 1 channel, 32x32
tnet = Net()

###### REW: per-layer learning rates #######
lr = 0.001
# Identify conv2's parameters by object id so they can be excluded from the
# base group (kept as a list for backward compatibility of the name).
conv2_params = list(map(id, tnet.conv2.parameters()))
# Membership is tested against a set: O(1) per lookup instead of scanning
# the list for every parameter.
_conv2_param_ids = set(conv2_params)
base_params = filter(lambda p: id(p) not in _conv2_param_ids,
                     tnet.parameters())
# Two parameter groups: conv2 trains with a 100x larger learning rate than
# the rest of the network; both share the same momentum.
optimizer = torch.optim.SGD([
                            {"params": base_params},
                            {"params": tnet.conv2.parameters(), 'lr': lr * 100}
                            ], lr=lr, momentum=0.9)

#### REW: learning-rate decay schedule #####
def adjust_lr(epoch):
    """Decay the base learning rate by 10x every 30 epochs.

    Only the first param group (the base parameters) is updated; the conv2
    group keeps its own boosted learning rate untouched.
    """
    decayed = lr * (0.1 ** (epoch // 30))
    # Slice to the first group only — equivalent to the usual
    # "set and break" idiom, and a no-op if there are no groups.
    for group in optimizer.param_groups[:1]:
        group['lr'] = decayed

# One demo training step: forward pass, MSE loss, backward pass, SGD update.
adjust_lr(1)  # apply decay for "epoch" 1 — 1 // 30 == 0, so lr is unchanged
optimizer.zero_grad()   # clear the gradient buffers

output = tnet(inpt)
# jt = list(tnet.parameters())
# print(out)
# Zero the gradient buffers of all parameters, then backprop a random gradient:
# tnet.zero_grad()
# out.backward(torch.randn(1, 10))
# loss function
target = torch.randn(10)  # dummy regression target
target = target.view(1,-1)  # reshape to (1, 10) to match the output's shape
criterion = nn.MSELoss()
loss = criterion(output,target)
# print(loss.grad_fn)
tnet.zero_grad()     # zero the gradient buffers of all parameters

# Before backward() the gradients have never been populated, so this prints None.
print('conv1.bias.grad before backward')
print(tnet.conv1.bias.grad)

loss.backward()
optimizer.step()    # update the parameters

print('conv1.bias.grad after backward')
print(tnet.conv1.bias.grad)
# Updating the weights
# optimizer = optim.SGD(tnet.parameters(),lr=0.01)
# In the training loop:
# optimizer.zero_grad()   # clear the gradient buffers
# output = tnet(inpt)
# loss = criterion(output, target)
# loss.backward()
# optimizer.step()    # update the parameters
