import torchvision
import torch.nn as nn
import torch
from torch.autograd import Variable


# Instantiate a VGG16 model from torchvision (no pretrained-weight argument is passed here).
net = torchvision.models.vgg16()

# for name in net.named_modules():
#     print(name)
# Inspect each layer's parameters: the autograd flag (requires_grad) of weights and biases
def print_value(net):
    """Print every named parameter of *net* together with its requires_grad flag."""
    for param_name, param in net.named_parameters():
        print(f'name: {param_name},\t grad: {param.requires_grad}')

# 定义需要冻结的层
no_grad = {'features.0.weight','features.0.bias','features.2.weight',	 'features.2.bias'
,'features.5.weight',	'features.5.bias'}

for name,value in net.named_parameters():
    if name in no_grad:
        value.requires_grad = False
    else:
        value.requires_grad = True

# 最后在定义优化器时，只对requires_grad为True的层的参数进行更新。
optimizer = torch.optim.Adam(list(filter(lambda p:p.requires_grad,net.parameters())),lr=0.01)
print_value(net)
'''



# A shorter variant, written with a higher-order function:

# optimiter = torch.optim.SGD(list(filter(lambda name:name[0].required_grad,net.named_parameters())),lr=0.01)
# NOTE(review): as written this line is broken — `required_grad` is a typo for
# `requires_grad`, and named_parameters() yields (name, param) tuples, so
# `name[0]` is the name string, not the parameter.


# Using different learning rates for different layers
'''

# Split the parameters into two groups so each group can get its own learning
# rate: the convolutional feature extractor ("features.*") vs. everything else
# (the classifier head).
conv1_param = []
conv2_param = []

for name, params in net.named_parameters():
    if "features" in name:
        conv1_param.append(params)
    else:
        conv2_param.append(params)


# Then configure the optimizer with one param group per learning rate.
# FIX: the param-group key must be "params" (the original comment used "param",
# which torch.optim rejects with a KeyError/ValueError when uncommented).
# optimizer = torch.optim.Adam([{"params": conv1_param, "lr": 0.01},
#                               {"params": conv2_param, "lr": 0.05}],
#                              weight_decay=1e-3)