import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import numpy as np
 
 
class Net(nn.Module):
    """LeNet-5 style CNN: single-channel 32x32 input -> 10 raw class scores.

    Only layers that hold learnable parameters are declared in ``__init__``;
    stateless operations (ReLU, max-pooling) are applied through
    ``torch.nn.functional`` inside ``forward``.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Conv2d(in_channels, out_channels, kernel_size)
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Linear(in_features, out_features); 16*5*5 is the flattened size of
        # conv2's output for a 32x32 input after two 2x2 max-pools.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return class scores of shape (batch, 10) for x of shape (batch, 1, 32, 32)."""
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)  # an int kernel means (2, 2)
        # Flatten every dim except batch. Keeping the batch dim explicit
        # (instead of view(-1, np.prod(...))) fails loudly on a feature-size
        # mismatch rather than silently merging samples, and removes the
        # numpy dependency from the forward pass.
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
        
net = Net()

# Forward pass on a dummy batch: (batch=1, channels=1, height=32, width=32).
# Named `sample` rather than `input` to avoid shadowing the builtin input().
sample = torch.randn(1, 1, 32, 32)
out = net(sample)
print('输出:', out)

# Training: run one optimisation step against a random regression target.
criterion = nn.MSELoss()
target = torch.randn(1, 10)  # same shape as `out`
loss = criterion(out, target)  # define the loss
print('loss:', loss)
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
optimizer.zero_grad()  # gradients accumulate by default — clear them before backward
loss.backward()
optimizer.step()  # apply the parameter update
# To freeze part of the model, hand the optimizer only the trainable params:
# optimizer.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=1e-3)

# Four ways to inspect parameters: state_dict, named_parameters,
# modules (filtered by type), and parameters.
print('参数名:')
state = net.state_dict()
for name in state:
    print(name)
print('参数名得参数:', state['conv2.weight'].mean())

params = list(net.named_parameters())  # find the index by debugging/inspection
print('index得参数:', params[2][1], params[2][1].data)

print('每一层的参数名和参数值')
for name, param in net.named_parameters():
    print(name, ':', param.size())

# Filter modules by type to reach a specific layer's weights directly.
for layer in net.modules():
    if isinstance(layer, nn.Conv2d):
        print('层得参数:', layer.weight.mean())

print('每一层的参数值')
for param in net.parameters():
    print(param.size())

print('同named_parameters')
for name, tensor in state.items():
    print(name, tensor.size())

print('这个是啥:')  # optimizer state: hyperparameters and parameter groups
opt_state = optimizer.state_dict()
for var_name in opt_state:
    print(var_name, "\t", opt_state[var_name])
# Two ways to persist the model.
torch.save(net.state_dict(), 'param.pth') # save only the parameters (recommended)
torch.save(net, 'model.pth') # save the whole model (pickles the Net class itself)
# Loading — both approaches demonstrated in sequence.
model = Net()
model.load_state_dict(torch.load('param.pth'))  # approach 1: restore params into a fresh Net
# NOTE(review): the next line rebinds `model`, discarding the instance restored
# above — presumably intentional as a demo of approach 2. torch.load unpickles
# arbitrary objects; only load checkpoint files from a trusted source.
model = torch.load('model.pth')
model.eval() # put dropout/batch-norm layers in eval mode, otherwise results are inconsistent
    
# GPU setup: move the model to CUDA when available, otherwise stay on CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
net.to(device)  # move parameters and buffers to the chosen device
print('GPU个数:', torch.cuda.device_count())
net = nn.DataParallel(net)  # replicate the model across all visible GPUs