import torch
import  torch.nn.functional as F
import torch.nn as nn


class Net(nn.Module):
    """LeNet-style CNN: two conv+max-pool stages followed by three fully-connected layers.

    Expects input of shape (batch, 1, 32, 32) and produces (batch, 10) logits.
    """

    def __init__(self):
        super(Net, self).__init__()

        # Convolution stages: 1 -> 6 -> 16 channels, 3x3 kernels, stride 1, no padding.
        self.conv1 = nn.Conv2d(1, 6, 3)   # weight shape (6, 1, 3, 3)
        self.conv2 = nn.Conv2d(6, 16, 3)  # weight shape (16, 6, 3, 3)

        # Classifier head; 16*6*6 = 576 matches the flattened feature map for 32x32 inputs.
        self.fc1 = nn.Linear(16 * 6 * 6, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # x: (batch, 1, H, W). Each stage: conv -> ReLU -> 2x2 max-pool.
        # For a 32x32 input: (32-3+1)/2 = 15, then (15-3+1)/2 -> 6 (floored).
        out = F.relu(self.conv1(x))
        out = F.max_pool2d(out, (2, 2))
        out = F.relu(self.conv2(out))
        out = F.max_pool2d(out, 2)

        # Flatten every non-batch dimension before the linear layers.
        out = out.view(-1, self.get_last_axis(out))
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)

    def get_last_axis(self, d):
        """Return the number of elements per sample: the product of all non-batch dims of *d*."""
        total = 1
        for dim in d.shape[1:]:
            total *= dim
        return total
        


if __name__ == '__main__':
    # Demo: forward pass, MSE loss, backprop, and one manual SGD step.
    net = Net()
    print(net.fc1.weight)

    # Dummy batch: four 32x32 single-channel images and random targets.
    # Renamed from `input`/`out` to avoid shadowing the builtin input().
    batch = torch.randn(4, 1, 32, 32)
    target = torch.rand(4, 10)

    pred = net(batch)
    criterion = nn.MSELoss()
    loss = criterion(pred, target)
    print(loss)
    loss.backward()

    # Inspect the gradients computed by backward().
    for p in net.parameters():
        print(p.grad.data)

    # Manual SGD step: w <- w - lr * grad.
    # Done under no_grad() so the in-place update is not tracked by autograd.
    learn_rate = 0.01
    with torch.no_grad():
        for p in net.parameters():
            p.sub_(learn_rate * p.grad)

    print(pred)