import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets,transforms
import matplotlib.pyplot as plt

# Training hyperparameter and compute device.
batch_size = 512
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# MNIST loaders; ToTensor converts PIL images to float tensors in [0, 1].
# The train split downloads the archive (which also provides the test files).
mnist_transform = transforms.Compose([transforms.ToTensor()])
trainloader = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=True, download=True, transform=mnist_transform),
    batch_size=batch_size,
    shuffle=True,
)
testloader = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=False, download=False, transform=mnist_transform),
    batch_size=batch_size,
    shuffle=True,
)

class Net(nn.Module):
    """LeNet-5-style CNN for 28x28 single-channel MNIST images (10 classes)."""

    def __init__(self):
        super(Net, self).__init__()
        # padding=2 keeps the 28x28 input at 28x28 after the 5x5 kernel.
        self.conv1 = nn.Conv2d(1, 6, 5, padding=2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # After two conv + 2x2 avg-pool stages the map is 16 channels of 5x5.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.clf = nn.Linear(84, 10)

    def forward(self, x):
        """Return raw class logits of shape (batch, 10)."""
        # Two conv -> sigmoid -> 2x2 average-pool (stride 2) stages.
        for conv in (self.conv1, self.conv2):
            x = F.avg_pool2d(torch.sigmoid(conv(x)), 2)
        # Flatten everything except the batch dimension.
        x = x.view(x.size(0), -1)
        # Two sigmoid-activated fully connected layers, then the classifier.
        x = torch.sigmoid(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))
        return self.clf(x)

# Build the model on the selected device and an Adam optimizer.
model = Net().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-2)

epochs = 30
accs, losses = [], []
# Fixed: the original wrote `for epochs in range(epochs)`, shadowing the
# config variable with the loop index.
for epoch in range(epochs):
    # ---- train one epoch ----
    model.train()
    for x, y in trainloader:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        out = model(x)
        loss = F.cross_entropy(out, y)
        loss.backward()
        optimizer.step()

    # ---- evaluate on the TEST set ----
    # Bug fix: the original iterated `trainloader` here while dividing by
    # len(testloader.dataset), so the reported "test" metrics were computed
    # on training data with the wrong denominator (accuracy could exceed 1.0).
    model.eval()
    correct = 0
    testloss = 0.0
    num_batches = 0
    with torch.no_grad():
        for x, y in testloader:
            x, y = x.to(device), y.to(device)
            out = model(x)
            testloss += F.cross_entropy(out, y).item()
            # Predicted class = index of the max logit per sample.
            pred = out.max(dim=1, keepdim=True)[1]
            correct += pred.eq(y.view_as(pred)).sum().item()
            num_batches += 1

    acc = correct / len(testloader.dataset)
    testloss = testloss / num_batches
    accs.append(acc)
    losses.append(testloss)
    # Punctuation normalized to ASCII (the original mixed full-width ，：).
    print('epoch:{},loss:{:.4f},acc:{:.4f}'.format(epoch, testloss, acc))

    # ---- visualize conv feature maps for the last evaluated batch ----
    # F.sigmoid is deprecated -> torch.sigmoid; wrap in no_grad so the
    # visualization forward pass does not build an autograd graph.
    with torch.no_grad():
        feature1 = torch.sigmoid(model.conv1(x))
        # NOTE(review): conv2 is applied to the un-pooled conv1 output here
        # (as in the original), so these maps are 24x24, not the 10x10 the
        # network itself produces — kept to preserve the original plots.
        feature2 = torch.sigmoid(model.conv2(feature1))

    n = 5
    img = x.detach().cpu().numpy()[:n]
    feature_map1 = feature1.cpu().numpy()[:n]
    feature_map2 = feature2.cpu().numpy()[:n]

    fig, ax = plt.subplots(3, n, figsize=(10, 10))
    for i in range(n):
        # Sum over channels to get a single grayscale image per sample.
        ax[0, i].imshow(img[i].sum(0), cmap='gray')
        ax[1, i].imshow(feature_map1[i].sum(0), cmap='gray')
        ax[2, i].imshow(feature_map2[i].sum(0), cmap='gray')
    plt.show()
