# @Author：zh
# @Data：2021/12/10 10:43
# @：PyCharm
# Python版本：3.7
# torch                         1.8.1+cu111
# torchaudio                    0.8.1
# torchvision                   0.9.1+cu111

# 运用CNN分析MNIST手写数字分类
# 参考：https://blog.csdn.net/qq_36022260/article/details/83831377
from torch.utils.data import DataLoader
from torchvision.datasets import mnist
from torch import  nn
from torch.autograd import Variable
from torch import  optim
from torchvision import transforms
import torch


'''
MNIST 数据集文件：
（1）Training set images: train-images-idx3-ubyte.gz (9.9 MB, 解压后 47 MB, 包含 60,000 个样本)
（2）Training set labels: train-labels-idx1-ubyte.gz (29 KB, 解压后 60 KB, 包含 60,000 个标签)
（3）Test set images: t10k-images-idx3-ubyte.gz (1.6 MB, 解压后 7.8 MB, 包含 10,000 个样本)
（4）Test set labels: t10k-labels-idx1-ubyte.gz (5KB, 解压后 10 KB, 包含 10,000 个标签)
'''


# 定义CNN ：网络的复杂程度影响了一个epoch的运算时间（当然还有数据的数量）
class CNN(nn.Module):
    """Small convolutional net mapping a 1x28x28 MNIST image to 10 class logits.

    Input:  [N, 1, 28, 28] float tensor.
    Output: [N, 10] raw logits (no softmax; pair with CrossEntropyLoss).
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Spatial sizes after each stage (for a 28x28 input) noted on the right.
        self.layer1 = self._conv_block(1, 16)               # -> 16 x 26 x 26
        self.layer2 = self._conv_block(16, 32, pool=True)   # -> 32 x 12 x 12
        self.layer3 = self._conv_block(32, 64)              # -> 64 x 10 x 10
        self.layer4 = self._conv_block(64, 128, pool=True)  # -> 128 x 4 x 4

        # Classifier head over the flattened 128*4*4 feature map.
        self.fc = nn.Sequential(
            nn.Linear(128 * 4 * 4, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 10))

    @staticmethod
    def _conv_block(in_ch, out_ch, pool=False):
        # 3x3 conv (no padding, so H/W shrink by 2) + batch norm + in-place ReLU,
        # optionally followed by a 2x2 max-pool that halves H and W.
        mods = [nn.Conv2d(in_ch, out_ch, kernel_size=3),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(inplace=True)]
        if pool:
            mods.append(nn.MaxPool2d(kernel_size=2, stride=2))
        return nn.Sequential(*mods)

    def forward(self, x):
        # Run the four conv stages in order.
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        # Flatten [N, 128, 4, 4] -> [N, 2048] for the fully-connected head.
        x = x.view(x.size(0), -1)
        return self.fc(x)


# Fall back to CPU when no GPU is present (hard-coding 'cuda:0' crashes on
# CPU-only machines).
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


# Preprocessing pipeline:
#   transforms.ToTensor():   HWC PIL image -> CHW float tensor scaled to [0, 1]
#   transforms.Normalize([0.5], [0.5]): per-channel (x - 0.5) / 0.5, mapping
#   [0, 1] -> [-1, 1]; normalized inputs help the model converge faster.
data_tf = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])

# Download (if needed) and load MNIST.
# NOTE(review): the directory name keeps the original 'minst' spelling so any
# previously downloaded data is reused; '\minst' resolves relative to the
# current drive root on Windows.
train_set = mnist.MNIST('\\minst', train=True, transform=data_tf, download=True)
test_set = mnist.MNIST('\\minst', train=False, transform=data_tf, download=True)

# Batched loaders; shuffle only the training set (order of the test set is
# irrelevant for accuracy and keeping it fixed makes evaluation reproducible).
train_data = DataLoader(train_set, batch_size=10000, shuffle=True)
test_data = DataLoader(test_set, batch_size=2048, shuffle=False)

# Peek at one batch to confirm shapes: images [B, 1, 28, 28], labels [B].
img, label = next(iter(train_data))
print("图像shape:" + str(img.shape))
print("标签shape：" + str(label.shape))

net = CNN()

# Move model parameters to the selected device.
net.to(device)

# Cross-entropy loss over the 10 digit classes (expects raw logits).
criterion = nn.CrossEntropyLoss()
# Plain SGD; make the learning-rate argument explicit.
optimizer = optim.SGD(net.parameters(), lr=1e-1)

nums_epoch = 20

# Per-epoch metric history.
losses = []
acces = []
eval_losses = []
eval_acces = []


for epoch in range(nums_epoch):
    train_loss = 0
    train_acc = 0
    # Training mode: BatchNorm uses per-batch statistics and updates its
    # running averages.
    net.train()

    for img, label in train_data:
        # Move the batch to the training device. (The deprecated
        # torch.autograd.Variable wrappers were removed: tensors track
        # gradients directly since torch 0.4.)
        img = img.to(device)
        label = label.to(device)

        # Forward pass: [B, 1, 28, 28] -> [B, 10] logits.
        out = net(img)
        loss = criterion(out, label)

        # Backward pass + parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # out.max(1) returns (values, indices); the index of the max logit in
        # each row is the predicted digit.
        _, pred = out.max(1)
        num_correct = (pred == label).sum().item()
        # Per-batch accuracy: correct / batch size.
        acc = num_correct / img.shape[0]

        # Accumulate per-batch loss and accuracy for the epoch averages.
        train_loss += loss.item()
        train_acc += acc

    eval_loss = 0
    eval_acc = 0

    # Evaluation: switch to eval mode so BatchNorm uses its running statistics
    # (evaluating in train mode silently skews test metrics), and disable
    # gradient tracking to save memory and time.
    net.eval()
    with torch.no_grad():
        for img, label in test_data:
            img = img.to(device)
            label = label.to(device)

            out = net(img)
            loss = criterion(out, label)

            _, pred = out.max(1)
            num_correct = (pred == label).sum().item()
            acc = num_correct / img.shape[0]

            # Record test loss and accuracy.
            eval_loss += loss.item()
            eval_acc += acc

    # Average over batches: len(loader) == ceil(N / batch_size).
    Train_Loss = train_loss / len(train_data)
    Train_Acc = train_acc / len(train_data)
    Test_Loss = eval_loss / len(test_data)
    Test_Acc = eval_acc / len(test_data)

    print('Epoch {} Train Loss {} Train  Accuracy {} Test Loss {} Test Accuracy {}'.format(epoch+1,Train_Loss,Train_Acc,Test_Loss,Test_Acc))

