# coding=utf-8
# 学习单位  : 郑州大学
# @Author  : 铭同学
# @Time    : 2021/10/14 17:33
# @Software: PyCharm

# Inception Module (层中层，卷积层中添加1*1的卷积层)
# 复杂的卷积层进行封装，然后在Net类别中进行串行调用
# 多分类 Softmax
import torch
from torchvision import transforms       # 图像处理
from torchvision import datasets         # 数据集
from torch.utils.data import DataLoader  # 数据载入
import torch.nn as nn
import torch.nn.functional as F          # 函数包--激活函数使用（relu()）
import torch.optim as optim              # 优化器

'''
搞神经网络，深度学习，核心在于模型的搭建，网络层次的处理。
适合增量开发，每开发一个层次测试一下，看与预期结果，维度，通道数，宽高等信息是否一致
千万不能叭叭叭的一通写，最后直接跑代码，一旦网络复杂度太高，及其容易出问题！！！
应对网络复杂度，可以把复杂的层次结构用类封装起来，然后在Net类别中顺序执行。
'''

# 1. Prepare Dataset
batch_size = 64

# Convert PIL images to tensors (channels-first: C x W x H) and normalize
# with the MNIST mean/std so pixel values follow roughly a 0-1 distribution.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, )),
])

# Training split: downloaded on first run if missing, shuffled each epoch.
train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True,
                               download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)

# Test split: kept in a fixed order, which makes results easier to inspect.
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False,
                              download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)
# 2. Design Model

# Inception Module (层中层，卷积层中添加1*1的卷积层--kernel_size=1)
# 用于减少运算成本
class InceptionA(nn.Module):
    """Inception block: four parallel branches concatenated along channels.

    Every branch preserves the spatial size (padding matches kernel size),
    so the output always has 16 + 24 + 24 + 24 = 88 channels with the same
    H and W as the input.
    """

    def __init__(self, in_channels):
        super(InceptionA, self).__init__()
        # plain 1x1 branch
        self.branch1x1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        # 1x1 bottleneck followed by 5x5 (padding=2 keeps H/W unchanged)
        self.branch5x5_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5_2 = nn.Conv2d(16, 24, kernel_size=5, padding=2)
        # 1x1 bottleneck followed by two 3x3 convs (padding=1 keeps H/W)
        self.branch3x3_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3x3_2 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_3 = nn.Conv2d(24, 24, kernel_size=3, padding=1)
        # average-pool branch followed by a 1x1 projection
        self.branch_pool = nn.Conv2d(in_channels, 24, kernel_size=1)

    def forward(self, x):
        b1 = self.branch1x1(x)
        b2 = self.branch5x5_2(self.branch5x5_1(x))
        b3 = self.branch3x3_3(self.branch3x3_2(self.branch3x3_1(x)))
        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        b4 = self.branch_pool(pooled)
        # concatenate along the channel dimension of (N, C, H, W)
        return torch.cat([b1, b2, b3, b4], dim=1)

# 复杂的Inception Module进行封装，然后在Net类别中进行串行调用
# 模型类Net
class Net(nn.Module):
    """CNN for MNIST: two conv+pool stages, each followed by an InceptionA block.

    Channel/size bookkeeping for a 28x28 input:
      conv1 (1->10, k=5) -> 24x24, pool -> 12x12, incep1 -> 88 channels
      conv2 (88->20, k=5) -> 8x8,  pool -> 4x4,  incep2 -> 88 channels
      flatten: 88 * 4 * 4 = 1408 features -> fc -> 10 class scores
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        # 88 input channels = output channel count of an InceptionA block
        self.conv2 = nn.Conv2d(88, 20, kernel_size=5)

        self.incep1 = InceptionA(in_channels=10)
        self.incep2 = InceptionA(in_channels=20)

        self.mp = nn.MaxPool2d(2)  # halves H and W
        # 1408 = 88 channels * 4 * 4 spatial positions after the second pool;
        # this could be deepened further (1408-1024-512-...-10) if desired.
        self.fc = nn.Linear(1408, 10)

    def forward(self, x):
        batch = x.size(0)  # keep the batch dimension for the final flatten
        x = F.relu(self.mp(self.conv1(x)))
        x = self.incep1(x)
        x = F.relu(self.mp(self.conv2(x)))
        x = self.incep2(x)
        # flatten each sample into a 1408-dim feature vector
        x = x.view(batch, -1)
        # raw logits; CrossEntropyLoss applies log-softmax internally
        return self.fc(x)

# Instantiate the model
model = Net()

# Optional GPU migration; "cuda:0" is the first graphics card
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# model.to(device)

# 3. Construct Loss and Optimizer
# CrossEntropyLoss combines log-softmax and NLL loss, so Net outputs raw logits.
criterion = torch.nn.CrossEntropyLoss()
# SGD with momentum (0.5) — the momentum term helps escape local minima
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

# 4. Train and Test
def train(epoch):
    """Run one pass over train_loader, printing the mean loss every 300 batches."""
    loss_sum = 0.0
    # NOTE: a historical "TypeError: __array__() takes 1 positional argument"
    # while reading batches was fixed by pinning pillow==8.2.
    for i, (inputs, target) in enumerate(train_loader):
        # uncomment to train on GPU:
        # inputs, target = inputs.to(device), target.to(device)
        optimizer.zero_grad()  # gradients accumulate by default; clear them first

        # forward + backward + update
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        loss_sum += loss.item()  # .item() extracts the scalar loss value
        if i % 300 == 299:  # report the running average every 300 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, loss_sum / 300))
            loss_sum = 0.0  # reset the accumulator for the next window

def test():
    """Evaluate the model on test_loader and print overall accuracy."""
    correct = 0
    total = 0
    # Gradients are expensive and unnecessary during evaluation.
    with torch.no_grad():
        for images, labels in test_loader:
            # uncomment to evaluate on GPU:
            # images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            # predicted class = column index of the largest logit in each row
            predicted = outputs.data.argmax(dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    print('Accuracy on test set: %.2f%% [%d / %d]' % ((100 * correct / total), correct, total))


if __name__ == '__main__':
    # Train for 10 epochs, evaluating on the test set after every epoch.
    # (Removed a leftover `pass` statement that did nothing.)
    for epoch in range(10):
        train(epoch)
        test()



'''
[1,   300] loss: 1.020
[1,   600] loss: 0.204
[1,   900] loss: 0.142
Accuracy on test set: 97 % [9704 / 10000]
[2,   300] loss: 0.109
[2,   600] loss: 0.096
[2,   900] loss: 0.090
Accuracy on test set: 97 % [9793 / 10000]
[3,   300] loss: 0.075
[3,   600] loss: 0.078
[3,   900] loss: 0.069
Accuracy on test set: 98 % [9810 / 10000]
[4,   300] loss: 0.064
[4,   600] loss: 0.060
[4,   900] loss: 0.060
Accuracy on test set: 98 % [9832 / 10000]
[5,   300] loss: 0.055
[5,   600] loss: 0.054
[5,   900] loss: 0.054
Accuracy on test set: 98 % [9838 / 10000]
[6,   300] loss: 0.046
[6,   600] loss: 0.049
[6,   900] loss: 0.049
Accuracy on test set: 98 % [9863 / 10000]
[7,   300] loss: 0.046
[7,   600] loss: 0.042
[7,   900] loss: 0.042
Accuracy on test set: 98 % [9884 / 10000]
[8,   300] loss: 0.037
[8,   600] loss: 0.039
[8,   900] loss: 0.042
Accuracy on test set: 98 % [9876 / 10000]
[9,   300] loss: 0.034
[9,   600] loss: 0.037
[9,   900] loss: 0.037
Accuracy on test set: 98 % [9867 / 10000]
[10,   300] loss: 0.037
[10,   600] loss: 0.030
[10,   900] loss: 0.032
Accuracy on test set: 98 % [9892 / 10000]

'''
