import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt


class LeNet_5(nn.Module):
    """Classic LeNet-5 CNN for 1x28x28 inputs (e.g. MNIST), 10 classes.

    Architecture:
        conv(1->6, 5x5, pad 2) -> sigmoid -> maxpool 2x2   (28x28 -> 14x14)
        conv(6->16, 5x5)       -> sigmoid -> maxpool 2x2   (14x14 -> 5x5)
        fc 400->120 -> sigmoid -> fc 120->84 -> sigmoid -> fc 84->10

    The forward pass returns log-probabilities (``log_softmax``).
    NOTE(review): log-probabilities pair naturally with ``nn.NLLLoss``;
    this script trains with ``nn.CrossEntropyLoss`` (which applies
    log_softmax itself) — it still learns, but confirm the intended pairing.
    """

    def __init__(self):
        super().__init__()
        # padding=2 keeps the 28x28 spatial size through the 5x5 kernel.
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1, padding=(2, 2))
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=0)
        # 16 channels * 5 * 5 spatial positions = 400 features after pooling.
        self.fc1 = nn.Linear(in_features=400, out_features=120)
        self.fc2 = nn.Linear(in_features=120, out_features=84)
        self.fc3 = nn.Linear(in_features=84, out_features=10)

    def forward(self, x):
        """Compute log-probabilities for a batch.

        Args:
            x: float tensor of shape (N, 1, 28, 28).

        Returns:
            Tensor of shape (N, 10) with log-probabilities over the classes.
        """
        # First conv block: 28x28 -> 28x28 (padded conv) -> 14x14 (pool).
        x = self.conv1(x)
        x = torch.sigmoid(x)  # F.sigmoid is deprecated; torch.sigmoid is the supported API
        x = F.max_pool2d(x, 2)
        # Second conv block: 14x14 -> 10x10 -> 5x5.
        x = self.conv2(x)
        x = torch.sigmoid(x)
        x = F.max_pool2d(x, 2)
        # Flatten everything except the batch dim: (N, 16, 5, 5) -> (N, 400).
        x = torch.flatten(x, 1)
        # Fully connected head.
        x = torch.sigmoid(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))
        x = self.fc3(x)
        out = F.log_softmax(x, dim=1)
        return out


def train(epochs, model, criterion, optimizer, train_loader, test_loader):
    """Train ``model`` and report test accuracy after every epoch.

    Args:
        epochs: number of full passes over ``train_loader``.
        model: network to optimize; called as ``model(img)``.
        criterion: loss taking ``(output, label)``.
        optimizer: optimizer built over ``model.parameters()``.
        train_loader: iterable of ``(img, label)`` training batches.
        test_loader: iterable of ``(img, label)`` evaluation batches.
    """
    for epoch in range(epochs):
        # Re-enter training mode each epoch (eval() is set below for testing).
        model.train()
        sum_loss = 0.0
        print('epoch {}'.format(epoch))
        for i, data in enumerate(train_loader):
            img, label = data
            optimizer.zero_grad()
            out = model(img)
            loss = criterion(out, label)
            loss.backward()
            optimizer.step()
            # Print the running-average loss once every 100 batches.
            sum_loss += loss.item()
            if i % 100 == 99:
                print('[Epoch:%d, batch:%d] train loss: %.03f' % (epoch + 1, i + 1, sum_loss / 100))
                sum_loss = 0.0
        # Evaluation: eval mode and no_grad() so no autograd graphs are built
        # (the original built graphs for every test batch, wasting memory).
        model.eval()
        total = 0
        correct = 0
        with torch.no_grad():
            for data in test_loader:
                test_inputs, labels = data
                outputs_test = model(test_inputs)
                _, predicted = torch.max(outputs_test.data, 1)  # class with the highest score
                total += labels.size(0)  # running count of images seen
                correct += (predicted == labels).sum()  # running count of correct predictions

        print('第{}个epoch的识别准确率为：{}%'.format(epoch + 1, 100 * correct.item() / total))


# def imshow(img):
#     img = img / 2 + 0.5  # unnormalize
#     npimg = img.numpy()
#     plt.imshow(np.transpose(npimg, (1, 2, 0)))
#     plt.show()


model = LeNet_5()
# print(model)
# Build the training and test datasets.
transform = torchvision.transforms.ToTensor()  # preprocessing: converts a PIL.Image to a torch.FloatTensor in [0, 1]

train_data = torchvision.datasets.MNIST(root="D:\\file\\学习\\神经网络\\neural_machine_translation_learn\\LeNet_v5"
                                             "\\data\\python_mnist\\mnist\\train",  # data directory — mind the expected layout here
                                        train=True,  # use the training split
                                        transform=transform,  # preprocessing applied when samples are loaded
                                        download=True)  # download the data if it is not present
# NOTE(review): this root still ends in "...\\train" even though train=False
# selects the test split — looks like a copy-paste leftover; confirm the path.
test_data = torchvision.datasets.MNIST(root="D:\\file\\学习\\神经网络\\neural_machine_translation_learn\\LeNet_v5\\data"
                                            "\\python_mnist\\mnist\\train",
                                       train=False,
                                       transform=transform,
                                       download=True)
# Data loaders: combine a dataset with a sampler and batching.
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_data, batch_size=64, shuffle=False)
# torchvision.utils.make_grid tiles a batch of images into a single grid image.
dataiter = iter(test_loader)  # NOTE(review): unused unless the commented-out imshow preview is re-enabled
# images, labels = next(dataiter)
# imshow(torchvision.utils.make_grid(images))
critierion = nn.CrossEntropyLoss()  # NOTE(review): name is a typo for "criterion"; kept as-is
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)  # SGD with lr=0.001, momentum=0.9
train(epochs=8, model=model, criterion=critierion, optimizer=optimizer, train_loader=train_loader,
      test_loader=test_loader)
