import copy

import torch
from torch import nn    # 常用于神经网络的构建
import torch.nn.functional as F     # 一些常用的函数
from torch import optim     # 引入优化器

from torchvision import datasets,transforms     #数据集和预处理
from utils import plot_curve, plot_image, one_hot  # 在已写的文件中引入工具
import warnings
# Silence all warnings (e.g. torchvision download/deprecation notices).
# NOTE(review): blanket suppression also hides genuinely useful warnings;
# consider filtering by category instead.
warnings.filterwarnings("ignore")

# Select the compute device: prefer a CUDA GPU, fall back to CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")


# Preprocessing pipeline: PIL image -> float tensor in [0, 1], then
# standardize with the conventional MNIST statistics (mean 0.1307, std 0.3081).
transform = transforms.Compose([
    transforms.ToTensor(),  # convert to a (C, H, W) tensor scaled to [0, 1]
    # Fix: the original passed (0.1307), (0.3081) — parenthesized floats, NOT
    # tuples (missing trailing comma). Normalize happens to broadcast scalars,
    # but one-element tuples state the single-channel intent explicitly and
    # produce the identical result.
    transforms.Normalize((0.1307,), (0.3081,)),
])

# Convolution output-size formulas:
#   height: H2 = (H1 - Fh + 2*P)/S + 1
#   width:  W2 = (W1 - Fw + 2*P)/S + 1
# where H1, W1 are the input height/width; H2, W2 the output feature-map
# height/width; Fh, Fw the kernel height/width; S the stride; and P the
# zero-padding (number of rings of zeros added around the border).


# Convolutional neural network definition
class CNN(nn.Module):
    """Two-stage convolutional classifier for 28x28 single-channel images.

    Shape trace (channels, height, width):
        input (1, 28, 28)
        conv1: 5x5 conv (pad 2) -> 16 maps, ReLU, 2x2 max-pool -> (16, 14, 14)
        conv2: 5x5 conv (pad 2) -> 32 maps, ReLU, 2x2 max-pool -> (32, 7, 7)
        out:   linear layer -> 10 class scores
    """

    def __init__(self):
        super(CNN, self).__init__()
        # padding = (kernel_size - 1) / 2 keeps spatial size when stride is 1
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16,
                      kernel_size=(5, 5), stride=(1, 1), padding=2),
            nn.ReLU(),
            # pooling halves H and W: (16, 28, 28) -> (16, 14, 14)
            nn.MaxPool2d(kernel_size=2),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32,
                      kernel_size=(5, 5), stride=(1, 1), padding=2),
            nn.ReLU(),
            # (32, 14, 14) -> (32, 7, 7)
            nn.MaxPool2d(kernel_size=2),
        )
        # fully connected head mapping flattened features to class scores
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        """Map a (batch, 1, 28, 28) image batch to (batch, 10) class scores."""
        features = self.conv2(self.conv1(x))
        flat = features.view(features.shape[0], -1)  # (batch, 32*7*7)
        return self.out(flat)

# Batch accuracy helper
def accuracy(predictions,labels):
    """Return ``(correct_count, batch_size)`` for one batch.

    ``correct_count`` is a 0-dim tensor of right predictions; ``batch_size``
    is a plain int, so the caller can aggregate both across batches.
    """
    # torch.max over the class dimension returns (values, indices);
    # the indices ([1]) are the predicted class labels.
    predicted = torch.max(predictions.data, dim=1)[1]
    correct = (predicted == labels.data.view_as(predicted)).sum()
    return correct, len(labels)

if __name__ == '__main__':
    # Load the train/test sets (MNIST is downloaded into ./data/ on first run).
    train_dataset = datasets.MNIST("./data/",train=True,
                                   transform=transform,download=True)
    test_dataset = datasets.MNIST("./data/",train=False,
                                  transform=transform,download=False)

    # Hyperparameters
    num_classes =10     # number of label classes (currently unused below)
    num_epochs =3       # passes over the training set

    # Mini-batch loaders; only the training data is shuffled.
    batch_size =64
    trainloader = torch.utils.data.DataLoader(dataset=train_dataset,
                                              batch_size=batch_size,shuffle=True)
    testloader = torch.utils.data.DataLoader(dataset=test_dataset,
                                             batch_size=batch_size,shuffle=False)
    # Peek at one batch: iter() builds an iterator, next() pulls the next item.
    imgbatch, labelbatch = next(iter(trainloader))
    # print(imgbatch.shape)
    # print(labelbatch.shape)
    # print(imgbatch.min(),imgbatch.max())
    # Visualize sample images:
    # plot_image(imgbatch, labelbatch, "image sample")

    # Instantiate the model
    model = CNN()
    # print(model)
    # Move the model to the selected device (GPU if available)
    model = model.to(device)

    # Cross-entropy loss.
    # input: 2-D tensor of raw scores (logits), shape (batch, num_classes)
    # target: 1-D tensor of class indices in [0, num_classes)
    loss_func = nn.CrossEntropyLoss()
    # Optimizer
    # NOTE(review): lr=1e-2 is on the high side for Adam; 1e-3 is more typical.
    optimizer =optim.Adam(model.parameters(),lr=1e-2)

    # Best validation accuracy seen so far, used to keep the best checkpoint.
    # (Called "auc" here, but it is plain accuracy, not area under a curve.)
    best_auc = 0.0
    # Training loop
    for epoch in range(num_epochs):
        # per-batch (correct_count, batch_size) pairs for this epoch
        train_rights =[]

        # Iterate over the training mini-batches
        for batch_idx,(data,target) in enumerate(trainloader):
            model.train()  # re-enter train mode (eval below may have switched it)
            # print("batch data.shape:",data.shape)
            # print("batch target shape:",target.shape)
            # Move the batch to the same device as the model
            data = data.to(device)
            target = target.to(device)
            output = model(data)
            # Compute the loss
            loss = loss_func(output,target)
            # Clear accumulated gradients
            optimizer.zero_grad()
            # Backpropagate
            loss.backward()
            # Update the parameters
            optimizer.step()
            # Track (correct, total) for this batch
            right = accuracy(output,target)
            train_rights.append(right)

            # Evaluate on the test set every 100 batches
            if batch_idx % 100 ==0:

                model.eval()
                # per-batch (correct_count, batch_size) pairs on the test set
                val_rights =[]

                # NOTE(review): this loop should run under torch.no_grad()
                # to avoid building graphs / wasting memory during evaluation.
                for (data,target) in testloader:
                    # Move the batch to the model's device
                    data = data.to(device)
                    target = target.to(device)
                    output = model(data)
                    right = accuracy(output,target)
                    val_rights.append(right)

                # Aggregate accuracies.  Note: the "train" numbers mix stats
                # from the whole epoch so far, not just the latest batch.
                train_r = sum([elem[0] for elem in train_rights])
                train_size = sum([elem[1] for elem in train_rights])
                val_r = sum([elem[0] for elem in val_rights])
                val_size = sum([elem[1] for elem in val_rights])

                val_auc = 100.0 * val_r / val_size

                # NOTE(review): loss.data is a dated idiom; loss.item() is preferred.
                print("当前epoch: {} [{}/{} ({:.0f})%]\t损失:{:.6f}\t 训练集准确率:{:.2f}% \t测试集准确率:{:.2f}%".format(
                    epoch,
                    batch_idx * batch_size, len(train_dataset),
                    100.0 * batch_idx*batch_size / len(train_dataset),
                    loss.data,
                    100.0 * train_r / train_size,
                    100.0 * val_r / val_size
                ))

                # Keep a deep copy of the best-performing weights.
                # (best_model is always bound: the first check has best_auc == 0.0.)
                if val_auc > best_auc:
                    best_auc = val_auc
                    best_model = copy.deepcopy(model.state_dict())

    # Save the best checkpoint
    print("best_auc : {:.2f}%".format(best_auc))
    torch.save(best_model,"Minist_CNN.pth")
