import time

import torch
import torchvision
from torch import nn
from torch.utils.tensorboard import SummaryWriter

# from model import *
from torch.utils.data import DataLoader
# Training script: prepare the CIFAR-10 datasets and wrap them in dataloaders.


# Prepare the datasets (downloaded into ./dataset on first run).
# A single ToTensor transform instance is shared: it is stateless.
_to_tensor = torchvision.transforms.ToTensor()
train_data = torchvision.datasets.CIFAR10(
    "./dataset",
    train=True,
    transform=_to_tensor,
    download=True,
)
test_data = torchvision.datasets.CIFAR10(
    "./dataset",
    train=False,
    transform=_to_tensor,
    download=True,
)

# Report the size of each split.
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度为：{}".format(train_data_size))
print("测试数据集的长度为：{}".format(test_data_size))

# Wrap the datasets in dataloaders that yield mini-batches of 64.
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)


# Network definition
class MyModule(nn.Module):
    """CNN classifier for CIFAR-10.

    Three conv+maxpool stages followed by two linear layers.
    Input:  (N, 3, 32, 32) image batch.
    Output: (N, 10) class logits.
    """

    def __init__(self):
        super().__init__()
        # Spatial sizes: 32x32 -> (pad=2 conv keeps size, pool halves)
        # 16x16 -> 8x8 -> 4x4, hence the 64*4*4 flattened features.
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        """Run the batch through the stacked layers and return the logits."""
        return self.model(x)


# Instantiate the network and move it (and the loss) to the GPU if present.
use_cuda = torch.cuda.is_available()
mymodule = MyModule()
if use_cuda:
    mymodule = mymodule.cuda()

# Loss function: cross-entropy over the 10 CIFAR-10 classes.
loss_fn = nn.CrossEntropyLoss()
if use_cuda:
    loss_fn = loss_fn.cuda()

# Plain SGD optimizer.
learning_rate = 1e-2
optimizer = torch.optim.SGD(mymodule.parameters(), lr=learning_rate)

# Counters used as x-axes for TensorBoard logging.
total_train_step = 0  # optimizer steps taken so far
total_test_step = 0   # completed evaluation passes

# Number of passes over the training set.
epoch = 10

# TensorBoard writer; logs go to ./train_logs.
writer = SummaryWriter("train_logs")

start_time = time.time()
for i in range(epoch):
    print("----第{}轮训练开始----".format(i + 1))

    # --- Training phase ---
    # Put the network in training mode (only matters when the model contains
    # batchnorm/dropout layers; harmless otherwise).
    mymodule.train()
    for data in train_dataloader:
        imgs, targets = data
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            targets = targets.cuda()
        outputs = mymodule(imgs)
        # Loss between the model outputs and the ground-truth labels.
        loss = loss_fn(outputs, targets)
        # Standard optimization step: clear old gradients, backprop, update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step += 1
        if total_train_step % 100 == 0:
            end_time = time.time()
            print(end_time - start_time)
            # loss.item() converts the 0-dim loss tensor to a plain Python
            # float, so both the print and the TensorBoard scalar get a
            # number rather than a tensor.
            print("训练次数：{}，loss是：{}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # --- Evaluation phase ---
    # After each epoch, evaluate on the test set (no gradient tracking, no
    # parameter updates) to monitor loss and accuracy.
    mymodule.eval()
    total_test_loss = 0.0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            if torch.cuda.is_available():
                imgs = imgs.cuda()
                targets = targets.cuda()
            outputs = mymodule(imgs)
            loss = loss_fn(outputs, targets)
            # Accumulate plain Python numbers (not tensors) so the totals
            # print cleanly and hold no references to device tensors.
            total_test_loss += loss.item()
            accuracy = (outputs.argmax(1) == targets).sum().item()
            total_accuracy += accuracy

    print("整体测试集上的Loss:{}".format(total_test_loss))
    print("整体测试集上的正确率为：{}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accurary", total_accuracy / test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    # Save a checkpoint after every epoch. Saving the whole module preserves
    # the original behavior; saving state_dict() (commented out) is the
    # recommended, more portable alternative.
    torch.save(mymodule, "mymodule_{}.pth".format(i))
    # torch.save(mymodule.state_dict(), "mymodule_{}.pth".format(i))
    print("模型已保存")

writer.close()