import time
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

# Select GPU 0 when CUDA is available, otherwise fall back to the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print("start on {} device.".format(device))


# File name (without extension) used when saving the trained model
save_model_name = "vgg16_base_cifar10"
# Number of training epochs
epoch = 50
# Samples per mini-batch
batch_size = 128
# Dropout probability used by the classifier head
dropout = 0.5
# Dataset root directory (CIFAR-10 is downloaded here if missing)
# /kaggle/input/dogs-vs-cats  -- NOTE(review): leftover path from a Kaggle run, unused here
dataset_path = "./dataset"
# TensorBoard event-file directory
writer = SummaryWriter("./logs")
# Learning rate
# learning_rate = 0.01
# 1e-2 = 1 x 10^(-2) = 1/100 = 0.01
learning_rate = 1e-2

# Only convert images to tensors (scales pixels to [0, 1]); no augmentation.
transform = transforms.ToTensor()
train_data = torchvision.datasets.CIFAR10(dataset_path, train=True, download=True,
                                          transform=transform)
train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=0, drop_last=False)

test_data = torchvision.datasets.CIFAR10(dataset_path, train=False, download=True,
                                         transform=transform)
test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=0, drop_last=False)

# Dataset sizes, used later to turn summed per-epoch totals into averages.
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集长度: {}".format(train_data_size))
print("测试数据集长度: {}".format(test_data_size))

class VGG16_BASE(nn.Module):
    """VGG16 with batch normalization, sized for CIFAR-10 (32x32 RGB inputs).

    Five convolutional stages (64, 64 | 128, 128 | 256x3 | 512x3 | 512x3
    channels), each stage ending in a 2x2 max-pool, reduce a 32x32 input to a
    1x1x512 feature map that feeds a 3-layer fully connected classifier.
    """

    # Stage layout: channel counts for 3x3 conv+BN+ReLU triples, 'M' = 2x2 max-pool.
    # Expanding this list reproduces the original hand-written Sequential layer
    # by layer, so state_dict keys ("features.0.weight", ...) are unchanged.
    _CFG = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
            512, 512, 512, 'M', 512, 512, 512, 'M']

    def __init__(self, num_classes=10, dropout_p=0.5):
        """
        Args:
            num_classes: size of the final output layer (10 for CIFAR-10).
            dropout_p: dropout probability in the classifier head. Previously
                this was read from the module-level global ``dropout``; the
                default 0.5 matches that global's value in this script.
        """
        super(VGG16_BASE, self).__init__()
        layers = []
        in_channels = 3
        for v in self._CFG:
            if v == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers += [
                    nn.Conv2d(in_channels, v, kernel_size=3, padding=1),
                    nn.BatchNorm2d(v),
                    nn.ReLU(inplace=True),
                ]
                in_channels = v
        self.features = nn.Sequential(*layers)
        # After five 2x2 poolings a 32x32 input is 1x1 spatially, so the
        # flattened feature vector has exactly 512 elements.
        self.classifier = nn.Sequential(
            nn.Linear(512, 4096),
            nn.ReLU(True),
            nn.Dropout(p=dropout_p),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(p=dropout_p),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        """Return (N, num_classes) logits for a batch x of shape (N, 3, 32, 32)."""
        output = self.features(x)
        output = torch.flatten(output, 1)  # (N, 512)
        output = self.classifier(output)
        return output

# --- model / loss / optimizer initialization ---
# Create the network
mymod = VGG16_BASE()
print(mymod)

mymod = mymod.to(device)
# Loss function (CrossEntropyLoss applies log-softmax internally, so the
# model outputs raw logits)
loss_fn = nn.CrossEntropyLoss()
# loss_fn = loss_fn.to(device)

# Optimizer: plain SGD, no momentum or weight decay
optimizer = torch.optim.SGD(mymod.parameters(), lr=learning_rate)

# Per-epoch metric histories, kept for the matplotlib plots at the end
train_loss_list = []
train_accuracy_list = []

test_loss_list = []
test_accuracy_list = []

# x-axis values (epoch indices) for the plots
epoch_step = []
print("start train.")
# Main loop: one training pass followed by one full test-set evaluation
# per epoch; metrics are printed, accumulated for plotting, and logged to
# TensorBoard.
for i in range(epoch):
    epoch_step.append(i)

    # ---- training phase ----
    mymod.train()
    start_time = time.time()
    # Per-epoch running totals: loss summed weighted by batch size, and the
    # count of correctly classified samples. ("pre" here reads as "per".)
    total_train_loss_pre_epoch = 0
    total_train_accuracy_pre_epoch = 0
    for data in train_dataloader:
        imgs, targets = data
        # print(imgs.shape)
        imgs = imgs.to(device)
        targets = targets.to(device)
        outputs = mymod(imgs)
        # Number of correct predictions in this batch (argmax over classes)
        accuracy = (outputs.argmax(1) == targets).sum()
        total_train_accuracy_pre_epoch += accuracy.item()

        loss = loss_fn(outputs, targets)
        # Weight the mean batch loss by the batch size so the epoch average
        # is exact even when the last batch is smaller (drop_last=False)
        total_train_loss_pre_epoch += loss.item() * targets.size(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    end_train_time = time.time()
    print("第{}轮训练用时：{} s".format(i, end_train_time - start_time))
    print("第{}轮训练loss：{}".format(i, total_train_loss_pre_epoch / train_data_size))
    print("第{}轮训练精度：{}".format(i, total_train_accuracy_pre_epoch / train_data_size))
    print("------")
    train_loss_list.append(total_train_loss_pre_epoch / train_data_size)
    train_accuracy_list.append(total_train_accuracy_pre_epoch / train_data_size)

    writer.add_scalar("train_loss", total_train_loss_pre_epoch / train_data_size, i)
    writer.add_scalar("train_acc", total_train_accuracy_pre_epoch / train_data_size, i)

    # ---- evaluation phase ----
    mymod.eval()
    total_test_loss_pre_epoch = 0
    total_test_accuracy_pre_epoch = 0
    # Disable gradient tracking during evaluation
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            imgs = imgs.to(device)
            targets = targets.to(device)
            outputs = mymod(imgs)

            loss = loss_fn(outputs, targets)
            total_test_loss_pre_epoch += loss.item() * targets.size(0)
            # Count correct predictions on the test set and accumulate
            accuracy = (outputs.argmax(1) == targets).sum()
            total_test_accuracy_pre_epoch += accuracy.item()

    end_test_time = time.time()
    print("第{}轮测试用时：{} s".format(i, end_test_time - end_train_time))
    print("第{}轮测试loss：{}".format(i, total_test_loss_pre_epoch / test_data_size))
    print("第{}轮测试精度：{}".format(i, total_test_accuracy_pre_epoch / test_data_size))
    print("------")
    test_loss_list.append(total_test_loss_pre_epoch / test_data_size)
    test_accuracy_list.append(total_test_accuracy_pre_epoch / test_data_size)

    writer.add_scalar("test_loss", total_test_loss_pre_epoch / test_data_size, i)
    writer.add_scalar("test_acc", total_test_accuracy_pre_epoch / test_data_size, i)

# Close the SummaryWriter so all buffered TensorBoard events are flushed
# to disk before the process exits (it was never closed before).
writer.close()
# NOTE(review): this pickles the entire module object. Saving
# mymod.state_dict() is the recommended, more portable format, but the
# full-model format is kept so existing loading code continues to work.
torch.save(mymod, "./{}.pth".format(save_model_name))

import matplotlib.pyplot as plt

# --- diagnostic plots ----------------------------------------------------
# All four figures are created first and displayed with a single plt.show()
# at the end. The original called plt.show() halfway through, which blocked
# the script and only created the combined loss/accuracy figures after the
# first two windows were closed.

# Training accuracy per epoch
plt.figure()
plt.plot(epoch_step, train_accuracy_list)
plt.title("train_accuracy")
plt.xlabel("step")
plt.ylabel("acc")
plt.grid(True)

# Test accuracy per epoch
plt.figure()
plt.plot(epoch_step, test_accuracy_list)
plt.title("test_accuracy")
plt.xlabel("step")
plt.ylabel("acc")
plt.grid(True)

# Train vs. test loss on one axis
plt.figure()
plt.plot(epoch_step, train_loss_list, label="train")
plt.plot(epoch_step, test_loss_list, label="test")
plt.legend()
plt.title("loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.grid(True)

# Train vs. test accuracy on one axis
plt.figure()
plt.plot(epoch_step, train_accuracy_list, label="train")
plt.plot(epoch_step, test_accuracy_list, label="test")
plt.legend()
plt.title("accuracy")
plt.xlabel("step")
plt.ylabel("acc")
plt.grid(True)

# One blocking show for all figures
plt.show()