import time
import torch
import torchvision.datasets
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print("start on {} device.".format(device))

# Number of training epochs
epoch = 20
# Mini-batch size used by both data loaders
batch_size = 64
# Dropout probability for the classifier head of VGG16_BASE
dropout = 0.5
# Root directory where torchvision stores / looks up CIFAR-10
dataset_path = "./cifar10_dataset"
# TensorBoard event-file directory
writer = SummaryWriter("./logs")
# Learning rate
# learning_rate = 0.01
# 1e-2 = 1 x 10^(-2) = 1/100 = 0.01
learning_rate = 1e-2

# Training split; download=True fetches the full CIFAR-10 archive on first
# run (the archive also contains the test batch).
train_data = torchvision.datasets.CIFAR10(dataset_path, train=True, download=True,
                                          transform=torchvision.transforms.ToTensor())
train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=0, drop_last=False)

# Test split; download=False is safe here because the train download above
# already provided the whole archive.
test_data = torchvision.datasets.CIFAR10(dataset_path, train=False, download=False,
                                         transform=torchvision.transforms.ToTensor())
# NOTE(review): shuffle=True on the test loader is harmless but unnecessary —
# the evaluation metrics computed below are order-independent.
test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=0, drop_last=False)


class VGG16_BASE(nn.Module):
    """VGG-16 (configuration D) adapted for 32x32 CIFAR-10 images.

    The convolutional trunk is the standard VGG-16 stack of 13 conv layers
    in five blocks, each block ended by a 2x2 max-pool.  Five pools shrink a
    32x32 input to 1x1, so the classifier's first linear layer takes
    512 * 1 * 1 features instead of the 512 * 7 * 7 used for 224x224
    ImageNet inputs.

    Args:
        dropout_p: dropout probability in the classifier head.  Defaults to
            0.5, the value the original script supplied via a module-level
            global; parameterizing it makes the class usable standalone.
        num_classes: size of the output layer (default 10 for CIFAR-10).
    """

    # Channel plan for VGG-16; 'M' marks a 2x2 / stride-2 max-pool.
    _CFG = (64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
            512, 512, 512, 'M', 512, 512, 512, 'M')

    def __init__(self, dropout_p=0.5, num_classes=10):
        super(VGG16_BASE, self).__init__()
        layers = []
        in_channels = 3
        for item in self._CFG:
            if item == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.append(nn.Conv2d(in_channels, item, kernel_size=3, padding=1))
                layers.append(nn.ReLU(inplace=True))
                in_channels = item
        # A flat Sequential keeps the same submodule indices (and therefore
        # the same state_dict keys) as the original hand-written stack.
        self.features = nn.Sequential(*layers)
        self.classifier = nn.Sequential(
            nn.Flatten(),
            # 512 * 1 * 1: the five max-pools reduce 32x32 inputs to 1x1
            # (the original 224x224 VGG-16 uses 512 * 7 * 7 here).
            nn.Linear(512 * 1 * 1, 4096),
            nn.ReLU(True),
            nn.Dropout(p=dropout_p),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(p=dropout_p),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        """Run the conv trunk then the MLP head; returns (N, num_classes) logits."""
        output = self.features(x)
        output = self.classifier(output)
        return output


train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集长度: {}".format(train_data_size))
print("测试数据集长度: {}".format(test_data_size))
# Build the network and move it to the selected device.
mymod = VGG16_BASE()
print(mymod)
mymod = mymod.to(device)
# Loss function: cross-entropy over the class logits.
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.to(device)

# Optimizer: plain SGD with the configured learning rate.
optimizer = torch.optim.SGD(mymod.parameters(), lr=learning_rate)

total_train_step = 0
total_test_step = 0
print("start train.")
for i in range(epoch):
    # ---- training pass ----
    mymod.train()
    start_time = time.time()
    for imgs, targets in train_dataloader:
        imgs = imgs.to(device)
        targets = targets.to(device)
        outputs = mymod(imgs)

        loss = loss_fn(outputs, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Log the training loss every 1000 optimizer steps.
        if total_train_step % 1000 == 0:
            writer.add_scalar("train_loss", loss.item(), total_train_step)
        total_train_step += 1

    end_time = time.time()
    print("第{}轮训练用时：{} s".format(i, end_time - start_time))

    # ---- evaluation pass: dropout off (eval mode), no gradients tracked ----
    mymod.eval()
    total_test_loss = 0.0
    total_correct = 0
    with torch.no_grad():
        for imgs, targets in test_dataloader:
            imgs = imgs.to(device)
            targets = targets.to(device)
            outputs = mymod(imgs)

            loss = loss_fn(outputs, targets)
            total_test_loss += loss.item()
            # Count correct predictions; .item() keeps the running total a
            # plain Python int instead of accumulating a device tensor.
            total_correct += (outputs.argmax(1) == targets).sum().item()

            if total_test_step % 1000 == 0:
                writer.add_scalar("test_loss", loss.item(), total_test_step)
            total_test_step += 1

    # One accuracy point per epoch; the epoch index i is the step.
    test_accuracy = total_correct / test_data_size
    print("test_loss: {}, test_accuracy: {}".format(total_test_loss, test_accuracy))
    writer.add_scalar("test_accuracy", test_accuracy, i)

# NOTE: this pickles the whole module (class + weights); loading it back
# requires the VGG16_BASE class definition.  Saving mymod.state_dict() would
# be more portable, but the on-disk format is kept for existing consumers.
torch.save(mymod, "./vgg16_base.pth")
# Flush and close the writer so no buffered scalars are lost.
writer.close()
