import torch
import torchvision
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, SubsetRandomSampler
import numpy as np
from torch.utils.tensorboard import SummaryWriter

from SK_ResNet import sk_resnet
from SK_ResNet import resnet18

# Dataset root directory (CIFAR-100 is downloaded here if missing).
root = "../dataset"
# Mini-batch size used by both the training and validation loaders.
batch_size = 64
# Fraction of the *training* set held out for validation.
# NOTE(review): the original comment said "test set", but the split below is
# taken from the CIFAR-100 training split.
valid_size = 0.2

# Shared transform for both splits: tensor conversion + per-channel
# normalization.  These mean/std values are the standard ImageNet statistics —
# presumably carried over from an ImageNet recipe; CIFAR-100 has its own
# statistics — TODO confirm this is intended.
transform_train = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Build the datasets and loaders.  Both dataset objects point at the same
# CIFAR-100 training split; the samplers below partition it into disjoint
# training and validation subsets by index.
train_set = torchvision.datasets.CIFAR100(root=root, train=True, transform=transform_train,
                                          download=True)
valid_set = torchvision.datasets.CIFAR100(root=root, train=True, transform=transform_train,
                                          download=True)

num_samples = len(train_set)
# Number of samples held out for validation.
held_out = int(np.floor(valid_size * num_samples))
# Shuffle the index space once, then carve it into the two subsets.
shuffled = list(range(num_samples))
np.random.shuffle(shuffled)
valid_idx, train_idx = shuffled[:held_out], shuffled[held_out:]

# Each sampler draws (in random order) only from its assigned index subset.
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
print("用于训练的数据量：{}".format(len(train_sampler)))
print("用于验证的数据量：{}".format(len(valid_sampler)))

# Loaders: the sampler replaces shuffle=, so no shuffle flag is passed.
train_dataloader = DataLoader(train_set, batch_size=batch_size, sampler=train_sampler)
valid_dataloader = DataLoader(valid_set, batch_size=batch_size, sampler=valid_sampler)

# Model under training.
# NOTE(review): rebinding the imported name `sk_resnet` to an instance shadows
# the constructor; kept as-is because the rest of the script uses this name.
sk_resnet = sk_resnet()

# Cross-entropy loss over the 100 CIFAR-100 classes.
loss_fn = CrossEntropyLoss()

# Plain SGD with a fixed learning rate (no momentum, no weight decay).
learning_rate = 1e-2
optimizer = torch.optim.SGD(sk_resnet.parameters(), lr=learning_rate)

epoch = 10       # total number of training epochs
train_step = 0   # cumulative optimizer updates (x-axis for train_loss)
valid_step = 0   # completed validation passes (x-axis for valid metrics)
best_acc = 0     # best validation accuracy seen so far

# TensorBoard writer; the log directory is keyed to the epoch count.
writer = SummaryWriter("../log_train_{}".format(epoch))

for i in range(epoch):
    print("------第{}轮训练开始------".format(i + 1))

    # --- training phase ---
    sk_resnet.train()
    for data in train_dataloader:
        imgs, targets = data
        output = sk_resnet(imgs)
        train_loss = loss_fn(output, targets)

        # Standard update: clear stale gradients, backprop, step.
        optimizer.zero_grad()
        train_loss.backward()
        optimizer.step()

        train_step += 1
        if train_step % 100 == 0:
            print("训练次数：{} \tloss：{}".format(train_step, train_loss.item()))
            writer.add_scalar("train_loss", train_loss.item(), train_step)

    # --- validation phase ---
    sk_resnet.eval()
    total_valid_loss = 0.0
    total_correct = 0
    with torch.no_grad():
        for data in valid_dataloader:
            imgs, targets = data
            output = sk_resnet(imgs)
            loss = loss_fn(output, targets)
            # loss is a per-batch *mean* (CrossEntropyLoss default reduction).
            # Weight it by the batch size so dividing by the sample count below
            # yields a true per-sample average.  (Bug fix: the original divided
            # a sum of batch means by the sample count, under-reporting the
            # validation loss by roughly a factor of batch_size.)
            total_valid_loss += loss.item() * imgs.size(0)
            total_correct += (output.argmax(1) == targets).sum().item()

    num_valid = len(valid_sampler)
    avg_valid_loss = total_valid_loss / num_valid
    valid_acc = total_correct / num_valid
    print("验证集上的平均loss：{}".format(avg_valid_loss))
    print("验证集上的accuracy：{}%".format(100 * valid_acc))
    valid_step += 1

    writer.add_scalar("test_loss", avg_valid_loss, valid_step)
    writer.add_scalar("acc", valid_acc, valid_step)

    # Checkpoint whenever validation accuracy improves; the fixed filename
    # means this file always holds the best model so far.
    if valid_acc > best_acc:
        best_acc = valid_acc
        torch.save(sk_resnet.state_dict(), "../Module/sk_resnet_{}.pth".format(epoch))
        print("模型已保存")

writer.close()
