# Dataset download page: https://www.cs.toronto.edu/~kriz/cifar.html

import torch
from torchvision import transforms
from torch.utils.data import DataLoader
import glob

from cifar_dataset import MyDataset
from resnet import ResNet

# Root of the extracted CIFAR png dataset (Windows path);
# expects train/<class>/*.png and test/<class>/*.png beneath it.
IMAGE_PATH = "D:/vllm/cifar"

# Training-time augmentation: random 28x28 crop plus horizontal flip.
# (A heavier, commented-out pipeline — rotation, vertical flip, color
# jitter, random grayscale — previously sat here as a module-level
# string literal; removed as dead code.)
train_transform = transforms.Compose([
    transforms.RandomCrop(28),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])

# Evaluation: deterministic resize to the training crop size, no augmentation.
test_transform = transforms.Compose([
    transforms.Resize((28, 28)),
    transforms.ToTensor(),
])


def main():
    """Train a ResNet classifier on the CIFAR png dataset.

    Runs ``epoch_num`` epochs of training, then evaluates on the test
    split after each epoch, printing test loss and accuracy.

    Side effects:
        - Saves the latest weights to ``cifar.model`` every epoch.
        - Prints progress to stdout.
    """
    batch_size = 128
    epoch_num = 30
    lr = 0.01
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device: ", device)
    model = ResNet().to(device)

    # Expects IMAGE_PATH/{train,test}/<class>/*.png on disk.
    im_train_list = glob.glob("{}/train/*/*.png".format(IMAGE_PATH))
    im_test_list = glob.glob("{}/test/*/*.png".format(IMAGE_PATH))
    train_dataset = MyDataset(im_train_list, transform=train_transform)
    test_dataset = MyDataset(im_test_list, transform=test_transform)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    # shuffle=False: sample order is irrelevant for evaluation metrics.
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
    print(len(train_dataset))
    print(len(test_dataset))

    loss_func = torch.nn.CrossEntropyLoss()  # cross-entropy loss
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)  # adaptive learning rate
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.9)
    for epoch in range(epoch_num):
        print(" epoch is ", epoch)
        model.train()  # enable train-mode behavior (BN updates, dropout)

        for i, data in enumerate(train_dataloader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)

            outputs = model(inputs)
            loss = loss_func(outputs, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # print("step", i, "loss is:", loss.item())
        torch.save(model.state_dict(), "cifar.model")
        scheduler.step()
        print("lr is ", optimizer.param_groups[0]["lr"])

        model.eval()
        sum_loss = 0.0
        sum_correct = 0
        # no_grad: evaluation needs no autograd graph — saves memory and time.
        with torch.no_grad():
            for inputs, labels in test_dataloader:
                inputs, labels = inputs.to(device), labels.to(device)

                outputs = model(inputs)
                loss = loss_func(outputs, labels)
                _, pred = torch.max(outputs, dim=1)
                correct = pred.eq(labels).cpu().sum()

                sum_loss += loss.item()
                sum_correct += correct.item()

        test_loss = sum_loss / len(test_dataloader)
        # BUG FIX: divide by the true sample count. The old denominator
        # len(test_dataloader) * batch_size over-counts whenever the last
        # batch is smaller than batch_size, under-reporting accuracy.
        test_correct = sum_correct / len(test_dataset)
        print("loss is ", test_loss, " correct is ", test_correct)

if __name__ == "__main__":
    # Entry-point guard; also required because the DataLoaders use
    # num_workers=4 — on Windows, worker processes re-import this module
    # under the spawn start method.
    main()
