from torchvision import models
import glob
import os.path

import numpy as np
import tensorboardX
import torch
from torchvision import transforms
from torch.utils.data import DataLoader
import torch.nn as nn

from cifar10.data.dataset import MyDataset

# Select the first CUDA GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class Resnet18(nn.Module):
    """ResNet-18 backbone whose final fully-connected layer is swapped
    for a fresh 10-class head (CIFAR-10).

    Attributes:
        model: the torchvision ResNet-18 with the replaced classifier.
        num_features: input width of the original (and replacement) fc layer.
    """

    def __init__(self):
        super().__init__()
        backbone = models.resnet18(pretrained=True)
        # Remember the fc input width, then replace the 1000-class ImageNet
        # head with a 10-class linear layer.
        self.num_features = backbone.fc.in_features
        backbone.fc = nn.Linear(self.num_features, 10)
        self.model = backbone

    def forward(self, x):
        """Run a forward pass through the adapted ResNet-18."""
        return self.model(x)


def run():
    """Train ResNet-18 on an image-folder dataset and evaluate on the test split.

    Expects training images under ``../data/train/<class>/*`` and test images
    under ``../data/test/<class>/*`` (consumed by the project's ``MyDataset``).
    Writes per-step scalars to a TensorBoard log directory named ``log``.
    """
    if not os.path.exists("log"):
        os.mkdir("log")
    writer = tensorboardX.SummaryWriter("log")
    step_n = 0  # global step counter shared by train and test scalar logging

    # Training uses random crop + horizontal flip for augmentation; test uses
    # a deterministic resize so both produce 28x28 inputs.
    train_transform = transforms.Compose([
        transforms.RandomCrop(28),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    test_transform = transforms.Compose([
        transforms.Resize((28, 28)),
        transforms.ToTensor(),
    ])

    im_train_list = glob.glob("../data/train/*/*")
    im_test_list = glob.glob("../data/test/*/*")

    train_dataset = MyDataset(im_train_list, transform=train_transform)
    test_dataset = MyDataset(im_test_list, transform=test_transform)

    train_data_loader = DataLoader(dataset=train_dataset, batch_size=6, shuffle=True, num_workers=4)
    test_data_loader = DataLoader(dataset=test_dataset, batch_size=6, shuffle=False, num_workers=4)

    print(len(train_dataset))
    print(len(test_dataset))

    net = Resnet18().to(device)
    loss_func = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
    # optimizer=torch.optim.SGD(net.parameters(),lr=0.01,momentum=0.9,weight_decay=5e-4)
    # Every `step_size` epochs the learning rate is multiplied by `gamma`.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=5, gamma=0.9)
    epoch = 1
    for cur_epoch in range(epoch):
        net.train()  # enable training mode so BatchNorm/Dropout update live
        for i, (inputs, labels) in enumerate(train_data_loader):
            # BUG FIX: previously printed the total epoch count instead of
            # the current epoch index.
            print("epoch:", cur_epoch)
            print("index:", i)
            inputs = inputs.to(device)
            labels = labels.to(device)
            out = net(inputs)
            loss = loss_func(out, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print("损失:", loss.item())
            _, pred = torch.max(out.data, dim=1)
            correct = pred.eq(labels).cpu().sum()
            print("正确率:", 100.0 * correct / len(labels))
            print("学习率:", optimizer.state_dict()["param_groups"][0]["lr"])
            writer.add_scalar("train loss", loss.item(), global_step=step_n)
            writer.add_scalar("train correct", 100.0 * correct / len(labels), global_step=step_n)
            step_n += 1
            print("*" * 100)
        scheduler.step()  # advance the learning-rate schedule once per epoch

    # Evaluation: no gradients needed, so disable autograd bookkeeping.
    net.eval()
    loss_list = []
    acc_list = []
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(test_data_loader):
            print("index:", i)
            inputs = inputs.to(device)
            labels = labels.to(device)
            out = net(inputs)
            loss = loss_func(out, labels)
            print("损失:", loss.item())
            loss_list.append(loss.item())
            _, pred = torch.max(out.data, dim=1)
            # BUG FIX: `correct` was never recomputed here — the stale value
            # from the last training batch was reused, so every reported test
            # accuracy was wrong.
            correct = pred.eq(labels).sum().item()
            acc = 100.0 * correct / len(labels)
            print("正确率:", acc)
            acc_list.append(acc)
            print("学习率:", optimizer.state_dict()["param_groups"][0]["lr"])
            # BUG FIX: the loss was logged under the tag "test correct",
            # colliding with the accuracy scalar below.
            writer.add_scalar("test loss", loss.item(), global_step=step_n)
            writer.add_scalar("test correct", acc, global_step=step_n)
            print("*" * 100)
    print(np.array(loss_list).mean())
    print(np.array(acc_list).mean())
    writer.close()  # flush pending events and release the log file


# Script entry point: run training followed by evaluation when executed directly.
if __name__ == '__main__':
    run()
