import torch
from torch.nn import CrossEntropyLoss
from tqdm import tqdm
import matplotlib.pyplot as plt
from nets.alex_net import AlexNet
from utils import to_abs_path, ensure_pdir_exist
from dataloader import flower_dataloader, cifar10_dataloader
from torchvision import models
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class TrainConfig:
    """Base hyper-parameter and plotting configuration shared by all experiments.

    Subclasses override the training hyper-parameters and must set
    `num_class` and call `set_draw_filename()`.
    """

    def __init__(self):
        self.lr = 1e-3          # initial learning rate
        self.epochs = 100
        self.batch_size = 4096
        self.num_class = -1     # sentinel: subclasses must set the real class count

        # State consumed by draw_score(): the plot is re-rendered every
        # `draw_step` recorded epochs.
        self.draw_step = 1
        self.draw_count = 0
        self.train_acc = []
        self.test_acc = []
        self.draw_path = None   # absolute plot path, set via set_draw_filename()

    def set_draw_filename(self, filename):
        """Resolve `filename` to an absolute path under img/ and make sure its parent directory exists.

        Bug fix: the `filename` argument was previously ignored — a stale
        literal was hard-coded into the path, so every experiment would have
        saved its plot to the same bogus location.
        """
        self.draw_path = to_abs_path(f'img/{filename}')
        ensure_pdir_exist(self.draw_path)


class FlowerAlexNetConfig(TrainConfig):
    """Configuration for training AlexNet on the 5-class flower dataset."""

    def __init__(self):
        super().__init__()
        self.num_class = 5
        self.lr = 1e-4
        self.set_draw_filename('flower.png')


class Cifar10AlexNetConfig(TrainConfig):
    """Configuration for training AlexNet on CIFAR-10."""

    def __init__(self):
        super().__init__()
        self.num_class = 10
        self.lr = 3e-4
        self.epochs = 200
        self.batch_size = 4096
        self.set_draw_filename('cifar10_pro.png')


class VGG16Config(TrainConfig):
    """Configuration for training VGG-16 on CIFAR-10."""

    def __init__(self):
        super().__init__()
        self.num_class = 10
        self.lr = 3e-4
        self.epochs = 60
        self.batch_size = 256
        self.set_draw_filename("vgg_cifar10.png")


class GoogLeNetFlowerConfig(TrainConfig):
    """Configuration for training GoogLeNet on the 5-class flower dataset."""

    def __init__(self):
        super().__init__()
        self.num_class = 5
        self.lr = 1e-3
        self.epochs = 60
        self.batch_size = 256
        self.set_draw_filename("googlenet_flower.png")


def test(net, test_loader):
    """Evaluate `net` on `test_loader` and return its top-1 accuracy.

    The network is temporarily switched to eval mode; its previous
    training/eval state is restored before returning.
    """
    was_training = net.training
    net.eval()
    seen = 0
    hits = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            logits: torch.Tensor = net(inputs)
            seen += inputs.shape[0]
            hits += (logits.argmax(1) == labels).sum().item()

    net.train(was_training)
    return hits / seen


def draw_score(config, train_acc, test_acc):
    """Record one epoch's accuracies on `config` and periodically redraw the plot.

    The accuracy curves are saved to `config.draw_path` every
    `config.draw_step` calls.
    """
    config.draw_count += 1
    config.train_acc.append(train_acc)
    config.test_acc.append(test_acc)
    # Only redraw on every draw_step-th call.
    if config.draw_count % config.draw_step != 0:
        return
    plt.figure()
    plt.plot(config.train_acc, label='train_acc')
    plt.plot(config.test_acc, label='test_acc')
    plt.legend()
    plt.savefig(config.draw_path)
    plt.close()


def train(net, config, train_loader, test_loader, googlenet=False):
    """Train `net` with AdamW + cross-entropy, evaluating and plotting each epoch.

    Args:
        net: model to train; moved onto the module-level `device`.
        config: a TrainConfig providing `lr`, `epochs`, and plotting state.
        train_loader, test_loader: iterables yielding (inputs, labels) batches.
        googlenet: when True, the model is expected to return
            (out, aux2, aux1) in training mode and the auxiliary-classifier
            losses are added with weights 0.3 and 0.2.
    """
    net.to(device)
    # AdamW usually wants a smaller initial lr than SGD (1e-4 ~ 1e-3);
    # weight_decay is left at its default since it needs care with AdamW.
    optimizer = torch.optim.AdamW(net.parameters(), lr=config.lr)
    criterion = CrossEntropyLoss()
    net.train()
    for epoch in range(config.epochs):
        total = 0
        total_loss = 0.0
        correct = 0
        for x, y in tqdm(train_loader):
            x, y = x.to(device), y.to(device)
            if googlenet:
                # GoogLeNet in training mode returns the main logits plus two
                # auxiliary heads; weight their losses as in the original paper.
                out, aux2, aux1 = net(x)
                loss = criterion(out, y) + 0.3 * criterion(aux2, y) + 0.2 * criterion(aux1, y)
            else:
                out: torch.Tensor = net(x)
                loss = criterion(out, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            batch = x.shape[0]
            total += batch
            # loss.item() is the batch mean; re-weight by batch size so the
            # epoch average is exact even with a ragged last batch.
            total_loss += loss.item() * batch
            correct += (out.argmax(1) == y).sum().item()

        train_acc = correct / total
        test_acc = test(net, test_loader)
        print(f'epoch: {epoch + 1}/{config.epochs}, loss: {total_loss / total:.4f}, '
              f'train_acc: {train_acc:.4f} '
              f'test_acc: {test_acc:.4f}')
        draw_score(config, train_acc, test_acc)


def _test_draw():
    """Smoke-test for draw_score: feed nine increasing accuracy pairs."""
    config = FlowerAlexNetConfig()
    score_pairs = [
        (0.11, 0.12), (0.13, 0.14), (0.15, 0.16),
        (0.17, 0.18), (0.19, 0.20), (0.21, 0.22),
        (0.23, 0.24), (0.25, 0.26), (0.27, 0.28),
    ]
    for train_acc, test_acc in score_pairs:
        draw_score(config, train_acc, test_acc)


def train_alex_net_cifar10():
    """Train AlexNet (512-unit final FC layer) on CIFAR-10."""
    config = Cifar10AlexNetConfig()
    model = AlexNet(config.num_class, last_fc_size=512)
    loaders = cifar10_dataloader(config.batch_size)
    train(model, config, *loaders)
    # Observed: epoch 85/200, loss 0.3491, train_acc 0.8797, test_acc 0.8770
    # (run stopped early; test accuracy jumped from ~0.70 to 0.877 after
    # adding image augmentation)


def train_alex_net_flower():
    """Train AlexNet on the 5-class flower dataset."""
    config = FlowerAlexNetConfig()
    model = AlexNet(config.num_class)
    loaders = flower_dataloader(config.batch_size)
    train(model, config, *loaders)


def train_vgg():
    """Train torchvision VGG-16 on CIFAR-10 resized to 224x224."""
    config = VGG16Config()
    model = models.vgg16(num_classes=config.num_class)
    loaders = cifar10_dataloader(config.batch_size, resize=(224, 224))
    train(model, config, *loaders)


def train_googlenet():
    """Train torchvision GoogLeNet (with auxiliary heads) on the flower dataset."""
    config = GoogLeNetFlowerConfig()
    model = models.googlenet(num_classes=config.num_class)
    loaders = flower_dataloader(config.batch_size, resize=(224, 224))
    train(model, config, *loaders, googlenet=True)
    # Observed: epoch 58/60, loss 0.0112, train_acc 0.9997, test_acc 0.7995


if __name__ == '__main__':
    # Uncomment exactly one entry point to run that experiment:
    # _test_draw()
    # train_alex_net_flower()
    train_alex_net_cifar10()
    # train_vgg()
    # train_googlenet()