import os.path
import time
import matplotlib.pyplot as plt
import numpy as np
from van import Van
from functools import partial
from van_util import save_model, EarlyStopping, timeSince, cosine_warmup_lr, LabelSmoothing, EMA
from tqdm import tqdm
from data_loader import *
import torch.nn as nn

"""
    Author: Dong Sun, Chuanjie Wu
    comment: 数据加载部分代码参考了韩雪的LoadData.py

    参考：
    warmup+余弦退火
    参考网页1：https://blog.csdn.net/qq_40268672/article/details/121145630
"""


def train(model, device, epoch, train_loader, opt, crit, lossv_train, sche=None):
    """Run one training epoch and append the mean batch loss to ``lossv_train``.

    Args:
        model: network to train (already moved to ``device`` by the caller).
        device: torch device the batches are sent to.
        epoch: current epoch number (used only for the log line).
        train_loader: iterable of ``(data, target)`` batches.
        opt: optimizer; gradients are zeroed and stepped once per batch.
        crit: loss criterion called as ``crit(output, target)``.
        lossv_train: list the mean epoch loss is appended to (mutated in place).
        sche: optional per-batch LR scheduler; ``sche.step()`` is called after
            every optimizer step when provided.
    """
    model.train()
    loss_train = 0.0
    for batch_idx, (data, target) in enumerate(tqdm(train_loader)):
        data = data.to(device)
        target = target.to(device)
        opt.zero_grad()
        output = model(data)
        loss = crit(output, target)
        # .item() detaches and converts to a Python float; the previous
        # ``loss.data.item()`` relied on the deprecated ``.data`` attribute.
        loss_train += loss.item()
        loss.backward()
        opt.step()
        if sche is not None:
            sche.step()

    # Average over the number of batches (not samples).
    loss_train /= len(train_loader)
    lossv_train.append(loss_train)
    print('Train epoch {} finished. Loss: {:.6f}'.format(epoch, loss_train))


def validate(model, device, validation_loader, loss_vector, accuracy_vector, crit, schedule_flag=False):
    """Evaluate ``model`` on ``validation_loader``; record loss and accuracy.

    Args:
        model: network to evaluate.
        device: torch device the batches are sent to.
        validation_loader: iterable of ``(data, target)`` batches with a
            ``.dataset`` attribute used for the total sample count.
        loss_vector: list the mean validation loss is appended to (mutated).
        accuracy_vector: list the accuracy (percent, as a tensor) is appended to.
        crit: loss criterion called as ``crit(output, target)``.
        schedule_flag: when True, steps the module-level
            ``_Scheduler_ReduceLROnPlateau`` with the validation loss.
    """
    model.eval()
    val_loss, correct = 0, 0
    # Inference only: no_grad skips autograd bookkeeping, cutting memory use
    # and speeding validation up (the original left this commented out).
    with torch.no_grad():
        for data, target in validation_loader:
            data = data.to(device)
            target = target.to(device)
            output = model(data)
            val_loss += crit(output, target).item()
            pred = output.max(1)[1]  # index of the max logit = predicted class
            correct += pred.eq(target).cpu().sum()

    val_loss /= len(validation_loader)
    loss_vector.append(val_loss)
    accuracy = 100. * correct.to(torch.float32) / len(validation_loader.dataset)
    accuracy_vector.append(accuracy)

    # NOTE(review): relies on the module-level _Scheduler_ReduceLROnPlateau
    # created in the __main__ section; only valid when run as a script.
    if schedule_flag is True:
        _Scheduler_ReduceLROnPlateau.step(val_loss)

    print('Validation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)'.format(
        val_loss, correct, len(validation_loader.dataset), accuracy))

def ema_train(ema_model, device, epoch, train_loader, opt, crit, lossv_train, sche=None):
    """Run one training epoch on the model wrapped by ``ema_model``.

    Same loop as ``train`` except that ``ema_model.update()`` refreshes the
    exponential-moving-average shadow weights after every optimizer step.

    Args:
        ema_model: EMA wrapper exposing ``.model`` and ``.update()``.
        device: torch device the batches are sent to.
        epoch: current epoch number (used only for the log line).
        train_loader: iterable of ``(data, target)`` batches.
        opt: optimizer; gradients are zeroed and stepped once per batch.
        crit: loss criterion called as ``crit(output, target)``.
        lossv_train: list the mean epoch loss is appended to (mutated in place).
        sche: optional per-batch LR scheduler stepped after each optimizer step.
    """
    model = ema_model.model
    model.train()
    loss_train = 0.0
    for batch_idx, (data, target) in enumerate(tqdm(train_loader)):
        data = data.to(device)
        target = target.to(device)
        opt.zero_grad()
        output = model(data)
        loss = crit(output, target)
        # .item() detaches and converts to a Python float; avoids the
        # deprecated ``.data`` attribute used previously.
        loss_train += loss.item()
        loss.backward()
        opt.step()
        # Refresh the shadow weights from the just-updated live weights.
        ema_model.update()
        if sche is not None:
            sche.step()

    # Average over the number of batches (not samples).
    loss_train /= len(train_loader)
    lossv_train.append(loss_train)
    print('Train epoch {} finished. Loss: {:.6f}'.format(epoch, loss_train))

def ema_validate(ema_model, device, validation_loader, loss_vector, accuracy_vector, crit, schedule_flag=False):
    """Evaluate with the EMA shadow weights, then restore the live weights.

    Args:
        ema_model: EMA wrapper exposing ``.model``, ``.apply_shadow()`` and
            ``.restore()``.
        device: torch device the batches are sent to.
        validation_loader: iterable of ``(data, target)`` batches with a
            ``.dataset`` attribute used for the total sample count.
        loss_vector: list the mean validation loss is appended to (mutated).
        accuracy_vector: list the accuracy (percent, as a tensor) is appended to.
        crit: loss criterion called as ``crit(output, target)``.
        schedule_flag: when True, steps the module-level
            ``_Scheduler_ReduceLROnPlateau`` with the validation loss.
    """
    model = ema_model.model
    # Swap in the averaged (shadow) weights for evaluation only.
    ema_model.apply_shadow()
    try:
        model.eval()
        val_loss, correct = 0, 0
        # Inference only: no_grad skips autograd bookkeeping (the original
        # left this commented out, wasting memory during validation).
        with torch.no_grad():
            for data, target in validation_loader:
                data = data.to(device)
                target = target.to(device)
                output = model(data)
                val_loss += crit(output, target).item()
                pred = output.max(1)[1]  # index of the max logit
                correct += pred.eq(target).cpu().sum()

        val_loss /= len(validation_loader)
        loss_vector.append(val_loss)
        accuracy = 100. * correct.to(torch.float32) / len(validation_loader.dataset)
        accuracy_vector.append(accuracy)

        # NOTE(review): relies on the module-level _Scheduler_ReduceLROnPlateau
        # created in the __main__ section; only valid when run as a script.
        if schedule_flag is True:
            _Scheduler_ReduceLROnPlateau.step(val_loss)

        print('Validation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)'.format(
            val_loss, correct, len(validation_loader.dataset), accuracy))
    finally:
        # Always restore the live training weights, even if evaluation raises;
        # the original skipped restore() on any exception, leaving the model
        # stuck on shadow weights for subsequent training epochs.
        ema_model.restore()

if __name__ == "__main__":
    for path in model_dir:
        if not os.path.isdir(path):
            os.mkdir(path)
    # 判断是否有GPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # para for training
    lr = 1e-6
    lr_min = 4e-6
    lr_max = 1e-3
    epochs = 200  # 原论文300epoch
    weight_decay = 5e-2
    drop_out = 0.7
    drop_path_rate = 0.7
    warmup_epoch = 20
    patience_es = 20  # 6
    eps_smooth = 0.1

    # VAN-B0
    VANB0 = Van(img_size=32, ffn_ratio=[8, 8, 4, 4], embed_dims=[32, 64, 160, 256], depth=[3, 3, 5, 2],
                drop_out=drop_out, drop_path_rate=drop_path_rate, norm_layer=partial(nn.LayerNorm, eps=1e-6)).to(device)
    # VAN-B1
    VANB1 = Van(img_size=32, ffn_ratio=[8, 8, 4, 4], embed_dims=[64, 128, 320, 512], depth=[2, 2, 4, 2],
                drop_out=drop_out, drop_path_rate=drop_path_rate, norm_layer=partial(nn.LayerNorm, eps=1e-6)).to(device)
    # VAN-B2
    VANB2 = Van(img_size=32, ffn_ratio=[8, 8, 4, 4], embed_dims=[64, 128, 320, 512], depth=[3, 3, 12, 3],
                drop_out=drop_out, drop_path_rate=drop_path_rate, norm_layer=partial(nn.LayerNorm, eps=1e-6)).to(device)
    # VAN-B3
    VANB3 = Van(img_size=32, ffn_ratio=[8, 8, 4, 4], embed_dims=[64, 128, 320, 512], depth=[3, 5, 27, 3],
                drop_out=drop_out, drop_path_rate=drop_path_rate, norm_layer=partial(nn.LayerNorm, eps=1e-6)).to(device)
    model = VANB1
    ema=EMA(model,0.99)
    ema.register()
    model_save_dir = model_dir[1]

    # 原文提到AdamW有momentum 但是我没找到  SGD效果差
    # decay 5e-2
    optimizer_AdamW = torch.optim.AdamW(model.parameters(), weight_decay=weight_decay, lr=lr)

    optimizer = optimizer_AdamW

    criterion_CE_LS = LabelSmoothing(eps=eps_smooth)
    criterion = criterion_CE_LS

    # scheduler
    _Scheduler_ReduceLROnPlateau = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', min_lr=5e-6,
                                                                              patience=8, cooldown=3, factor=0.1,
                                                                              verbose=True)
    lr_scheduler = _Scheduler_ReduceLROnPlateau

    # Early stopping
    Early_Stop = EarlyStopping(save_path=model_save_dir + '/best_model', patience=patience_es, mode='acc')

    t = time.localtime()
    log_name = model_save_dir + f'/log/log_mo{t.tm_mon}_d{t.tm_mday}_h{t.tm_hour}_m{t.tm_min}/log.txt'
    if not os.path.isdir(log_name[:-8]):
        os.mkdir(log_name[:-8])
    with open(log_name, 'a') as f:
        str_out = 'lr_max|lr_min|weight_decay|batch_size|dropout|warmup_epoch|epochs|eps_smooth\n'
        f.write(str_out)
        str_out = f'{lr_max}|{lr_min}|{weight_decay}|{batch_size}|{drop_out}|{warmup_epoch}|{epochs}|{eps_smooth}\n'
        f.write(str_out)
        str_out = 'epoch|val_acc|val_loss|train_loss\n'
        f.write(str_out)

    lossv, accv = [], []
    lossv_train = []
    epoch_end = 0
    start = time.time()

    for epoch in range(1, epochs + 1):
        epoch_end = epoch
        # warm up + cosine LR
        cosine_warmup_lr(optimizer, current_epoch=epoch, max_epoch=epochs,
                         warmup_epoch=warmup_epoch, lr_min=lr_min, lr_max=lr_max)

        train(model=model, device=device, epoch=epoch, train_loader=train_loader,
              opt=optimizer, crit=criterion, lossv_train=lossv_train)
        validate(model=model, device=device, validation_loader=test_loader, loss_vector=lossv,
                 accuracy_vector=accv, crit=criterion)

        Early_Stop(epoch=epoch, val_acc=accv[epoch - 1], val_loss=lossv[epoch - 1], model=model, optimizer=optimizer,
                   criterion=criterion, lr_scheduler=None)

        with open(log_name, 'a') as f:
            str_out = '%d|%.4f|%.6f|%.6f\n' % (epoch, accv[epoch - 1], lossv[epoch - 1], lossv_train[epoch - 1])
            f.write(str_out)
        end = timeSince(start)
        print(f'End of epoch {epoch}: {end}\n')
        if Early_Stop.get_stop_flag():
            break

        # 每20epoch保存一次模型
        if epoch % 20 == 0:
            save_model(model=model, optimizer=optimizer, criterion=criterion, epoch=epoch, save_path=model_save_dir,
                       acc=accv[epoch - 1], loss=lossv[epoch - 1], lr_scheduler=None)

    plt.figure(figsize=(5, 3))
    plt.plot(np.arange(1, epoch_end + 1), lossv)
    plt.title('validation loss')
    plt.savefig(log_name[:-7] + '_valloss.png')

    plt.figure(figsize=(5, 3))
    plt.plot(np.arange(1, epoch_end + 1), accv)
    plt.title('validation accuracy')
    plt.savefig(log_name[:-7] + '_acc.png')

    plt.figure(figsize=(5, 3))
    plt.plot(np.arange(1, epoch_end + 1), lossv_train)
    plt.title('train loss')
    plt.savefig(log_name[:-7] + '_traloss.png')

    plt.show()
