import time
from van import Van
import torch.nn as nn
from van_util import save_model, EarlyStopping, timeSince, cosine_warmup_lr, LabelSmoothing
from tqdm import tqdm
from data_loader import *
from functools import partial
from train import print_figure

"""
    用于恢复中断的训练流程 若train增加新功能则需要在此同步更新
    Author: Dong Sun
    
    尚未启用EMA
    early_stop无法复原 建议patience_es手动调小一点
    需要手动修改：
    checkpoints_path
    log_path
    model类型 以及保存地址
    如果用了scheduler需要自己加上加载的语句
"""

# Edit these two paths by hand before resuming: the checkpoint to restore
# from and the training log belonging to the same run.
checkpoints_path = './checkpoint/ablationstudy/log/log_mo5_d29_h19_m26_128/ckpt_80_acc69.7500.pth'
log_path = './checkpoint/ablationstudy/log/log_mo5_d29_h19_m26_128/log.txt'
# Column order of the hyper-parameter row (line index 1) in the log file.
para_name = ['lr_max', 'lr_min', 'weight_decay', 'batch_size', 'dropout', 'warmup_epoch', 'epochs', 'eps_smooth']


def load_log(log_path, end_epoch, val_loss, train_loss, acc):
    """Truncate the log at ``end_epoch`` and restore the training history.

    Log layout: three header lines (line index 1 holds the '|'-separated
    hyper-parameter values in ``para_name`` order), then one
    ``epoch|acc|val_loss|train_loss`` row per finished epoch. The file is
    rewritten in place so rows written after the restart point are dropped
    and subsequent appends stay consistent.

    :param log_path: path of the log file (rewritten in place)
    :param end_epoch: last epoch completed before the interruption; data
        rows with a larger epoch number are discarded
    :param val_loss: list extended in place with per-epoch validation losses
    :param train_loss: list extended in place with per-epoch training losses
    :param acc: list extended in place with per-epoch accuracies
    :return: dict mapping each name in ``para_name`` to its logged value
    """
    with open(log_path, 'r') as f_in:
        reload_lines = f_in.readlines()

    para = {}
    kept = []
    # Single pass: decide which lines survive and parse the history at the
    # same time (the original code re-read the file a second time for this).
    for index, line in enumerate(reload_lines):
        if index <= 2:
            kept.append(line)
            if index == 1:
                row_para = line.strip().split('|')
                for i, p in enumerate(para_name):
                    para[p] = float(row_para[i])
            continue
        row = line.strip().split('|')
        if int(row[0]) > end_epoch:  # rows are appended in epoch order
            break
        kept.append(line)
        acc.append(float(row[1]))
        val_loss.append(float(row[2]))
        train_loss.append(float(row[3]))

    # Rewrite the log without the stale post-interruption rows.
    with open(log_path, 'w') as f_out:
        f_out.writelines(kept)

    return para


def train(model, device, epoch, train_loader, opt, crit, lossv_train, sche=None):
    """Run one training epoch and append its mean loss to ``lossv_train``.

    :param model: network to train (set to train mode here)
    :param device: torch device the batches are moved to
    :param epoch: current epoch number (used for the progress printout only)
    :param train_loader: iterable of (data, target) batches
    :param opt: optimizer stepped once per batch
    :param crit: loss criterion
    :param lossv_train: list extended in place with this epoch's mean loss
    :param sche: optional LR scheduler, stepped once per batch when given
    """
    model.train()
    loss_train = 0
    for data, target in tqdm(train_loader):
        data = data.to(device)
        target = target.to(device)
        opt.zero_grad()
        output = model(data)
        loss = crit(output, target)
        # .item() instead of the deprecated .data.item(): same value, but it
        # does not bypass autograd's version tracking.
        loss_train += loss.item()
        loss.backward()
        opt.step()
        if sche is not None:
            sche.step()  # per-batch schedule step

    loss_train /= len(train_loader)  # mean loss over batches
    lossv_train.append(loss_train)
    print('Train epoch {} finished. Loss: {:.6f}'.format(epoch, loss_train))


def validate(model, device, validation_loader, loss_vector, accuracy_vector, crit, schedule_flag=False):
    """Evaluate the model and append mean loss / accuracy to the history lists.

    :param model: network to evaluate (set to eval mode here)
    :param device: torch device the batches are moved to
    :param validation_loader: DataLoader of (data, target) batches
    :param loss_vector: list extended in place with the mean validation loss
    :param accuracy_vector: list extended in place with accuracy in percent
    :param crit: loss criterion
    :param schedule_flag: unused; kept for call-site compatibility
    """
    model.eval()
    val_loss, correct = 0, 0
    # Inference only: no_grad skips building the autograd graph, saving
    # memory (the original had this line commented out).
    with torch.no_grad():
        for data, target in validation_loader:
            data = data.to(device)
            target = target.to(device)
            output = model(data)
            val_loss += crit(output, target).item()
            pred = output.argmax(dim=1)  # index of the max logit per sample
            correct += pred.eq(target).cpu().sum()

    val_loss /= len(validation_loader)  # mean over batches
    loss_vector.append(val_loss)
    accuracy = 100. * correct.to(torch.float32) / len(validation_loader.dataset)
    accuracy_vector.append(accuracy)

    print('Validation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)'.format(
        val_loss, correct, len(validation_loader.dataset), accuracy))


if __name__ == "__main__":
    for path in model_dir:
        if not os.path.isdir(path):
            os.mkdir(path)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    checkpoints = torch.load(checkpoints_path)
    lossv, accv = [], []
    lossv_train = []
    paras = load_log(log_path, checkpoints['epoch'], lossv, lossv_train, accv)

    # para for training
    lr = 1e-6  # 实际上不起效果
    lr_min = paras['lr_min']
    lr_max = paras['lr_max']
    epochs = int(paras['epochs'])
    weight_decay = paras['weight_decay']
    drop_out = paras['dropout']
    drop_path_rate = drop_out
    warmup_epoch = int(paras['warmup_epoch'])
    patience_es = 15
    eps_smooth = paras['eps_smooth']

    # VAN-B0
    VANB0 = Van(img_size=128, ffn_ratio=[8, 8, 4, 4], embed_dims=[32, 64, 160, 256], depth=[3, 3, 5, 2],
                drop_out=drop_out, drop_path_rate=drop_path_rate, norm_layer=partial(nn.LayerNorm, eps=1e-6)).to(device)
    # VAN-B1
    VANB1 = Van(img_size=128, ffn_ratio=[8, 8, 4, 4], embed_dims=[64, 128, 320, 512], depth=[2, 2, 4, 2],
                drop_out=drop_out, drop_path_rate=drop_path_rate, norm_layer=partial(nn.LayerNorm, eps=1e-6)).to(device)

    model = VANB0
    model.load_state_dict(checkpoints['net'])
    model_save_dir = model_dir[6]

    # 原文提到AdamW有momentum 但是我没找到  SGD效果差
    # decay 5e-2
    optimizer = torch.optim.AdamW(model.parameters(), weight_decay=weight_decay, lr=lr)
    optimizer.load_state_dict(checkpoints['optimizer'])

    criterion = LabelSmoothing(eps=eps_smooth)
    criterion.load_state_dict(checkpoints['criterion'])

    # Early stopping
    Early_Stop = EarlyStopping(save_path=model_save_dir + '/best_model', patience=patience_es, mode='acc')



    epoch_end = 0
    epoch_continue = checkpoints['epoch'] + 1
    start = time.time()
    for epoch in range(epoch_continue, epochs + 1):
        epoch_end = epoch
        # warm up + cosine LR
        cosine_warmup_lr(optimizer, current_epoch=epoch, max_epoch=epochs,
                         warmup_epoch=warmup_epoch, lr_min=lr_min, lr_max=lr_max)

        train(model=model, device=device, epoch=epoch, train_loader=train_loader,
              opt=optimizer, crit=criterion, lossv_train=lossv_train)
        validate(model=model, device=device, validation_loader=test_loader, loss_vector=lossv,
                 accuracy_vector=accv, crit=criterion)

        Early_Stop(epoch=epoch, val_acc=accv[epoch - 1], val_loss=lossv[epoch - 1], model=model, optimizer=optimizer,
                   criterion=criterion, lr_scheduler=None)

        with open(log_path, 'a') as f:
            str_out = '%d|%.4f|%.6f|%.6f\n' % (epoch, accv[epoch - 1], lossv[epoch - 1], lossv_train[epoch - 1])
            f.write(str_out)
        end = timeSince(start)
        print(f'End of epoch {epoch}: {end}\n')
        if Early_Stop.get_stop_flag():
            break

        # 每20epoch保存一次模型
        if epoch % 20 == 0:
            save_model(model=model, optimizer=optimizer, criterion=criterion, epoch=epoch, save_path=log_path[:-8],
                       acc=accv[epoch - 1], loss=lossv[epoch - 1], lr_scheduler=None)
        # TODO: 仅消融实验使用 明天记得去掉
        if epoch == 100:
            break

    print_figure(lossv, lossv_train, accv, epoch_end, log_path[:-7])
