import time

from module import models_mae as models_mae
from util.dataset import *
from util.loss import *


def prepare_model(chkpt_dir, arch='mae_vit_large_patch16', device='cuda'):
    """Build an MAE model from ``models_mae`` and load checkpoint weights.

    Args:
        chkpt_dir: Path to the checkpoint file; expected to be a dict
            containing a ``'model'`` state-dict entry.
        arch: Name of the constructor attribute on ``models_mae``.
        device: ``map_location`` for ``torch.load``. Defaults to ``'cuda'``
            (the original hard-coded behavior); pass ``'cpu'`` on a
            CPU-only machine, where the old hard-coded value would crash.

    Returns:
        The constructed model with checkpoint weights loaded. Loading uses
        ``strict=False``, so missing/unexpected keys are tolerated; the
        resulting message is printed for inspection.
    """
    # build model
    model = getattr(models_mae, arch)()
    # load model
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    checkpoint = torch.load(chkpt_dir, map_location=torch.device(device))
    msg = model.load_state_dict(checkpoint['model'], strict=False)
    print(msg)
    return model


def run_one_image(img, model, mask_ratio=0.75):
    """Run one image through the MAE and plot four panels: the original,
    the masked input, the raw reconstruction, and the reconstruction with
    the visible patches pasted back in.

    ``img`` is expected in HWC layout; ``model`` must expose ``unpatchify``
    and ``patch_embed`` (an MAE from ``models_mae``).
    NOTE(review): the input is never moved to the model's device here —
    presumably caller and model are both on CPU, or the model is on CPU
    when this is used; confirm before calling with a CUDA model.
    """
    # HWC image -> NCHW batch of one
    batch = torch.tensor(img).unsqueeze(dim=0)
    batch = torch.einsum('nhwc->nchw', batch)

    # forward pass through the MAE
    loss, pred, mask = model(batch.float(), mask_ratio)
    recon = model.unpatchify(pred)
    recon = torch.einsum('nchw->nhwc', recon).detach().cpu()

    # expand the per-patch mask to pixel space: (N, H*W) -> (N, H*W, p*p*3)
    # then unpatchify; 1 marks removed pixels, 0 marks kept pixels
    patch_dim = model.patch_embed.patch_size[0] ** 2 * 3
    mask = mask.detach().unsqueeze(-1).repeat(1, 1, patch_dim)
    mask = model.unpatchify(mask)
    mask = torch.einsum('nchw->nhwc', mask).detach().cpu()

    original = torch.einsum('nchw->nhwc', batch)

    # masked input and reconstruction-pasted-with-visible-patches
    masked_input = original * (1 - mask)
    pasted = original * (1 - mask) + recon * mask

    # make the plt figure larger
    plt.rcParams['figure.figsize'] = [24, 24]

    panels = [
        (original[0], "original"),
        (masked_input[0], "masked"),
        (recon[0], "reconstruction"),
        (pasted[0], "reconstruction + visible"),
    ]
    for position, (image, title) in enumerate(panels, start=1):
        plt.subplot(1, 4, position)
        show_image(image, title)

    plt.show()


if __name__ == '__main__':
    import datetime
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ' run ' + __file__.split('/')[-1])
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--lr', action='store', type=float, default=1e-4)
    parser.add_argument('--lr_decline', action='store', type=int, default=700)
    parser.add_argument('--mask_ratio', action='store', type=float, default=0.8)
    parser.add_argument('--epochs', action='store', type=int, default=1000)
    parser.add_argument('--batch_size', action='store', type=int, default=8)
    parser.add_argument('--show_loss', action='store', type=int, default=10)
    parser.add_argument('--model_name', action='store', type=str, default='mae_transistor')
    parser.add_argument('--data_path', action='store', type=str, required=True)
    parser.add_argument('--pre_mae_path', action='store', type=str, default=None)
    args = parser.parse_args()

    # prepare data
    dataset = MVTecMAETrainDataset(args.data_path, resize_shape=[224, 224])
    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)

    # prepare model: download the official pretrained checkpoint when no
    # local one is given (-nc: skip download if the file already exists)
    import os
    if args.pre_mae_path is None:
        os.system("wget -nc https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_large_ganloss.pth")
        mae_model = prepare_model('mae_visualize_vit_large_ganloss.pth', 'mae_vit_large_patch16')
    else:
        mae_model = prepare_model(args.pre_mae_path, 'mae_vit_large_patch16')
    mae_model.cuda()
    print('Model loaded.')

    # set parameters
    epochs = args.epochs
    optimizer = torch.optim.AdamW(mae_model.parameters(), lr=args.lr, betas=(0.9, 0.95))
    decline = args.lr_decline
    # FIX: the range end was `epochs // decline`, which dropped the last
    # milestone — with the defaults (epochs=1000, decline=700) the list was
    # empty and the LR never decayed at all. Include every multiple of
    # `decline` up to `epochs`.
    milestones = [i * decline for i in range(1, epochs // decline + 1)]
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones, gamma=0.1)
    mask_ratio = args.mask_ratio
    show_loss = args.show_loss
    loss_ssim = SSIM()

    # train: total loss = model's own reconstruction loss + SSIM loss
    start_time = time.time()
    print('start training.')
    average_loss = 0.0
    i = 0
    for epoch in range(epochs):
        # reset the running average at the start of each reporting window
        if epoch % show_loss == 0:
            average_loss = 0.0
            i = 0
        for image_batch in dataloader:
            image_batch = image_batch["image"].cuda()
            loss, rec_image, mask = mae_model(image_batch, mask_ratio)
            rec_image = mae_model.unpatchify(rec_image)
            ssim_loss = loss_ssim(rec_image, image_batch)
            loss += ssim_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            i += 1
            # FIX: accumulate a Python float; summing the CUDA tensor kept
            # every iteration's autograd graph alive (GPU memory leak) and
            # printed a tensor instead of a number.
            average_loss += loss.item()

        scheduler.step()
        # report the window's mean loss on the last epoch of each window
        if epoch % show_loss == show_loss - 1:
            average_loss /= i
            time_cost = time.time() - start_time
            print(f'epoch: {epoch + 1} loss: {average_loss} time cost: {int(time_cost)}')

    # NOTE(review): saves the entire pickled model (not just a state_dict),
    # matching the original behavior — loading requires the same class code.
    torch.save(mae_model, args.model_name + '.pth')
    print('model saved in ' + args.model_name + '.pth')