import torch
import data as Data
import model as Model
import argparse
import logging
import core.logger as Logger
import core.metrics as Metrics
from core.wandb_logger import WandbLogger
from tensorboardX import SummaryWriter
import os
import numpy as np
import VAE.model
from VAE import losses

if __name__ == "__main__":
    # CLI for VAE decoder fine-tuning on super-resolution data. The JSON config
    # selected by --config is expanded by Logger.parse into the full option dict.
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str, default='config/sr_sr3_div2k.json',
                        help='JSON file for configuration')
    parser.add_argument('-p', '--phase', type=str, choices=['train', 'val'],
                        help='Run either train(training) or val(generation)', default='train')
    parser.add_argument('-gpu', '--gpu_ids', type=str, default=None)
    # NOTE: both aliases are single-dash flags; argparse stores the value as args.debug.
    parser.add_argument('-debug', '-d', action='store_true')
    parser.add_argument('-enable_wandb', action='store_true')
    parser.add_argument('-log_wandb_ckpt', action='store_true')
    parser.add_argument('-log_eval', action='store_true')

    # parse configs
    args = parser.parse_args()
    opt = Logger.parse(args)
    # Convert to NoneDict, which return None for missing key.
    opt = Logger.dict_to_nonedict(opt)

    # Run on CUDA only when gpu_ids was supplied; otherwise fall back to CPU.
    device = torch.device('cuda' if opt['gpu_ids'] is not None else 'cpu')

    # logging
    # cudnn.benchmark autotunes convolution kernels — beneficial when input
    # sizes stay fixed across iterations.
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True

    # Two loggers: the root/'base' logger mirrors training output to screen and
    # file; a separate 'val' logger records validation metrics to its own file.
    Logger.setup_logger(None, opt['path']['log'],
                        'train', level=logging.INFO, screen=True)
    Logger.setup_logger('val', opt['path']['log'], 'val', level=logging.INFO)
    logger = logging.getLogger('base')
    logger.info(Logger.dict2str(opt))
    tb_logger = SummaryWriter(log_dir=opt['path']['tb_logger'])

    # Initialize WandbLogger
    if opt['enable_wandb']:
        import wandb
        wandb_logger = WandbLogger(opt)
        wandb.define_metric('validation/val_step')
        wandb.define_metric('epoch')
        wandb.define_metric("validation/*", step_metric="val_step")
        # val_step is initialized here; the wandb validation logging that would
        # consume it is currently commented out later in this script.
        val_step = 0
    else:
        wandb_logger = None

    # Build a dataset and dataloader for every configured split. The training
    # split is skipped entirely when the script was launched in 'val' mode.
    for split_name, split_opt in opt['datasets'].items():
        if split_name == 'val':
            val_set = Data.create_dataset(split_opt, split_name)
            val_loader = Data.create_dataloader(val_set, split_opt, split_name)
        elif split_name == 'train' and args.phase != 'val':
            train_set = Data.create_dataset(split_opt, split_name)
            train_loader = Data.create_dataloader(train_set, split_opt, split_name)
    logger.info('Initial Dataset Finished')

    # model
    logger.info('Initial Model Finished')
    # VAE whose encoder is restored from a pre-trained checkpoint; only the
    # decoder side (decoder + quant convs) is optimized below, so the encoder
    # acts as a frozen feature extractor.
    vae = VAE.model.VAE(embed_dim=96).to(device)
    # map_location keeps the load working even when the checkpoint was saved on
    # a different device (e.g. CUDA-saved weights loaded on a CPU-only run).
    vae.encoder.load_state_dict(torch.load(
        "experiments/sr_div2k_240314_150759!!/checkpoint/50000_178_vae_encoder.pth",
        map_location=device))

    # LPIPS perceptual loss with an adversarial discriminator. disc_start
    # presumably gates when the discriminator term activates — confirm against
    # losses.LPIPSWithDiscriminator. (A dead `torch.nn.L1Loss()` assignment that
    # was immediately overwritten by this line has been removed.)
    loss = losses.LPIPSWithDiscriminator(disc_start=1000).to(device)
    # Generator-side optimizer: decoder + quant convs only, encoder excluded.
    opt_ae = torch.optim.Adam(list(vae.decoder.parameters()) +
                              list(vae.quant_conv.parameters()) +
                              list(vae.post_quant_conv.parameters()),
                              lr=0.0002, betas=(0.5, 0.9))
    # Separate optimizer for the discriminator owned by the loss module.
    opt_disc = torch.optim.Adam(loss.discriminator.parameters(), lr=0.0002, betas=(0.5, 0.9))

    # Train
    current_step = 0
    current_epoch = 0
    n_iter = opt['train']['n_iter']

    if opt['path']['resume_state']:
        # NOTE(review): nothing is actually restored here — the counters are
        # still zero, so this always logs epoch 0 / iter 0. Confirm whether
        # resume support was intentionally dropped.
        logger.info('Resuming training from epoch: {}, iter: {}.'.format(
            current_epoch, current_step))

    if opt['phase'] == 'train':
        # Create the checkpoint directory and initialize `idx` up front.
        # Previously both were defined only inside the validation branch, so a
        # checkpoint save occurring before the first validation pass raised a
        # NameError (whenever save_checkpoint_freq < val_freq).
        checkpoint_path = '{}'.format(opt['path']['checkpoint'])
        os.makedirs(checkpoint_path, exist_ok=True)
        idx = 0  # validation-sample count from the most recent validation pass
        # Encoder is pre-trained; keep it in eval mode throughout training
        # (hoisted out of the loop; restored after vae.train() below).
        vae.encoder.eval()
        while current_step < n_iter:
            current_epoch += 1
            for _, train_data in enumerate(train_loader):
                current_step += 1
                if current_step > n_iter:
                    break
                train_data['HR'] = train_data['HR'].to(device)
                train_data['SR'] = train_data['SR'].to(device)
                train_data['LR'] = train_data['LR'].to(device)
                # Feed HR concatenated with the SR estimate along the channel
                # axis, conditioned on the LR image. (Renamed from `input`,
                # which shadowed the builtin.)
                vae_input = torch.cat([train_data['HR'], train_data['SR']], dim=1)
                reconstructions, posterior = vae(vae_input, train_data['LR'])

                # --- autoencoder (generator) step: optimizer_idx = 0 ---
                opt_ae.zero_grad()
                # NOTE(review): global_step is hard-coded to 300000; combined
                # with disc_start=1000 the adversarial term is active from the
                # first iteration. Confirm this is intended rather than passing
                # current_step.
                aeloss, log_dict_ae = loss(train_data['HR'], reconstructions, posterior, 0, 300000,
                                                last_layer=vae.get_last_layer(), split="train")
                aeloss.backward()
                opt_ae.step()

                # --- discriminator step: optimizer_idx = 1 ---
                opt_disc.zero_grad()
                discloss, log_dict_disc = loss(train_data['HR'], reconstructions, posterior, 1, 300000,
                                                last_layer=vae.get_last_layer(), split="train")
                discloss.backward()
                opt_disc.step()

                # log
                if current_step % opt['train']['print_freq'] == 0:
                    message = '<epoch:{:3d}, iter:{:8,d}> l_ae:{:.4e}'.format(
                        current_epoch, current_step, aeloss.item())
                    logger.info(message)

                # validation
                if current_step % opt['train']['val_freq'] == 0:
                    avg_psnr = 0.0
                    avg_ssim = 0.0
                    idx = 0
                    result_path = '{}/{}'.format(opt['path']
                                                 ['results'], current_epoch)
                    os.makedirs(result_path, exist_ok=True)

                    vae.eval()
                    # Inference only: no_grad avoids building autograd graphs
                    # for every validation batch (big memory saving).
                    with torch.no_grad():
                        for _, val_data in enumerate(val_loader):
                            idx += 1
                            val_data['HR'] = val_data['HR'].to(device)
                            val_data['SR'] = val_data['SR'].to(device)
                            val_data['LR'] = val_data['LR'].to(device)
                            vae_input = torch.cat([val_data['HR'], val_data['SR']], dim=1)
                            reconstructions, posterior = vae(vae_input, val_data['LR'])
                            # Latent sample through post_quant_conv, saved for
                            # visual inspection alongside the reconstruction.
                            z = vae.post_quant_conv(posterior.sample())

                            sr_img = Metrics.tensor2img(reconstructions.detach().float().cpu())  # uint8
                            z_img = Metrics.tensor2img(z.detach().float().cpu())
                            hr_img = Metrics.tensor2img(val_data['HR'].detach().float().cpu())  # uint8
                            lr_img = Metrics.tensor2img(val_data['LR'].detach().float().cpu())  # uint8
                            fake_img = Metrics.tensor2img(val_data['SR'].detach().float().cpu())  # uint8

                            # generation
                            Metrics.save_img(
                                hr_img, '{}/{}_{}_hr.png'.format(result_path, current_step, idx))
                            Metrics.save_img(
                                z_img, '{}/{}_{}_z.png'.format(result_path, current_step, idx))
                            Metrics.save_img(
                                sr_img, '{}/{}_{}_sr.png'.format(result_path, current_step, idx))
                            Metrics.save_img(
                                lr_img, '{}/{}_{}_lr.png'.format(result_path, current_step, idx))
                            Metrics.save_img(
                                fake_img, '{}/{}_{}_inf.png'.format(result_path, current_step, idx))
                            avg_psnr += Metrics.calculate_psnr(
                                sr_img, hr_img)
                            avg_ssim += Metrics.calculate_ssim(sr_img, hr_img)

                    vae.train()
                    # vae.train() flips the encoder back to train mode; restore
                    # its frozen eval state immediately.
                    vae.encoder.eval()

                    # Guard against an empty validation loader (idx == 0 used
                    # to raise ZeroDivisionError).
                    if idx > 0:
                        avg_psnr = avg_psnr / idx
                        avg_ssim = avg_ssim / idx
                    # log
                    logger.info('# Validation # PSNR: {:.4e} # SSIM: {:.4e}'.format(avg_psnr, avg_ssim))
                    logger_val = logging.getLogger('val')  # validation logger
                    logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.4e} ssim:{:.4e}'.format(
                        current_epoch, current_step, avg_psnr, avg_ssim))

                if current_step % opt['train']['save_checkpoint_freq'] == 0:
                    logger.info('Saving models and training states.')
                    # Filenames keep the original <step>_<idx> pattern, where
                    # idx is the last validation sample count (0 before the
                    # first validation pass).
                    torch.save(vae.encoder.state_dict(), '{}/{}_{}_vae_encoder.pth'.format(checkpoint_path, current_step, idx))
                    torch.save(vae.decoder.state_dict(), '{}/{}_{}_vae_decoder.pth'.format(checkpoint_path, current_step, idx))
                    torch.save(vae.quant_conv.state_dict(), '{}/{}_{}_vae_quant_conv.pth'.format(checkpoint_path, current_step, idx))
                    torch.save(vae.post_quant_conv.state_dict(), '{}/{}_{}_post_quant_conv.pth'.format(checkpoint_path, current_step, idx))

        # save model
        logger.info('End of training.')
    # else:
    #     logger.info('Begin Model Evaluation.')
    #     avg_psnr = 0.0
    #     avg_ssim = 0.0
    #     idx = 0
    #     result_path = '{}'.format(opt['path']['results'])
    #     os.makedirs(result_path, exist_ok=True)
    #     for _,  val_data in enumerate(val_loader):
    #         idx += 1
    #         diffusion.feed_data(val_data)
    #         diffusion.test(continous=True)
    #         visuals = diffusion.get_current_visuals()
    #
    #         hr_img = Metrics.tensor2img(visuals['HR'])  # uint8
    #         lr_img = Metrics.tensor2img(visuals['LR'])  # uint8
    #         fake_img = Metrics.tensor2img(visuals['INF'])  # uint8
    #
    #         sr_img_mode = 'grid'
    #         if sr_img_mode == 'single':
    #             # single img series
    #             sr_img = visuals['SR']  # uint8
    #             sample_num = sr_img.shape[0]
    #             for iter in range(0, sample_num):
    #                 Metrics.save_img(
    #                     Metrics.tensor2img(sr_img[iter]), '{}/{}_{}_sr_{}.png'.format(result_path, current_step, idx, iter))
    #         else:
    #             # grid img
    #             sr_img = Metrics.tensor2img(visuals['SR'])  # uint8
    #             Metrics.save_img(
    #                 sr_img, '{}/{}_{}_sr_process.png'.format(result_path, current_step, idx))
    #             Metrics.save_img(
    #                 Metrics.tensor2img(visuals['SR'][-1]), '{}/{}_{}_sr.png'.format(result_path, current_step, idx))
    #
    #         Metrics.save_img(
    #             hr_img, '{}/{}_{}_hr.png'.format(result_path, current_step, idx))
    #         Metrics.save_img(
    #             lr_img, '{}/{}_{}_lr.png'.format(result_path, current_step, idx))
    #         Metrics.save_img(
    #             fake_img, '{}/{}_{}_inf.png'.format(result_path, current_step, idx))
    #
    #         # generation
    #         eval_psnr = Metrics.calculate_psnr(Metrics.tensor2img(visuals['SR'][-1]), hr_img)
    #         eval_ssim = Metrics.calculate_ssim(Metrics.tensor2img(visuals['SR'][-1]), hr_img)
    #
    #         avg_psnr += eval_psnr
    #         avg_ssim += eval_ssim
    #
    #         if wandb_logger and opt['log_eval']:
    #             wandb_logger.log_eval_data(fake_img, Metrics.tensor2img(visuals['SR'][-1]), hr_img, eval_psnr, eval_ssim)
    #
    #     avg_psnr = avg_psnr / idx
    #     avg_ssim = avg_ssim / idx
    #
    #     # log
    #     logger.info('# Validation # PSNR: {:.4e}'.format(avg_psnr))
    #     logger.info('# Validation # SSIM: {:.4e}'.format(avg_ssim))
    #     logger_val = logging.getLogger('val')  # validation logger
    #     logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.4e}, ssim:{:.4e}'.format(
    #         current_epoch, current_step, avg_psnr, avg_ssim))
    #
    #     if wandb_logger:
    #         if opt['log_eval']:
    #             wandb_logger.log_eval_table()
    #         wandb_logger.log_metrics({
    #             'PSNR': float(avg_psnr),
    #             'SSIM': float(avg_ssim)
    #         })
