import argparse
import json
import deepspeed

import torch
from accelerate.utils import gather
from deepspeed.ops.adam import FusedAdam
# from apex.optimizers import FusedAdam
from torch.utils.data import random_split

from sgm.data.image_data import ImageDataset
from sgm.data.video_data import VideoDataset
from sgm.models import VQGAN


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument_group('data')
    parser.add_argument('--folder', default='/workspace/data/videos', type=str,
                        help='training video files Dir')
    parser.add_argument('--image_size', default=(256, 256), type=tuple,
                        help='img size')
    parser.add_argument('--num_frames', default=60 + 1, type=int, help='number frames of video')
    parser.add_argument('--valid_frac', default=0.05, type=float, help='fraction of videos of train used for valid')

    parser.add_argument_group("model opt")
    parser.add_argument('--dim', default=1024, type=int,
                        help='dim after patch embedding')
    parser.add_argument('--fp16', default=True, type=bool)
    parser.add_argument('--codebook_size', default=2 ** 16, type=int,
                        help='number of tokens')
    parser.add_argument('--image_patch_size', default=8, type=int,
                        help='image width&height patch size')
    parser.add_argument('--temporal_patch_size', default=3, type=int,
                        help='time axis patch-size')
    parser.add_argument('--spatial_num_layers', default=6, type=int,
                        help='number of spatial transformer block layers')
    parser.add_argument('--temporal_num_layers', default=6, type=int,
                        help='number of temporal transformer block layers')
    parser.add_argument('--discriminator_attn_res_layers', default=3, type=int,
                        help='number of discriminator transformer block layers')
    parser.add_argument('--attn_dropout', default=0., type=float,
                        help='attn drop-out')
    parser.add_argument('--ffn_dropout', default=0.1, type=float,
                        help='ffn drop-out')
    parser.add_argument('--use_hinge_loss', default=False, type=bool,
                        help='whether use hinge loss')
    parser.add_argument('--lookup_free_quantization', default=False, type=bool,
                        help='look-up free quantizer can enlarge the vocab-size')

    parser.add_argument_group('train opt')
    parser.add_argument('--local_rank', default=-1, type=int, help='local rank passed from distributed launcher')
    parser.add_argument('--train_on_images', default=False, type=bool,
                        help='training on images if true else video')
    parser.add_argument('--num_epochs', default=1, type=int,
                        help='training epoch')
    parser.add_argument('--gradient_checkpointing', default=True, type=bool,
                        help='use gradient checkpointing')
    parser.add_argument('--batch_size', default=32, type=int,
                        help='training batch size')
    parser.add_argument('--vq_lr', default=3e-5, type=float,
                        help='vqgan learning rate')
    parser.add_argument('--discr_lr', default=1e-4, type=float,
                        help='discr learning rate')
    parser.add_argument('--vae_max_grad_norm', default=10.0, type=float,
                        help='vae grad norm')
    parser.add_argument('--discriminator_max_grad_norm', default=10.0, type=float,
                        help='discriminator grad norm')
    parser.add_argument('--grad_accum_steps', default=1, type=int,
                        help='gradient accumulation steps')
    parser.add_argument('--use_ema', default=False, type=bool,
                        help='use ema')
    parser.add_argument('--use_vgg_and_gan', default=False, type=bool)
    parser.add_argument('--ema_update_after_step', default=1, type=int,
                        help='ema update after step')
    parser.add_argument('--ema_update_steps', default=1, type=int,
                        help='ema update steps')
    parser.add_argument('--log_steps', default=1, type=int)
    parser.add_argument('--save_steps', default=5000, type=int,
                        help='saving model when training steps arrive')
    parser.add_argument('--eval_steps', default=5000, type=int,
                        help='eval model reconstruction quality')
    parser.add_argument('--apply_grad_penalty_steps', default=4, type=int,
                        help='model/result save path')
    parser.add_argument('--output_dir', default='./output', type=str,
                        help='model/result save path')

    parser = deepspeed.add_config_arguments(parser)
    args = parser.parse_args()
    return args


def main():
    """Train a VQGAN tokenizer on images or videos under DeepSpeed.

    Builds the model and dataset from CLI args, hands both to
    ``deepspeed.initialize`` (which constructs the distributed data
    loader), then runs the train loop with periodic logging and
    checkpointing.
    """
    args = get_args()

    # Fail fast if the DeepSpeed JSON config is missing or malformed.
    # The engine re-reads the file itself via ``args.deepspeed_config``,
    # so the parsed value is not needed here; the ``with`` block also
    # closes the handle (the old ``json.load(open(...))`` leaked it).
    with open(args.deepspeed_config) as config_file:
        json.load(config_file)

    vae = VQGAN(
        dim=args.dim,
        codebook_size=args.codebook_size,
        image_size=args.image_size,
        image_patch_size=args.image_patch_size,
        temporal_patch_size=args.temporal_patch_size,
        spatial_num_layers=args.spatial_num_layers,
        temporal_num_layers=args.temporal_num_layers,
        discriminator_attn_res_layers=args.discriminator_attn_res_layers,
        attn_dropout=args.attn_dropout,
        ffn_dropout=args.ffn_dropout,
        use_vgg_and_gan=args.use_vgg_and_gan,
        use_hinge_loss=args.use_hinge_loss,
        gradient_checkpointing=args.gradient_checkpointing,
        lookup_free_quantization=args.lookup_free_quantization)

    # Build the dataset: still images or fixed-length video clips.
    if args.train_on_images:
        train_dataset = ImageDataset(args.folder, args.image_size)
    else:
        train_dataset = VideoDataset(args.folder, args.image_size, num_frames=args.num_frames)

    if args.valid_frac > 0:
        # Deterministic split (seed 42) so every rank sees the same partition.
        train_size = int((1 - args.valid_frac) * len(train_dataset))
        valid_size = len(train_dataset) - train_size
        train_dataset, eval_dataset = random_split(train_dataset, [train_size, valid_size],
                                                   generator=torch.Generator().manual_seed(42))
        print(
            f'training with dataset of {len(train_dataset)} samples and validating '
            f'with randomly split {len(eval_dataset)} samples')
    else:
        eval_dataset = train_dataset
        print('valid data is the same')
    # NOTE(review): eval_dataset is built but never evaluated in this loop
    # (args.eval_steps is unused) — presumably evaluation is TODO; confirm.

    # Optimize only trainable parameters (frozen submodules are skipped).
    parameters = filter(lambda p: p.requires_grad, vae.parameters())
    optimizer = FusedAdam(parameters, lr=args.vq_lr)

    # DeepSpeed wraps the model/optimizer and builds a distributed loader
    # from ``training_data``.
    model_engine, optimizer, train_loader, __ = deepspeed.initialize(
        args=args,
        optimizer=optimizer,
        model=vae,
        training_data=train_dataset,
        dist_init_required=True,
    )

    step = 0
    # Effective global batch size (micro-batch * grad-accum * world size).
    batch_size = model_engine.train_batch_size()
    total_steps = args.num_epochs * len(train_dataset) // batch_size

    for _ in range(args.num_epochs):
        for data in train_loader:
            step += 1
            data = data.to(model_engine.local_rank)
            loss, loss_dict, _ = model_engine(data)

            model_engine.backward(loss)
            model_engine.step()

            # Average each metric across ranks for logging.  (The old code
            # also gathered ``loss`` into an unused local — dropped; the
            # total is already part of ``loss_dict`` reporting.)
            loss_dict = {k: gather(v).mean().item() for k, v in loss_dict.items()}

            if step % args.log_steps == 0 and args.local_rank == 0:
                logs_str = " | ".join([f"{k}: {v}" for k, v in loss_dict.items()])
                print(f"[{step}|{total_steps}] {logs_str}")

            if step % args.save_steps == 0:
                # Collective call: must run on every rank, not just rank 0.
                model_engine.save_checkpoint(args.output_dir)

    # Persist the final weights even when training ends between save
    # intervals (previously they were silently lost).
    model_engine.save_checkpoint(args.output_dir)


# Entry point: run training only when launched directly (e.g. via the
# deepspeed launcher), not when this module is imported.
if __name__ == '__main__':
    main()
