# Copyright (C) 2022 ByteDance Inc.
# All rights reserved.
# Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).

# The software is made available under Creative Commons BY-NC-SA 4.0 license
# by ByteDance Inc. You can use, redistribute, and adapt it
# for non-commercial purposes, as long as you (a) give appropriate credit
# by citing our paper, (b) indicate any changes that you've made,
# and (c) distribute any derivative works under the same license.

# THE AUTHORS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ANY PARTICULAR PURPOSE.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
# OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

import argparse
import math
import random
import os
import einops
import torch
from torch import nn, autograd, optim
from torch.nn import functional as F
from torch.utils import data
from torchvision import utils

from models import make_model, DualBranchDiscriminator
from utils.dataset import MaskDataset
from utils.ckpt import CkptManager

from tqdm import tqdm
from utils.distributed import (
    get_rank,
    synchronize,
    reduce_loss_dict,
    reduce_sum,
    get_world_size,
)

import wandb
import functools
from utils.inception_utils import prepare_inception_metrics, sample_gema_rnn
from visualize.utils import color_map
import random
import numpy as np
import torch.backends.cudnn as cudnn


def seed(seed):
    """Put the run into deterministic mode and seed every RNG source."""
    # cudnn autotuning picks kernels nondeterministically; disable it.
    cudnn.benchmark = False
    cudnn.deterministic = True

    # Seed Python, NumPy, and torch (CPU + current CUDA device) alike.
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)


def get_lambda_kl(step, kl_start, kl_end, step_start, step_end):
    """Linearly ramp lambda_kl from kl_start to kl_end over [step_start, step_end].

    :param step: current training iteration
    :param kl_start: weight at (and before) step_start
    :param kl_end: weight at (and after) step_end
    :param step_start: iteration at which the ramp begins
    :param step_end: iteration at which the ramp ends
    :returns: interpolated kl weight for this step

    """
    # Clamp outside the ramp window. The original linear form extrapolated
    # for step < step_start, which could produce values below kl_start
    # (even negative weights); it also divided by zero when
    # step_start == step_end.
    if step <= step_start:
        return kl_start
    if step >= step_end:
        return kl_end
    t = (step - step_start) / (step_end - step_start)
    return (1 - t) * kl_start + t * kl_end


def i211(x):
    """Map uint8-range values [0, 255] linearly onto [-1, 1]."""
    # Equivalent to 2 * (x / 255) - 1.
    return x / 127.5 - 1


def any211(x, dim=(1, 2, 3, 4)):
    """Min-max normalize a tensor to the range [-1, 1].

    :param x: tensor, e.g. b n 1 h w pseudo-depth maps
    :param dim: dimensions reduced when computing the per-sample min/max
    :returns: normalized tensor in [-1, 1]; the input is left unmodified

    """
    # Work out-of-place: the original in-place `-=` / `/=` mutated the
    # caller's tensor as a side effect.
    x = x - torch.amin(x, dim=dim, keepdim=True)
    span = torch.amax(x, dim=dim, keepdim=True)
    # Guard against a constant input (span == 0), which previously
    # produced NaN; such inputs now map to -1 everywhere.
    x = x / torch.where(span > 0, span, torch.ones_like(span))
    return 2 * x - 1


def pad2size(x, H, W, h, w, mode="constant", value=None):
    """Pad x from a smaller size (h, w) up to a centered size (H, W).

    :param x: tensor whose last two dims are (h, w), e.g. b c h w
    :param H: target height
    :param W: target width
    :param h: current height
    :param w: current width
    :param mode: padding mode passed to F.pad
    :param value: fill value for "constant" mode
    :returns: tensor padded to (..., H, W)

    """
    top = (H - h) // 2
    bottom = H - top - h
    left = (W - w) // 2
    right = W - left - w
    # F.pad pads the last dim first: (left, right, top, bottom).
    # The original passed (top, bottom, left, right), swapping the
    # height/width padding — only correct for square symmetric padding.
    return F.pad(x, (left, right, top, bottom), mode, value)


def make_sample_image(name, sample_img, nrow=None, is_fig=False, **kwargs):
    """Build a wandb.Image grid from a batch of samples (or wrap a ready figure)."""
    if is_fig:
        # Already a figure/array — wrap it directly for logging.
        return wandb.Image(sample_img, "RGB", name)

    if nrow is None:
        # Default to a roughly square grid layout.
        nrow = int(math.ceil(len(sample_img) ** 0.5))

    grid = utils.make_grid(sample_img, nrow=nrow, **kwargs)
    # wandb expects HWC numpy, make_grid emits CHW tensors.
    return wandb.Image(grid.permute(1, 2, 0).numpy(), "RGB", name)


def data_sampler(dataset, shuffle, distributed):
    """Pick the sampler matching the run configuration (DDP / shuffled / sequential)."""
    if distributed:
        # Each rank draws a distinct shard of the dataset.
        return data.distributed.DistributedSampler(dataset, shuffle=shuffle)
    return data.RandomSampler(dataset) if shuffle else data.SequentialSampler(dataset)


def requires_grad(model, flag=True):
    """Enable or disable gradient tracking on every parameter of ``model``."""
    for _, param in model.named_parameters():
        param.requires_grad = flag


def accumulate(model1, model2, decay=0.999):
    """EMA update: model1 <- decay * model1 + (1 - decay) * model2.

    :param model1: running-average model, updated in place (e.g. g_ema)
    :param model2: live model whose parameters are folded in
    :param decay: exponential moving-average decay factor

    """
    ema_params = dict(model1.named_parameters())
    live_params = dict(model2.named_parameters())

    for name in ema_params.keys():
        # Tensor.add_(scalar, tensor) is a deprecated signature; use the
        # explicit `alpha` keyword form instead.
        ema_params[name].data.mul_(decay).add_(live_params[name].data, alpha=1 - decay)


def sample_data(loader):
    """Yield batches from ``loader`` forever, restarting each time it is exhausted."""
    while True:
        yield from loader


def d_logistic_loss(real_pred, fake_pred):
    """Logistic (non-saturating) discriminator loss: real and fake terms summed."""
    # softplus(-x) = -log(sigmoid(x)); softplus(x) = -log(1 - sigmoid(x)).
    return F.softplus(-real_pred).mean() + F.softplus(fake_pred).mean()


def d_r1_loss(real_pred, real_img, real_mask):
    """R1 gradient penalty of the discriminator output w.r.t. image and mask inputs."""
    grads = autograd.grad(
        outputs=real_pred.sum(), inputs=[real_img, real_mask], create_graph=True
    )
    # Per-sample squared gradient norm, averaged over the batch.
    grad_penalty_img, grad_penalty_seg = (
        g.pow(2).flatten(1).sum(1).mean() for g in grads
    )
    return grad_penalty_img, grad_penalty_seg


def g_nonsaturating_loss(fake_pred):
    """Non-saturating generator loss: mean of -log(sigmoid(D(fake)))."""
    return F.softplus(-fake_pred).mean()


def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
    """StyleGAN2 path-length regularizer.

    :param fake_img: generated images, b c h w
    :param latents: latent codes the images came from, n b l c
    :param mean_path_length: running mean of the observed path lengths
    :param decay: EMA decay for the running mean
    :returns: (path_penalty, detached updated running mean, per-sample path lengths)

    """
    # Scale the noise so the JVP magnitude is resolution-independent.
    noise = torch.randn_like(fake_img) / math.sqrt(
        fake_img.shape[2] * fake_img.shape[3]
    )
    (grad,) = autograd.grad(
        outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True
    )
    # n b l c -> b (n l) c, same layout einops.rearrange produced, but
    # done with native torch ops so the function has no einops dependency.
    n, b, l, c = grad.shape
    grad = grad.permute(1, 0, 2, 3).reshape(b, n * l, c)
    path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))

    # Exponential moving average of the path length across calls.
    path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)

    path_penalty = (path_lengths - path_mean).pow(2).mean()

    return path_penalty, path_mean.detach(), path_lengths


def make_noise(batch, latent_dim, n_noise, device):
    """Draw latent noise.

    Returns a single (batch, latent_dim) tensor when n_noise == 1,
    otherwise a tuple of n_noise such tensors.
    """
    samples = torch.randn(n_noise, batch, latent_dim, device=device)
    if n_noise == 1:
        return samples[0]
    return samples.unbind(0)


def mixing_noise(batch, latent_dim, prob, device):
    """With probability ``prob`` return two latents for style mixing,
    otherwise a single latent wrapped in a one-element list."""
    # Short-circuit keeps the RNG untouched when mixing is disabled.
    use_mixing = prob > 0 and random.random() < prob
    noise = make_noise(batch, latent_dim, 2 if use_mixing else 1, device)
    return noise if use_mixing else [noise]


def mixing_style(generator):
    """Sample two latent sets from the generator and mix them per sample.

    For each sample, a random injection target is chosen: the shared
    coarse codes, the background part's codes, or one semantic part's
    shape or texture codes, which are then copied from the second latent
    set into the first.

    NOTE(review): relies on the module-level ``args`` global (batch,
    base_layers, depth_layers, bkg_idx), so it is only callable after
    argument parsing in ``__main__``. Also, ``kls`` is recomputed
    identically on every loop iteration (it does not depend on ``mi``)
    and would be unbound if ``args.batch`` were 0 — confirm intended.

    :param generator: model exposing ``generate_latents``
    :returns: (mixed latents, n b l c, cloned; list of averaged kl terms)
    """
    with torch.no_grad():
        latent1, kls1, _ = generator.generate_latents(
            args.batch, truncation=1.0, truncation_latent=None
        )  # n b l c
        latent2, kls2, _ = generator.generate_latents(
            args.batch, truncation=1.0, truncation_latent=None
        )
        # mixing
        for mi in range(args.batch):
            # 0:skin, 1:eye, ..., 12: bck, 13 coarse
            inject_idx = random.randint(0, 13)
            if inject_idx == 13:  # coarse
                # Swap the shared coarse-structure layers for all parts.
                latent1[:-1, mi, : args.base_layers, :] = latent2[
                    :-1, mi, : args.base_layers, :
                ]  # -1 means remove background
            elif inject_idx == args.bkg_idx:  # background
                # Swap the background part's codes wholesale.
                latent1[inject_idx, mi, :, :] = latent2[inject_idx, mi, :, :]
            else:
                st_idx = random.randint(0, 1)  # shape or texture
                if st_idx == 0:  # shape
                    # Depth/shape layers live between base and depth layer indices.
                    latent1[
                        inject_idx, mi, args.base_layers : args.depth_layers, :
                    ] = latent2[inject_idx, mi, args.base_layers : args.depth_layers, :]
                else:
                    # Texture layers are everything after the depth layers.
                    latent1[inject_idx, mi, args.depth_layers :, :] = latent2[
                        inject_idx, mi, args.depth_layers :, :
                    ]
            kls = [
                (kl1 + kl2) / 2 for kl1, kl2 in zip(kls1, kls2)
            ]  # use mean to approximate kl

        return latent1.clone(), kls


def color_segmap(sample_seg, color_map):
    """Convert per-class segmentation scores into an RGB visualization.

    :param sample_seg: per-class score maps, b n h w
    :param color_map: dict mapping class index -> [r, g, b]
    :returns: float color tensor, b 3 h w
    """
    labels = torch.argmax(sample_seg, dim=1)  # b h w
    colored = torch.zeros((*labels.shape, 3), dtype=torch.float)
    for cls, rgb in color_map.items():
        colored[labels == cls] = torch.tensor(rgb, dtype=torch.float)
    # b h w 3 -> b 3 h w for image-style downstream use.
    return colored.permute(0, 3, 1, 2)


def save_sample_image(folder, name, sample_img, global_step, writer=None, **kwargs):
    """Save a roughly square grid of samples to disk and, optionally, to a
    TensorBoard-style writer.

    NOTE(review): this references a global ``ckpt_dir`` that is never
    defined in this file (``__main__`` only sets ``args.ckpt_dir``), so
    calling it as-is would raise NameError. The function also appears
    unused by ``train`` (which logs via wandb) — confirm intended source
    of ``ckpt_dir`` before using.

    :param folder: subdirectory under the checkpoint dir
    :param name: filename prefix and writer tag
    :param sample_img: batch of images to tile into a grid
    :param global_step: step used in the filename / writer
    :param writer: optional object with an ``add_image`` method
    """
    n_sample = len(sample_img)
    utils.save_image(
        sample_img,
        os.path.join(ckpt_dir, f"{folder}/{name}_{str(global_step).zfill(6)}.jpeg"),
        nrow=int(math.ceil(n_sample ** 0.5)),
        **kwargs,
    )
    if writer is not None:
        writer.add_image(
            name,
            utils.make_grid(sample_img, nrow=int(math.ceil(n_sample ** 0.5)), **kwargs),
            global_step,
        )


def train(args, loader, generator, discriminator, g_optim, d_optim, g_ema, device, cm):
    """Main adversarial training loop.

    Alternates discriminator and generator updates with gradient
    accumulation over ``args.backward_every`` micro-batches, applies lazy
    R1 (every ``d_reg_every``) and path-length (every ``g_reg_every``)
    regularization, maintains an EMA generator, and — from rank 0 —
    logs losses/samples to wandb, periodically computes FID/IS, and
    saves checkpoints.

    :param args: parsed command-line namespace
    :param loader: DataLoader yielding dicts with "image" and "mask"
    :param generator: generator network (possibly DDP-wrapped)
    :param discriminator: discriminator network (possibly DDP-wrapped)
    :param g_optim: generator optimizer
    :param d_optim: discriminator optimizer
    :param g_ema: exponential-moving-average copy of the generator
    :param device: torch device string
    :param cm: checkpoint manager (save/load)
    """
    # The module-level color_map is re-keyed below to follow the part order.
    global color_map

    if args.debug:
        # Debug mode: anomaly detection, fixed seeds, and wandb gradient watch.
        torch.autograd.set_detect_anomaly(True)
        seed(args.seed)
        wandb.watch([generator, discriminator], log="all", log_freq=args.debug_every)
    if args.resume_ckpt is not None:
        ckpt = cm.load(args.resume_ckpt, args.resume_ckpt_dir)

        # Recover the iteration count from the checkpoint filename.
        args.start_iter = int(args.resume_ckpt.split(".")[0].split("_")[0]) + 1

        generator.load_state_dict(ckpt["g"])
        discriminator.load_state_dict(ckpt["d"])
        g_ema.load_state_dict(ckpt["g_ema"])

        g_optim.load_state_dict(ckpt["g_optim"])
        d_optim.load_state_dict(ckpt["d_optim"])
        print(f"load model from {args.resume_ckpt} ok!")
    get_inception_metrics = prepare_inception_metrics(args.inception, False)
    # sample func for calculate FID
    sample_fn = functools.partial(
        sample_gema_rnn,
        g_ema=g_ema,
        device=device,
        truncation=1.0,
        mean_latent=None,
        batch_size=args.batch,
    )

    # Wrap the loader so the loop can just call next() forever.
    loader = sample_data(loader)
    pbar = range(args.iter)
    pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)

    # Running state for the lazy path-length regularizer and logging.
    mean_path_length = 0

    d_loss_val = 0
    r1_img_loss = torch.tensor(0.0, device=device)
    r1_seg_loss = torch.tensor(0.0, device=device)
    g_loss_val = 0
    path_loss = torch.tensor(0.0, device=device)
    path_lengths = torch.tensor(0.0, device=device)
    mean_path_length_avg = 0
    loss_dict = {}

    # Unwrap DDP so state_dict/EMA operate on the raw modules.
    if args.distributed:
        g_module = generator.module
        d_module = discriminator.module
    else:
        g_module = generator
        d_module = discriminator

    # EMA half-life of 10k images at batch 32 (StyleGAN2 convention).
    accum = 0.5 ** (32 / (10 * 1000))

    # Optionally reorder semantic parts so the background comes last.
    if args.change_order:
        if args.light_mode:
            # Light mode keeps only two parts; a synthetic bkg is appended below.
            orders = torch.tensor([1, 7], dtype=torch.long)
            color_map_orders = [1, 7, 0]
            args.bkg_idx = 2
        else:
            orders = torch.tensor(
                [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0], dtype=torch.long
            )
            color_map_orders = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0]
    else:
        orders = range(args.seg_dim)
        color_map_orders = range(args.seg_dim)

    # Re-key the global color_map to match the (possibly reordered) parts.
    color_map = {
        i: color_map[color_map_orders[i]] for i in range(len(color_map_orders))
    }
    print("Start Training Iterations...")
    for idx in pbar:
        i = idx + args.start_iter

        # Either ramp lambda_kl linearly or keep it fixed.
        if args.rampup_lambda_kl:
            lambda_kl = get_lambda_kl(
                i,
                args.lambda_kl_start,
                args.lambda_kl_end,
                args.lambda_kl_step_start,
                args.lambda_kl_step_end,
            )
        else:
            lambda_kl = args.lambda_kl
        if i > args.iter:
            print("Done!")
            break

        # Pre-fetch the micro-batches used for gradient accumulation.
        acc_real_batchs = []
        for b_i in range(args.backward_every):
            real_batch = next(loader)
            real_img = real_batch["image"]
            real_mask = real_batch["mask"]
            b, n, h, w = real_mask.shape
            # reorder mask
            real_mask = real_mask[:, orders, :, :]
            # add bkg if not all parts are used
            if len(orders) < n:
                # Masks are in [-1, 1]; background is where no part covers the pixel.
                filter_mask = (real_mask + 1) / 2  # b n h w 0~1
                bkg = torch.ones((b, 1, h, w))
                bkg[filter_mask.sum(dim=1, keepdim=True) == 1.0] = -1.0  # b 1 h w
                real_mask = torch.cat([real_mask, bkg], dim=1)  # b n+1 h w
            acc_real_batchs.append((real_img, real_mask))

        ### Train Discriminator ###
        requires_grad(generator, False)
        requires_grad(discriminator, True)

        discriminator.zero_grad()
        loss_dict["d"] = torch.tensor(
            0.0, dtype=torch.float32, device=torch.device(device)
        )
        loss_dict["real_score"] = torch.tensor(
            0.0, dtype=torch.float32, device=torch.device(device)
        )
        loss_dict["fake_score"] = torch.tensor(
            0.0, dtype=torch.float32, device=torch.device(device)
        )
        # Accumulate gradients over backward_every micro-batches, then step once.
        for real_img, real_mask in acc_real_batchs:
            real_img, real_mask = real_img.to(device), real_mask.to(device)
            fake_img, fake_seg, _, _ = generator()

            fake_pred = discriminator(fake_img, fake_seg)
            real_pred = discriminator(real_img, real_mask)

            d_loss = d_logistic_loss(real_pred, fake_pred) / args.backward_every

            loss_dict["d"] += d_loss
            loss_dict["real_score"] += real_pred.mean()
            loss_dict["fake_score"] += fake_pred.mean()

            d_loss.backward()
        d_optim.step()

        # Lazy R1 regularization on real samples.
        d_regularize = i % args.d_reg_every == 0

        if d_regularize:
            loss_dict["r1_img"] = torch.tensor(
                0.0, dtype=torch.float32, device=torch.device(device)
            )
            loss_dict["r1_seg"] = torch.tensor(
                0.0, dtype=torch.float32, device=torch.device(device)
            )
            discriminator.zero_grad()
            for real_img, real_mask in acc_real_batchs:
                real_img, real_mask = real_img.to(device), real_mask.to(device)
                real_img.requires_grad = True
                real_mask.requires_grad = True
                real_pred = discriminator(real_img, real_mask)
                r1_img_loss, r1_seg_loss = d_r1_loss(real_pred, real_img, real_mask)
                r1_img_loss, r1_seg_loss = (
                    r1_img_loss / args.backward_every,
                    r1_seg_loss / args.backward_every,
                )
                loss_dict["r1_img"] += r1_img_loss
                loss_dict["r1_seg"] += r1_seg_loss

                # The `0 * real_pred[0]` term keeps the D output in the graph
                # so DDP sees gradients for all used parameters.
                (
                    (args.r1_img / 2 * r1_img_loss + args.r1_seg / 2 * r1_seg_loss)
                    * args.d_reg_every
                    + 0 * real_pred[0]
                ).backward()

            d_optim.step()

        ### Train Generator ###
        requires_grad(generator, True)
        requires_grad(discriminator, False)
        loss_dict["g"] = torch.tensor(
            0.0, dtype=torch.float32, device=torch.device(device)
        )
        loss_dict["mask"] = torch.tensor(
            0.0, dtype=torch.float32, device=torch.device(device)
        )
        loss_dict["kl"] = torch.tensor(
            0.0, dtype=torch.float32, device=torch.device(device)
        )
        generator.zero_grad()
        for g_i in range(args.backward_every):

            # possible do style mixing
            prob = random.random()
            if args.mixing > 0 and prob < args.mixing:
                ipt_latents, kl_losses = mixing_style(generator)
                fake_img, fake_seg, fake_seg_coarse, _, _, _, _ = generator(
                    input_is_latent=True, ipt_latents=ipt_latents, return_all=True
                )
            else:

                fake_img, fake_seg, fake_seg_coarse, _, _, kl_losses, _ = generator(
                    return_all=True
                )

            fake_pred = discriminator(fake_img, fake_seg)
            g_loss = g_nonsaturating_loss(fake_pred) / args.backward_every
            kl_loss = torch.mean(torch.stack(kl_losses)) / args.backward_every

            # segmentation mask loss
            # Penalize disagreement between the coarse mask and the
            # downsampled fine mask.
            fake_seg_downsample = F.adaptive_avg_pool2d(
                fake_seg, fake_seg_coarse.shape[2:4]
            )
            mask_loss = (
                torch.square(fake_seg_coarse - fake_seg_downsample).mean()
                / args.backward_every
            )

            (g_loss + args.lambda_mask * mask_loss + lambda_kl * kl_loss).backward()
            loss_dict["g"] += g_loss
            loss_dict["mask"] += mask_loss
            loss_dict["kl"] += kl_loss
        g_optim.step()

        # Lazy path-length regularization for the generator.
        g_regularize = args.path_regularize > 0 and i % args.g_reg_every == 0

        if g_regularize:
            loss_dict["path"] = torch.tensor(
                0.0, dtype=torch.float32, device=torch.device(device)
            )

            loss_dict["path_length"] = torch.tensor(
                0.0, dtype=torch.float32, device=torch.device(device)
            )
            mean_path_length_ = torch.tensor(
                0.0, dtype=torch.float32, device=torch.device(device)
            )
            generator.zero_grad()
            # Use a smaller batch for the (expensive) path regularization pass.
            path_batch_size = max(1, args.batch // args.path_batch_shrink)
            for gr_i in range(args.backward_every):
                fake_img, fake_seg, latents, _, _ = generator(
                    g_regularize=True,
                    return_latents=True,
                    path_batch_size=path_batch_size,
                )

                path_loss, mean_path_length, path_lengths = g_path_regularize(
                    fake_img, latents, mean_path_length
                )

                weighted_path_loss = (
                    args.path_regularize * args.g_reg_every * path_loss
                ) / args.backward_every

                # Dummy zero-valued term ties the outputs into the graph
                # (keeps DDP happy when the path batch is shrunk).
                if args.path_batch_shrink:
                    weighted_path_loss += (
                        0 * fake_img[0, 0, 0, 0] + 0 * fake_seg[0, 0, 0, 0]
                    )

                weighted_path_loss.backward()
                loss_dict["path"] += path_loss / args.backward_every
                loss_dict["path_length"] += path_lengths.mean() / args.backward_every
                mean_path_length_ += mean_path_length / args.backward_every

            g_optim.step()

            # Average the running path length across ranks for logging.
            mean_path_length_avg = (
                reduce_sum(mean_path_length_).item() / get_world_size()
            )

        # Update the EMA generator toward the live weights.
        accumulate(g_ema, g_module, accum)

        ### Summarize Information ###
        # Reduce losses across ranks before logging.
        loss_reduced = reduce_loss_dict(loss_dict)

        d_loss_val = loss_reduced["d"].mean().item() if "d" in loss_reduced else 0.0
        g_loss_val = loss_reduced["g"].mean().item() if "g" in loss_reduced else 0.0
        r1_img_val = (
            loss_reduced["r1_img"].mean().item() if "r1_img" in loss_reduced else 0.0
        )
        r1_seg_val = (
            loss_reduced["r1_seg"].mean().item() if "r1_seg" in loss_reduced else 0.0
        )
        path_loss_val = (
            loss_reduced["path"].mean().item() if "path" in loss_reduced else 0.0
        )
        real_score_val = (
            loss_reduced["real_score"].mean().item()
            if "real_score" in loss_reduced
            else 0.0
        )
        fake_score_val = (
            loss_reduced["fake_score"].mean().item()
            if "fake_score" in loss_reduced
            else 0.0
        )
        path_length_val = (
            loss_reduced["path_length"].mean().item()
            if "path_length" in loss_reduced
            else 0.0
        )
        mask_loss_val = (
            loss_reduced["mask"].mean().item() if "mask" in loss_reduced else 0.0
        )
        kl_loss_val = loss_reduced["kl"].mean().item() if "kl" in loss_reduced else 0.0

        pbar.set_description(
            (
                f"[{i:06d}] d: {d_loss_val:.4f}; g: {g_loss_val:.4f}; "
                f"real: {real_score_val:.4f}; fake: {fake_score_val:.4f}; "
                f"r1_img: {r1_img_val:.4f}; r1_seg: {r1_seg_val:.4f}; "
                f"path: {path_loss_val:.4f}; mean path: {mean_path_length_avg:.4f}; "
                f"kl: {kl_loss_val :.4f}; "
            )
        )
        # Logging, visualization, FID, and checkpointing happen on rank 0 only.
        if get_rank() == 0:
            wandb.log(
                {
                    "scores/real_score": real_score_val,
                    "scores/fake_score": fake_score_val,
                    "r1/img": r1_img_val,
                    "r1/seg": r1_seg_val,
                    "path/path_loss": path_loss_val,
                    "path/path_length": path_length_val,
                    "loss/d": d_loss_val,
                    "loss/g": g_loss_val,
                    "loss/mask": mask_loss_val,
                    "loss/kl": kl_loss_val,
                    "lambda_kl": lambda_kl,
                },
                step=i,
            )
            if i % args.viz_every == 0:
                # Sample from the EMA generator and assemble a grid showing
                # image | colored mask | coarse mask | per-part depths.
                with torch.no_grad():
                    g_ema.eval()
                    acc_sample_imgs = []
                    acc_sample_segs = []
                    acc_sample_seg_coarse = []
                    acc_depths = [[] for _ in range(args.seg_dim)]
                    for viz_i in range(args.backward_every):
                        sample_img, sample_seg, sample_seg_coarse, depths, _, _, _ = g_ema(
                            return_all=True
                        )
                        acc_sample_imgs.append(sample_img.detach().cpu())
                        acc_sample_segs.append(sample_seg.detach().cpu())
                        acc_sample_seg_coarse.append(sample_seg_coarse.detach().cpu())
                        for dpti in range(args.seg_dim):
                            acc_depths[dpti].append(depths[dpti])
                    # cat to B
                    sample_img = torch.cat(acc_sample_imgs, dim=0)  # B c h w
                    sample_seg = torch.cat(acc_sample_segs, dim=0)
                    sample_seg_coarse = torch.cat(acc_sample_seg_coarse, dim=0)
                    depths = [torch.cat(ds, dim=0) for ds in acc_depths]
                    sample_mask = i211(
                        color_segmap(sample_seg.detach().cpu(), color_map)
                    )  # b c h w
                    sample_mask_coarse = i211(
                        color_segmap(sample_seg_coarse.detach().cpu(), color_map)
                    )  # b c h w
                    sample_mask_coarse = pad2size(
                        sample_mask_coarse,
                        args.size,
                        args.size,
                        args.coarse_size,
                        args.coarse_size,
                        value=-1,
                    )
                    depths = torch.stack(depths, dim=1).detach().cpu()  # B n 1 h w
                    depths = any211(depths, dim=(1, 2, 3, 4))  # to -1,1
                    depths = depths.expand(-1, -1, 3, -1, -1)  # B n 3 h w
                    depths = pad2size(
                        depths,
                        args.size,
                        args.size,
                        args.coarse_size,
                        args.coarse_size,
                        value=-1,
                    )  # B n 3 h w
                    viz_all = torch.cat(
                        [
                            sample_img[:, None, :, :, :],  # B 1 3 h w
                            sample_mask[:, None, :, :, :],  # B 1 3 h w
                            sample_mask_coarse[:, None, :, :, :],  # B 1 3 h w
                            depths,  # B 13 3 h w
                        ],
                        dim=1,
                    )  # B 3+13 c h w
                    viz_all = einops.rearrange(viz_all, "b n c h w -> (b n) c h w")

                    viz_all = make_sample_image(
                        "img_mask_coarse_depths",
                        viz_all,
                        3 + args.seg_dim,
                        normalize=True,
                        value_range=(-1, 1),
                    )
                    wandb.log({"sample": viz_all}, step=i)

            if i % args.fid_every == 0 and i > args.start_iter:
                print("==================Start calculating FID==================")
                IS_mean, IS_std, FID = get_inception_metrics(
                    sample_fn, num_inception_images=10000, use_torch=False
                )
                print(
                    "[val] iteration {0:06d}: FID: {1:.4f}, IS_mean: {2:.4f}, IS_std: {3:.4f}".format(
                        i, FID, IS_mean, IS_std
                    )
                )
                wandb.log({"FID": FID, "IS_mean": IS_mean, "IS_std": IS_std}, step=i)

            if i % args.save_every == 0 and i > args.start_iter:
                save_this_dict = {
                    "g": g_module.state_dict(),
                    "d": d_module.state_dict(),
                    "g_ema": g_ema.state_dict(),
                    "g_optim": g_optim.state_dict(),
                    "d_optim": d_optim.state_dict(),
                    "args": args,
                }
                # Anchor checkpoints are never rotated out by the manager.
                anchor = False
                if i % args.anchor_every == 0:
                    anchor = True
                cm.save(save_this_dict, f"{i:07d}", anchor=anchor)


if __name__ == "__main__":
    device = "cuda"

    # Treats anything except the literal strings "False"/"false"/"0" as True.
    parse_boolean = lambda x: not x in ["False", "false", "0"]
    parser = argparse.ArgumentParser()

    # --- data / pretrained inputs ---
    parser.add_argument("--dataset", type=str, required=True)
    parser.add_argument("--inception", type=str, help="inception pkl", required=True)
    parser.add_argument("--ckpt", type=str, default=None)

    # --- core training schedule and regularization weights ---
    parser.add_argument("--iter", type=int, default=200001)
    parser.add_argument("--batch", type=int, default=4)
    parser.add_argument("--n_sample", type=int, default=16)
    parser.add_argument("--size", type=int, default=256)
    parser.add_argument("--r1_img", type=float, default=10)
    parser.add_argument("--r1_seg", type=float, default=1000)
    parser.add_argument("--path_regularize", type=float, default=0.5)
    parser.add_argument("--path_batch_shrink", type=int, default=2)
    parser.add_argument("--d_reg_every", type=int, default=16)
    parser.add_argument("--g_reg_every", type=int, default=4)
    parser.add_argument("--viz_every", type=int, default=2000)
    parser.add_argument("--save_every", type=int, default=10000)

    parser.add_argument("--mixing", type=float, default=0.0)
    parser.add_argument("--lr", type=float, default=0.002)
    parser.add_argument("--channel_multiplier", type=int, default=2)

    parser.add_argument("--seg_dim", type=int, default=13)
    parser.add_argument("--aug", action="store_true", help="augmentation")

    # Semantic StyleGAN
    parser.add_argument(
        "--local_layers",
        type=int,
        default=10,
        help="number of layers in local generators",
    )
    parser.add_argument(
        "--base_layers",
        type=int,
        default=2,
        help="number of layers with shared coarse structure code",
    )
    parser.add_argument(
        "--depth_layers",
        type=int,
        default=6,
        help="number of layers before outputing pseudo-depth map",
    )
    parser.add_argument(
        "--local_channel",
        type=int,
        default=64,
        help="number of channels in local generators",
    )
    parser.add_argument(
        "--coarse_channel",
        type=int,
        default=512,
        help="number of channels in coarse feature map",
    )
    parser.add_argument(
        "--coarse_size",
        type=int,
        default=64,
        help="size of the coarse feature map and segmentation mask",
    )
    parser.add_argument(
        "--min_feat_size", type=int, default=16, help="size of downsampled feature map"
    )
    parser.add_argument(
        "--residual_refine",
        action="store_true",
        help="whether to use residual to refine the coarse mask",
    )
    parser.add_argument(
        "--detach_texture",
        action="store_true",
        help="whether to detach between depth layers and texture layers",
    )
    parser.add_argument(
        "--transparent_dims",
        nargs="+",
        default=[],
        type=int,
        help="the indices of transparent classes",
    )
    parser.add_argument(
        "--lambda_mask",
        type=float,
        default=100.0,
        help="weight of the mask regularization loss",
    )

    # --- infrastructure: workers, distributed rank, RNN generator, checkpoints ---
    parser.add_argument("--num_workers", type=int, default=8)
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("--rnn_layers", type=int, default=4, help="rnn layers")
    parser.add_argument(
        "--rnn_lr", type=float, default=1e-4, help="learning rate for generator rnn"
    )
    parser.add_argument(
        "--ckpt_dir",
        type=str,
        default="./checkpoint",
        help="checkpoint dir for training",
    )
    parser.add_argument("--resume_ckpt_dir", type=str, help="checkpoint dir for resume")
    parser.add_argument(
        "--ckpt_hist_cnt", type=int, default=10, help="saved ckpt history count"
    )
    parser.add_argument(
        "--anchor_every",
        type=int,
        default=5000,
        help="save ckpt as anchor which will not be overwrite",
    )
    parser.add_argument(
        "--resume_ckpt",
        type=str,
        default=None,
        help="path to the checkpoints to resume training",
    )
    # --- wandb / logging configuration ---
    parser.add_argument(
        "--wandb", action="store_true", help="use weights and biases logging"
    )
    parser.add_argument("--wandb_dir", type=str, help="wandb dir path")
    parser.add_argument("--wandb_name", type=str, help="wandb run name")
    parser.add_argument("--wandb_id", type=str, help="wandb run id for resume")
    parser.add_argument(
        "--wandb_resume",
        type=str,
        help="wandb resume setting can be allow, must, never,auto or None",
    )
    parser.add_argument("--debug", action="store_true", help="enable wandb.watch")
    parser.add_argument("--debug_every", type=int, default=1, help="wandb watch freq")
    parser.add_argument("--project", type=str, help="wandb project name")
    parser.add_argument(
        "--backward_every",
        type=int,
        default=1,
        help="backward every this step for gradient accumulation",
    )
    # --- KL weight schedule ---
    parser.add_argument(
        "--lambda_kl", type=float, default=0.00025, help="kl loss factor"
    )
    parser.add_argument(
        "--rampup_lambda_kl",
        type=parse_boolean,
        default=False,
        help="if true, will rampup lambda_kl",
    )
    parser.add_argument(
        "--lambda_kl_start", type=float, help="start lambda_kl value for rampup"
    )
    parser.add_argument(
        "--lambda_kl_end", type=float, help="end lambda_kl value for rampup"
    )
    parser.add_argument(
        "--lambda_kl_step_start", type=int, help="start step to rampup lambda_kl"
    )
    parser.add_argument(
        "--lambda_kl_step_end", type=int, help="end step to rampup lambda_kl"
    )
    parser.add_argument(
        "--light_mode", action="store_true", help="if true only use skin and hair"
    )
    parser.add_argument(
        "--rnn_hidden_dim", type=int, default=512, help="rnn hidden dim"
    )
    parser.add_argument(
        "--seed", type=int, default=413, help="seed for deterministic mode"
    )
    # NOTE(review): fid_every is declared as float although it is used with
    # the modulo operator against an int step — confirm int was intended.
    parser.add_argument(
        "--fid_every", type=float, default=1000, help="calculate FID every this step"
    )
    parser.add_argument(
        "--bkg_idx", type=int, default=12, help="background index, from 0"
    )
    parser.add_argument(
        "--change_order",
        type=parse_boolean,
        default=True,
        help="if True will change part order",
    )

    #############################################################################
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed launchers; default to single GPU.
    n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.n_gpu = n_gpu

    args.distributed = n_gpu > 1

    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
        synchronize()

    # Fixed StyleGAN-style latent configuration.
    args.latent = 512
    args.n_mlp = 8

    args.start_iter = 0

    # Build the generator, its EMA copy, and the dual-branch discriminator.
    generator = make_model(args, verbose=(args.local_rank == 0)).to(device)

    discriminator = DualBranchDiscriminator(
        args.size,
        args.size,
        img_dim=3,
        seg_dim=args.seg_dim,
        channel_multiplier=args.channel_multiplier,
    ).to(device)

    g_ema = make_model(args, verbose=(args.local_rank == 0)).to(device)
    g_ema.eval()
    # decay=0 copies the generator weights into g_ema verbatim.
    accumulate(g_ema, generator, 0)

    # Lazy-regularization LR/beta correction (StyleGAN2).
    g_reg_ratio = args.g_reg_every / (args.g_reg_every + 1)
    d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1)

    # The RNN prior gets its own learning rate and amsgrad.
    g_optim = optim.Adam(
        [
            {
                "params": generator.other_params(),
                "lr": args.lr * g_reg_ratio,
                "betas": (0 ** g_reg_ratio, 0.99 ** g_reg_ratio),
            },
            {"params": generator.rnn_params(), "lr": args.rnn_lr, "amsgrad": True},
        ]
    )
    d_optim = optim.Adam(
        discriminator.parameters(),
        lr=args.lr * d_reg_ratio,
        betas=(0 ** d_reg_ratio, 0.99 ** d_reg_ratio),
    )

    if args.distributed:
        find_unused_parameters = True
        generator = nn.parallel.DistributedDataParallel(
            generator,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters,
        )

        discriminator = nn.parallel.DistributedDataParallel(
            discriminator,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters,
        )

    wandb.init(
        id=args.wandb_id,
        resume=args.wandb_resume,
        project=args.project,
        dir=args.wandb_dir,
        name=args.wandb_name,
        config=args,
    )

    dataset = MaskDataset(
        args.dataset, resolution=args.size, label_size=args.seg_dim, aug=args.aug
    )
    print("Loading train dataloader with size ", len(dataset))

    # NOTE(review): `random.seed(args.seed)` returns None, so worker_init_fn
    # is always None here — the seed call runs once at loader construction,
    # not per worker. A callable was probably intended; confirm.
    loader = data.DataLoader(
        dataset,
        batch_size=args.batch,
        sampler=data_sampler(dataset, shuffle=True, distributed=args.distributed),
        num_workers=args.num_workers // 2,
        drop_last=True,
        worker_init_fn=random.seed(args.seed) if args.debug else None,
    )

    # NOTE(review): this re-enables cudnn.benchmark even in debug mode,
    # overriding the determinism set up by seed() — confirm intended.
    torch.backends.cudnn.benchmark = True
    cm = CkptManager(
        f"{args.ckpt_dir}/{args.wandb_name}", args.ckpt_hist_cnt, None, True
    )

    print("Start Training...")
    train(args, loader, generator, discriminator, g_optim, d_optim, g_ema, device, cm)
