# Copyright (C) 2021 NVIDIA Corporation.  All rights reserved.
# Licensed under The MIT License (MIT)
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import os
import argparse
import torch
from models import make_model

import functools
from utils.inception_utils import sample_gema_rnn, prepare_inception_metrics

if __name__ == "__main__":
    # Entry point: load a trained generator checkpoint and compute FID / IS
    # against a pre-computed inception statistics file.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    parser = argparse.ArgumentParser(description="Calculate FID score for generators")
    # --- evaluation options ---
    parser.add_argument(
        "--ckpt", type=str, required=True, help="path to the checkpoint file"
    )
    parser.add_argument(
        "--inception", type=str, required=True, help="pre-calculated inception file"
    )
    parser.add_argument(
        "--batch", default=8, type=int, help="batch size for inception networks"
    )
    parser.add_argument(
        "--n_sample",
        type=int,
        default=50000,
        help="number of samples used for embedding calculation",
    )

    parser.add_argument("--dataset", type=str, required=True)

    # --- training-time options (needed so make_model(args) can rebuild the
    # architecture with the same hyper-parameters; not all are used here) ---
    parser.add_argument("--iter", type=int, default=200001)
    parser.add_argument("--size", type=int, default=256)
    parser.add_argument("--r1_img", type=float, default=10)
    parser.add_argument("--r1_seg", type=float, default=1000)
    parser.add_argument("--path_regularize", type=float, default=0.5)
    parser.add_argument("--path_batch_shrink", type=int, default=2)
    parser.add_argument("--d_reg_every", type=int, default=16)
    parser.add_argument("--g_reg_every", type=int, default=4)
    parser.add_argument("--viz_every", type=int, default=2000)
    parser.add_argument("--save_every", type=int, default=10000)

    parser.add_argument("--mixing", type=float, default=0.3)
    parser.add_argument("--lr", type=float, default=0.002)
    parser.add_argument("--channel_multiplier", type=int, default=2)

    parser.add_argument("--seg_dim", type=int, default=13)
    parser.add_argument("--aug", action="store_true", help="augmentation")

    # Semantic StyleGAN
    parser.add_argument(
        "--local_layers",
        type=int,
        default=10,
        help="number of layers in local generators",
    )
    parser.add_argument(
        "--base_layers",
        type=int,
        default=2,
        help="number of layers with shared coarse structure code",
    )
    parser.add_argument(
        "--depth_layers",
        type=int,
        default=6,
        help="number of layers before outputing pseudo-depth map",
    )
    parser.add_argument(
        "--local_channel",
        type=int,
        default=64,
        help="number of channels in local generators",
    )
    parser.add_argument(
        "--coarse_channel",
        type=int,
        default=512,
        help="number of channels in coarse feature map",
    )
    parser.add_argument(
        "--coarse_size",
        type=int,
        default=64,
        help="size of the coarse feature map and segmentation mask",
    )
    parser.add_argument(
        "--min_feat_size", type=int, default=16, help="size of downsampled feature map"
    )
    parser.add_argument(
        "--residual_refine",
        action="store_true",
        help="whether to use residual to refine the coarse mask",
    )
    parser.add_argument(
        "--detach_texture",
        action="store_true",
        help="whether to detach between depth layers and texture layers",
    )
    parser.add_argument(
        "--transparent_dims",
        nargs="+",
        default=(10, 12),
        type=int,
        help="the indices of transparent classes",
    )
    parser.add_argument(
        "--lambda_mask",
        type=float,
        default=100.0,
        help="weight of the mask regularization loss",
    )

    parser.add_argument("--num_workers", type=int, default=8)
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("--rnn_layers", type=int, default=4, help="rnn layers")
    parser.add_argument(
        "--rnn_lr", type=float, default=1e-4, help="learning rate for generator rnn"
    )
    parser.add_argument(
        "--ckpt_dir",
        type=str,
        default="./checkpoint",
        help="checkpoint dir for training",
    )
    parser.add_argument("--resume_ckpt_dir", type=str, help="checkpoint dir for resume")
    parser.add_argument(
        "--ckpt_hist_cnt", type=int, default=10, help="saved ckpt history count"
    )
    parser.add_argument(
        "--anchor_every",
        type=int,
        default=5000,
        help="save ckpt as anchor which will not be overwrite",
    )
    parser.add_argument(
        "--resume_ckpt",
        type=str,
        default=None,
        help="path to the checkpoints to resume training",
    )
    parser.add_argument(
        "--wandb", action="store_true", help="use weights and biases logging"
    )
    parser.add_argument("--wandb_dir", type=str, help="wandb dir path")
    parser.add_argument("--wandb_name", type=str, help="wandb run name")
    parser.add_argument("--wandb_id", type=str, help="wandb run id for resume")
    parser.add_argument(
        "--wandb_resume",
        type=str,
        help="wandb resume setting can be allow, must, never,auto or None",
    )
    parser.add_argument("--debug", action="store_true", help="enable wandb.watch")
    parser.add_argument("--debug_every", type=int, default=1, help="wandb watch freq")
    parser.add_argument("--project", type=str, help="wandb project name")
    parser.add_argument(
        "--backward_every",
        type=int,
        default=1,
        help="backward every this step for gradient accumulation",
    )
    parser.add_argument(
        "--lambda_kl", type=float, default=0.00025, help="kl loss factor"
    )
    parser.add_argument(
        "--light_mode", action="store_true", help="if true only use skin and hair"
    )
    parser.add_argument(
        "--rnn_hidden_dim", type=int, default=512, help="rnn hidden dim"
    )
    parser.add_argument(
        "--seed", type=int, default=413, help="seed for deterministic mode"
    )
    parser.add_argument(
        "--fid_every", type=float, default=1000, help="calculate FID every this step"
    )
    parser.add_argument(
        "--bkg_idx", type=int, default=12, help="background index, from 0"
    )
    args = parser.parse_args()

    # Fixed architecture constants expected by make_model.
    args.latent = 512
    args.n_mlp = 8
    print("Loading model...")
    # map_location keeps a CUDA-saved checkpoint loadable on CPU-only machines.
    # NOTE(review): torch.load unpickles arbitrary code — only point --ckpt at
    # trusted checkpoint files.
    ckpt = torch.load(args.ckpt, map_location=device)
    model = make_model(args).to(device).eval()
    model.load_state_dict(ckpt["g_ema"])
    # Estimate the mean latent (w-space average) for the truncation trick.
    # no_grad avoids building an autograd graph over 50k forward samples.
    with torch.no_grad():
        mean_latent = model.style(torch.randn(50000, 512, device=device)).mean(0)

    get_inception_metrics = prepare_inception_metrics(args.inception, False)
    # Bug fix: mean_latent was computed but previously discarded
    # (mean_latent=None was passed). With truncation=1.0 the truncation trick
    # is a no-op either way, but passing the estimate makes the sampler
    # correct if a non-trivial truncation is configured.
    sample_fn = functools.partial(
        sample_gema_rnn,
        g_ema=model,
        device=device,
        truncation=1.0,
        mean_latent=mean_latent,
        batch_size=args.batch,
    )

    print("==================Start calculating FID==================")
    IS_mean, IS_std, FID = get_inception_metrics(
        sample_fn, num_inception_images=args.n_sample, use_torch=False
    )
    print(
        "FID: {0:.4f}, IS_mean: {1:.4f}, IS_std: {2:.4f}".format(FID, IS_mean, IS_std)
    )
