import argparse
import os
import pickle

from utils import util


class Options:
    """Command-line option container for the sketch-colorization experiment.

    Building an instance constructs the full argparse parser and immediately
    parses ``sys.argv``; the result is exposed as ``self.opt`` (an
    ``argparse.Namespace``).  ``self.parser`` is retained so that later
    printing/saving can compare each value against its registered default.
    """

    def __init__(self):
        super().__init__()
        parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser = self.initialize(parser)
        opt, parser = self.gather_options(parser)
        self.opt = opt          # parsed argparse.Namespace
        self.parser = parser    # kept for default-value comparison in print/save

    @staticmethod
    def _str2bool(value):
        """Convert a command-line string to a real boolean.

        ``type=bool`` is a well-known argparse pitfall: any non-empty string
        (including ``"False"``) is truthy, so the option could never be
        disabled from the command line.  This converter accepts the usual
        spellings and raises ``ArgumentTypeError`` otherwise.
        """
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('yes', 'true', 't', 'y', '1'):
            return True
        if lowered in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)

    def initialize(self, parser):
        """Register every experiment option on *parser* and return it."""
        # init
        parser.add_argument('--name', type=str, default='spade_sketch_colorization', help='name of the experiment. It '
                                                                                    'decides where to store samples '
                                                                                    'and models')
        # freq
        parser.add_argument('--display_freq', type=int, default=1000, help='frequency of showing training results on screen')
        parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
        parser.add_argument('--save_epoch_freq', type=int, default=10, help='frequency of saving checkpoints at the end of epochs')

        parser.add_argument('--device', type=str, default='cuda')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        parser.add_argument('--num_workers', type=int, default=8, help='thread of loading data')
        # NOTE: uses _str2bool, not type=bool, so '--use_tensorboard False' works.
        parser.add_argument('--use_tensorboard', type=self._str2bool, default=True, help='whether apply tensorboard to log')

        parser.add_argument('--batch_size', type=int, default=4, help='batch size of model input')
        # NOTE(review): help says "discrim filters" but ngf conventionally means
        # generator filters — confirm against the network-construction code.
        parser.add_argument('--ngf', type=int, default=64, help='# of discrim filters in first conv layer')
        parser.add_argument('--D_type', type=str, default='structure', help='decide structure discriminator or color discriminator')
        parser.add_argument('--n_layers_D', type=int, default=4, help='# layers in each discriminator')
        parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to be used in multiscale')
        parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss')

        parser.add_argument('--load_size', type=int, default=256,
                            help='Scale images to this size. The loaded image will be converted to --image_size.')
        parser.add_argument('--image_size', type=int, default=256, help='output size of generated image')
        parser.add_argument('--preprocess_mode', type=str, default='scale_width_and_crop',
                            help='scaling and cropping of images at load time.', choices=(
            "resize_and_crop", "crop", "scale_width", "scale_width_and_crop", "scale_shortside",
            "scale_shortside_and_crop", "fixed", "none"))
        parser.add_argument('--aspect_ratio', type=float, default=1.0,
                            help='The ratio width/height. The final height of the load image will be crop_size/aspect_ratio')
        parser.add_argument('--init_type', type=str, default='xavier', help='networks initialization [normal|xavier|kaiming|orthogonal]')
        parser.add_argument('--init_variance', type=float, default=0.02, help='variance of the initialization distribution')

        # dataset
        parser.add_argument('--dataroot', type=str, default='data/preprocessed_data')
        parser.add_argument('--dataset_mode', type=str, default='animepair')
        parser.add_argument('--no_pairing_check', action='store_true',
                            help='If specified, skip sanity check of correct label-image file pairing')
        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        # Augmentation
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data argumentation')

        # train
        parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        parser.add_argument('--which_epoch', type=str, default='latest',
                            help='which epoch to load? set to latest to use latest cached model')
        parser.add_argument('--niter', type=int, default=50, help='# of iter at starting learning rate. This is NOT the total #epochs. Total #epochs is niter + niter_decay')
        parser.add_argument('--niter_decay', type=int, default=50, help='# of iter to linearly decay learning rate to zero')
        parser.add_argument('--optimizer', type=str, default='adam')
        parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
        parser.add_argument('--beta2', type=float, default=0.999, help='momentum term of adam')
        parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        parser.add_argument('--D_steps_per_G', type=int, default=1, help='number of discriminator iterations per generator iterations.')
        parser.add_argument('--no_TTUR', action='store_true', help='if specified, do *not* use the TTUR training scheme')
        parser.add_argument('--no_compute_kld_loss', action='store_true', help='if specified, do *not* compute kld loss in training')
        parser.add_argument('--z_dim', type=int, default=256,
                            help="dimension of the latent z vector")
        # gan
        parser.add_argument('--gan_mode', type=str, default='hinge', help='(ls|original|hinge)')
        # weight
        parser.add_argument('--weight_gan', type=float, default=10.0, help='weight of all gan loss')
        parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss')
        parser.add_argument('--lambda_vgg', type=float, default=10.0, help='weight for vgg loss')
        parser.add_argument('--fm_ratio', type=float, default=0.1, help='vgg fm loss weight comp with ctx loss')
        parser.add_argument('--weight_perceptual', type=float, default=0.01)
        parser.add_argument('--lambda_kld', type=float, default=0.05)
        parser.add_argument('--ctx_w', type=float, default=0.1, help='ctx loss weight')
        parser.add_argument('--use_22ctx', action='store_true', help='if true, also use 2-2 in ctx loss')
        return parser

    def parse(self):
        """Print and save the already-parsed options, then return them."""
        self.print_options(self.opt)
        self.save_options(self.opt)
        return self.opt

    def option_file_path(self, opt, makedir=False):
        """Return ``<checkpoints_dir>/<name>/opt`` (no extension).

        When *makedir* is True the experiment directory is created first.
        """
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        if makedir:
            util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt')
        return file_name

    def _formatted_options(self, opt):
        """Return one aligned, newline-terminated text line per option.

        Values differing from the parser default are annotated with the
        default so experiment logs show what was overridden.
        """
        lines = []
        for k, v in sorted(vars(opt).items()):
            default = self.parser.get_default(k)
            comment = '\t[default: %s]' % str(default) if v != default else ''
            lines.append('{:>25}: {:<30}{}\n'.format(str(k), str(v), comment))
        return lines

    def save_options(self, opt):
        """Persist options as human-readable ``opt.txt`` and pickled ``opt.pkl``."""
        file_name = self.option_file_path(opt, makedir=True)
        with open(file_name + '.txt', 'wt') as opt_file:
            opt_file.writelines(self._formatted_options(opt))

        # NOTE: pickle is fine here because we only ever reload our own file;
        # never unpickle an opt.pkl from an untrusted source.
        with open(file_name + '.pkl', 'wb') as opt_file:
            pickle.dump(opt, opt_file)

    def gather_options(self, parser):
        """Parse ``sys.argv`` with *parser*; return (namespace, parser)."""
        opt = parser.parse_args()
        return opt, parser

    def print_options(self, opt):
        """Pretty-print all options to stdout, flagging non-default values."""
        name = opt.name + ' base options'  # space added: header read "namebase options"
        message = '-------------- {0} ---------------\n'.format(name)
        message += ''.join(self._formatted_options(opt))
        message += '------------------------- End -------------------------'
        print(message)