import tensorflow as tf
import logging 

flags = tf.app.flags

# ---------------------------------------------------------------------------
# Flag declarations, grouped by value type and registered via loops below.
# Covers training hyper-parameters, environment settings and distributed
# settings; see each flag's help string for its meaning.
# ---------------------------------------------------------------------------

_INT_FLAGS = (
    # training
    ('batch_size', 32, 'batch size'),
    ('epoch', 200, 'epoch'),
    ('save_freq', 20, 'the frequency of saving model, in minutes'),
    # environment
    ('img_size', 128, 'all images will be resized to this squared size'),
    ('attr_num', 26, 'number of attribute of images'),
    ('train_sum_freq', 200, 'the frequency of saving train summary(step)'),
    ('valid_sum_freq', 5000, 'the frequency of saving valid summary(step)'),
    ('thread_num', 3, 'multi-process num'),
    ('img_part_num', 32 * 100, 'image num in one partition saved in disk'),
    # distributed
    ('num_gpu', 2, 'number of gpus for distributed training'),
    ('batch_size_per_gpu', 128, 'batch size on 1 gpu'),
    ('thread_per_gpu', 4, 'Number of preprocessing threads per tower.'),
)

_FLOAT_FLAGS = (
    ('leak_relu_neg_slop', 0.2, 'neg slop of leaky relu'),
    ('lr', 0.0002, 'learning rate for adam'),
    ('beta1', 0.5, 'beta1 for adam'),
    ('max_lambda_e', 0.0001, 'the mistaken weight of discriminator given encoder'),
    ('slope_lambda_e', 500000, 'inverse of increase slop for lambda_e'),
)
# NOTE: with lambda_e == 0.001 the synthesized images show clearly visible
# distortion, so the previously tried alternative stays disabled:
#   max_lambda_e = 0.001, slope_lambda_e = 250000

_STRING_FLAGS = (
    ('log_lvl', 'debug', 'log level, option: info, warn, debug, error'),
    ('dataset', 'CelebA_np', 'the path for numpy dataset'),
    ('sample_splits', '0.8,0.9', 'data set splits'),
    ('logdir', 'logdir', 'logs directory'),
    ('imgdir', 'imgdir', 'directory where reconstructed images saved'),
    ('model_dir', 'fade_net_model', 'path where model check point saved'),
    ('origin_data_dir', '../DataSets/CelebA', 'path where origin dataset saved'),
)

_BOOL_FLAGS = (
    ('init_data', False,
     'if initiating data set from `origin_data_dir` before training'),
)

# Register everything with tf.app.flags; lookup by the parser is name-based,
# so the registration order here does not matter.
for _name, _default, _help in _INT_FLAGS:
    flags.DEFINE_integer(_name, _default, _help)
for _name, _default, _help in _FLOAT_FLAGS:
    flags.DEFINE_float(_name, _default, _help)
for _name, _default, _help in _STRING_FLAGS:
    flags.DEFINE_string(_name, _default, _help)
for _name, _default, _help in _BOOL_FLAGS:
    flags.DEFINE_bool(_name, _default, _help)

cfg = tf.app.flags.FLAGS

# Translate the textual --log_lvl flag into its numeric `logging` constant.
# BUG FIX: 'error' previously mapped to logging.DEBUG, so requesting the
# quietest level silently enabled the most verbose one; it now maps to
# logging.ERROR as the flag's help string promises.
# Raises KeyError if an unsupported level name is supplied (the help string
# documents the valid options: info, warn, debug, error).
cfg.log_lvl = {"info": logging.INFO,
               "warn": logging.WARN,
               "debug": logging.DEBUG,
               "error": logging.ERROR}[cfg.log_lvl.lower()]
