import tensorflow as tf
import logging 

# Shorthand for TF1's gflags-style flag registry used throughout this module.
flags = tf.app.flags
# NOTE(review): reaches into a private attribute of tf.app.flags and appears
# unused in this file — presumably kept for external importers; confirm before
# removing.
global_parser = tf.app.flags._global_parser

############################
#    hyper parameters      #
############################

# For separate margin loss
flags.DEFINE_float('m_plus', 0.9, 'the parameter of m plus')
flags.DEFINE_float('m_minus', 0.1, 'the parameter of m minus')
flags.DEFINE_float('m_recons', 0.02, 'reconstruction error max margin')
flags.DEFINE_float('lambda_val', 0.5, 'down weight of the loss for absent digit classes')

# for training
flags.DEFINE_integer('batch_size', 128, 'batch size')
flags.DEFINE_integer('epoch', 500, 'epoch')
flags.DEFINE_integer('save_freq', 200, 'the frequency of saving model')
flags.DEFINE_integer('iter_routing', 3, 'number of iterations in routing algorithm')
flags.DEFINE_float('init_lr', 0.001, 'initial learning rate for adam')
# Active learning-rate-decay schedule; the commented-out alternatives below
# are earlier experiments, kept with their observed results for reference.
flags.DEFINE_float('decay_lr', 0.97, 'decay percent every time')
flags.DEFINE_integer('decay_step', 1000, 'decay interval' )
#flags.DEFINE_float('decay_lr', 0.82, 'decay percent every time')
#flags.DEFINE_integer('decay_step', 1000, 'decay interval' )
# reaches 99.35% (accuracy, presumably on the test set — confirm)
#flags.DEFINE_float('decay_lr', 0.7, 'decay percent every time')
#flags.DEFINE_integer('decay_step', 800, 'decay interval' )
# stopped improving after 8000 iterations and the training set never reached
# 100% — the decay was too aggressive
#flags.DEFINE_float('decay_lr', 0.5, 'decay percent every time')
#flags.DEFINE_integer('decay_step', 3600, 'decay interval' )
# after 8000 iterations the metrics got worse while the training set reached
# 100% — overfitting; the test curve diverged
flags.DEFINE_float('stddev', 0.01, 'stddev for W initializer')
flags.DEFINE_float('regularization_scale', 0.1, 'regularization coefficient for reconstruction loss')


############################
#   environment setting    #
############################
# Logging verbosity as text; this module later rebinds cfg.log_lvl to the
# corresponding logging.* integer constant after flag resolution.
flags.DEFINE_string('log_lvl', 'debug', 'log level, option: info, warn, debug, error')
flags.DEFINE_string('dataset', '../MNIST_data', 'the path for dataset')
flags.DEFINE_boolean('is_training', True, 'train or predict phase')
# Fix: typo in the help text ('exampls' -> 'examples').
flags.DEFINE_integer('num_threads', 8, 'number of threads of enqueueing examples')
flags.DEFINE_string('logdir', 'logdir', 'logs directory')
flags.DEFINE_integer('train_sum_freq', 100, 'the frequency of saving train summary(step)')
flags.DEFINE_integer('test_sum_freq', 500, 'the frequency of saving test summary(step)')
flags.DEFINE_string("model_dir", "caps_net_model", "path where model check point saved")
############################
#   distributed setting    #
############################
flags.DEFINE_integer('num_gpu', 2, 'number of gpus for distributed training')
# NOTE(review): duplicates the single-GPU batch_size default (128); confirm
# which of the two the multi-GPU training path actually reads.
flags.DEFINE_integer('batch_size_per_gpu', 128, 'batch size on 1 gpu')
flags.DEFINE_integer('thread_per_gpu', 4, 'Number of preprocessing threads per tower.')

# Resolved configuration object: every DEFINE_* flag above is an attribute.
cfg = tf.app.flags.FLAGS

# Replace the textual log level with the matching logging module constant.
# Fix: 'error' previously mapped to logging.DEBUG instead of logging.ERROR,
# so requesting error-level logging silently enabled full debug output.
# A KeyError here means an unsupported --log_lvl value was supplied.
cfg.log_lvl = {"info": logging.INFO,
               "warn": logging.WARN,
               "debug": logging.DEBUG,
               "error": logging.ERROR}[cfg.log_lvl.lower()]


if __name__ == "__main__":
    # When run directly, dump every registered flag and its current value.
    # NOTE(review): '__flags' is a private attribute of the TF1 FLAGS object.
    print(repr(vars(cfg)['__flags']))
