"""
/* Copyright 2018 The Enflame Tech Company. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
"""
#!/usr/bin/python
# coding=utf-8
# NOTE(review): a shebang is only honored on line 1; here (after the module
# docstring) it is informational only.

import tensorflow as tf
import os
from collections import OrderedDict
from datetime import datetime

from utils.dtu_utils import *

FLAGS = tf.app.flags.FLAGS

# --- basic flags ---
tf.app.flags.DEFINE_string('build_id', "9999",
                           help=("""Build id. Will be set as default 9999 when no specific build id"""))
tf.app.flags.DEFINE_string('ip_layout', "",
                           help=("""1c8s|4c32s"""))
tf.app.flags.DEFINE_string('platform', "dtu_leo",
                           help=("""Platform of execution. """
                                 """Optional: dtu_mock|dtu_vdk|dtu_fpga|dtu_zebu|dtu_leo|cpu|gpu"""))
tf.app.flags.DEFINE_string('nn_base_info', 'E2020SW102NN0002B0409',
                           help=("""model build version"""))
tf.app.flags.DEFINE_string('test_log_name', '',
                           help=("""test log file name"""))
tf.app.flags.DEFINE_string('data_dir', '',
                           help=("""path of dataset"""))

# --- optimizer flags ---
tf.app.flags.DEFINE_float('momentum', 0.9,
                          help=("""Momentum parameter used in the MomentumOptimizer."""))
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9,
                          help=("""Decay term for RMSProp."""))
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9,
                          help=("""Momentum in RMSProp."""))
tf.app.flags.DEFINE_float('rmsprop_epsilon', 1.0,
                          help=("""Epsilon term for RMSProp."""))
tf.app.flags.DEFINE_float('adam_beta1', 0.9,
                          help=("""Beta1 term for the Adam optimizer"""))
tf.app.flags.DEFINE_float('adam_beta2', 0.999,
                          help=("""Beta2 term for the Adam optimizer"""))
tf.app.flags.DEFINE_float('adam_epsilon', 1e-8,
                          help=("""Epsilon term for the Adam optimizer"""))

# --- session config flags ---
tf.app.flags.DEFINE_boolean('log_device_placement', True,
                            help=("""config parameters: log_device_placement flag"""))
tf.app.flags.DEFINE_boolean('allow_soft_placement', True,
                            help=("""config parameters: allow_soft_placement flag"""))

# --- test process flags ---
tf.app.flags.DEFINE_boolean('is_training', False,
                            help=("""True for training; False for inference"""))
tf.app.flags.DEFINE_enum('device', 'cpu', ['cpu', 'gpu', 'dtu', 'tpu', 'xla_gpu', 'xla_cpu'],
                         help=("""Assign on which device to execute model"""))
tf.app.flags.DEFINE_enum('model', 'resnet',
                         ['resnet', 'alexnet', 'googlenet', 'vgg', 'inception_v2', 'inception_v3', 'inception_v4',
                          'resnet_cifar', 'enflame_alexnet', 'resnet6', 'resnet14', 'resnet101', 'resnet152',
                          'resnet200', 'resnet50_v1', 'resnet50_v1.5', 'resnet50_v2', 'resnet18_v2', 'lenet'],
                         help=("""Model name for training/inference"""))
tf.app.flags.DEFINE_enum('dataset', 'imagenet2',
                         ['imagenet2', 'imagenet10', 'imagenet50', 'imagenet', 'mnist', 'cifar10', 'flowers'],
                         help=("""dataset for training"""))
tf.app.flags.DEFINE_enum('dtype', 'fp32', ['fp16', 'fp32', 'bf16'],
                         help=("""Brain floating point format"""))
tf.app.flags.DEFINE_enum('data_format', 'NHWC', ['NHWC', 'CHNW', 'NCHW'],
                         help=("""Data format of input data. NHWC[channels last] for CPU|TPU. """
                               """NCHW for GPU[channels first]. For dtu, optional is NHWC|CHNW. """
                               """CHNW will improve performance."""))
tf.app.flags.DEFINE_integer('batch_size', 128,
                            help=("""Number of images to process in a batch"""))
tf.app.flags.DEFINE_integer('epoch', 1,
                            help=("""Number of epochs"""))
tf.app.flags.DEFINE_integer('epochs_between_evals', 5,
                            help=("""The number of epochs to train between evaluations"""))
tf.app.flags.DEFINE_integer('display_step', 10,
                            help=("""Number of every steps to display loss; """
                                  """just work when FLAGS.display_loss is True"""))
tf.app.flags.DEFINE_enum('optimizer', 'momentum', ['sgd', 'momentum', 'rmsprop', 'adam', 'lars'],
                         help=("""Optimizer for training process"""))

# --- learning rate flags ---
tf.app.flags.DEFINE_enum('lr_type', 'decay', ['fixed', 'decay', 'poly', 'cosine', 'exp'],
                         help=("""Specifies how the learning rate is decayed"""))
tf.app.flags.DEFINE_float('base_learning_rate', 128e-3,
                          help=("""Base learning rate when batch size is 4 for 1c."""))

tf.app.flags.DEFINE_float('label_smoothing', 0.,
                          help=("""Label smoothing factor applied to the loss; 0 disables smoothing"""))
tf.app.flags.DEFINE_float('weight_decay', 0.0001,
                          help=("""weight decay of L2 loss"""))
tf.app.flags.DEFINE_boolean('use_resource', True,
                            help=("""True to enable use ResourceVariable; False for disable"""))
tf.app.flags.DEFINE_integer('training_step_per_epoch', 0,
                            help=("""Number of training steps for each epoch. """
                                  """Default None, all images of training dataset will be feed. """
                                  """Artificial value will set step number for one epoch"""))
tf.app.flags.DEFINE_integer('evaluate_step_per_epoch', 0,
                            help=("""Number of evaluate steps for each epoch. """
                                  """Default None, all images of evaluate dataset will be feed. """
                                  """Artificial value will set step number for one epoch"""))
tf.app.flags.DEFINE_integer('avg_device_id', 4,
                            help=("""virtual device id for reduce mean of cluster based training"""))
tf.app.flags.DEFINE_integer('loss_device_id', 0,
                            help=("""Loss on which device of intra-chip distribution is applied"""))
tf.app.flags.DEFINE_integer('default_device_id', 0,
                            help=("""main graph device id"""))
tf.app.flags.DEFINE_boolean('use_synthetic_data', False,
                            help=("""True for synthetic_data, just for training_once"""))
tf.app.flags.DEFINE_float('dropout_rate', 0.5,
                          help=("""Dropout rate of model building. """
                                """Will be fixed as 1 when inference"""))

# --- optional function flags ---
tf.app.flags.DEFINE_boolean('enable_evaluate', False,
                            help=("""True to use eval; False for disable"""))
tf.app.flags.DEFINE_boolean('enable_profiler', False,
                            help=("""True to enable TF meta profiler; False for disable"""))
tf.app.flags.DEFINE_boolean('enable_dump_graph', False,
                            help=("""True to enable dumping the graph; False for disable"""))
tf.app.flags.DEFINE_boolean('enable_upload_log', False,
                            help=("""True to enable log uploading to remote server; False for disable"""))
tf.app.flags.DEFINE_boolean('enable_saver', False,
                            help=("""True to enable save training checkpoints snapshot; False for disable"""))
tf.app.flags.DEFINE_boolean('debug_mode', False,
                            help=("""debug mode, fixed seed and shuffle=False"""))
tf.app.flags.DEFINE_boolean('xla_jit', False,
                            help=("""True to use jit or placer op tf.device"""))
tf.app.flags.DEFINE_boolean('enable_shared_vars', False,
                            help=("""use shared vars for cluster based distribute training"""))
tf.app.flags.DEFINE_boolean('enable_l2_loss', True,
                            help=("""whether to use L2 loss"""))

class InfoDict(object):
    """Snapshot of the current FLAGS values, grouped into ordered sections.

    Each attribute is an ``OrderedDict`` (field order is preserved for
    deterministic logging/serialization downstream):

    - ``base_info``: build/platform identification plus a fresh test id
      and start timestamp.
    - ``opt_info``: optimizer hyper-parameters.
    - ``config_info``: ``tf.ConfigProto``-style session settings.
    - ``test_info``: the training/inference run configuration.
    - ``func_info``: optional feature toggles.
    """

    def __init__(self):
        # Values are captured once at construction time; later FLAGS
        # mutations are not reflected in these dicts.
        self.base_info = self._base_information()
        self.opt_info = self._optimizer_information()
        self.config_info = self._config_information()
        self.test_info = self._test_information()
        self.func_info = self._functional_information()

    def _base_information(self):
        """Return build/platform identity fields plus a unique test id."""
        base_info = OrderedDict([('nn_base_info', FLAGS.nn_base_info),
                                 ('build_id', FLAGS.build_id),
                                 ('ip_layout', FLAGS.ip_layout),
                                 ('platform', FLAGS.platform),
                                 ('test_log_name', FLAGS.test_log_name),
                                 # get_uuid comes from utils.dtu_utils
                                 ('test_id', get_uuid()),
                                 ('start_time', datetime.now())
                                 ])
        return base_info

    def _optimizer_information(self):
        """Return the optimizer hyper-parameter section."""
        opt_info = OrderedDict([('momentum', FLAGS.momentum),
                                ('rmsprop_decay', FLAGS.rmsprop_decay),
                                ('rmsprop_momentum', FLAGS.rmsprop_momentum),
                                ('rmsprop_epsilon', FLAGS.rmsprop_epsilon),
                                ('adam_beta1', FLAGS.adam_beta1),
                                ('adam_beta2', FLAGS.adam_beta2),
                                ('adam_epsilon', FLAGS.adam_epsilon),
                                ])
        return opt_info

    # Backward-compatible alias for the previously misspelled method name.
    _optimiazer_information = _optimizer_information

    def _config_information(self):
        """Return session-config fields.

        In debug mode both inter/intra op parallelism are pinned to a
        single thread for reproducibility; 0 lets TF pick its defaults.
        """
        if FLAGS.debug_mode:
            inter_op_parallelism_threads = 1
            intra_op_parallelism_threads = 1
        else:
            inter_op_parallelism_threads = 0
            intra_op_parallelism_threads = 0
        config_info = OrderedDict([('inter_op_parallelism_threads', inter_op_parallelism_threads),
                                   ('intra_op_parallelism_threads', intra_op_parallelism_threads),
                                   ('log_device_placement', FLAGS.log_device_placement),
                                   ('allow_soft_placement', FLAGS.allow_soft_placement)
                                   ])
        return config_info

    def _test_information(self):
        """Return the run configuration; string enums are case-normalized."""
        test_info = OrderedDict([('is_training', FLAGS.is_training),
                                 ('device', FLAGS.device.lower()),
                                 ('model', FLAGS.model.lower()),
                                 ('dataset', FLAGS.dataset.lower()),
                                 ('dtype', FLAGS.dtype.lower()),
                                 ('data_format', FLAGS.data_format.upper()),
                                 ('batch_size', FLAGS.batch_size),
                                 ('epoch', FLAGS.epoch),
                                 ('display_step', FLAGS.display_step),
                                 ('optimizer', FLAGS.optimizer.lower()),
                                 ('lr_type', FLAGS.lr_type),
                                 ('base_learning_rate', FLAGS.base_learning_rate),
                                 ('label_smoothing', FLAGS.label_smoothing),
                                 ('weight_decay', FLAGS.weight_decay),
                                 ('use_resource', FLAGS.use_resource),
                                 ('training_step_per_epoch', FLAGS.training_step_per_epoch),
                                 ('evaluate_step_per_epoch', FLAGS.evaluate_step_per_epoch),
                                 ('avg_device_id', FLAGS.avg_device_id),
                                 ('loss_device_id', FLAGS.loss_device_id),
                                 ('default_device_id', FLAGS.default_device_id),
                                 ('use_synthetic_data', FLAGS.use_synthetic_data),
                                 ('dropout_rate', FLAGS.dropout_rate),
                                 # Horovod fields are hard-coded single-process
                                 # defaults here; presumably overwritten by a
                                 # distributed launcher — TODO confirm.
                                 ('enable_horovod', False),
                                 ('hvd_size', 1),
                                 ('local_rank', 0),
                                 ('rank', 0)
                                 ])
        return test_info

    def _functional_information(self):
        """Return the optional feature-toggle section."""
        func_info = OrderedDict([('enable_evaluate', FLAGS.enable_evaluate),
                                 ('enable_profiler', FLAGS.enable_profiler),
                                 ('enable_dump_graph', FLAGS.enable_dump_graph),
                                 ('enable_upload_log', FLAGS.enable_upload_log),
                                 ('enable_saver', FLAGS.enable_saver),
                                 ('debug_mode', FLAGS.debug_mode),
                                 ('xla_jit', FLAGS.xla_jit),
                                 ('enable_shared_vars', FLAGS.enable_shared_vars),
                                 ('enable_l2_loss', FLAGS.enable_l2_loss),
                                 ])
        return func_info


if __name__ == '__main__':
    # Smoke-test entry point: materialize the full configuration snapshot
    # from the current FLAGS values and pull out the test-process section.
    # Note: flags are read directly (not via tf.app.run), so command-line
    # parsing relies on FLAGS' implicit parse-on-first-access behavior.
    info_dict = InfoDict()
    test_info = info_dict.test_info
