#
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Functions to build DetectionModel training optimizers."""
import os

# npu_init re-exports the NPU training helpers used below
# (e.g. NPUDistributedOptimizer and ExponentialUpdateLossScaleManager).
from npu_bridge.npu_init import *
import tensorflow as tf

from object_detection.utils import learning_schedules

# Loss-scale optimizer wrapper for mixed-precision training on Ascend NPUs.
from npu_bridge.estimator.npu.npu_loss_scale_optimizer import NPULossScaleOptimizer

def npu_tf_optimizer(opt):
    """Wraps a TF optimizer for NPU: distributed gradient reduction plus loss scaling."""
    npu_opt = NPUDistributedOptimizer(opt)
    # Dynamic loss scaling for mixed-precision training: start at 32768, halve
    # after 2 overflow steps, and try to double again after 1000 clean steps.
    loss_scale_manager = ExponentialUpdateLossScaleManager(
        init_loss_scale=32768,
        incr_every_n_steps=1000,
        decr_every_n_nan_or_inf=2,
        decr_ratio=0.5)
    # RANK_SIZE is the number of NPU devices; default to single-device if unset.
    if int(os.getenv('RANK_SIZE', '1')) == 1:
        npu_opt = NPULossScaleOptimizer(npu_opt, loss_scale_manager)
    else:
        npu_opt = NPULossScaleOptimizer(npu_opt, loss_scale_manager, is_distributed=True)
    return npu_opt
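
# Illustrative usage of npu_tf_optimizer (a sketch, not part of the training
# pipeline; `loss` and the 0.01/0.9 hyperparameters are placeholder values):
#
#   base_opt = tf.train.MomentumOptimizer(0.01, momentum=0.9)
#   opt = npu_tf_optimizer(base_opt)  # distributed + loss-scaled wrapper
#   train_op = opt.minimize(loss, global_step=tf.train.get_or_create_global_step())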

def build(optimizer_config):
    """Create optimizer based on config.

    Args:
      optimizer_config: An Optimizer proto message.

    Returns:
      An optimizer and a list of variables for summary.

    Raises:
      ValueError: when using an unsupported optimizer type.
    """
    optimizer_type = optimizer_config.WhichOneof('optimizer')
    optimizer = None
    summary_vars = []
    if optimizer_type == 'rms_prop_optimizer':
        config = optimizer_config.rms_prop_optimizer
        learning_rate = _create_learning_rate(config.learning_rate)
        summary_vars.append(learning_rate)
        optimizer = npu_tf_optimizer(
            tf.train.RMSPropOptimizer(
                learning_rate,
                decay=config.decay,
                momentum=config.momentum_optimizer_value,
                epsilon=config.epsilon))
    elif optimizer_type == 'momentum_optimizer':
        config = optimizer_config.momentum_optimizer
        learning_rate = _create_learning_rate(config.learning_rate)
        summary_vars.append(learning_rate)
        optimizer = npu_tf_optimizer(
            tf.train.MomentumOptimizer(
                learning_rate, momentum=config.momentum_optimizer_value))
    elif optimizer_type == 'adam_optimizer':
        config = optimizer_config.adam_optimizer
        learning_rate = _create_learning_rate(config.learning_rate)
        summary_vars.append(learning_rate)
        optimizer = npu_tf_optimizer(tf.train.AdamOptimizer(learning_rate))
    if optimizer is None:
        raise ValueError('Optimizer %s not supported.' % optimizer_type)
    if optimizer_config.use_moving_average:
        optimizer = tf.contrib.opt.MovingAverageOptimizer(
            optimizer, average_decay=optimizer_config.moving_average_decay)
    return optimizer, summary_vars
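
# Illustrative call site for build() (a sketch; `train_config` is a hypothetical
# TrainConfig proto carrying the Optimizer message):
#
#   training_optimizer, summary_vars = build(train_config.optimizer)
#   for var in summary_vars:
#       tf.summary.scalar(var.op.name, var)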

def _create_learning_rate(learning_rate_config):
    """Create optimizer learning rate based on config.

    Args:
      learning_rate_config: A LearningRate proto message.

    Returns:
      A learning rate.

    Raises:
      ValueError: when using an unsupported learning rate type.
    """
    learning_rate = None
    learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
    if learning_rate_type == 'constant_learning_rate':
        config = learning_rate_config.constant_learning_rate
        learning_rate = tf.constant(
            config.learning_rate, dtype=tf.float32, name='learning_rate')
    elif learning_rate_type == 'exponential_decay_learning_rate':
        config = learning_rate_config.exponential_decay_learning_rate
        learning_rate = learning_schedules.exponential_decay_with_burnin(
            tf.train.get_or_create_global_step(),
            config.initial_learning_rate,
            config.decay_steps,
            config.decay_factor,
            burnin_learning_rate=config.burnin_learning_rate,
            burnin_steps=config.burnin_steps,
            min_learning_rate=config.min_learning_rate,
            staircase=config.staircase)
    elif learning_rate_type == 'manual_step_learning_rate':
        config = learning_rate_config.manual_step_learning_rate
        if not config.schedule:
            raise ValueError('Empty learning rate schedule.')
        learning_rate_step_boundaries = [x.step for x in config.schedule]
        learning_rate_sequence = [config.initial_learning_rate]
        learning_rate_sequence += [x.learning_rate for x in config.schedule]
        learning_rate = learning_schedules.manual_stepping(
            tf.train.get_or_create_global_step(),
            learning_rate_step_boundaries,
            learning_rate_sequence,
            config.warmup)
    elif learning_rate_type == 'cosine_decay_learning_rate':
        config = learning_rate_config.cosine_decay_learning_rate
        learning_rate = learning_schedules.cosine_decay_with_warmup(
            tf.train.get_or_create_global_step(),
            config.learning_rate_base,
            config.total_steps,
            config.warmup_learning_rate,
            config.warmup_steps,
            config.hold_base_rate_steps)
    if learning_rate is None:
        raise ValueError('Learning_rate %s not supported.' % learning_rate_type)
    return learning_rate
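
# Illustrative proto text for a manual-step schedule (a sketch; field names
# mirror those the code above reads from the LearningRate message, and the
# step/rate values are placeholders):
#
#   manual_step_learning_rate {
#     initial_learning_rate: 0.004
#     schedule { step: 90000 learning_rate: 0.0004 }
#     schedule { step: 120000 learning_rate: 0.00004 }
#   }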
