"""
/* Copyright 2018 The Enflame Tech Company. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
"""
#!/usr/bin/python
# coding=utf-8

from __future__ import print_function

import tensorflow as tf
from tensorflow.python.ops import array_ops
import numpy as np
import os
import sys

FLAGS = tf.app.flags.FLAGS

from utils.network import build_model
from data_utils.data_processing import DataProcessing
from utils.dtu_utils import *
from utils.hw_info import HWInfo
from utils.learning_rate import LearningRate
from utils.optimizer import get_optimizer
from utils.loss import LossFunc
from config.datasets import dataset_mapping
from utils.dtu_logger import LOGGER as logger

try:
    import horovod.tensorflow as hvd
except ImportError as error:
    logger.warn("Horovod import failed")

ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
LOG_DIR = "{}/logs/".format(ROOT_PATH)
REPORT_DIR = "{}/test_report/".format(ROOT_PATH)
CKPT_DIR = "{}/models/checkpoints/".format(ROOT_PATH)
EVENT_DIR = "{}/models/tensorboard".format(ROOT_PATH)


class Benchmark(object):
    """Builds TF1 graph-mode training / evaluation / inference graphs for an
    image-classification benchmark.

    Args:
        test_info: dict of run parameters (e.g. 'batch_size', 'dataset',
            'optimizer', 'device', 'enable_horovod', ...). Stored as self.params.
        dtype: TF dtype the input pipeline is expected to produce (asserted on
            each batch before the model is built).
    """

    def __init__(self, test_info, dtype):
        self.params = test_info
        self.dtype = dtype
        # First available CPU device, and first available device of the kind
        # requested by params['device'] (resolved via HWInfo).
        self.cpu_device = HWInfo('cpu').available_device_list[0]
        self.default_device = HWInfo(self.params['device']).get_available_device()[0]

    def _training_step_num_per_epoch(self):
        """Return the number of training steps per epoch.

        An explicit non-zero 'training_step_per_epoch' overrides the value
        computed from the dataset size; otherwise it is
        floor(train_images / (batch_size * num_cluster * hvd_size)),
        i.e. the global (all-replica) batch consumes the images of one step.
        """
        if self.params['training_step_per_epoch'] != 0:
            step_num_per_epoch = self.params['training_step_per_epoch']
        else:
            train_image_number = dataset_mapping[self.params['dataset']]['train_images_number']
            one_step_image_number = self.params['batch_size'] * self.params['num_cluster'] * self.params['hvd_size']
            assert one_step_image_number <= train_image_number, 'Batchsize too large for dataset:{}'.format(
                self.params['dataset'])
            step_num_per_epoch = int(np.floor(train_image_number / one_step_image_number))
        return step_num_per_epoch

    def _evaluate_step_num_per_epoch(self):
        """Return the number of evaluation steps per epoch.

        An explicit non-zero 'evaluate_step_per_epoch' overrides the computed
        value. Unlike training, evaluation uses only the local batch size
        (no num_cluster / hvd_size scaling).
        """
        if self.params['evaluate_step_per_epoch'] != 0:
            step_num_per_epoch = self.params['evaluate_step_per_epoch']
        else:
            eval_image_number = dataset_mapping[self.params['dataset']]['val_images_number']
            one_step_image_number = self.params['batch_size']
            assert one_step_image_number <= eval_image_number, 'Batchsize too large for dataset:{}'.format(
                self.params['dataset'])
            step_num_per_epoch = int(np.floor(eval_image_number / one_step_image_number))

        return step_num_per_epoch

    def _exclude_batch_norm(self, name):
        """Return True if the variable name is NOT a batch-norm variable.

        Used to exclude BN (and preact/postnorm) variables from the L2-loss
        term; matches the common naming of several BN implementations.
        """
        return 'BatchNorm' not in name and 'preact' not in name and 'postnorm' not in name and 'batch_normalization' not in name

    def _generate_data(self, data_dir, is_training):
        """
            Generate input data for training/evaluate/inference.
            Both real dataset and synthetic data are supported.
            Args:
                data_dir: The filepath of dataset, only tfrecords supported now.
                is_training: A bool
                    if training: dataset will be shuffled.
                    if not: batch size only for one cluster.
            Return:
                An uninitialized dataset iterator.
        """
        dataset = DataProcessing(is_training=is_training, data_dir=data_dir, dtype=self.dtype, params=self.params,
                                 target_device=self.default_device)

        batch_size = self.params['batch_size']

        # Synthetic data bypasses file I/O entirely (useful for pure-compute
        # benchmarking); otherwise read the real tfrecords pipeline.
        if self.params['use_synthetic_data']:
            data = dataset.get_synth_input_fn(batch_size=batch_size)
        else:
            data = dataset.input_fn(batch_size=batch_size)

        iterator = data.make_initializable_iterator()
        return iterator

    def build_training_graph(self, iterator, dropout_rate, global_step, reuse=False):
        """Build the forward/backward training graph.

            Args:
                iterator: Initializable dataset iterator yielding (image, label).
                dropout_rate: Dropout rate forwarded to build_model.
                global_step: Global-step variable, incremented by opt.minimize.
                reuse: Whether to reuse variables in the root variable scope.
            Return:
                (update_op, accuracy, loss_op) where update_op groups the
                UPDATE_OPS collection with the optimizer step, accuracy is the
                (value, update_op) pair from tf.metrics.accuracy, and loss_op
                is the scalar loss (cross entropy, plus L2 when enabled and
                the optimizer is not LARS).
        """
        # Input fetch and image summaries stay on CPU; only the model/optimizer
        # are placed on the benchmark device.
        with tf.device(self.cpu_device):
            # Initializer is fp32 even when the compute dtype is fp16.
            weight_init = tf.glorot_uniform_initializer(dtype=tf.float32)
            (image_train, label_train) = iterator.get_next()
            assert image_train.dtype == self.dtype
            if FLAGS.debug_mode:
                tf.summary.image('images', tf.cast(image_train, tf.float32), max_outputs=6)

        with tf.device(self.default_device):
            with tf.variable_scope('', reuse=reuse, use_resource=self.params['use_resource']):
                logits, _ = build_model(image_train, self.params, is_training=True, weight_init=weight_init,
                                        dropout_rate=dropout_rate, reuse=reuse)
                # Loss is always computed in fp32 for numerical stability.
                logits = tf.cast(logits, tf.float32)
                loss_func = LossFunc(self.params)
                cross_entropy = loss_func(logits=logits, labels=label_train)
                tf.identity(cross_entropy, name='cross_entropy')
                tf.summary.scalar('cross_entropy', cross_entropy, family='cross_entropy')
                params = tf.trainable_variables()
                loss = cross_entropy
                if FLAGS.enable_l2_loss:
                    # L2 regularization over non-BN trainables (see
                    # _exclude_batch_norm). Not added to the loss for LARS —
                    # presumably LARS applies weight decay internally; confirm.
                    l2_loss = self.params['weight_decay'] * tf.add_n(
                        [tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in params if self._exclude_batch_norm(v.name)])
                    tf.summary.scalar('l2_loss', l2_loss)
                    if self.params['optimizer'] != "lars":
                        loss += l2_loss
                learning_rate = LearningRate(self.params, self._training_step_num_per_epoch(),
                                             global_step).get_learning_rate(self.params['lr_type'])

                tf.identity(learning_rate, name='learning_rate')
                tf.summary.scalar('learning_rate', learning_rate, family='learning_rate')
                opt = get_optimizer(self.params, learning_rate)

                accuracy = tf.metrics.accuracy(label_train, tf.argmax(logits, axis=1), name="metric")
                loss_op = loss

                tf.identity(accuracy[1], name='train_accuracy')
                # tf.identity(accuracy_top_5[1], name='train_accuracy_top_5')
                tf.summary.scalar('train_accuracy', accuracy[1], family='train_accuracy')
                # tf.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])
                if self.params['enable_horovod']:
                    # fp16 gradient compression when the configured dtype is
                    # fp16. NOTE(review): reads params['dtype'] (a string)
                    # rather than self.dtype — confirm the two stay in sync.
                    opt = hvd.DistributedOptimizer(opt, compression=hvd.Compression.fp16 if self.params[
                                                                                                'dtype'] == 'fp16' else hvd.Compression.none)
                update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                train_op = opt.minimize(loss, global_step=global_step)
                # NOTE(review): UPDATE_OPS (e.g. BN moving stats) are grouped
                # to run alongside train_op rather than as control
                # dependencies before minimize — confirm this is intended.
                update_op = tf.group(update_ops, train_op, name='train_ops_group')
        return update_op, accuracy, loss_op

    def build_evaluate_graph(self, iterator, dropout_rate, reuse=True):
        """Build the evaluation graph.

            Args:
                iterator: Initializable dataset iterator yielding (image, label).
                dropout_rate: Dropout rate forwarded to build_model.
                reuse: Defaults to True so evaluation shares the variables
                    created by the training graph.
            Return:
                (accuracy, accuracy_top_5) scalar tensors. Top-5 accuracy is
                computed on the CPU device.
        """
        with tf.device(self.cpu_device):
            # weight_init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)
            weight_init = tf.glorot_uniform_initializer(dtype=tf.float32)
            try:
                (images_eval, labels_eval) = iterator.get_next()
                assert images_eval.dtype == self.dtype
            except Exception as ex:
                # A broken input pipeline is fatal; exit with a distinct code.
                logger.error("Dataset iterator failed. Error message: {}".format(ex))
                sys.exit(6)
        with tf.device(self.default_device):
            with tf.variable_scope('', reuse=reuse, use_resource=self.params['use_resource']):
                logits, _ = build_model(images_eval, self.params, is_training=False, weight_init=weight_init,
                                        dropout_rate=dropout_rate, reuse=reuse)
                logits = tf.cast(logits, tf.float32)
                correct_predictions = tf.equal(tf.cast(tf.argmax(logits, axis=1), tf.int32), labels_eval)
                accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
                tf.identity(accuracy, name='accuracy')
                # Top-5 is explicitly pinned to CPU (in_top_k placement).
                with tf.device(self.cpu_device):
                    accuracy_top_5 = tf.reduce_mean(
                        tf.cast(tf.nn.in_top_k(predictions=logits, targets=labels_eval, k=5), dtype=tf.float32))
                    tf.identity(accuracy_top_5, name='accuracy_top_5')

        return accuracy, accuracy_top_5

    def build_inference_graph(self, iterator):
        """Build the inference graph.

            Args:
                iterator: Initializable dataset iterator yielding (image, label).
            Return:
                dict with 'predict class' (argmax of softmax), 'real class'
                (the input labels), and 'prob' (softmax probabilities).
        """
        with tf.device(self.cpu_device):
            # weight_init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)
            weight_init = tf.glorot_uniform_initializer(dtype=tf.float32)
            (image_infer, label_infer) = iterator.get_next()
            assert image_infer.dtype == self.dtype
            tf.summary.image('images', tf.cast(image_infer, tf.float32), max_outputs=6)
        with tf.device(self.default_device):
            # AUTO_REUSE: share variables with a previously built graph if any.
            with tf.variable_scope('', reuse=tf.AUTO_REUSE, use_resource=self.params['use_resource']):
                logits, _ = build_model(image_infer, self.params, is_training=False, weight_init=weight_init)
                predictions = {
                    'predict class': tf.argmax(tf.nn.softmax(logits), axis=1),
                    'real class': label_infer,
                    'prob': tf.nn.softmax(logits, name='softmax_tensor')}
        return predictions

    def variables_initial_op(self):
        """Return a single op that initializes all graph variables.

        For Adam, the LOCAL_VARIABLES collection is additionally initialized.
        NOTE(review): local_init_op should already cover that collection, so
        the extra initializer looks redundant — confirm before removing.
        """
        global_init_op = tf.global_variables_initializer()
        local_init_op = tf.local_variables_initializer()

        if self.params['optimizer'] == 'adam':
            collection_variables = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)
            collection_init_op = tf.variables_initializer(collection_variables)
            return tf.group(*(collection_init_op, global_init_op, local_init_op))
        else:
            return tf.group(*(global_init_op, local_init_op))


if __name__ == '__main__':
    # Minimal smoke check: construct a Benchmark and print the resolved CPU
    # device. Benchmark.__init__ reads params['device'] (to pick the default
    # compute device), so that key must be present — without it this block
    # raised KeyError. 'cpu' is a value HWInfo accepts (it is used for
    # cpu_device resolution as well).
    params = {
        'batch_size': 4,
        'device': 'cpu',
    }
    bench = Benchmark(params, dtype=tf.float32)
    print(bench.cpu_device)