from __future__ import division, print_function, absolute_import

import numpy as np
import tensorflow.compat.v1 as tf

# Input image geometry: 32x32 RGB — the CIFAR-10 layout (see the
# [128, 32, 32, 3] dummy batch in __main__).
HEIGHT = 32
WIDTH = 32
DEPTH = 3

# Number of output classes for the final logits layer.
NUM_CLASSES = 10


def cnn_model(features, mode, params):
    """Build the CNN graph and return the class logits.

    Architecture: four conv -> pool -> local-response-norm stages,
    a 1024-unit dense layer with dropout, and a linear logits layer.

    Args:
        features: input tensor; reshaped to [-1, HEIGHT, WIDTH, DEPTH].
        mode: a tf.estimator.ModeKeys value; TRAIN enables dropout.
        params: dict with keys 'dropout_rate' (float, dropout rate for the
            dense layer) and 'print_shape' (bool, print layer shapes at
            graph-construction time).

    Returns:
        Logits tensor of shape [batch, NUM_CLASSES].
    """
    dropout_rate = params['dropout_rate']
    print_shape = params['print_shape']
    is_training = mode == tf.estimator.ModeKeys.TRAIN

    with tf.name_scope('Input'):
        # Input Layer
        input_layer = tf.reshape(features, [-1, HEIGHT, WIDTH, DEPTH], name='input_reshape')
        tf.summary.image('input', input_layer)
        if print_shape:
            # Both prints are now guarded by print_shape; the original
            # printed the pre-reshape shape unconditionally.
            print("------------- before ----------------", features.get_shape())
            print("------------- after -----------------", input_layer.get_shape())

    # NOTE(review): trainable=is_training freezes variables in non-TRAIN
    # graphs. With Estimator each mode builds its own graph, so this is
    # redundant rather than harmful; kept as-is to preserve collections.
    with tf.name_scope('Conv_1'):
        # Convolutional Layer #1
        conv1 = tf.layers.conv2d(input_layer, 32, 5, padding='same', activation=tf.nn.relu, trainable=is_training)
        tf.summary.histogram('Convolution_layers/conv1', conv1)
        # Pooling Layer #1
        pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=(2, 2), strides=2, padding='same')
        norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
        if print_shape:
            print("------- Conv_1 ----------", pool1.get_shape())

    with tf.name_scope('Conv_2'):
        # Convolutional Layer #2 and Pooling Layer #2
        conv2 = tf.layers.conv2d(norm1, 64, 5, padding='same', activation=tf.nn.relu, trainable=is_training)
        tf.summary.histogram('Convolution_layers/conv2', conv2)
        pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=(2, 2), strides=2, padding='same')
        norm2 = tf.nn.lrn(pool2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
        if print_shape:
            print("------- Conv_2 ----------", pool2.get_shape())

    with tf.name_scope('Conv_3'):
        # Convolutional Layer #3 and Pooling Layer #3
        conv3 = tf.layers.conv2d(norm2, 96, 3, padding='same', activation=tf.nn.relu, trainable=is_training)
        tf.summary.histogram('Convolution_layers/conv3', conv3)
        pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=(2, 2), strides=2, padding='same')
        norm3 = tf.nn.lrn(pool3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm3')
        if print_shape:
            print("------- Conv_3 ----------", pool3.get_shape())

    with tf.name_scope('Conv_4'):
        # Convolutional Layer #4 and Pooling Layer #4.  Note: norm is
        # applied before pooling here (the reverse of stages 1-3) and the
        # pool uses stride 1, so the spatial size is preserved.
        conv4 = tf.layers.conv2d(norm3, 64, 3, padding='same', activation=tf.nn.relu, trainable=is_training)
        tf.summary.histogram('Convolution_layers/conv4', conv4)
        norm4 = tf.nn.lrn(conv4, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm4')
        pool4 = tf.layers.max_pooling2d(inputs=norm4, pool_size=(2, 2), strides=1, padding='same')
        if print_shape:
            print("------- Conv_4 ----------", pool4.get_shape())

    with tf.name_scope('Dense_Dropout'):
        # Dense Layer; dropout is active only when is_training is True.
        pool_flat = tf.layers.flatten(pool4)
        dense = tf.layers.dense(inputs=pool_flat, units=1024, activation=tf.nn.relu, trainable=is_training)
        dropout = tf.layers.dropout(inputs=dense, rate=dropout_rate, training=is_training)
        tf.summary.histogram('fully_connected_layers/dropout', dropout)
        if print_shape:
            print("------- Dense_Dropout ----------", dropout.get_shape())

    with tf.name_scope('Predictions'):
        # Logits Layer — use the NUM_CLASSES constant instead of the
        # magic number 10 so the class count is defined in one place.
        logits = tf.layers.dense(inputs=dropout, units=NUM_CLASSES, trainable=is_training)
        return logits


def cnn_model_fn(features, labels, mode, params):
    """Estimator model_fn for the CNN.

    Args:
        features: batch of input images (passed through to cnn_model).
        labels: one-hot label tensor of shape [batch, NUM_CLASSES];
            ignored in PREDICT mode.
        mode: a tf.estimator.ModeKeys value.
        params: dict with 'learning_rate', plus the keys cnn_model reads
            ('dropout_rate', 'print_shape').

    Returns:
        A tf.estimator.EstimatorSpec configured for the requested mode.
    """
    learning_rate = params['learning_rate']

    logits = cnn_model(features, mode, params)
    predicted_logit = tf.argmax(input=logits, axis=1, output_type=tf.int32)
    scores = tf.nn.softmax(logits, name='softmax_tensor')

    # Generate Predictions
    predictions = {
        'classes': predicted_logit,
        'probabilities': scores
    }

    export_outputs = {
        'prediction': tf.estimator.export.ClassificationOutput(scores=scores,
                                                               classes=tf.cast(predicted_logit, tf.string))
    }

    # For PREDICTION mode
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, export_outputs=export_outputs)

    # For TRAIN and EVAL modes (removed a leftover unconditional debug
    # print of labels/logits shapes here).
    loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)

    # Streaming accuracy (metric op) for EVAL, plus a plain batch-mean
    # accuracy scalar for the TRAIN summary.
    accuracy = tf.metrics.accuracy(tf.argmax(labels, axis=1), predicted_logit)
    train_accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(labels, axis=1, output_type=tf.int32), predicted_logit), tf.float32))

    eval_metric = {'test_accuracy': accuracy}

    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        tf.summary.scalar('train_accuracy', train_accuracy)
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.train.get_global_step(),
            learning_rate=learning_rate,
            # The decay callback must use its own (lr, step) arguments —
            # the original ignored them and closed over outer variables.
            # Decay by 0.94 every 780 steps, staircase schedule.
            learning_rate_decay_fn=lambda lr, step: tf.train.exponential_decay(lr,
                                                                               step,
                                                                               780,
                                                                               0.94,
                                                                               staircase=True),
            optimizer='Adam')
    else:
        train_op = None

    # EstimatorSpec fully defines the model to be run by an Estimator.
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric,
        # A dict of name/value pairs specifying the metrics that will be calculated when the model runs in EVAL mode.
        predictions=predictions,
        export_outputs=export_outputs)


if __name__ == '__main__':
    # Smoke test: build the TRAIN-mode graph on a dummy all-ones batch,
    # run it once, and print the logits shape.
    params_ = {
        'learning_rate': 0.001,
        'dropout_rate': 0.4,
        'print_shape': True
    }
    input_tensor = tf.constant(np.ones([128, 32, 32, 3]), dtype=tf.float32)
    result = cnn_model(input_tensor, tf.estimator.ModeKeys.TRAIN, params_)
    init = tf.global_variables_initializer()
    # Context manager guarantees the session is closed; the writer is
    # explicitly closed too (the original leaked both, only flushing
    # the writer).
    with tf.Session() as sess:
        sess.run(init)
        print(sess.run(tf.shape(result)))
        summary = tf.summary.FileWriter('./temp', sess.graph)
        summary.flush()
        summary.close()
