from __future__ import division, print_function, absolute_import

import numpy as np
import tensorflow.compat.v1 as tf

HEIGHT = 227
WIDTH = 227
DEPTH = 3

NUM_CLASSES = 10


def alex_net_model(features, mode, params):
    """Build an AlexNet-style CNN graph (TF1 layers API) and return logits.

    Args:
        features: Input tensor; reshaped to [-1, HEIGHT, WIDTH, DEPTH].
        mode: A ``tf.estimator.ModeKeys`` value; TRAIN enables dropout.
        params: Dict with keys ``'dropout_rate'`` (float, dropout probability
            for the two fully-connected layers) and ``'print_shape'`` (bool,
            print layer output shapes while the graph is built).

    Returns:
        Logits tensor of shape ``[batch, NUM_CLASSES]``.
    """
    dropout_rate = params['dropout_rate']
    print_shape = params['print_shape']
    is_training = mode == tf.estimator.ModeKeys.TRAIN
    # NOTE(review): trainable=is_training below marks variables non-trainable
    # in EVAL/PREDICT graphs. Harmless under tf.estimator (a fresh graph is
    # built per mode), but plain trainable=True is the conventional choice.

    # Guard the debug print like every other shape print in this function.
    if print_shape:
        print("------------- before ----------------", features.get_shape())
    with tf.name_scope('Input'):
        # Input Layer
        input_layer = tf.reshape(features, [-1, HEIGHT, WIDTH, DEPTH], name='input_reshape')
        tf.summary.image('input', input_layer)
        if print_shape:
            print("------------- after -----------------", input_layer.get_shape())

    with tf.name_scope('Conv_1'):
        # Convolutional Layer #1: 96 filters, 11x11, stride 4, valid padding.
        conv1 = tf.layers.conv2d(input_layer, 96, 11, 4, 'valid', activation=tf.nn.relu, trainable=is_training)
        tf.summary.histogram('Convolution_layers/conv1', conv1)
        # Local response normalization, as in the original AlexNet.
        norm1 = tf.nn.lrn(conv1, depth_radius=2, bias=1.0, alpha=2e-05, beta=0.75, name='norm1')
        # Pooling Layer #1: 3x3 overlapping max-pool, stride 2.
        pool1 = tf.layers.max_pooling2d(norm1, pool_size=3, strides=2, padding='valid', name='pool1')
        if print_shape:
            print("------- Conv_1 ----------", pool1.get_shape())

    with tf.name_scope('Conv_2'):
        # Convolutional Layer #2: 256 filters, 5x5, stride 1, same padding.
        conv2 = tf.layers.conv2d(pool1, 256, 5, 1, 'same', activation=tf.nn.relu, trainable=is_training)
        tf.summary.histogram('Convolution_layers/conv2', conv2)
        norm2 = tf.nn.lrn(conv2, depth_radius=2, bias=1.0, alpha=2e-05, beta=0.75, name='norm2')
        # Pooling Layer #2
        pool2 = tf.layers.max_pooling2d(norm2, pool_size=3, strides=2, padding='valid', name='pool2')
        if print_shape:
            print("------- Conv_2 ----------", pool2.get_shape())

    with tf.name_scope('Conv_3'):
        # Convolutional Layer #3: 384 filters, 3x3; no LRN/pooling here.
        conv3 = tf.layers.conv2d(pool2, 384, 3, 1, 'same', activation=tf.nn.relu, trainable=is_training)
        tf.summary.histogram('Convolution_layers/conv3', conv3)
        if print_shape:
            print("------- Conv_3 ----------", conv3.get_shape())

    with tf.name_scope('Conv_4'):
        # Convolutional Layer #4: 384 filters, 3x3.
        conv4 = tf.layers.conv2d(conv3, 384, 3, 1, 'same', activation=tf.nn.relu, trainable=is_training)
        tf.summary.histogram('Convolution_layers/conv4', conv4)
        if print_shape:
            print("------- Conv_4 ----------", conv4.get_shape())

    with tf.name_scope('Conv_5'):
        # Convolutional Layer #5: 256 filters, 3x3, followed by max-pool.
        conv5 = tf.layers.conv2d(conv4, 256, 3, 1, 'same', activation=tf.nn.relu, trainable=is_training)
        # BUG FIX: the histogram previously recorded conv4 under the conv5 name.
        tf.summary.histogram('Convolution_layers/conv5', conv5)
        pool5 = tf.layers.max_pooling2d(conv5, pool_size=3, strides=2, padding='valid', name='pool5')
        if print_shape:
            print("------- Conv_5 ----------", pool5.get_shape())

    with tf.name_scope('Dense_Dropout1'):
        # Dense Layer #1: flatten conv features, then 4096-unit FC + dropout.
        pool_flat = tf.layers.flatten(pool5)
        dense1 = tf.layers.dense(pool_flat, units=4096, activation=tf.nn.relu, trainable=is_training)
        dropout1 = tf.layers.dropout(dense1, rate=dropout_rate, training=is_training)
        tf.summary.histogram('fully_connected_layers/dropout1', dropout1)
        if print_shape:
            print("------- Dense_Dropout1 ----------", dropout1.get_shape())

    with tf.name_scope('Dense_Dropout2'):
        # Dense Layer #2: second 4096-unit FC + dropout.
        dense2 = tf.layers.dense(dropout1, units=4096, activation=tf.nn.relu, trainable=is_training)
        dropout2 = tf.layers.dropout(dense2, rate=dropout_rate, training=is_training)
        tf.summary.histogram('fully_connected_layers/dropout2', dropout2)
        if print_shape:
            # BUG FIX: previously printed the Dense_Dropout1 label/shape again.
            print("------- Dense_Dropout2 ----------", dropout2.get_shape())

    with tf.name_scope('Predictions'):
        # Logits Layer. BUG FIX: was hard-coded to 1000 (ImageNet's class
        # count) while the module-level NUM_CLASSES constant went unused.
        logits = tf.layers.dense(dropout2, units=NUM_CLASSES, trainable=is_training)
        return logits


if __name__ == '__main__':
    # Smoke test: build the graph on a dummy batch, print the logits shape,
    # and dump the graph for TensorBoard inspection.
    params_ = {
        'learning_rate': 0.001,
        'dropout_rate': 0.4,
        'print_shape': True
    }
    input_tensor = tf.constant(np.ones([128, HEIGHT, WIDTH, DEPTH]), dtype=tf.float32)
    result = alex_net_model(input_tensor, tf.estimator.ModeKeys.TRAIN, params_)
    init = tf.global_variables_initializer()
    # Context manager ensures the session is released even on error
    # (it was previously opened and never closed).
    with tf.Session() as sess:
        sess.run(init)
        print(sess.run(tf.shape(result)))
        writer = tf.summary.FileWriter('./temp', sess.graph)
        writer.flush()
        # Close the writer to flush remaining events and release the file handle.
        writer.close()