from __future__ import division, print_function, absolute_import

import numpy as np
import tensorflow.compat.v1 as tf

# Spatial input size expected by the network (the standard VGG / ImageNet
# 224x224 RGB crop); features are reshaped to [-1, HEIGHT, WIDTH, DEPTH].
HEIGHT = 224
WIDTH = 224
DEPTH = 3  # RGB channel count

# NOTE(review): defined but never used — vgg_net_19_model hard-codes 1000
# output units in its logits layer. Confirm the intended number of classes.
NUM_CLASSES = 10


def _vgg_conv_group(net, filters, num_convs, group_idx, print_shape):
    """Apply one VGG conv group: `num_convs` 3x3 same-padded ReLU convs,
    then a 2x2/stride-2 max pool. Returns the pooled tensor."""
    for conv_idx in range(1, num_convs + 1):
        with tf.name_scope('Conv_{}_{}'.format(group_idx, conv_idx)):
            # Stride is fixed to 1 pixel; 'same' padding preserves H/W.
            net = tf.layers.conv2d(net, filters, 3, padding='same',
                                   activation=tf.nn.relu)
    if print_shape:
        print("------- Conv_{}_{} ----------".format(group_idx, num_convs),
              net.get_shape())
    return tf.layers.max_pooling2d(net, pool_size=2, strides=2, padding='same',
                                   name='pool{}'.format(group_idx))


def vgg_net_19_model(features, mode, params):
    """Build the VGG-19 forward graph and return class logits.

    The convolution stride is fixed to 1 pixel.

    Args:
        features: Input tensor, reshapeable to [-1, HEIGHT, WIDTH, DEPTH].
        mode: A tf.estimator.ModeKeys value; dropout is active only in TRAIN.
        params: Dict with 'dropout_rate' (float), 'print_shape' (bool), and
            optionally 'num_classes' (int, defaults to 1000 to match the
            original hard-coded logits width).

    Returns:
        A [batch, num_classes] logits tensor.
    """
    dropout_rate = params['dropout_rate']
    print_shape = params['print_shape']
    # Backward-compatible generalization: default preserves the original
    # hard-coded 1000-unit logits layer.
    num_classes = params.get('num_classes', 1000)
    is_training = mode == tf.estimator.ModeKeys.TRAIN

    print("------------- before ----------------", features.get_shape())
    with tf.name_scope('Input'):
        # Input Layer
        input_layer = tf.reshape(features, [-1, HEIGHT, WIDTH, DEPTH],
                                 name='input_reshape')
        tf.summary.image('input', input_layer)
        if print_shape:
            print("------------- after -----------------", input_layer.get_shape())

    # The five VGG-19 conv groups as (filters, number of 3x3 convs).
    # NOTE: the original passed trainable=is_training to every layer; that is
    # a no-op (TRAIN mode implies trainable=True, and no optimizer runs in the
    # other modes), so the misleading flag is dropped here.
    net = input_layer
    for group_idx, (filters, num_convs) in enumerate(
            [(64, 2), (128, 2), (256, 4), (512, 4), (512, 4)], start=1):
        net = _vgg_conv_group(net, filters, num_convs, group_idx, print_shape)

    with tf.name_scope('FC_6'):
        pool_flat = tf.layers.flatten(net)
        dense1 = tf.layers.dense(pool_flat, units=4096, activation=tf.nn.relu)
        # Dropout is a no-op outside TRAIN mode.
        dropout1 = tf.layers.dropout(dense1, rate=dropout_rate, training=is_training)
        tf.summary.histogram('fully_connected_layers_6', dropout1)
        if print_shape:
            print("------- Dense_Dropout1 ----------", dropout1.get_shape())

    with tf.name_scope('FC_7'):
        # Dense Layer #2
        dense2 = tf.layers.dense(dropout1, units=4096, activation=tf.nn.relu)
        dropout2 = tf.layers.dropout(dense2, rate=dropout_rate, training=is_training)
        tf.summary.histogram('fully_connected_layers_7', dropout2)
        if print_shape:
            # BUG FIX: the original printed dropout1 here under a
            # "Dense_Dropout1" label — report this layer's own output.
            print("------- Dense_Dropout2 ----------", dropout2.get_shape())

    with tf.name_scope('Predictions'):
        # Logits Layer (no activation; softmax belongs to the loss/serving head)
        logits = tf.layers.dense(dropout2, units=num_classes)
        return logits


if __name__ == '__main__':
    # Smoke test: build the graph on a dummy batch, run one forward pass, and
    # report the logit shape; also dump the graph for TensorBoard.
    params_ = {
        'learning_rate': 0.001,
        'dropout_rate': 0.4,
        'print_shape': True
    }
    input_tensor = tf.constant(np.ones([128, HEIGHT, WIDTH, DEPTH]), dtype=tf.float32)
    result = vgg_net_19_model(input_tensor, tf.estimator.ModeKeys.TRAIN, params_)
    init = tf.global_variables_initializer()
    # Use the session as a context manager so it is always released (the
    # original leaked the Session and never closed the FileWriter).
    with tf.Session() as sess:
        sess.run(init)
        print(sess.run(tf.shape(result)))
        writer = tf.summary.FileWriter('./temp', sess.graph)
        writer.flush()
        writer.close()
