from __future__ import division, print_function, absolute_import

import tensorflow.compat.v1 as tf

# Number of output classes (10 classes with 3-channel input looks like CIFAR-10 -- TODO confirm).
num_classes = 10


def weight_variable(shape):
    """Create a trainable weight tensor of the given shape, He-normal initialized."""
    he_init = tf.initializers.he_normal()
    return tf.Variable(he_init(shape))


def bias_variable(shape):
    """Create a trainable bias tensor of the given shape, initialized to zero."""
    return tf.Variable(tf.constant(0.0, shape=shape))


# Create some wrappers for simplicity
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
    """Conv2D wrapper: SAME-padded convolution, bias add, then ReLU.

    Args:
        x: 4-D input tensor [batch, height, width, in_channels].
        W: filter tensor [filter_h, filter_w, in_channels, out_channels].
        b: bias tensor [out_channels].
        strides: spatial stride applied to both height and width.

    Returns:
        The ReLU-activated feature map.
    """
    y = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    y = tf.nn.bias_add(y, b)
    return tf.nn.relu(y)


def max_pooling2d(x, k=2):
    """Down-sample x with a k-by-k max pool (stride k, SAME padding)."""
    window = [1, k, k, 1]
    return tf.nn.max_pool2d(x, ksize=window, strides=window, padding='SAME')


def lrn(x, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75):
    """Apply Local Response Normalization with AlexNet-style default parameters."""
    return tf.nn.lrn(x, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta)


def cnn_model(x, keep_prob):
    """Build a 4-conv-layer CNN and return per-class logits.

    Args:
        x: 4-D input batch [batch, height, width, 3]. The flatten size of the
           dense layer (4 * 4 * 64) assumes 32x32 spatial input
           (e.g. CIFAR-10) -- TODO confirm against the caller.
        keep_prob: probability of keeping a unit in dropout (1.0 disables it).

    Returns:
        Logits tensor of shape [batch, num_classes] (unnormalized scores).
    """
    with tf.name_scope('Conv_1'):
        # 5x5 conv, 3 -> 32 channels, then 2x2 max-pool and LRN.
        wc1 = weight_variable([5, 5, 3, 32])
        bc1 = bias_variable([32])
        conv1 = conv2d(x, wc1, bc1)
        pool1 = max_pooling2d(conv1, k=2)
        norm1 = lrn(pool1)
        # With 32x32 input: (batch, 16, 16, 32) here.

    with tf.name_scope('Conv_2'):
        # 5x5 conv, 32 -> 64 channels, then 2x2 max-pool and LRN.
        wc2 = weight_variable([5, 5, 32, 64])
        bc2 = bias_variable([64])
        conv2 = conv2d(norm1, wc2, bc2)
        pool2 = max_pooling2d(conv2, k=2)
        norm2 = lrn(pool2)
        # -> (batch, 8, 8, 64)

    with tf.name_scope('Conv_3'):
        # 3x3 conv, 64 -> 96 channels, then 2x2 max-pool and LRN.
        wc3 = weight_variable([3, 3, 64, 96])
        bc3 = bias_variable([96])
        conv3 = conv2d(norm2, wc3, bc3)
        pool3 = max_pooling2d(conv3, k=2)
        norm3 = lrn(pool3)
        # -> (batch, 4, 4, 96)

    with tf.name_scope('Conv_4'):
        # 3x3 conv, 96 -> 64 channels; LRN before pooling here, and k=1
        # pooling leaves the spatial size unchanged.
        wc4 = weight_variable([3, 3, 96, 64])
        bc4 = bias_variable([64])
        conv4 = conv2d(norm3, wc4, bc4)
        norm4 = lrn(conv4)
        pool4 = max_pooling2d(norm4, k=1)
        # -> (batch, 4, 4, 64)

    with tf.name_scope('Dense_Dropout'):
        # Fully connected layer: flatten the conv stack to 4*4*64 features
        # and project to 1024 hidden units.
        wd1 = weight_variable([4 * 4 * 64, 1024])
        bd1 = bias_variable([1024])
        # Reshape pool4 output to fit the fully connected layer input.
        pool_flat = tf.reshape(pool4, [-1, wd1.get_shape().as_list()[0]])
        fc1 = tf.nn.relu(tf.add(tf.matmul(pool_flat, wd1), bd1))
        # tf.nn.dropout takes a drop *rate*, hence the 1 - keep_prob.
        dropout = tf.nn.dropout(fc1, rate=1 - keep_prob)

    with tf.name_scope('Predictions'):
        # Final linear layer producing the class logits.
        wout = weight_variable([1024, num_classes])
        bout = bias_variable([num_classes])
        return tf.add(tf.matmul(dropout, wout), bout)


def cnn_model_fn(X, Y, params):
    """Wire the CNN into loss, training, and evaluation ops.

    Args:
        X: input batch tensor passed to cnn_model.
        Y: one-hot label tensor.
        params: dict providing 'keep_prob' and 'learning_rate'.

    Returns:
        Tuple of (loss_op, train_op, accuracy).
    """
    # Construct model
    logits = cnn_model(X, params['keep_prob'])

    # Mean softmax cross-entropy over the batch, minimized with Adam.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y)
    loss_op = tf.reduce_mean(cross_entropy)
    optimizer = tf.train.AdamOptimizer(learning_rate=params['learning_rate'])
    train_op = optimizer.minimize(loss_op)

    # Accuracy: fraction of argmax predictions matching the one-hot labels.
    correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
    return loss_op, train_op, accuracy
