from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf


def fully_connected(prev_layer, num_units):
    """Dense layer (with bias) followed by a ReLU non-linearity.

    Args:
        prev_layer: Input tensor feeding the dense layer.
        num_units: Number of output units of the dense layer.

    Returns:
        Tensor of shape (..., num_units) after ReLU activation.
    """
    dense_out = tf.layers.dense(prev_layer, num_units, use_bias=True, activation=None)
    return tf.nn.relu(dense_out)


def conv_layer(prev_layer, filters, kernel_size, strides, padding='SAME'):
    """2-D convolution (with bias) followed by a ReLU non-linearity.

    Args:
        prev_layer: Input 4-D tensor (NHWC assumed by tf.layers.conv2d default).
        filters: Number of convolution filters.
        kernel_size: Spatial size of the convolution kernel.
        strides: Convolution stride.
        padding: Padding scheme, 'SAME' by default.

    Returns:
        Feature-map tensor after ReLU activation.
    """
    pre_activation = tf.layers.conv2d(prev_layer, filters, kernel_size, strides, padding,
                                      use_bias=True, activation=None)
    return tf.nn.relu(pre_activation)


class EnflameAlexnet(object):
    """AlexNet-style CNN built with the TF1 `tf.layers` API.

    Graph structure (built eagerly in __init__, side effect: adds ops/variables
    to the default graph and prints each layer's static shape):
        5 conv blocks (4 of them followed by 3x3/stride-2 max-pool) ->
        flatten -> fc6(4096) -> dropout -> fc7(4096) -> dropout ->
        fc8(num_classes, logits, no activation).

    Args:
        x: Input image tensor, NHWC (tf.layers.conv2d default data format).
        rate: Dropout rate (fraction of units to DROP) used during training.
        num_classes: Width of the final logits layer. Defaults to 1000.
        is_training: When True, dropout is active; when False, dropout layers
            are identity pass-throughs.

    Intermediate tensors are exposed as attributes (conv10, pool10, ...,
    fc3) for inspection; fc3 holds the un-normalized logits.
    """

    def __init__(self, x, rate, num_classes=1000, is_training=False):
        self.x = x
        self.rate = rate
        self.num_classes = num_classes
        self.is_training = is_training

        # conv1.0: 96 filters, 3x3, stride 2
        conv10 = tf.layers.conv2d(self.x, filters=96, kernel_size=3,
                                  strides=2, padding="SAME", activation=tf.nn.relu, name="conv1_0")
        print(conv10.shape)
        self.conv10 = conv10

        pool10 = tf.layers.max_pooling2d(conv10, pool_size=3, strides=2, padding="SAME", name="pool1_0")
        print(pool10.shape)
        self.pool10 = pool10

        # conv1.1: 256 filters, 3x3, stride 1
        conv11 = tf.layers.conv2d(pool10, filters=256, kernel_size=3,
                                  strides=1, padding="SAME", activation=tf.nn.relu, name="conv1_1")
        print(conv11.shape)
        self.conv11 = conv11

        pool11 = tf.layers.max_pooling2d(conv11, pool_size=3, strides=2, padding="SAME", name="pool1_1")
        print(pool11.shape)
        self.pool11 = pool11

        # conv1.2: 256 filters, 3x3, stride 1
        conv112 = tf.layers.conv2d(pool11, filters=256, kernel_size=3, strides=1, padding="SAME",
                                   activation=tf.nn.relu, name="conv1_2")
        print(conv112.shape)
        self.conv112 = conv112

        pool12 = tf.layers.max_pooling2d(conv112, pool_size=3, strides=2, padding="SAME", name="pool1_2")
        print(pool12.shape)
        self.pool12 = pool12

        # conv1.3: 384 filters, 3x3, stride 1 (no pooling after this block)
        conv113 = tf.layers.conv2d(pool12, filters=384, kernel_size=3, strides=1, padding="SAME",
                                   activation=tf.nn.relu, name="conv1_3")
        print(conv113.shape)
        self.conv113 = conv113

        # conv2: 256 filters, 3x3, stride 2
        conv2 = tf.layers.conv2d(conv113, filters=256, kernel_size=3, strides=2, padding="SAME",
                                 activation=tf.nn.relu, name="conv2_0")
        print(conv2.shape)
        self.conv2 = conv2

        pool20 = tf.layers.max_pooling2d(conv2, 3, strides=2, padding="SAME", name="pool2_0")
        print(pool20.shape)
        self.pool20 = pool20

        # Flatten the final feature map for the fully-connected head.
        out = tf.layers.flatten(pool20, name='pool12_0_flatten')
        self.pool12_out = out

        # fc6: 4096 units + ReLU, then dropout.
        # BUG FIX: the original code never passed `training=` to
        # tf.layers.dropout (it defaults to False), so dropout was a no-op
        # even during training; the inference branch also used rate=1.0,
        # which would drop ALL activations if training were ever enabled.
        # A single call gated by `training=self.is_training` gives correct
        # behavior in both modes (identity at inference).
        fc1 = tf.layers.dense(out, 4096, name='fc6', activation=tf.nn.relu)
        self.fc1 = fc1
        fc1 = tf.layers.dropout(fc1, rate=self.rate, training=self.is_training, name='fc6_dropout')
        print(fc1.shape)
        self.fc1_drop = fc1

        # fc7: 4096 units + ReLU, then dropout (same gating as fc6).
        fc2 = tf.layers.dense(fc1, 4096, name='fc7', activation=tf.nn.relu)
        self.fc2 = fc2
        fc2 = tf.layers.dropout(fc2, rate=self.rate, training=self.is_training, name='fc7_dropout')
        print(fc2.shape)
        self.fc2_drop = fc2

        # fc8: final logits layer (no activation; apply softmax/loss outside).
        fc3 = tf.layers.dense(fc2, self.num_classes, name='fc8')
        print(fc3.shape)
        self.fc3 = fc3