import tensorflow as tf
import tensorflow.contrib.slim as slim

def interfence(inputs,
                   scope,
                   num_classes=1000,
                   is_training=True,
                   reuse=None,
                   ):
    """Build a small conv-net classifier graph (TF1 / slim).

    Three conv->batch-norm->relu->maxpool stages, a global average pool,
    one hidden fully-connected layer, and a linear logits layer.

    Args:
        inputs: 4-D NHWC image tensor (e.g. [batch, 32, 32, 3] per the
            placeholders defined by callers in this file).
        scope: name of the outer variable scope for the whole network.
        num_classes: width of the final logits layer.
        is_training: train/inference switch forwarded to batch-norm.
        reuse: forwarded to tf.variable_scope so a second tower can
            share the first tower's variables.

    Returns:
        (logits, end_points): logits is [batch, num_classes]; end_points
        maps collection op names to intermediate tensors.
    """
    # Shared batch-norm configuration. NOTE(review): previously only
    # 'decay' was forwarded to slim.batch_norm while 'epsilon' and
    # 'updates_collections' in this dict were silently ignored; the
    # values below equal slim's defaults, so forwarding the whole dict
    # is behavior-preserving and removes the dead config.
    batch_norm_params = {
        'decay': 0.95,
        'epsilon': 0.001,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
        'scale': True,
        'is_training': is_training,
    }

    def _conv_block(net, num_outputs):
        # One stage: 3x3 conv (linear) -> batch-norm -> relu -> 2x2/2
        # max-pool, which halves the spatial dimensions.
        net = slim.conv2d(inputs=net, num_outputs=num_outputs, kernel_size=[3, 3], stride=1, padding='SAME', activation_fn=None)
        net = slim.batch_norm(net, **batch_norm_params)
        net = tf.nn.relu(net)
        return slim.max_pool2d(net, kernel_size=[2, 2], stride=2)

    print('using cnn network')
    with tf.variable_scope(scope, [inputs], reuse=reuse):
        with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d, slim.batch_norm], outputs_collections='end_points'):
            # Scope names 'layer1'..'layer5' are kept exactly so that
            # variable names (and thus checkpoints) are unchanged.
            with tf.variable_scope('layer1'):
                net = _conv_block(inputs, 64)

            with tf.variable_scope('layer2'):
                net = _conv_block(net, 128)

            with tf.variable_scope('layer3'):
                net = _conv_block(net, 256)

            # Global average pool over the spatial axes, then flatten
            # to [batch, 256] for the fully-connected head.
            net = tf.reduce_mean(net, [1, 2], keepdims=True, name='global_pool')
            net = slim.flatten(net)

            with tf.variable_scope('layer4'):
                net = slim.fully_connected(inputs=net, num_outputs=64, activation_fn=None)
                net = slim.batch_norm(net, **batch_norm_params)
                net = tf.nn.relu(net)

            with tf.variable_scope('layer5'):
                # Raw logits: no activation or normalization on the head.
                net = slim.fully_connected(inputs=net, num_outputs=num_classes, activation_fn=None, normalizer_fn=None)

            end_points = slim.utils.convert_collection_to_dict('end_points')

            return net, end_points

class network:
    """TF1 training/evaluation graph built around the `interfence` CNN.

    Creates input placeholders, two weight-sharing towers (train and
    test), cross-entropy losses, accuracy metrics, an Adam train op that
    also runs batch-norm statistic updates, and a Saver that covers the
    trainable weights plus the batch-norm moving statistics.
    """

    def __init__(self, scope, num_classes):
        self.scope = scope

        with tf.variable_scope(scope):
            # Placeholders: 32x32 RGB images with integer class labels.
            self.inputs_train = tf.placeholder(dtype=tf.float32, shape=[None, 32, 32, 3], name='inputs_train')
            self.labels_train = tf.placeholder(dtype=tf.uint8, shape=[None], name='labels_train')

            self.inputs_test = tf.placeholder(dtype=tf.float32, shape=[None, 32, 32, 3], name='inputs_test')
            self.labels_test = tf.placeholder(dtype=tf.uint8, shape=[None], name='labels_test')

            onehot_train = tf.one_hot(self.labels_train, depth=num_classes, dtype=tf.float32)
            onehot_test = tf.one_hot(self.labels_test, depth=num_classes, dtype=tf.float32)

            # The test tower reuses the train tower's variables and runs
            # batch-norm in inference mode.
            self.outputs_train, _ = interfence(inputs=self.inputs_train, scope='cnn', num_classes=num_classes, is_training=True, reuse=False)
            self.outputs_test, _ = interfence(inputs=self.inputs_test, scope='cnn', num_classes=num_classes, is_training=False, reuse=True)

            self.logits_loss_train = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.outputs_train, labels=onehot_train))
            self.logits_loss_test = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.outputs_test, labels=onehot_test))

            # Predicted class per example, and mean accuracy against the
            # one-hot labels.
            self.predict_train = tf.argmax(tf.nn.softmax(self.outputs_train, axis=1), axis=1)
            self.accuracy_train = tf.reduce_mean(tf.cast(tf.equal(self.predict_train, tf.argmax(onehot_train, axis=1)), tf.float32))
            self.predict_test = tf.argmax(tf.nn.softmax(self.outputs_test, axis=1), axis=1)
            self.accuracy_test = tf.reduce_mean(tf.cast(tf.equal(self.predict_test, tf.argmax(onehot_test, axis=1)), tf.float32))

            self.learn_rate = tf.placeholder(dtype=tf.float32, name='learn_rate')
            optimizer = tf.train.AdamOptimizer(self.learn_rate)

            self.variables_to_train = tf.trainable_variables()

            # Batch-norm moving averages are not trainable, but they must
            # be checkpointed together with the weights.
            all_globals = tf.global_variables()
            moving_stats = [v for v in all_globals if 'moving_mean' in v.name]
            moving_stats += [v for v in all_globals if 'moving_variance' in v.name]
            self.var_to_save = self.variables_to_train + moving_stats

            # Run the UPDATE_OPS (batch-norm statistic updates) before
            # each gradient application.
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                grads_and_vars = optimizer.compute_gradients(loss=self.logits_loss_train, var_list=self.variables_to_train)
                self.grad_updates = optimizer.apply_gradients(grads_and_vars)

            self.saver = tf.train.Saver(var_list=self.var_to_save)