# -*- coding: utf-8 -*-
# @Time    : 2018/7/8 9:39 AM
# @Author  : Edwin
# @File    : densenet.py
# @Software: PyCharm

import tensorflow as tf

slim = tf.contrib.slim

def trunc_normal(stddev):
    """Return a truncated-normal weight initializer with the given stddev."""
    return tf.truncated_normal_initializer(stddev=stddev)

def bn_act_conv_drp(current, num_outputs, kernel_size, scope='block'):
    """Pre-activation composite layer: batch norm -> ReLU -> conv -> dropout.

    This is the BN-ReLU-Conv ordering used by every DenseNet unit in this
    file.

    Args:
      current: input feature-map tensor.
      num_outputs: number of output channels for the convolution.
      kernel_size: convolution kernel size, e.g. [1, 1] or [3, 3].
      scope: name prefix for the sub-layer scopes.

    Returns:
      The transformed feature-map tensor.
    """
    # Consistency fix: the batch-norm scope suffix was 'bn' (no
    # underscore), inconsistent with '_conv' and '_dropout' below.
    current = slim.batch_norm(current, scope=scope + '_bn')
    current = tf.nn.relu(current)
    current = slim.conv2d(current, num_outputs, kernel_size,
                          scope=scope + '_conv')
    current = slim.dropout(current, scope=scope + '_dropout')
    return current

def block(net, layers, growth, scope='block'):
    """Dense block: `layers` bottleneck units with dense connectivity.

    Each unit applies a 1x1 bottleneck (4 * growth channels) followed by a
    3x3 convolution producing `growth` new channels; the new channels are
    concatenated onto the running feature map, so the channel count grows
    by `growth` per layer.

    Args:
      net: input feature-map tensor.
      layers: number of bottleneck units in the block.
      growth: DenseNet growth rate (channels added per unit).
      scope: name prefix for the unit scopes.

    Returns:
      The concatenated feature-map tensor.
    """
    for idx in range(layers):
        bottleneck = bn_act_conv_drp(net, 4 * growth, [1, 1],
                                     scope=scope + 'conv11' + str(idx))
        tmp = bn_act_conv_drp(bottleneck, growth, [3, 3],
                              scope=scope + 'conv33' + str(idx))
        # Bug fix: DenseNet concatenates the unit's *input* with the new
        # features (dense connectivity). The original concatenated the
        # bottleneck output instead, which discarded all previously
        # accumulated features and kept the channel count constant.
        net = tf.concat(axis=3, values=(net, tmp))
    return net

def bn_drp_scope(is_training=True, keep_prob=0.8):
    """Build an arg scope configuring batch norm and dropout for train/eval.

    Args:
      is_training: whether the graph is being built for training.
      keep_prob: dropout keep probability used while training.

    Returns:
      An `arg_scope` dict covering `slim.batch_norm` and `slim.dropout`.
    """
    # Dropout is disabled (keep everything) outside of training.
    effective_keep = keep_prob if is_training else 1
    with slim.arg_scope([slim.batch_norm],
                        scale=True,
                        is_training=is_training,
                        updates_collections=None):
        with slim.arg_scope([slim.dropout],
                            is_training=is_training,
                            keep_prob=effective_keep) as scope:
            return scope

def densenet(images, num_classes=1001, is_training=False,
             dropout_keep_prob=0.8,
             scope='densenet'):
    """Builds a DenseNet-BC style classification network.

    Architecture: stem conv/pool, four dense blocks of 6/12/24/16 layers
    separated by 1x1-conv + 2x2-avg-pool transition layers, final
    BN + ReLU, global average pooling, and a 1x1-conv classifier.

    Args:
      images: 4-D input tensor, NHWC layout.
      num_classes: number of output classes.
      is_training: whether batch norm and dropout run in training mode.
      dropout_keep_prob: dropout keep probability used while training.
      scope: variable-scope name for the network.

    Returns:
      logits: [batch, num_classes] pre-softmax activations.
      end_points: dict mapping layer names to intermediate tensors.
    """
    growth = 24            # channels added by each dense-block layer
    compress_rate = 0.5    # transition-layer channel compression factor

    def reduce_dim(input_feature):
        # Compressed channel count for a transition layer.
        return int(int(input_feature.shape[-1]) * compress_rate)

    end_points = {}

    with tf.variable_scope(scope, 'DenseNet', [images, num_classes]):
        with slim.arg_scope(bn_drp_scope(is_training=is_training,
                                         keep_prob=dropout_keep_prob)):
            net = images

            # Stem: 3x3 stride-2 conv followed by 3x3 stride-2 max pool.
            end_point = 'conv2d_33_16_channel'
            net = slim.conv2d(net, 2 * growth, [3, 3], stride=2,
                              scope=end_point)
            end_points[end_point] = net
            # Bug fix: the max pool previously reused the conv's scope
            # string and overwrote its end_points entry, losing the conv
            # output; record the pool under its own name.
            end_point = 'max_pool_33'
            net = slim.max_pool2d(net, [3, 3], stride=2, padding='SAME',
                                  scope=end_point)
            end_points[end_point] = net

            # Dense block 1 (6 layers).
            end_point = 'Dense_Block_1'
            net = block(net, 6, growth, scope='block1')
            end_points[end_point] = net

            # Transition layer 1: 1x1 conv compresses channels, then a
            # 2x2 average pool halves the spatial resolution.
            # NOTE(review): end_point keys spell "Transaction"; kept
            # byte-identical for backward compatibility with callers.
            end_point = 'Transaction_Layer_1_11conv2d'
            net = bn_act_conv_drp(net, reduce_dim(net), [1, 1],
                                  scope=end_point)
            end_points[end_point] = net
            end_point = 'Transaction_Layer_1_22_average_pool'
            net = slim.avg_pool2d(net, [2, 2], stride=2, scope=end_point)
            end_points[end_point] = net

            # Dense block 2 (12 layers).
            end_point = 'Dense_Block_2'
            net = block(net, 12, growth, scope='block2')
            end_points[end_point] = net

            # Transition layer 2.
            end_point = 'Transaction_Layer_2_11_conv2d'
            net = bn_act_conv_drp(net, reduce_dim(net), [1, 1],
                                  scope=end_point)
            end_points[end_point] = net
            end_point = 'Transaction_Layer_2_22_average_pool'
            net = slim.avg_pool2d(net, [2, 2], stride=2, scope=end_point)
            end_points[end_point] = net

            # Dense block 3 (24 layers).
            end_point = 'Dense_Block_3'
            net = block(net, 24, growth, scope='block3')
            end_points[end_point] = net

            # Transition layer 3 (the 'verage' typo in the key is kept so
            # existing end_points consumers keep working).
            end_point = 'Transaction_Layer_3_11_conv2d'
            net = bn_act_conv_drp(net, reduce_dim(net), [1, 1],
                                  scope=end_point)
            end_points[end_point] = net
            end_point = 'Transaction_Layer_3_22_verage_pool'
            net = slim.avg_pool2d(net, [2, 2], stride=2, scope=end_point)
            end_points[end_point] = net

            # Dense block 4 (16 layers) followed by the final BN + ReLU.
            end_point = 'Dense_Block_4'
            net = block(net, 16, growth, scope='block4')
            net = slim.batch_norm(net, scope='last_batch_norm_relu')
            net = tf.nn.relu(net)
            end_points[end_point] = net

            # Global average pooling over the spatial dimensions.
            net = tf.reduce_mean(net, [1, 2], keep_dims=True,
                                 name='GlobalPool')
            end_points['global_pool'] = net

            # 1x1 conv acts as the fully connected classifier head.
            net = slim.conv2d(net, num_classes, [1, 1], scope='logits')
            logits = tf.squeeze(net, [1, 2], name='SpatialSqueeze')

            end_points['Logits'] = logits
            end_points['predictions'] = slim.softmax(logits,
                                                     scope='predictions')

    return logits, end_points

def densenet_arg_scope(weight_decay=0.004):
    """Defines the default densenet argument scope.

    Args:
      weight_decay: The weight decay to use for regularizing the model.

    Returns:
      An `arg_scope` to use for the densenet model.
    """
    with slim.arg_scope(
        [slim.conv2d],
        weights_initializer=tf.contrib.layers.variance_scaling_initializer(
            factor=2.0, mode='FAN_IN', uniform=False),
        # Bug fix: weight_decay was accepted and documented but never
        # applied; wire it in as an L2 regularizer on the conv weights.
        weights_regularizer=slim.l2_regularizer(weight_decay),
        activation_fn=None,
        biases_initializer=None,
        # Bug fix: TF1 conv padding must be uppercase 'SAME'/'VALID';
        # lowercase 'same' raises a ValueError at graph-build time.
        padding='SAME',
        stride=1) as sc:
        return sc

# Default input resolution (ImageNet-scale); slim model consumers read
# this attribute to size their input pipeline.
densenet.default_image_size = 224
# Alternative for CIFAR-style 32x32 inputs:
#densenet.default_image_size = 32