from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
slim=tf.contrib.slim
def trunc_normal(stddev):
    # Truncated-normal initializer: values are drawn from a normal
    # distribution with the given standard deviation, re-drawn when they
    # fall more than two standard deviations from the mean.
    return tf.truncated_normal_initializer(stddev=stddev)

def bn_act_conv_drp(current, num_outputs, kernel_size, scope='block'):
    """Composite DenseNet layer: BatchNorm -> ReLU -> Conv2D -> Dropout.

    Args:
        current: input feature map tensor.
        num_outputs: number of output channels of the convolution.
        kernel_size: convolution kernel size, e.g. [1, 1] or [3, 3].
        scope: name prefix for the sub-layers' variable scopes.

    Returns:
        The transformed feature map tensor.
    """
    current = slim.batch_norm(current, scope=scope + '_bn')
    current = tf.nn.relu(current)
    current = slim.conv2d(current, num_outputs, kernel_size,
                          scope=scope + '_conv')
    # Bug fix: the keyword was misspelled `cope=`, which raised a
    # TypeError the first time this function was called.
    current = slim.dropout(current, scope=scope + '_dropout')
    return current

def block(net, layers, growth, scope='block'):
    """One DenseNet dense block.

    Runs `layers` iterations of: 1x1 bottleneck conv (4*growth channels)
    -> 3x3 conv (growth channels), concatenating each 3x3 output onto the
    running feature map along the channel axis (dense connectivity).

    Bug fix: in the original, the 3x3 conv and the concat were indented
    outside the loop, so every bottleneck except the last was discarded
    and only a single concatenation happened.

    Args:
        net: input feature map tensor (NHWC).
        layers: number of composite layers in this block.
        growth: growth rate k (channels added per layer).
        scope: name prefix for per-layer scopes.

    Returns:
        Feature map with layers * growth additional channels.
    """
    for idx in range(layers):
        bottleneck = bn_act_conv_drp(net, 4 * growth, [1, 1],
                                     scope=scope + '_conv1*1' + str(idx))
        tmp = bn_act_conv_drp(bottleneck, growth, [3, 3],
                              scope=scope + '_conv3*3' + str(idx))
        net = tf.concat(axis=3, values=[net, tmp])
    return net

def densenet(images, num_classes=1001, is_training=False,
             dropout_keep_prob=0.8, scope='densenet'):
    """DenseNet-121-style classifier (dense blocks of 6/12/24/16 layers).

    Architecture: 7x7/2 stem conv -> 3x3/2 max pool -> 4 dense blocks
    separated by transition layers (1x1 compressing conv + 2x2 average
    pool) -> final BN + ReLU -> global average pool -> 1x1 conv classifier.

    Args:
        images: input image batch, NHWC layout.
        num_classes: number of output classes for the logits.
        is_training: whether batch norm / dropout run in training mode.
        dropout_keep_prob: keep probability for the dropout layers.
        scope: variable scope name.

    Returns:
        logits: class logits tensor of shape [batch, num_classes].
        end_points: dict mapping block names to intermediate activations.
    """
    growth = 24              # growth rate k: channels added per dense layer
    compression_rate = 0.5   # transition layers halve the channel count

    def reduce_dim(input_feature):
        # Compressed channel count for a transition layer.
        return int(int(input_feature.shape[-1]) * compression_rate)

    end_points = {}

    with tf.variable_scope(scope, 'DenseNet', [images, num_classes]):
        # Bug fixes: `slim.arg_cope` misspelled `arg_scope`, and
        # `is_training` was hard-coded to True instead of using the
        # caller's flag. NOTE(review): `bn_drp_scope` is not defined in
        # this file chunk — presumably defined elsewhere; confirm it
        # accepts (is_training, keep_prob).
        with slim.arg_scope(bn_drp_scope(is_training=is_training,
                                         keep_prob=dropout_keep_prob)) as ssc:
            net = images
            # Stem: 7x7 stride-2 conv (no activation; BN+ReLU come from
            # the following composite layers), then 3x3 stride-2 max pool.
            net = slim.conv2d(net, 2 * growth, 7, stride=2,
                              padding='SAME', activation_fn=None,
                              scope='conv1_7*7')
            net = slim.max_pool2d(net, 3, stride=2, padding='SAME',
                                  scope='maxpool_3*3')

            # Dense Block 1 (6 layers).
            end_point = 'block1'
            net = block(net, 6, growth, scope='block1')
            end_points[end_point] = net

            # Transition Layer 1: 1x1 compressing conv + 2x2 average pool.
            net = bn_act_conv_drp(net, reduce_dim(net), [1, 1],
                                  scope='trans1_conv')
            net = slim.avg_pool2d(net, [2, 2], stride=2, padding='SAME',
                                  scope='trans1_avgpool')

            # Dense Block 2 (12 layers).
            end_point = 'block2'
            net = block(net, 12, growth, scope='block2')
            end_points[end_point] = net

            # Transition Layer 2.
            net = bn_act_conv_drp(net, reduce_dim(net), [1, 1],
                                  scope='trans2_conv')
            net = slim.avg_pool2d(net, [2, 2], stride=2, padding='SAME',
                                  scope='trans2_avgpool')

            # Dense Block 3 (24 layers).
            end_point = 'block3'
            net = block(net, 24, growth, scope='block3')
            end_points[end_point] = net

            # Transition Layer 3.
            net = bn_act_conv_drp(net, reduce_dim(net), [1, 1],
                                  scope='trans3_conv')
            net = slim.avg_pool2d(net, [2, 2], stride=2, padding='SAME',
                                  scope='trans3_avgpool')

            # Dense Block 4 (16 layers), then the final BN + ReLU that
            # the pre-activation ordering leaves outstanding.
            end_point = 'block4'
            net = block(net, 16, growth, scope='block4')
            net = slim.batch_norm(net, scope=scope + 'final_bn')
            net = tf.nn.relu(net)
            end_points[end_point] = net

            # Classifier: global average pool + 1x1 conv as a fully
            # connected layer.
            end_point = 'classification_layer'
            net = tf.reduce_mean(net, [1, 2], keep_dims=True)
            bias = tf.constant_initializer(0.01)
            # Bug fix: squeeze only the spatial axes [1, 2]; a bare
            # tf.squeeze would also drop the batch dimension when the
            # batch size is 1.
            logits = tf.squeeze(
                slim.conv2d(net, num_classes, [1, 1],
                            biases_initializer=bias, activation_fn=None,
                            normalizer_fn=None, scope='fc'),
                [1, 2])
            end_points[end_point] = net
        return logits, end_points












