"""
    作者：徐飞
    日期：2020/02/28
    版本：01
    功能：建立训练模型
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.contrib import slim
import numpy as np


# Composite layer: batch_normalize -> relu -> conv2d -> dropout
def bn_act_conv_drop(current, num_output, ksize, scope='block'):
    """Apply a BN -> ReLU -> Conv2D -> Dropout composite layer.

    :param current: input tensor
    :param num_output: number of output channels for the convolution
    :param ksize: convolution kernel size
    :param scope: variable-scope prefix shared by the sub-layers
    :return: output tensor
    """
    normalized = slim.batch_norm(current, scope=scope + 'bn')
    activated = tf.nn.relu(normalized)
    convolved = slim.conv2d(activated, num_output, ksize, scope=scope + 'conv')
    return slim.dropout(convolved, scope=scope + 'dropout')


# Dense block
def block(net, layers, growth, scope='block'):
    """Build one DenseNet dense block.

    Each iteration runs a 1x1 bottleneck (BN-ReLU-Conv-Dropout) followed
    by a 3x3 composite layer, then concatenates the new feature maps onto
    the running tensor along the channel axis (dense connectivity).

    :param net: input tensor
    :param layers: number of composite layers inside the block
    :param growth: growth rate (channels added per layer)
    :param scope: variable-scope prefix
    :return: output tensor with ``layers * growth`` additional channels
    """
    for i in range(layers):
        # Bottleneck: 1x1 conv producing 4 * growth channels.
        squeezed = bn_act_conv_drop(
            net, 4 * growth, [1, 1], scope=scope + '_conv1x1' + str(i))
        # 3x3 conv producing `growth` new channels.
        # NOTE(review): this scope lacks the leading underscore used by
        # the 1x1 branch; kept byte-identical so existing checkpoint
        # variable names still match.
        grown = bn_act_conv_drop(
            squeezed, growth, [3, 3], scope=scope + 'conv3x3' + str(i))
        # Stack the new feature maps onto the input channels (NHWC axis 3).
        net = tf.concat(axis=3, values=[net, grown])
    return net


# Transition layer
def transition(net, num_output, scope='transition'):
    """Transition layer: 1x1 compression conv then 2x2/2 average pooling.

    :param net: input tensor
    :param num_output: number of channels after compression
    :param scope: variable-scope prefix
    :return: spatially-halved, channel-compressed tensor
    """
    compressed = bn_act_conv_drop(net, num_output, [1, 1], scope=scope + '_conv1x1')
    return slim.avg_pool2d(compressed, [2, 2], stride=2, scope=scope + '_avgpool')


# Channel compression for transition layers
def reduce_dim(input_feature, compression_rate=0.5):
    """Compute the compressed channel count for a transition layer.

    Fixes the misspelled parameter name (``input_deature``) and exposes
    the previously hard-coded compression factor as a parameter with the
    original 0.5 default (DenseNet-BC halves channels at each transition).

    :param input_feature: tensor/array whose last ``shape`` entry is the
        channel count
    :param compression_rate: fraction of channels to keep (default 0.5)
    :return: int number of output channels (floor of channels * rate)
    """
    num_channels = int(input_feature.shape[-1])
    return int(num_channels * compression_rate)


# Default argument scope for batch_norm and dropout
def bn_drp_scope(is_training=True, keep_prob=0.8):
    """Return an arg_scope with defaults for slim.batch_norm and slim.dropout.

    Bug fix: the original computed ``keep_prob`` but never used it — the
    nested dropout arg_scope was commented out, so slim.dropout layers
    received neither ``is_training`` nor ``keep_prob``. The nested scope
    is restored here.

    :param is_training: whether the graph is in training mode
    :param keep_prob: dropout keep probability (forced to 1 at inference)
    :return: the combined arg_scope
    """
    # Disable dropout entirely outside of training.
    keep_prob = keep_prob if is_training else 1
    with slim.arg_scope(
        [slim.batch_norm],
            scale=True, is_training=is_training, updates_collections=None):
        with slim.arg_scope(
            [slim.dropout],
                is_training=is_training, keep_prob=keep_prob) as bsc:
            return bsc


# Global average pooling
def Global_Average_Pooling(x, stride=1):
    """Average-pool over the entire spatial extent of ``x``.

    :param x: 4-D tensor, NHWC layout — assumes the spatial dimensions
        are statically known, since ``np.shape`` is used on the tensor
    :param stride: pooling stride (default 1)
    :return: pooled tensor with 1x1 spatial extent
    """
    # Pool window covers the whole feature map (dims 1 and 2 in NHWC).
    shape = np.shape(x)
    window = [shape[1], shape[2]]
    return tf.layers.average_pooling2d(inputs=x, pool_size=window, strides=stride)


# Network architecture
def DenseNet(images, num_class, is_training=False, dropout_keep_prob=0.8, scope='DenseNet', type='121'):
    """
    Build a DenseNet classification graph: a 7x7/2 conv + 3x3/2 max-pool
    stem, four dense blocks separated by compressing transition layers,
    a final BN-ReLU, global average pooling, and a dense classifier.

    :param images: input image batch (NHWC)
    :param num_class: number of output classes
    :param is_training: enable training behaviour (BN updates, dropout)
    :param dropout_keep_prob: dropout keep probability
    :param scope: variable-scope name
    :param type: depth variant: '121', '169', '201' or '264'
    :return: logits, end_points
    """
    # Layers per dense block; the default corresponds to DenseNet-121.
    ly1, ly2,  ly3,  ly4 = 6, 12, 24, 16
    if type == '169':
        ly1, ly2, ly3, ly4 = 6, 12, 32, 32
    elif type == '201':
        ly1, ly2, ly3, ly4 = 6, 12, 48, 32
    elif type == '264':
        ly1, ly2, ly3, ly4 = 6, 12, 64, 48

    # Growth rate: channels added by each composite layer.
    # NOTE(review): the reference DenseNet uses growth 32 for these
    # depths; 24 here appears to be a deliberate smaller setting — confirm.
    growth = 24
    end_points = {}
    with tf.variable_scope(scope, 'DenseNet', [images, num_class]):
        with slim.arg_scope(bn_drp_scope(is_training=is_training,
                                         keep_prob=dropout_keep_prob)) as ssc:
            net = images
            # Stem: 7x7 conv (stride 2) then 3x3 max pool (stride 2).
            net = slim.conv2d(net, 2 * growth, 7, stride=2, scope='_conv7x7')
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='_maxpool')
            # Dense blocks with channel-halving transition layers between.
            net = block(net, ly1, growth, scope='block_1')
            net = transition(net, reduce_dim(net), scope='transition_1')
            net = block(net, ly2, growth, scope='block_2')
            net = transition(net, reduce_dim(net), scope='transition_2')
            net = block(net, ly3, growth, scope='block_3')
            net = transition(net, reduce_dim(net), scope='transition_3')
            net = block(net, ly4, growth, scope='block_4')
            # Final BN + ReLU before pooling.
            net = slim.batch_norm(net, scope='last_batch_normalize')
            net = tf.nn.relu(net)
            # Global average pooling over the remaining spatial extent.
            net = Global_Average_Pooling(net, stride=1)
            # biases_initializer = tf.constant_initializer(0.1)
            # 1x1-conv alternative to the dense classifier (disabled):
            # net = slim.conv2d(net, num_class, [1, 1],
            #                   biases_initializer=biases_initializer, scope='logits')
            # logits = tf.squeeze(net, [1, 2], name='spatialSqueeze')
            logits = tf.layers.dense(net, num_class, name='linear')
            end_points['Logits'] = logits
            end_points['predictions'] = slim.softmax(logits, scope='predictions')
            # model = Model((224, 224, 3), outputs=logits)
    return logits, end_points

