"""
AlexNet网络结构
"""
import tensorflow as tf


# Convolution layer helper
def conv_op(input_op, scope_name, kernel_height, kernel_width, output_op, dh, dw, padding='SAME'):
    """Create a conv2d + bias + ReLU layer.

    Args:
        input_op: 4-D input tensor, NHWC layout.
        scope_name: variable scope for this layer's parameters.
        kernel_height: filter height.
        kernel_width: filter width.
        output_op: number of output channels (filters).
        dh: vertical stride.
        dw: horizontal stride.
        padding: conv2d padding mode, 'SAME' (default) or 'VALID'.

    Returns:
        The ReLU-activated output tensor.
    """
    # Number of input channels (image channels or previous layer's filters).
    channel_input = input_op.get_shape()[-1].value
    # NOTE: tf.variable_scope (not tf.name_scope) so that tf.get_variable is
    # actually scoped — name_scope is ignored by get_variable, and the old
    # `scope + "w"` string concat defeated variable reuse. The resulting
    # variable names ("<scope>/w", "<scope>/b") are unchanged.
    with tf.variable_scope(scope_name):
        kernel = tf.get_variable("w", shape=[kernel_height, kernel_width, channel_input, output_op],
                                 dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, strides=[1, dh, dw, 1], padding=padding)
        # Bias via get_variable (instead of tf.Variable) so the layer supports
        # variable sharing/reuse like the kernel does.
        biases = tf.get_variable("b", shape=[output_op], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.0))
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name="relu")
        return activation


# Max-pooling layer helper
def maxpool_op(input_op, scope_name, kernel_height, kernel_width, dh, dw):
    """Apply SAME-padded max pooling over *input_op* (NHWC layout).

    Args:
        input_op: 4-D input tensor.
        scope_name: op name for the pooling node.
        kernel_height: pooling window height.
        kernel_width: pooling window width.
        dh: vertical stride.
        dw: horizontal stride.

    Returns:
        The pooled tensor.
    """
    window = [1, kernel_height, kernel_width, 1]
    stride = [1, dh, dw, 1]
    return tf.nn.max_pool(input_op, ksize=window, strides=stride,
                          padding='SAME', name=scope_name)


# AlexNet
def _fc_layer(input_op, scope_name, output_dim, regularizer, activation=True):
    """One fully-connected layer: matmul + bias (+ optional ReLU).

    If *regularizer* is not None, its penalty on the weight matrix is added
    to the 'losses' collection.
    """
    input_dim = input_op.get_shape()[-1].value
    with tf.variable_scope(scope_name):
        weight = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=0.1, dtype=tf.float32),
                             name=scope_name + "_weight")
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(weight))
        bias = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[output_dim]),
                           name=scope_name + "_bias")
        z = tf.add(tf.matmul(input_op, weight), bias)
        # The final (logit) layer is returned without an activation.
        return tf.nn.relu(z) if activation else z


def alexnet(input_tensor, num_classes, regularizer):
    """Build the AlexNet forward graph and return un-normalized logits.

    Args:
        input_tensor: 4-D image batch, NHWC layout.
        num_classes: number of output classes of the final layer.
        regularizer: callable mapping a weight tensor to a scalar penalty,
            or None. Penalties are added to the 'losses' collection.
            FIX: previously only fc8 was regularized; fc6/fc7 weights now
            get the same treatment for consistency.

    Returns:
        Logits tensor of shape [batch, num_classes].
    """
    # Layer 1: conv -> local response normalization -> max pool
    conv1 = conv_op(input_tensor, scope_name="conv1", kernel_height=11, kernel_width=11, output_op=64, dh=4, dw=4)
    lrn1 = tf.nn.lrn(conv1, depth_radius=4, bias=1.0, alpha=1e-3 / 9, beta=0.75, name="lrn1")
    pool1 = maxpool_op(lrn1, scope_name='pool1', kernel_height=3, kernel_width=3, dh=2, dw=2)
    # Layer 2: conv -> LRN -> max pool
    conv2 = conv_op(pool1, scope_name="conv2", kernel_height=5, kernel_width=5, output_op=192, dh=1, dw=1)
    lrn2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=1e-3 / 9, beta=0.75, name="lrn2")
    pool2 = maxpool_op(lrn2, scope_name='pool2', kernel_height=3, kernel_width=3, dh=2, dw=2)
    # Layers 3-5: stacked 3x3 convolutions, pooled once at the end
    conv3 = conv_op(pool2, scope_name="conv3", kernel_height=3, kernel_width=3, output_op=384, dh=1, dw=1)
    conv4 = conv_op(conv3, scope_name="conv4", kernel_height=3, kernel_width=3, output_op=256, dh=1, dw=1)
    conv5 = conv_op(conv4, scope_name="conv5", kernel_height=3, kernel_width=3, output_op=256, dh=1, dw=1)
    pool5 = maxpool_op(conv5, scope_name='pool5', kernel_height=3, kernel_width=3, dh=2, dw=2)
    # Flatten pool5 to a [batch, features] matrix for the FC stack.
    shape = pool5.get_shape()
    nodes = shape[1].value * shape[2].value * shape[3].value
    reshaped = tf.reshape(pool5, [-1, nodes], name="reshape")
    # Layers 6-8: fully-connected stack; fc8 emits raw logits (no ReLU).
    fc6 = _fc_layer(reshaped, 'fc6', 4096, regularizer)
    fc7 = _fc_layer(fc6, 'fc7', 4096, regularizer)
    logit = _fc_layer(fc7, 'fc8', num_classes, regularizer, activation=False)
    return logit
