"""
VGG-16和VGG-19网络结构
"""
import tensorflow as tf


# 定义卷积层函数
def conv_op(input_op, scope_name, kernel_height, kernel_width, output_op, dh, dw):
    channel_input = input_op.get_shape()[-1].value  # 获取输入的通道数量，即输入的卷积核的数量或者图像通道
    with tf.name_scope(scope_name) as scope:
        kernel = tf.get_variable(scope + "w", shape=[kernel_height, kernel_width, channel_input, output_op],
                                 dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding='SAME')
        bias_init_val = tf.constant(0.0, shape=[output_op], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name='b')
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name=scope)
        return activation


# Max-pooling layer helper.
def maxpool_op(input_op, scope_name, kernel_height, kernel_width, dh, dw):
    """Apply SAME-padded max pooling over an NHWC tensor.

    Args:
        input_op: 4-D NHWC input tensor.
        scope_name: op name for the pooling node.
        kernel_height / kernel_width: pooling window size.
        dh / dw: vertical / horizontal stride.

    Returns:
        The pooled tensor.
    """
    window = [1, kernel_height, kernel_width, 1]
    step = [1, dh, dw, 1]
    return tf.nn.max_pool(input_op, ksize=window, strides=step,
                          padding='SAME', name=scope_name)


# VGG-16
def vgg_16(input_tensor, num_classes, keep_prob, regularizer):
    """Build the VGG-16 graph and return unscaled class logits.

    Args:
        input_tensor: 4-D NHWC image batch with static spatial dimensions
            (the flatten step reads the static shape).
        num_classes: size of the final classification layer.
        keep_prob: dropout keep probability applied after fc6 and fc7.
        regularizer: optional callable applied to the fc8 weights; its result
            is added to the 'losses' collection. Pass None to skip.

    Returns:
        Logits tensor of shape [batch, num_classes] (no softmax applied).
    """
    # Stage 1: two 3x3/64 convs + 2x2 max pool.
    conv1_1 = conv_op(input_tensor, scope_name="conv1_1", kernel_height=3, kernel_width=3, output_op=64, dh=1, dw=1)
    conv1_2 = conv_op(conv1_1, scope_name="conv1_2", kernel_height=3, kernel_width=3, output_op=64, dh=1, dw=1)
    pool1 = maxpool_op(conv1_2, scope_name='pool1', kernel_height=2, kernel_width=2, dh=2, dw=2)
    # Stage 2: two 3x3/128 convs + pool.
    conv2_1 = conv_op(pool1, scope_name="conv2_1", kernel_height=3, kernel_width=3, output_op=128, dh=1, dw=1)
    conv2_2 = conv_op(conv2_1, scope_name="conv2_2", kernel_height=3, kernel_width=3, output_op=128, dh=1, dw=1)
    pool2 = maxpool_op(conv2_2, scope_name='pool2', kernel_height=2, kernel_width=2, dh=2, dw=2)
    # Stage 3: three 3x3/256 convs + pool.
    conv3_1 = conv_op(pool2, scope_name="conv3_1", kernel_height=3, kernel_width=3, output_op=256, dh=1, dw=1)
    conv3_2 = conv_op(conv3_1, scope_name="conv3_2", kernel_height=3, kernel_width=3, output_op=256, dh=1, dw=1)
    conv3_3 = conv_op(conv3_2, scope_name="conv3_3", kernel_height=3, kernel_width=3, output_op=256, dh=1, dw=1)
    pool3 = maxpool_op(conv3_3, scope_name='pool3', kernel_height=2, kernel_width=2, dh=2, dw=2)
    # Stage 4: three 3x3/512 convs + pool.
    # BUGFIX: stages 4 and 5 previously used 256 filters; the VGG-16
    # architecture (configuration D in the VGG paper) uses 512 here,
    # matching vgg_19 in this file.
    conv4_1 = conv_op(pool3, scope_name="conv4_1", kernel_height=3, kernel_width=3, output_op=512, dh=1, dw=1)
    conv4_2 = conv_op(conv4_1, scope_name="conv4_2", kernel_height=3, kernel_width=3, output_op=512, dh=1, dw=1)
    conv4_3 = conv_op(conv4_2, scope_name="conv4_3", kernel_height=3, kernel_width=3, output_op=512, dh=1, dw=1)
    pool4 = maxpool_op(conv4_3, scope_name='pool4', kernel_height=2, kernel_width=2, dh=2, dw=2)
    # Stage 5: three 3x3/512 convs + pool.
    conv5_1 = conv_op(pool4, scope_name="conv5_1", kernel_height=3, kernel_width=3, output_op=512, dh=1, dw=1)
    conv5_2 = conv_op(conv5_1, scope_name="conv5_2", kernel_height=3, kernel_width=3, output_op=512, dh=1, dw=1)
    conv5_3 = conv_op(conv5_2, scope_name="conv5_3", kernel_height=3, kernel_width=3, output_op=512, dh=1, dw=1)
    pool5 = maxpool_op(conv5_3, scope_name='pool5', kernel_height=2, kernel_width=2, dh=2, dw=2)
    # Flatten pool5 into [batch, nodes] using its static shape.
    shape = pool5.get_shape()
    nodes = shape[1].value * shape[2].value * shape[3].value
    reshaped = tf.reshape(pool5, [-1, nodes], name="reshape")
    # fc6: fully connected 4096 + ReLU + dropout.
    with tf.variable_scope('fc6'):
        fc6_weight = tf.Variable(tf.truncated_normal([nodes, 4096], stddev=0.1, dtype=tf.float32), name="fc6_weight")
        fc6_bias = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[4096]), name="fc6_bias")
        fc6 = tf.nn.relu(tf.add(tf.matmul(reshaped, fc6_weight), fc6_bias))
        fc6 = tf.nn.dropout(fc6, keep_prob)
    # fc7: fully connected 4096 + ReLU + dropout.
    with tf.variable_scope('fc7'):
        fc7_weight = tf.Variable(tf.truncated_normal([4096, 4096], stddev=0.1, dtype=tf.float32), name="fc7_weight")
        fc7_bias = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[4096]), name="fc7_bias")
        fc7 = tf.nn.relu(tf.add(tf.matmul(fc6, fc7_weight), fc7_bias))
        fc7 = tf.nn.dropout(fc7, keep_prob)
    # fc8: final linear classification layer (no activation).
    with tf.variable_scope('fc8'):
        fc8_weight = tf.Variable(tf.truncated_normal([4096, num_classes], stddev=0.1, dtype=tf.float32), name="fc8_weight")
        if regularizer is not None:
            # Only the classifier weights are regularized, as in the original.
            tf.add_to_collection('losses', regularizer(fc8_weight))
        fc8_bias = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[num_classes]), name="fc8_bias")
        logit = tf.matmul(fc7, fc8_weight) + fc8_bias
    return logit


# VGG-19
def vgg_19(input_tensor, num_classes, keep_prob, regularizer):
    """Build the VGG-19 graph and return unscaled class logits.

    Args:
        input_tensor: 4-D NHWC image batch with static spatial dimensions
            (the flatten step reads the static shape).
        num_classes: size of the final classification layer.
        keep_prob: dropout keep probability applied after fc6 and fc7.
        regularizer: optional callable applied to the fc8 weights; its result
            is added to the 'losses' collection. Pass None to skip.

    Returns:
        Logits tensor of shape [batch, num_classes] (no softmax applied).
    """
    # Five conv stages as (filter count, number of 3x3 conv layers).
    # Each stage ends with a 2x2/stride-2 max pool, halving H and W.
    stage_plan = [(64, 2), (128, 2), (256, 4), (512, 4), (512, 4)]
    net = input_tensor
    for stage, (filters, depth) in enumerate(stage_plan, start=1):
        for layer in range(1, depth + 1):
            net = conv_op(net, scope_name="conv%d_%d" % (stage, layer),
                          kernel_height=3, kernel_width=3,
                          output_op=filters, dh=1, dw=1)
        net = maxpool_op(net, scope_name="pool%d" % stage,
                         kernel_height=2, kernel_width=2, dh=2, dw=2)
    # Flatten the last pooled feature map into [batch, flat_dim].
    static_shape = net.get_shape()
    flat_dim = static_shape[1].value * static_shape[2].value * static_shape[3].value
    flattened = tf.reshape(net, [-1, flat_dim], name="reshape")
    # fc6: fully connected 4096 + ReLU + dropout.
    with tf.variable_scope('fc6'):
        fc6_weight = tf.Variable(tf.truncated_normal([flat_dim, 4096], stddev=0.1, dtype=tf.float32),
                                 name="fc6_weight")
        fc6_bias = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[4096]), name="fc6_bias")
        hidden6 = tf.nn.relu(tf.add(tf.matmul(flattened, fc6_weight), fc6_bias))
        hidden6 = tf.nn.dropout(hidden6, keep_prob)
    # fc7: fully connected 4096 + ReLU + dropout.
    with tf.variable_scope('fc7'):
        fc7_weight = tf.Variable(tf.truncated_normal([4096, 4096], stddev=0.1, dtype=tf.float32),
                                 name="fc7_weight")
        fc7_bias = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[4096]), name="fc7_bias")
        hidden7 = tf.nn.relu(tf.add(tf.matmul(hidden6, fc7_weight), fc7_bias))
        hidden7 = tf.nn.dropout(hidden7, keep_prob)
    # fc8: final linear classification layer (no activation).
    with tf.variable_scope('fc8'):
        fc8_weight = tf.Variable(tf.truncated_normal([4096, num_classes], stddev=0.1, dtype=tf.float32),
                                 name="fc8_weight")
        if regularizer is not None:
            # Only the classifier weights are regularized, as in the original.
            tf.add_to_collection('losses', regularizer(fc8_weight))
        fc8_bias = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[num_classes]), name="fc8_bias")
        logit = tf.matmul(hidden7, fc8_weight) + fc8_bias

    return logit
