"""
FR-CNN和Tiny FR-CNN网络结构
"""
import tensorflow as tf


# 网络结构  32*32*3------->1*1*324
def jaynet_32(input_tensor, num_classes, keep_prob, regularizer):
    # 1*1卷积层   Input: 32*32*3    Video: 32*32*6
    with tf.variable_scope('conv_layer1'):
        conv1_weights = tf.get_variable('weights', [3, 3, 3, 2],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("bias", [6], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.depthwise_conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        # 深度可分离卷积
        # point_filter = tf.constant(value=1, shape=[1, 1, 6, 6], dtype=tf.float32)
        # conv1 = tf.nn.separable_conv2d(input_tensor, conv1_weights, point_filter,strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    # 池化层   Video: 16*16*6
    with tf.name_scope("layer2-pool1"):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")

    # 1*1卷积层   Video: 16*16*12
    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable("weight", [5, 5, 6, 12],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias", [12], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    # 池化层   Video: 8*8*12
    with tf.name_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # 1*1卷积层    Video: 8*8*36
    with tf.variable_scope("layer5-conv3"):
        conv3_weights = tf.get_variable("weight", [3, 3, 12, 36],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv3_biases = tf.get_variable("bias", [36], initializer=tf.constant_initializer(0.0))
        conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))

    # 池化层   Video: 4*4*36
    with tf.name_scope("layer6-pool3"):
        pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # 1*1卷积层    Video: 4*4*108
    with tf.variable_scope("layer7-conv4"):
        conv4_weights = tf.get_variable("weight", [3, 3, 36, 108],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv4_biases = tf.get_variable("bias", [108], initializer=tf.constant_initializer(0.0))
        conv4 = tf.nn.conv2d(pool3, conv4_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu4 = tf.nn.relu(tf.nn.bias_add(conv4, conv4_biases))

    # 池化层   Video: 2*2*108
    with tf.name_scope("layer8-pool4"):
        pool4 = tf.nn.max_pool(relu4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # 1*1卷积层    Video: 2*2*324
    with tf.variable_scope("layer9-conv5"):
        conv5_weights = tf.get_variable("weight", [3, 3, 108, 324],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv5_biases = tf.get_variable("bias", [324], initializer=tf.constant_initializer(0.0))
        conv5 = tf.nn.conv2d(pool4, conv5_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu5 = tf.nn.relu(tf.nn.bias_add(conv5, conv5_biases))

    # 池化层   Video: 1*1*324
    with tf.name_scope("layer10-pool5"):
        pool5 = tf.nn.max_pool(relu5, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
        nodes = 1 * 1 * 324
        reshaped = tf.reshape(pool5, [-1, nodes])

    # 全连接层
    with tf.variable_scope('layer11-fc1'):
        fc1_weights = tf.get_variable("weight", [nodes, 1024],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        # tf.add_to_collection向当前计算图中添加张量集合
        fc1_biases = tf.get_variable("bias", [1024], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if keep_prob != 1:
            fc1 = tf.nn.dropout(fc1, keep_prob)

    # 全连接层
    with tf.variable_scope('layer12-fc2'):
        fc2_weights = tf.get_variable("weight", [1024, num_classes],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias", [num_classes], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases
    return logit


# Network topology: 32*32*3 -------> 1*1*162
def jaynet_32_tiny(input_tensor, num_classes, regularizer):
    """Build the Tiny FR-CNN inference graph and return class logits.

    A slimmer variant of ``jaynet_32``: a depthwise stem (channel
    multiplier 1), four conv + 2x2 max-pool stages, a global average
    pool down to 1x1x162, and a single fully connected classifier layer.
    There is no dropout in this variant.

    All variable-scope and variable names are kept identical to the
    original layout so existing checkpoints stay loadable.

    Args:
        input_tensor: NHWC image batch, expected spatial size 32x32.
        num_classes: width of the output logit layer.
        regularizer: optional callable applied to the FC weights; its
            result is appended to the 'losses' collection.

    Returns:
        Unnormalized logits tensor of shape [batch, num_classes].
    """

    def _conv_relu(scope, x, kernel_shape):
        # One standard conv -> bias -> ReLU stage; variables live in `scope`.
        with tf.variable_scope(scope):
            w = tf.get_variable("weight", kernel_shape,
                                initializer=tf.truncated_normal_initializer(stddev=0.1))
            b = tf.get_variable("bias", [kernel_shape[3]],
                                initializer=tf.constant_initializer(0.0))
            y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
            return tf.nn.relu(tf.nn.bias_add(y, b))

    def _max_pool(scope, x):
        # Non-overlapping 2x2 max pooling; halves the spatial resolution.
        with tf.name_scope(scope):
            return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # Stem: depthwise 3x3 conv with channel multiplier 1, 32x32x3 -> 32x32x3.
    # SAME padding keeps the spatial dimensions unchanged.
    # (Note the variable here is named 'weights', unlike later layers.)
    with tf.variable_scope('conv_layer1'):
        dw_filter = tf.get_variable('weights', [3, 3, 3, 1],
                                    initializer=tf.truncated_normal_initializer(stddev=0.1))
        dw_bias = tf.get_variable("bias", [3], initializer=tf.constant_initializer(0.0))
        dw_out = tf.nn.depthwise_conv2d(input_tensor, dw_filter,
                                        strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(dw_out, dw_bias))

    pool1 = _max_pool("layer2-pool1", relu1)                    # 16x16x3
    relu2 = _conv_relu("layer3-conv2", pool1, [5, 5, 3, 6])     # 16x16x6
    pool2 = _max_pool("layer4-pool2", relu2)                    # 8x8x6
    relu3 = _conv_relu("layer5-conv3", pool2, [3, 3, 6, 18])    # 8x8x18
    pool3 = _max_pool("layer6-pool3", relu3)                    # 4x4x18
    relu4 = _conv_relu("layer7-conv4", pool3, [3, 3, 18, 54])   # 4x4x54
    pool4 = _max_pool("layer8-pool4", relu4)                    # 2x2x54
    relu5 = _conv_relu("layer9-conv5", pool4, [3, 3, 54, 162])  # 2x2x162

    # Global average pool -> 1x1x162, then flatten for the classifier.
    # NOTE(review): ksize 6x6 exceeds the expected 2x2 map here; with SAME
    # padding this still produces a single output averaging the whole map
    # (TF's avg_pool excludes padding from the count) — confirm intent.
    with tf.name_scope("layer10-gap1"):
        gap1 = tf.nn.avg_pool(relu5, ksize=[1, 6, 6, 1], strides=[1, 6, 6, 1], padding='SAME')
        nodes = 1 * 1 * 162
        reshaped = tf.reshape(gap1, [-1, nodes])

    # Classifier: 162 -> num_classes, raw logits (softmax is left to the
    # loss). Weight penalty is collected when a regularizer is supplied.
    with tf.variable_scope('layer11-fc1'):
        fc1_w = tf.get_variable("weight", [nodes, num_classes],
                                initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_w))
        fc1_b = tf.get_variable("bias", [num_classes], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(reshaped, fc1_w) + fc1_b

    return logit