import tensorflow as tf
from module import conv2d as conv, maxpool2d, avgpool2d


# 带有批处理归一化的卷积层
def conv2d(input_tensor, name_scope, output_channel, kh, kw, dh, dw, padding='SAME', training=True):
    """
    Convolution layer followed by batch normalization and ReLU.

    :param input_tensor: input 4-D tensor (assumes NHWC layout — TODO confirm
        against the `conv` helper in `module`)
    :param name_scope: name scope for this layer
    :param output_channel: number of output channels
    :param kh: kernel height
    :param kw: kernel width
    :param dh: vertical stride
    :param dw: horizontal stride
    :param padding: 'SAME' or 'VALID'. Example: input width 10, kernel width 3,
        horizontal stride 3 leaves 1 leftover column that cannot be convolved
        directly. With 'SAME' the input is zero-padded so it is covered; with
        'VALID' the leftover is simply dropped.
    :param training: True to normalize with batch statistics (training mode),
        False to use the accumulated moving averages (inference). Defaults to
        True, preserving the original behavior for existing callers.
    :return: output tensor
    """
    # Raw convolution without activation; BN + ReLU are applied below.
    out = conv(input_tensor, name_scope, output_channel, kh=kh, kw=kw, dh=dh, dw=dw, activation=False, padding=padding)
    # NOTE(review): with training=True the BN moving averages are only updated
    # when the ops in tf.GraphKeys.UPDATE_OPS are run — confirm the training
    # loop adds that dependency.
    out = tf.nn.relu(tf.layers.batch_normalization(out, momentum=0.9997, epsilon=0.001, training=training))
    return out


# 第一模块
def block_1(input_tensor, name_scope):
    """
    First Inception block: three stacked Inception-A style sub-modules.

    The three sub-modules are structurally identical and differ only in the
    channel count of the average-pool projection branch (32, 64, 64).

    :param input_tensor: input tensor
    :param name_scope: name scope prefix for this block
    :return: output tensor
    """
    x = input_tensor
    for index, pool_channels in ((1, 32), (2, 64), (3, 64)):
        with tf.name_scope(name_scope + 'module' + str(index)) as scope:
            # Branch 1: single 1x1 convolution.
            branch1 = conv2d(x, scope + 'branch1', output_channel=64, kh=1, kw=1, dh=1, dw=1)
            # Branch 2: 1x1 reduction followed by a 5x5 convolution.
            tower2 = conv2d(x, scope + 'branch2_1', output_channel=48, kh=1, kw=1, dh=1, dw=1)
            tower2 = conv2d(tower2, scope + 'branch2_2', output_channel=64, kh=5, kw=5, dh=1, dw=1)
            # Branch 3: 1x1 reduction followed by two 3x3 convolutions.
            tower3 = conv2d(x, scope + 'branch3_1', output_channel=64, kh=1, kw=1, dh=1, dw=1)
            tower3 = conv2d(tower3, scope + 'branch3_2', output_channel=96, kh=3, kw=3, dh=1, dw=1)
            tower3 = conv2d(tower3, scope + 'branch3_3', output_channel=96, kh=3, kw=3, dh=1, dw=1)
            # Branch 4: 3x3 average pooling followed by a 1x1 projection.
            tower4 = avgpool2d(x, scope + 'branch4_1', kh=3, kw=3, dh=1, dw=1)
            tower4 = conv2d(tower4, scope + 'branch4_2', output_channel=pool_channels, kh=1, kw=1, dh=1, dw=1)
            # Concatenate all branches along the channel axis.
            x = tf.concat([branch1, tower2, tower3, tower4], 3)
    return x


# 第二模块
def block_2(input_tensor, name_scope):
    """
    Second Inception block: one spatial-reduction sub-module followed by four
    Inception-B style sub-modules with factorized 1x7 / 7x1 convolutions.

    The four factorized sub-modules are structurally identical and differ only
    in the channel width of the inner towers (128, 160, 160, 192).

    :param input_tensor: input tensor
    :param name_scope: name scope prefix for this block
    :return: output tensor
    """
    # Reduction sub-module: stride-2 branches shrink the spatial dimensions.
    with tf.name_scope(name_scope + 'module1') as scope:
        # Branch 1: strided 3x3 convolution.
        branch1 = conv2d(input_tensor, scope + 'branch1', output_channel=384, kh=3, kw=3, dh=2, dw=2, padding='VALID')
        # Branch 2: 1x1 reduction, 3x3 convolution, then strided 3x3 convolution.
        tower = conv2d(input_tensor, scope + 'branch2_1', output_channel=64, kh=1, kw=1, dh=1, dw=1)
        tower = conv2d(tower, scope + 'branch2_2', output_channel=96, kh=3, kw=3, dh=1, dw=1)
        tower = conv2d(tower, scope + 'branch2_3', output_channel=96, kh=3, kw=3, dh=2, dw=2, padding='VALID')
        # Branch 3: strided 3x3 max pooling.
        pooled = maxpool2d(input_tensor, scope + 'branch3', kh=3, kw=3, dh=2, dw=2, padding='VALID')
        # Concatenate all branches along the channel axis.
        x = tf.concat([branch1, tower, pooled], 3)
    # Factorized-convolution sub-modules.
    for index, mid_channels in ((2, 128), (3, 160), (4, 160), (5, 192)):
        with tf.name_scope(name_scope + 'module' + str(index)) as scope:
            # Branch 1: single 1x1 convolution.
            branch1 = conv2d(x, scope + 'branch1', output_channel=192, kh=1, kw=1, dh=1, dw=1)
            # Branch 2: 1x1 reduction then a factorized 7x7 (1x7 followed by 7x1).
            tower2 = conv2d(x, scope + 'branch2_1', output_channel=mid_channels, kh=1, kw=1, dh=1, dw=1)
            tower2 = conv2d(tower2, scope + 'branch2_2', output_channel=mid_channels, kh=1, kw=7, dh=1, dw=1)
            tower2 = conv2d(tower2, scope + 'branch2_3', output_channel=192, kh=7, kw=1, dh=1, dw=1)
            # Branch 3: 1x1 reduction then two factorized 7x7 convolutions.
            tower3 = conv2d(x, scope + 'branch3_1', output_channel=mid_channels, kh=1, kw=1, dh=1, dw=1)
            tower3 = conv2d(tower3, scope + 'branch3_2', output_channel=mid_channels, kh=7, kw=1, dh=1, dw=1)
            tower3 = conv2d(tower3, scope + 'branch3_3', output_channel=mid_channels, kh=1, kw=7, dh=1, dw=1)
            tower3 = conv2d(tower3, scope + 'branch3_4', output_channel=mid_channels, kh=7, kw=1, dh=1, dw=1)
            tower3 = conv2d(tower3, scope + 'branch3_5', output_channel=192, kh=1, kw=7, dh=1, dw=1)
            # Branch 4: 3x3 average pooling followed by a 1x1 projection.
            tower4 = avgpool2d(x, scope + 'branch4_1', kh=3, kw=3, dh=1, dw=1)
            tower4 = conv2d(tower4, scope + 'branch4_2', output_channel=192, kh=1, kw=1, dh=1, dw=1)
            # Concatenate all branches along the channel axis.
            x = tf.concat([branch1, tower2, tower3, tower4], 3)
    return x


# 第三模块
def block_3(input_tensor, name_scope):
    """
    Third Inception block: one spatial-reduction sub-module followed by two
    identical Inception-C style sub-modules with expanded 1x3 / 3x1 branches.

    :param input_tensor: input tensor
    :param name_scope: name scope prefix for this block
    :return: output tensor
    """
    # Reduction sub-module: stride-2 branches shrink the spatial dimensions.
    with tf.name_scope(name_scope + 'module1') as scope:
        # Branch 1: 1x1 reduction then a strided 3x3 convolution.
        # BUGFIX: the strided kernel was 3x2 (kw=2); the Inception v3
        # architecture uses a 3x3 stride-2 convolution here.
        branch1_1 = conv2d(input_tensor, scope + 'branch1_1', output_channel=192, kh=1, kw=1, dh=1, dw=1)
        branch1_2 = conv2d(branch1_1, scope + 'branch1_2', output_channel=320, kh=3, kw=3, dh=2, dw=2, padding='VALID')
        # Branch 2: 1x1 reduction, factorized 7x7, then a strided 3x3 convolution.
        branch2_1 = conv2d(input_tensor, scope + 'branch2_1', output_channel=192, kh=1, kw=1, dh=1, dw=1)
        branch2_2 = conv2d(branch2_1, scope + 'branch2_2', output_channel=192, kh=1, kw=7, dh=1, dw=1)
        branch2_3 = conv2d(branch2_2, scope + 'branch2_3', output_channel=192, kh=7, kw=1, dh=1, dw=1)
        branch2_4 = conv2d(branch2_3, scope + 'branch2_4', output_channel=192, kh=3, kw=3, dh=2, dw=2, padding='VALID')
        # Branch 3: strided 3x3 max pooling.
        branch3 = maxpool2d(input_tensor, scope + 'branch3', kh=3, kw=3, dh=2, dw=2, padding='VALID')
        # Concatenate all branches along the channel axis.
        x = tf.concat([branch1_2, branch2_4, branch3], 3)
    # Two identical expanded-filter-bank sub-modules.
    for index in (2, 3):
        with tf.name_scope(name_scope + 'module' + str(index)) as scope:
            # Branch 1: single 1x1 convolution.
            branch1 = conv2d(x, scope + 'branch1', output_channel=320, kh=1, kw=1, dh=1, dw=1)
            # Branch 2: 1x1 reduction, then parallel 1x3 and 3x1 convolutions
            # concatenated together.
            branch2_1 = conv2d(x, scope + 'branch2_1', output_channel=384, kh=1, kw=1, dh=1, dw=1)
            branch2_2_1 = conv2d(branch2_1, scope + 'branch2_2_1', output_channel=384, kh=1, kw=3, dh=1, dw=1)
            branch2_2_2 = conv2d(branch2_1, scope + 'branch2_2_2', output_channel=384, kh=3, kw=1, dh=1, dw=1)
            branch2 = tf.concat([branch2_2_1, branch2_2_2], 3)
            # Branch 3: 1x1 reduction, 3x3 convolution, then parallel 1x3 and
            # 3x1 convolutions concatenated together.
            branch3_1 = conv2d(x, scope + 'branch3_1', output_channel=448, kh=1, kw=1, dh=1, dw=1)
            branch3_2 = conv2d(branch3_1, scope + 'branch3_2', output_channel=384, kh=3, kw=3, dh=1, dw=1)
            branch3_3_1 = conv2d(branch3_2, scope + 'branch3_3_1', output_channel=384, kh=1, kw=3, dh=1, dw=1)
            branch3_3_2 = conv2d(branch3_2, scope + 'branch3_3_2', output_channel=384, kh=3, kw=1, dh=1, dw=1)
            branch3 = tf.concat([branch3_3_1, branch3_3_2], 3)
            # Branch 4: 3x3 average pooling followed by a 1x1 projection.
            branch4_1 = avgpool2d(x, scope + 'branch4_1', kh=3, kw=3, dh=1, dw=1)
            branch4_2 = conv2d(branch4_1, scope + 'branch4_2', output_channel=192, kh=1, kw=1, dh=1, dw=1)
            # Concatenate all branches along the channel axis.
            x = tf.concat([branch1, branch2, branch3, branch4_2], 3)
    return x


# GoogLeNet，Inception_v3，299*299*3
def inception_v3(input_tensor, num_classes, keep_prob=None):
    """
    GoogLeNet Inception v3 network; designed for 299x299x3 input.

    :param input_tensor: 4-D input tensor shaped [batch_size, height, width,
        channels], typically [batch_size, 299, 299, 3]
    :param num_classes: number of output classes
    :param keep_prob: dropout keep probability; dropout is skipped when None
    :return: logits tensor of shape [batch_size, num_classes] (no softmax applied)
    """
    # Stem: plain convolution/pooling layers that shrink 299x299 input.
    conv1 = conv2d(input_tensor, 'conv1', 32, kh=3, kw=3, dh=2, dw=2, padding='VALID')
    conv2 = conv2d(conv1, 'conv2', 32, kh=3, kw=3, dh=1, dw=1, padding='VALID')
    conv3 = conv2d(conv2, 'conv3', 64, kh=3, kw=3, dh=1, dw=1)
    pool3 = maxpool2d(conv3, 'pool3', kh=3, kw=3, dh=2, dw=2, padding='VALID')
    conv4 = conv2d(pool3, 'conv4', 80, kh=1, kw=1, dh=1, dw=1, padding='VALID')
    conv5 = conv2d(conv4, 'conv5', 192, kh=3, kw=3, dh=1, dw=1, padding='VALID')
    pool5 = maxpool2d(conv5, 'pool5', kh=3, kw=3, dh=2, dw=2, padding='VALID')
    # Three stacked Inception blocks.
    block6 = block_1(pool5, 'block6')
    block7 = block_2(block6, 'block7')
    block8 = block_3(block7, 'block8')
    # Global 8x8 average pooling collapses the spatial dimensions to 1x1.
    pool8 = avgpool2d(block8, 'pool8', kh=8, kw=8, dh=1, dw=1, padding='VALID')
    if keep_prob is not None:
        pool8 = tf.nn.dropout(pool8, keep_prob)
    # Final 1x1 convolution projecting to class logits.
    # BUGFIX: this previously went through conv2d, which applies batch norm and
    # ReLU — a ReLU on the logits clamps every negative logit to zero and
    # breaks softmax cross-entropy training. Use the raw conv with no
    # activation instead.
    logit = conv(pool8, 'out', num_classes, kh=1, kw=1, dh=1, dw=1, activation=False)
    # Drop the 1x1 spatial dimensions, leaving [batch_size, num_classes].
    logit = tf.squeeze(logit, [1, 2], name='squeeze')
    return logit
