import tensorflow as tf
from module import conv2d, maxpool2d, avgpool2d, fc


# bottle neck模块
def residual_block(input_tensor, name_scope, channel_list, change_dimension=False, block_stride=1,
                   training=True):
    """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand, plus a shortcut.

    :param input_tensor: 4-D input tensor shaped [batch_size, height, width, channels]
    :param name_scope: name prefix for every layer created inside this block
    :param channel_list: two-element list [mid, out]; mid is the bottleneck channel
        count used by the first two convolutions, out is the block's output channels
    :param change_dimension: when True, project the shortcut through a 1x1 convolution
        so its channel count and spatial stride match the residual path's output
    :param block_stride: stride applied by the first convolution (and by the shortcut
        projection when change_dimension is True)
    :param training: batch-normalization mode flag forwarded to every conv2d; defaults
        to True, preserving the previously hard-coded behavior for existing callers
    :return: output tensor, relu(shortcut + residual)
    """
    # Shortcut branch: identity when the dimensions already match; otherwise a
    # 1x1 projection that changes channels (and stride) so the add is valid.
    if change_dimension:
        short_cut_conv = conv2d(input_tensor, name_scope + '_shortcut', channel_list[1],
                                kh=1, kw=1, dh=block_stride, dw=block_stride, padding='SAME',
                                activation=None, training=training, batch_normalize=True,
                                momentum=0.9, epsilon=0.0001)
    else:
        short_cut_conv = input_tensor
    # Residual branch. The last convolution has no activation: relu is applied
    # after the element-wise addition, as in the original ResNet design.
    block_conv_1 = conv2d(input_tensor, name_scope + '_bn_conv1', channel_list[0],
                          kh=1, kw=1, dh=block_stride, dw=block_stride, padding='SAME',
                          activation=tf.nn.relu, training=training, batch_normalize=True,
                          momentum=0.9, epsilon=0.0001)
    block_conv_2 = conv2d(block_conv_1, name_scope + '_bn_conv2', channel_list[0],
                          kh=3, kw=3, dh=1, dw=1, padding='SAME',
                          activation=tf.nn.relu, training=training, batch_normalize=True,
                          momentum=0.9, epsilon=0.0001)
    block_conv_3 = conv2d(block_conv_2, name_scope + '_bn_conv3', channel_list[1],
                          kh=1, kw=1, dh=1, dw=1, padding='SAME',
                          activation=None, training=training, batch_normalize=True,
                          momentum=0.9, epsilon=0.0001)
    block_res = tf.add(short_cut_conv, block_conv_3)
    res = tf.nn.relu(block_res)
    return res


# ResNet-50，224*224*3
def resnet_50(input_tensor, num_classes, training=True):
    """Build a ResNet-50 graph (stage layout 3-4-6-3), for 224x224x3 inputs.

    :param input_tensor: 4-D input tensor shaped [batch_size, height, width, channels],
        typically [batch_size, 224, 224, 3]
    :param num_classes: number of output classes
    :param training: batch-normalization mode flag for the stem convolution
    :return: pre-softmax logits of shape [batch_size, num_classes]
    """
    # Stage 1: 7x7/2 stem convolution followed by 3x3/2 max pooling.
    # NOTE(review): only this stem conv receives `training`; residual_block
    # hard-codes training=True internally — confirm whether that is intended.
    net = conv2d(input_tensor, 'conv1', 64, kh=7, kw=7, dh=2, dw=2,
                 training=training, batch_normalize=True, momentum=0.9, epsilon=0.0001)
    net = maxpool2d(net, 'pool1', kh=3, kw=3, dh=2, dw=2, padding='VALID')
    # Stages 2-5, data-driven instead of copy-pasted calls:
    # (name prefix, [bottleneck, output] channels, block count, first-block stride).
    stages = [
        ('block1', [64, 256], 3, 1),
        ('block2', [128, 512], 4, 2),
        ('block3', [256, 1024], 6, 2),
        ('block4', [512, 2048], 3, 2),
    ]
    for prefix, channels, num_blocks, first_stride in stages:
        # First block of each stage projects the shortcut (channel change / stride).
        net = residual_block(net, prefix + '_1', channels, True, first_stride)
        for i in range(2, num_blocks + 1):
            net = residual_block(net, '%s_%d' % (prefix, i), channels, False, 1)
    # Global 7x7 average pooling, then flatten to a vector.
    net = avgpool2d(net, 'pool2', kh=7, kw=7, dh=7, dw=7)
    shape = net.get_shape()
    nodes = shape[1].value * shape[2].value * shape[3].value
    reshaped = tf.reshape(net, [-1, nodes], name='reshape')
    # Final fully-connected projection to class logits (no activation; softmax
    # is expected to be applied by the loss / caller).
    logit = fc(reshaped, 'out', num_classes, activation=False)
    return logit


# ResNet-101，224*224*3
def resnet_101(input_tensor, num_classes, training=True):
    """Build a ResNet-101 graph (stage layout 3-4-23-3), for 224x224x3 inputs.

    :param input_tensor: 4-D input tensor shaped [batch_size, height, width, channels],
        typically [batch_size, 224, 224, 3]
    :param num_classes: number of output classes
    :param training: batch-normalization mode flag for the stem convolution
    :return: pre-softmax logits of shape [batch_size, num_classes]
    """
    # Stage 1: 7x7/2 stem convolution followed by 3x3/2 max pooling.
    # NOTE(review): only this stem conv receives `training`; residual_block
    # hard-codes training=True internally — confirm whether that is intended.
    net = conv2d(input_tensor, 'conv1', 64, kh=7, kw=7, dh=2, dw=2,
                 training=training, batch_normalize=True, momentum=0.9, epsilon=0.0001)
    net = maxpool2d(net, 'pool1', kh=3, kw=3, dh=2, dw=2, padding='VALID')
    # Stages 2-5, data-driven instead of copy-pasted calls:
    # (name prefix, [bottleneck, output] channels, block count, first-block stride).
    stages = [
        ('block1', [64, 256], 3, 1),
        ('block2', [128, 512], 4, 2),
        ('block3', [256, 1024], 23, 2),
        ('block4', [512, 2048], 3, 2),
    ]
    for prefix, channels, num_blocks, first_stride in stages:
        # First block of each stage projects the shortcut (channel change / stride).
        net = residual_block(net, prefix + '_1', channels, True, first_stride)
        for i in range(2, num_blocks + 1):
            net = residual_block(net, '%s_%d' % (prefix, i), channels, False, 1)
    # Global 7x7 average pooling, then flatten to a vector.
    net = avgpool2d(net, 'pool2', kh=7, kw=7, dh=7, dw=7)
    shape = net.get_shape()
    nodes = shape[1].value * shape[2].value * shape[3].value
    reshaped = tf.reshape(net, [-1, nodes], name='reshape')
    # Final fully-connected projection to class logits (no activation; softmax
    # is expected to be applied by the loss / caller).
    logit = fc(reshaped, 'out', num_classes, activation=False)
    return logit


# ResNet-152，224*224*3
def resnet_152(input_tensor, num_classes, training=True):
    """Build a ResNet-152 graph (stage layout 3-8-36-3), for 224x224x3 inputs.

    :param input_tensor: 4-D input tensor shaped [batch_size, height, width, channels],
        typically [batch_size, 224, 224, 3]
    :param num_classes: number of output classes
    :param training: batch-normalization mode flag for the stem convolution
    :return: pre-softmax logits of shape [batch_size, num_classes]
    """
    # Stage 1: 7x7/2 stem convolution followed by 3x3/2 max pooling.
    # NOTE(review): only this stem conv receives `training`; residual_block
    # hard-codes training=True internally — confirm whether that is intended.
    net = conv2d(input_tensor, 'conv1', 64, kh=7, kw=7, dh=2, dw=2,
                 training=training, batch_normalize=True, momentum=0.9, epsilon=0.0001)
    net = maxpool2d(net, 'pool1', kh=3, kw=3, dh=2, dw=2, padding='VALID')
    # Stages 2-5, data-driven instead of copy-pasted calls:
    # (name prefix, [bottleneck, output] channels, block count, first-block stride).
    stages = [
        ('block1', [64, 256], 3, 1),
        ('block2', [128, 512], 8, 2),
        ('block3', [256, 1024], 36, 2),
        ('block4', [512, 2048], 3, 2),
    ]
    for prefix, channels, num_blocks, first_stride in stages:
        # First block of each stage projects the shortcut (channel change / stride).
        net = residual_block(net, prefix + '_1', channels, True, first_stride)
        for i in range(2, num_blocks + 1):
            net = residual_block(net, '%s_%d' % (prefix, i), channels, False, 1)
    # Global 7x7 average pooling, then flatten to a vector.
    net = avgpool2d(net, 'pool2', kh=7, kw=7, dh=7, dw=7)
    shape = net.get_shape()
    nodes = shape[1].value * shape[2].value * shape[3].value
    reshaped = tf.reshape(net, [-1, nodes], name='reshape')
    # Final fully-connected projection to class logits (no activation; softmax
    # is expected to be applied by the loss / caller).
    logit = fc(reshaped, 'out', num_classes, activation=False)
    return logit
