import tensorflow as tf
from module import conv3d, maxpool3d, avgpool3d, fc


# Pseudo-3D residual block, type A (spatial conv followed serially by temporal conv)
def pseudo_a(input_tensor, name_scope, channel_list, change_dimension=False, block_stride=1,
             training=True):
    """P3D-A residual block: 1x1 reduce -> spatial (1x3x3) -> temporal (3x1x1) -> 1x1 expand.

    :param input_tensor: input tensor fed to conv3d (the original docstring said a 4-D
        [batch, height, width, channels] tensor, but the use of conv3d suggests a 5-D
        clip tensor — TODO confirm against module.conv3d)
    :param name_scope: name prefix for every layer created inside this block
    :param channel_list: [mid, out] — mid is the bottleneck channel count used by the
        inner convs, out is the block's output channel count (presumably conv3d's third
        argument is the output channel count; verify against module.conv3d)
    :param change_dimension: if True, project the shortcut through a 1x1x1 conv so its
        channels and stride match the residual branch
    :param block_stride: spatial stride of the first conv (and of the shortcut
        projection when change_dimension is True)
    :param training: forwarded to batch normalization inside conv3d; defaults to True
        to preserve the previously hard-coded behavior
    :return: output tensor after the residual add and final ReLU
    """
    # Residual shortcut: identity when shapes already match, otherwise a 1x1x1
    # projection conv (no activation) to align channels/stride.
    if change_dimension:
        short_cut_conv = conv3d(input_tensor, name_scope + '_shortcut', channel_list[1],
                                1, 1, 1, 1, block_stride, block_stride, padding='SAME',
                                activation=None, training=training, batch_normalize=True,
                                momentum=0.9, epsilon=0.0001)
    else:
        short_cut_conv = input_tensor
    # 1x1x1 bottleneck reduction; carries the block's spatial stride.
    block_conv_1 = conv3d(input_tensor, name_scope + '_bn_conv1', channel_list[0],
                          1, 1, 1, 1, block_stride, block_stride, padding='SAME',
                          activation=tf.nn.relu, training=training, batch_normalize=True,
                          momentum=0.9, epsilon=0.0001)
    # Spatial 1x3x3 conv, then temporal 3x1x1 conv applied in series (the "A" wiring).
    block_conv_2 = conv3d(block_conv_1, name_scope + '_bn_conv2', channel_list[0],
                          1, 3, 3, 1, 1, 1, padding='SAME', activation=tf.nn.relu,
                          training=training, batch_normalize=True, momentum=0.9, epsilon=0.0001)
    block_conv_3 = conv3d(block_conv_2, name_scope + '_bn_conv3', channel_list[0],
                          3, 1, 1, 1, 1, 1, padding='SAME', activation=tf.nn.relu,
                          training=training, batch_normalize=True, momentum=0.9, epsilon=0.0001)
    # 1x1x1 expansion back to the output width; activation deferred until after the add.
    block_conv_4 = conv3d(block_conv_3, name_scope + '_bn_conv4', channel_list[1],
                          1, 1, 1, 1, 1, 1, padding='SAME', activation=None,
                          training=training, batch_normalize=True, momentum=0.9, epsilon=0.0001)
    block_res = tf.add(short_cut_conv, block_conv_4)
    res = tf.nn.relu(block_res)
    return res


# Pseudo-3D residual block, type B (spatial and temporal convs in parallel, summed)
def pseudo_b(input_tensor, name_scope, channel_list, change_dimension=False, block_stride=1,
             training=True):
    """P3D-B residual block: spatial (1x3x3) and temporal (3x1x1) convs run in parallel
    from the same bottleneck input and are summed before the 1x1 expansion.

    :param input_tensor: input tensor fed to conv3d (the original docstring said a 4-D
        [batch, height, width, channels] tensor, but the use of conv3d suggests a 5-D
        clip tensor — TODO confirm against module.conv3d)
    :param name_scope: name prefix for every layer created inside this block
    :param channel_list: [mid, out] — mid is the bottleneck channel count used by the
        inner convs, out is the block's output channel count (presumably conv3d's third
        argument is the output channel count; verify against module.conv3d)
    :param change_dimension: if True, project the shortcut through a 1x1x1 conv so its
        channels and stride match the residual branch
    :param block_stride: spatial stride of the first conv (and of the shortcut
        projection when change_dimension is True)
    :param training: forwarded to batch normalization inside conv3d; defaults to True
        to preserve the previously hard-coded behavior
    :return: output tensor after the residual add and final ReLU
    """
    # Residual shortcut: identity when shapes already match, otherwise a 1x1x1
    # projection conv (no activation) to align channels/stride.
    if change_dimension:
        short_cut_conv = conv3d(input_tensor, name_scope + '_shortcut', channel_list[1],
                                1, 1, 1, 1, block_stride, block_stride, padding='SAME',
                                activation=None, training=training, batch_normalize=True,
                                momentum=0.9, epsilon=0.0001)
    else:
        short_cut_conv = input_tensor
    # 1x1x1 bottleneck reduction; carries the block's spatial stride.
    block_conv_1 = conv3d(input_tensor, name_scope + '_bn_conv1', channel_list[0],
                          1, 1, 1, 1, block_stride, block_stride, padding='SAME',
                          activation=tf.nn.relu, training=training, batch_normalize=True,
                          momentum=0.9, epsilon=0.0001)
    # Parallel branches over the same input: spatial 1x3x3 and temporal 3x1x1
    # (the "B" wiring), fused by element-wise addition.
    block_conv_2_1 = conv3d(block_conv_1, name_scope + '_bn_conv2_1', channel_list[0],
                            1, 3, 3, 1, 1, 1, padding='SAME', activation=tf.nn.relu,
                            training=training, batch_normalize=True, momentum=0.9, epsilon=0.0001)
    block_conv_2_2 = conv3d(block_conv_1, name_scope + '_bn_conv2_2', channel_list[0],
                            3, 1, 1, 1, 1, 1, padding='SAME', activation=tf.nn.relu,
                            training=training, batch_normalize=True, momentum=0.9, epsilon=0.0001)
    block_conv_2 = tf.add(block_conv_2_1, block_conv_2_2)
    # 1x1x1 expansion back to the output width; activation deferred until after the add.
    block_conv_3 = conv3d(block_conv_2, name_scope + '_bn_conv3', channel_list[1],
                          1, 1, 1, 1, 1, 1, padding='SAME', activation=None,
                          training=training, batch_normalize=True, momentum=0.9, epsilon=0.0001)
    block_res = tf.add(short_cut_conv, block_conv_3)
    res = tf.nn.relu(block_res)
    return res


# Pseudo-3D residual block, type C (serial spatial->temporal plus a skip around the temporal conv)
def pseudo_c(input_tensor, name_scope, channel_list, change_dimension=False, block_stride=1,
             training=True):
    """P3D-C residual block: spatial (1x3x3) conv, then a temporal (3x1x1) conv whose
    output is added back onto the spatial output before the 1x1 expansion.

    :param input_tensor: input tensor fed to conv3d (the original docstring said a 4-D
        [batch, height, width, channels] tensor, but the use of conv3d suggests a 5-D
        clip tensor — TODO confirm against module.conv3d)
    :param name_scope: name prefix for every layer created inside this block
    :param channel_list: [mid, out] — mid is the bottleneck channel count used by the
        inner convs, out is the block's output channel count (presumably conv3d's third
        argument is the output channel count; verify against module.conv3d)
    :param change_dimension: if True, project the shortcut through a 1x1x1 conv so its
        channels and stride match the residual branch
    :param block_stride: spatial stride of the first conv (and of the shortcut
        projection when change_dimension is True)
    :param training: forwarded to batch normalization inside conv3d; defaults to True
        to preserve the previously hard-coded behavior
    :return: output tensor after the residual add and final ReLU
    """
    # Residual shortcut: identity when shapes already match, otherwise a 1x1x1
    # projection conv (no activation) to align channels/stride.
    if change_dimension:
        short_cut_conv = conv3d(input_tensor, name_scope + '_shortcut', channel_list[1],
                                1, 1, 1, 1, block_stride, block_stride, padding='SAME',
                                activation=None, training=training, batch_normalize=True,
                                momentum=0.9, epsilon=0.0001)
    else:
        short_cut_conv = input_tensor
    # 1x1x1 bottleneck reduction; carries the block's spatial stride.
    block_conv_1 = conv3d(input_tensor, name_scope + '_bn_conv1', channel_list[0],
                          1, 1, 1, 1, block_stride, block_stride, padding='SAME',
                          activation=tf.nn.relu, training=training, batch_normalize=True,
                          momentum=0.9, epsilon=0.0001)
    # Spatial 1x3x3 conv, then a temporal 3x1x1 conv whose result is added back onto
    # the spatial output (the "C" wiring). Renamed the fused tensor instead of
    # reassigning block_conv_2, which shadowed the spatial output confusingly.
    block_conv_2 = conv3d(block_conv_1, name_scope + '_bn_conv2', channel_list[0],
                          1, 3, 3, 1, 1, 1, padding='SAME', activation=tf.nn.relu,
                          training=training, batch_normalize=True, momentum=0.9, epsilon=0.0001)
    block_conv_2_cut = conv3d(block_conv_2, name_scope + '_bn_conv2_2', channel_list[0],
                              3, 1, 1, 1, 1, 1, padding='SAME', activation=tf.nn.relu,
                              training=training, batch_normalize=True, momentum=0.9, epsilon=0.0001)
    block_conv_2_fused = tf.add(block_conv_2, block_conv_2_cut)
    # 1x1x1 expansion back to the output width; activation deferred until after the add.
    block_conv_3 = conv3d(block_conv_2_fused, name_scope + '_bn_conv3', channel_list[1],
                          1, 1, 1, 1, 1, 1, padding='SAME', activation=None,
                          training=training, batch_normalize=True, momentum=0.9, epsilon=0.0001)
    block_res = tf.add(short_cut_conv, block_conv_3)
    res = tf.nn.relu(block_res)
    return res


# P3D network; expected input clips of shape 8*224*224*3 (frames * height * width * channels)
def p3d(input_tensor, num_classes, training=True):
    """Build the P3D backbone and return pre-softmax class logits.

    The residual stages cycle through the three pseudo-3D block variants
    (A -> B -> C -> A ...), with the first block of each stage projecting the
    shortcut and (from stage 2 on) downsampling spatially with stride 2.

    :param input_tensor: input clip tensor fed to conv3d (the original docstring
        said 4-D [batch, height, width, channels]; the 3D ops suggest a 5-D
        tensor — TODO confirm against module.conv3d)
    :param num_classes: number of output classes
    :param training: forwarded to batch normalization in the stem conv.
        NOTE(review): as written, the pseudo_* blocks hard-code training=True
        internally, so this flag only affects the stem — confirm intent.
    :return: 2-D logits tensor [batch, num_classes], no softmax applied
    """
    # Stem: 1x7x7 conv (spatial stride 2) followed by 1x3x3 max pooling.
    stem = conv3d(input_tensor, 'conv1', 64, 1, 7, 7, 1, 2, 2, training=training,
                  batch_normalize=True, momentum=0.9, epsilon=0.0001)
    net = maxpool3d(stem, 'pool1', 1, 3, 3, 1, 2, 2, padding='VALID')
    # Stage table: (block builder, scope, [mid, out] channels, project shortcut, stride).
    stages = [
        # Stage 1
        (pseudo_a, 'block1_1', [64, 256], True, 1),
        (pseudo_b, 'block1_2', [64, 256], False, 1),
        (pseudo_c, 'block1_3', [64, 256], False, 1),
        # Stage 2
        (pseudo_a, 'block2_1', [128, 512], True, 2),
        (pseudo_b, 'block2_2', [128, 512], False, 1),
        (pseudo_c, 'block2_3', [128, 512], False, 1),
        (pseudo_a, 'block2_4', [128, 512], False, 1),
        # Stage 3
        (pseudo_b, 'block3_1', [256, 1024], True, 2),
        (pseudo_c, 'block3_2', [256, 1024], False, 1),
        (pseudo_a, 'block3_3', [256, 1024], False, 1),
        (pseudo_b, 'block3_4', [256, 1024], False, 1),
        (pseudo_c, 'block3_5', [256, 1024], False, 1),
        (pseudo_a, 'block3_6', [256, 1024], False, 1),
        # Stage 4
        (pseudo_b, 'block4_1', [512, 2048], True, 2),
        (pseudo_c, 'block4_2', [512, 2048], False, 1),
        (pseudo_a, 'block4_3', [512, 2048], False, 1),
    ]
    for build_block, scope, channels, project, stride in stages:
        net = build_block(net, scope, channels, project, stride)
    # Global average pooling over the full remaining temporal/spatial extent.
    pooled = avgpool3d(net, 'pool2', 8, 7, 7, 8, 7, 7)
    # Flatten to [batch, features] using the static shape (TF1-style .value access).
    static_shape = pooled.get_shape()
    feature_count = (static_shape[1].value * static_shape[2].value *
                     static_shape[3].value * static_shape[4].value)
    flattened = tf.reshape(pooled, [-1, feature_count], name='reshape')
    # Final fully-connected projection to class logits (no activation).
    return fc(flattened, 'out', num_classes, activation=False)
