import tensorflow as tf
from module import conv3d, maxpool3d, avgpool3d, fc


# Bottleneck residual unit with an optional 1x1x1 projection shortcut.
def residual_block(input_tensor, name_scope, channel_list, kd=1, change_dimension=False, block_stride=1):
    """
    Build a three-conv bottleneck residual block and return its activation.

    :param input_tensor: input feature map; the conv3d usage implies a 5-D
        tensor [batch, frames, height, width, channels] — confirm against
        the `conv3d` helper in `module`.
    :param name_scope: prefix used to name the variable scopes of the
        inner convolutions.
    :param channel_list: two-element list [bottleneck_channels, output_channels].
    :param kd: temporal (depth) kernel size of the first convolution.
    :param change_dimension: when True, project the shortcut with a strided
        1x1x1 convolution so its channel count matches the block output.
    :param block_stride: spatial stride of the first convolution; the same
        stride is applied to the shortcut projection so both branches stay
        spatially aligned.
    :return: relu(shortcut + residual), same rank as the input.
    """
    # Shortcut branch: identity, unless the channel count changes, in which
    # case a 1x1x1 (optionally strided) projection keeps the add well-formed.
    if change_dimension:
        shortcut = conv3d(input_tensor, name_scope + '_shortcut', channel_list[1],
                          1, 1, 1, 1, block_stride, block_stride, padding='SAME',
                          activation=None, batch_normalize=True, momentum=0.9, epsilon=1e-5)
    else:
        shortcut = input_tensor

    # Residual branch: kd x 1 x 1 (carries the stride) -> 1 x 3 x 3 -> 1 x 1 x 1.
    # The last conv is linear; the non-linearity is applied after the add.
    residual = conv3d(input_tensor, name_scope + '_bn_conv1', channel_list[0],
                      kd, 1, 1, 1, block_stride, block_stride, padding='SAME',
                      activation=tf.nn.relu, batch_normalize=True, momentum=0.9, epsilon=1e-5)
    residual = conv3d(residual, name_scope + '_bn_conv2', channel_list[0],
                      1, 3, 3, 1, 1, 1, padding='SAME',
                      activation=tf.nn.relu, batch_normalize=True, momentum=0.9, epsilon=1e-5)
    residual = conv3d(residual, name_scope + '_bn_conv3', channel_list[1],
                      1, 1, 1, 1, 1, 1, padding='SAME',
                      activation=None, batch_normalize=True, momentum=0.9, epsilon=1e-5)

    return tf.nn.relu(shortcut + residual)


# SlowFast-style two-pathway 3-D ResNet; hyper-parameters assume 8*224*224*3 clips.
def slowfast(input_tensor, num_classes):
    """
    Assemble the two-pathway (slow/fast) 3-D residual network and return logits.

    :param input_tensor: 5-D clip tensor [batch, frames, height, width, 3];
        the pooling kernel sizes below assume 8 frames of 224x224 input.
    :param num_classes: output width of the final fully-connected layer.
    :return: un-normalized logits of shape [batch, num_classes].
    """
    # Pathway inputs. The slow branch sees every frame; the fast branch keeps
    # every 8th frame via a strided slice (equivalent to the original
    # slice-and-concat loop, but in one op).
    # NOTE(review): in the SlowFast paper the *fast* pathway is the densely
    # sampled one — here the sampling is inverted; confirm this is intentional.
    input_slow = input_tensor
    input_fast = input_slow[:, ::8]
    # Slow stem: 1x7x7 conv (no temporal extent), spatial stride 2, then max-pool.
    conv1_s = conv3d(input_slow, 'conv1_s', 64, 1, 7, 7, 1, 2, 2, batch_normalize=True, momentum=0.9, epsilon=1e-5)
    pool1_s = maxpool3d(conv1_s, 'pool1_s', 1, 3, 3, 1, 2, 2)
    # Fast stem: 5x7x7 conv with only 8 channels (lightweight pathway).
    conv1_f = conv3d(input_fast, 'conv1_f', 8, 5, 7, 7, 1, 2, 2, batch_normalize=True, momentum=0.9, epsilon=1e-5)
    pool1_f = maxpool3d(conv1_f, 'pool1_f', 1, 3, 3, 1, 2, 2)
    # Slow stage 2: three bottleneck blocks, 64 -> 256 channels, stride 1.
    res2_1_s = residual_block(pool1_s, 'res2_1_s', [64, 256], 1, True, 1)
    res2_2_s = residual_block(res2_1_s, 'res2_2_s', [64, 256], 1)
    res2_3_s = residual_block(res2_2_s, 'res2_3_s', [64, 256], 1)
    # Lateral connection into fast stage 2: collapse the slow pathway's
    # temporal axis with an 8x1x1 VALID conv, then concatenate channel-wise.
    # (Distinct names here replace the original's shadowing reassignments of
    # pool1_s / res2_3_s / res3_4_s / res4_6_s — the graph is unchanged.)
    lateral2 = conv3d(pool1_s, 'pool1_sp', pool1_s.shape[4], 8, 1, 1, 1, 1, 1, padding='VALID')
    fuse2 = tf.concat([lateral2, pool1_f], axis=4)
    res2_1_f = residual_block(fuse2, 'res2_1_f', [8, 32], 3, True, 1)
    res2_2_f = residual_block(res2_1_f, 'res2_2_f', [8, 32], 3)
    res2_3_f = residual_block(res2_2_f, 'res2_3_f', [8, 32], 3)
    # Slow stage 3: four blocks, spatial stride 2 on the first block.
    res3_1_s = residual_block(res2_3_s, 'res3_1_s', [128, 512], 1, True, 2)
    res3_2_s = residual_block(res3_1_s, 'res3_2_s', [128, 512], 1)
    res3_3_s = residual_block(res3_2_s, 'res3_3_s', [128, 512], 1)
    res3_4_s = residual_block(res3_3_s, 'res3_4_s', [128, 512], 1)
    # Fast stage 3, fed by slow/fast fusion.
    # NOTE(review): fast blocks use kd=1 from here on (only stage 2 uses
    # kd=3); the SlowFast paper keeps temporal kernels in every fast stage —
    # confirm this difference is intended.
    lateral3 = conv3d(res2_3_s, 'res2_3_sp', res2_3_s.shape[4], 8, 1, 1, 1, 1, 1, padding='VALID')
    fuse3 = tf.concat([lateral3, res2_3_f], axis=4)
    res3_1_f = residual_block(fuse3, 'res3_1_f', [16, 64], 1, True, 2)
    res3_2_f = residual_block(res3_1_f, 'res3_2_f', [16, 64], 1)
    res3_3_f = residual_block(res3_2_f, 'res3_3_f', [16, 64], 1)
    res3_4_f = residual_block(res3_3_f, 'res3_4_f', [16, 64], 1)
    # Slow stage 4: six blocks, spatial stride 2 on the first block.
    res4_1_s = residual_block(res3_4_s, 'res4_1_s', [256, 1024], 1, True, 2)
    res4_2_s = residual_block(res4_1_s, 'res4_2_s', [256, 1024], 1)
    res4_3_s = residual_block(res4_2_s, 'res4_3_s', [256, 1024], 1)
    res4_4_s = residual_block(res4_3_s, 'res4_4_s', [256, 1024], 1)
    res4_5_s = residual_block(res4_4_s, 'res4_5_s', [256, 1024], 1)
    res4_6_s = residual_block(res4_5_s, 'res4_6_s', [256, 1024], 1)
    # Fast stage 4, fed by slow/fast fusion.
    lateral4 = conv3d(res3_4_s, 'res3_4_sp', res3_4_s.shape[4], 8, 1, 1, 1, 1, 1, padding='VALID')
    fuse4 = tf.concat([lateral4, res3_4_f], axis=4)
    res4_1_f = residual_block(fuse4, 'res4_1_f', [32, 128], 1, True, 2)
    res4_2_f = residual_block(res4_1_f, 'res4_2_f', [32, 128], 1)
    res4_3_f = residual_block(res4_2_f, 'res4_3_f', [32, 128], 1)
    res4_4_f = residual_block(res4_3_f, 'res4_4_f', [32, 128], 1)
    res4_5_f = residual_block(res4_4_f, 'res4_5_f', [32, 128], 1)
    res4_6_f = residual_block(res4_5_f, 'res4_6_f', [32, 128], 1)
    # Slow stage 5: three blocks, spatial stride 2 on the first block.
    res5_1_s = residual_block(res4_6_s, 'res5_1_s', [512, 2048], 1, True, 2)
    res5_2_s = residual_block(res5_1_s, 'res5_2_s', [512, 2048], 1)
    res5_3_s = residual_block(res5_2_s, 'res5_3_s', [512, 2048], 1)
    # Fast stage 5, fed by slow/fast fusion.
    lateral5 = conv3d(res4_6_s, 'res4_6_sp', res4_6_s.shape[4], 8, 1, 1, 1, 1, 1, padding='VALID')
    fuse5 = tf.concat([lateral5, res4_6_f], axis=4)
    res5_1_f = residual_block(fuse5, 'res5_1_f', [64, 256], 1, True, 2)
    res5_2_f = residual_block(res5_1_f, 'res5_2_f', [64, 256], 1)
    res5_3_f = residual_block(res5_2_f, 'res5_3_f', [64, 256], 1)
    # Global average pooling over each pathway's remaining time/space extent
    # (8x7x7 for slow, 1x7x7 for fast with 224x224 input), then channel concat.
    pooled_s = avgpool3d(res5_3_s, 'res5_3_sp', 8, 7, 7, 8, 1, 1, 'VALID')
    pooled_f = avgpool3d(res5_3_f, 'res5_3_fp', 1, 7, 7, 1, 1, 1, 'VALID')
    fusion = tf.concat([pooled_s, pooled_f], axis=4)
    # Flatten to [batch, nodes]. `.value` is TF1-style static-shape access and
    # requires all non-batch dimensions to be known at graph-build time.
    shape = fusion.get_shape()
    nodes = shape[1].value * shape[2].value * shape[3].value * shape[4].value
    reshaped = tf.reshape(fusion, [-1, nodes], name='reshape')
    # Final fully-connected layer producing the class logits (no activation;
    # softmax is expected to be applied by the loss / caller).
    logit = fc(reshaped, 'out', num_classes, activation=False)
    return logit
