import tensorflow as tf
from module import conv2d, depthwise_conv2d, avgpool2d, fc


# MobileNet V1 depthwise-separable convolution block
def block_v1(input_tensor, name_scope, output_channel, stride=1):
    """Apply a depthwise 3x3 conv followed by a pointwise 1x1 conv.

    :param input_tensor: input 4-D tensor
    :param name_scope: name prefix for the layers in this block
    :param output_channel: number of channels produced by the pointwise conv
    :param stride: stride of the depthwise convolution
    :return: output tensor of the block
    """
    # Batch-norm hyper-parameters shared by both stages.
    bn_kwargs = dict(momentum=0.9997, epsilon=0.001)
    # Depthwise stage: one 3x3 filter per input channel, carries the stride.
    depthwise_out = depthwise_conv2d(input_tensor, name_scope + 'dw_conv',
                                     kh=3, kw=3, dh=stride, dw=stride,
                                     activation=tf.nn.relu6, **bn_kwargs)
    # Pointwise stage: 1x1 conv mixes channels up to output_channel.
    pointwise_out = conv2d(depthwise_out, name_scope + 'pw_conv', output_channel,
                           kh=1, kw=1, dh=1, dw=1, activation=tf.nn.relu6,
                           batch_normalize=True, **bn_kwargs)
    return pointwise_out


# MobileNet V2 inverted-residual (bottleneck) block
def block_v2(input_tensor, name_scope, channel_list, width_multiplier=1.0, stride=1):
    """Expand -> depthwise -> linear-project bottleneck with an optional shortcut.

    :param input_tensor: input 4-D tensor
    :param name_scope: name prefix for the layers in this block
    :param channel_list: channel counts as [in, out]; `in` is scaled by
        width_multiplier to get the expanded width, `out` is the block output
    :param width_multiplier: channel expansion factor for the first 1x1 conv
    :param stride: stride of the depthwise convolution
    :return: output tensor of the block
    """
    expanded_channel = int(channel_list[0] * width_multiplier)
    # Shortcut branch, used only when spatial dims are preserved, to keep
    # gradients flowing. NOTE(review): this variant always projects the
    # shortcut with a 1x1 conv (the reference MobileNet V2 uses an identity
    # skip when in/out channels match) and hard-codes training=True on this
    # conv only — confirm both choices against the `module.conv2d` defaults.
    if stride == 1:
        short_cut_conv = conv2d(input_tensor, name_scope + '_shortcut', channel_list[1],
                                kh=1, kw=1, dh=stride, dw=stride, padding='SAME', activation=None,
                                training=True, batch_normalize=True, momentum=0.9997, epsilon=0.001)
    else:
        short_cut_conv = None
    # First conv: 1x1 expansion, widens channels before the depthwise stage.
    conv1 = conv2d(input_tensor, name_scope + 'bn_conv1', expanded_channel, kh=1, kw=1, dh=1, dw=1,
                   activation=tf.nn.relu6, batch_normalize=True, momentum=0.9997, epsilon=0.001)
    # Second conv: 3x3 depthwise, carries the spatial stride.
    dw_conv2 = depthwise_conv2d(conv1, name_scope + 'bn_dw_conv2', kh=3, kw=3, dh=stride, dw=stride,
                                activation=tf.nn.relu6, momentum=0.9997, epsilon=0.001)
    # Third conv: 1x1 linear projection down to the output width — no
    # non-linearity here, per the MobileNet V2 "linear bottleneck" design.
    # Was `activation=False`; normalized to None for consistency with the
    # shortcut branch above (both are falsy "no activation" values).
    conv3 = conv2d(dw_conv2, name_scope + 'bn_conv3', channel_list[1], kh=1, kw=1, dh=1, dw=1,
                   activation=None, batch_normalize=True, momentum=0.9997, epsilon=0.001)
    # Residual add only when stride == 1 (shapes match in that case).
    if stride == 1:
        output_tensor = tf.add(short_cut_conv, conv3)
    else:
        output_tensor = conv3
    return output_tensor


# MobileNet V1 backbone, expects 224*224*3 inputs
def mobilenet_v1(input_tensor, num_classes):
    """Build the MobileNet V1 graph and return unnormalized class logits.

    :param input_tensor: 4-D tensor [batch_size, height, width, channels],
        typically [batch_size, 224, 224, 3]
    :param num_classes: number of output classes
    :return: logits vector (softmax is NOT applied)
    """
    # Layer 1: standard 3x3 stem conv, stride 2, with batch normalization.
    net = conv2d(input_tensor, 'conv1', 32, kh=3, kw=3, dh=2, dw=2,
                 batch_normalize=True, momentum=0.9997, epsilon=0.001)
    # Layers 2-14: depthwise-separable blocks, listed as (channels, stride).
    block_specs = [
        (64, 1), (128, 2), (128, 1), (256, 2), (256, 1), (512, 2),
        (512, 1), (512, 1), (512, 1), (512, 1), (512, 1),
        (1024, 2), (1024, 1),
    ]
    for layer_index, (channels, stride) in enumerate(block_specs, start=2):
        net = block_v1(net, 'ds_conv{}'.format(layer_index), channels, stride=stride)
    # Global average pooling over the final 7x7 feature map.
    net = avgpool2d(net, 'avg_pool', kh=7, kw=7, dh=1, dw=1, padding='VALID')
    # Drop the singleton spatial dimensions -> [batch_size, 1024].
    net = tf.squeeze(net, [1, 2], name='squeeze')
    # Layer 15: fully-connected classifier head; no activation on the logits
    # so they can feed softmax directly.
    logit = fc(net, 'out', num_classes, activation=False)
    return logit


# MobileNet V2 backbone, expects 224*224*3 inputs
def mobilenet_v2(input_tensor, num_classes):
    """Build the MobileNet V2 graph and return unnormalized class logits.

    :param input_tensor: 4-D tensor [batch_size, height, width, channels],
        typically [batch_size, 224, 224, 3]
    :param num_classes: number of output classes
    :return: logits vector (softmax is NOT applied)
    """
    # Layer 1: standard 3x3 stem conv, stride 2, with batch normalization.
    conv1 = conv2d(input_tensor, 'conv1', 32, kh=3, kw=3, dh=2, dw=2,
                   batch_normalize=True, momentum=0.9997, epsilon=0.001)
    # Layer 2: first bottleneck, expansion factor 1.
    conv2 = block_v2(conv1, 'conv2', [32, 16], stride=1)
    # Layers 3-4: bottlenecks, expansion factor 6.
    conv3 = block_v2(conv2, 'conv3', [16, 24], width_multiplier=6.0, stride=2)
    conv4 = block_v2(conv3, 'conv4', [16, 24], width_multiplier=6.0, stride=1)
    # Layers 5-7: bottlenecks.
    conv5 = block_v2(conv4, 'conv5', [24, 32], width_multiplier=6.0, stride=2)
    conv6 = block_v2(conv5, 'conv6', [24, 32], width_multiplier=6.0, stride=1)
    conv7 = block_v2(conv6, 'conv7', [24, 32], width_multiplier=6.0, stride=1)
    # Layers 8-11: bottlenecks.
    conv8 = block_v2(conv7, 'conv8', [32, 64], width_multiplier=6.0, stride=2)
    conv9 = block_v2(conv8, 'conv9', [32, 64], width_multiplier=6.0, stride=1)
    conv10 = block_v2(conv9, 'conv10', [32, 64], width_multiplier=6.0, stride=1)
    conv11 = block_v2(conv10, 'conv11', [32, 64], width_multiplier=6.0, stride=1)
    # Layers 12-14: bottlenecks.
    conv12 = block_v2(conv11, 'conv12', [64, 96], width_multiplier=6.0, stride=1)
    conv13 = block_v2(conv12, 'conv13', [64, 96], width_multiplier=6.0, stride=1)
    conv14 = block_v2(conv13, 'conv14', [64, 96], width_multiplier=6.0, stride=1)
    # Layers 15-17: bottlenecks.
    conv15 = block_v2(conv14, 'conv15', [96, 160], width_multiplier=6.0, stride=2)
    conv16 = block_v2(conv15, 'conv16', [96, 160], width_multiplier=6.0, stride=1)
    conv17 = block_v2(conv16, 'conv17', [96, 160], width_multiplier=6.0, stride=1)
    # Layer 18: final bottleneck.
    conv18 = block_v2(conv17, 'conv18', [160, 320], width_multiplier=6.0, stride=1)
    # Layer 19: 1x1 conv up to 1280 channels. NOTE(review): the reference
    # MobileNet V2 applies batch norm + relu6 here; this call relies on the
    # `module.conv2d` defaults — confirm against that helper.
    conv19 = conv2d(conv18, 'conv19', 1280, kh=1, kw=1, dh=1, dw=1)
    # Global average pooling over the final 7x7 feature map.
    avg_pool = avgpool2d(conv19, 'avg_pool', kh=7, kw=7, dh=1, dw=1, padding='VALID')
    # Layer 20: 1x1 conv classifier head. Explicitly disable the activation so
    # the logits carry no non-linearity before softmax, matching how
    # mobilenet_v1 builds its head (previously left to the conv2d default).
    logit = conv2d(avg_pool, 'out', num_classes, kh=1, kw=1, dh=1, dw=1, activation=None)
    # Drop the singleton spatial dimensions -> [batch_size, num_classes].
    logit = tf.squeeze(logit, [1, 2], name='squeeze')
    return logit
