from tensorflow.keras import layers

def VGG16BaseModel(inputs):
    """
    Headless VGG16 backbone (ported from keras.applications) with two edits
    so the feature-map sizes match the torch reference implementation:
        1. Block 3 gains a (1,1) bottom/right zero padding before its pool.
        2. Block 5's pooling stride is changed from (2,2) to (1,1).
    Returns (conv4_3 feature map, output of the final pool).
    Shape comments read as SSD300 | SSD512 (300x300 | 512x512 inputs).
    """
    conv = layers.Conv2D      # shorthand: every conv below is 3x3/relu/same
    pool = layers.MaxPooling2D

    # Block 1
    net = conv(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(inputs)
    net = conv(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(net)
    net = pool((2, 2), strides=(2, 2), name='block1_pool')(net)  # (N,150,150,64) | (N,256,256,64)

    # Block 2
    net = conv(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(net)
    net = conv(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(net)
    net = pool((2, 2), strides=(2, 2), name='block2_pool')(net)  # (N,75,75,128) | (N,128,128,128)

    # Block 3
    net = conv(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(net)
    net = conv(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(net)
    net = conv(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(net)
    # Stock keras VGG16 has no padding here; pad bottom/right so SSD300 and
    # SSD512 share this backbone. After padding: (N,76,76,256) | (N,129,129,256)
    net = layers.ZeroPadding2D([(0, 1), (0, 1)])(net)
    net = pool((2, 2), strides=(2, 2), name='block3_pool')(net)  # (N,38,38,256) | (N,64,64,256)

    # Block 4
    net = conv(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(net)
    net = conv(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(net)
    net = conv(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(net)  # (N,38,38,512) | (N,64,64,512)
    conv4_3_feats = net  # tapped before pooling for the SSD heads
    net = pool((2, 2), strides=(2, 2), name='block4_pool')(net)  # (N,19,19,512) | (N,32,32,512)

    # Block 5 -- differs from stock keras VGG16: pooling kernel is (3,3) and
    # stride is (1,1) (instead of (2,2)/(2,2)) per the tutorial, so the
    # spatial size is preserved: (N,19,19,512) | (N,32,32,512)
    net = conv(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(net)
    net = conv(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(net)
    net = conv(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(net)
    net = pool((3, 3), strides=(1, 1), name='block5_pool', padding='same')(net)

    return conv4_3_feats, net

def VGG16Base_feats(inputs):
    """
    Feature extraction on top of the VGG16 backbone (after datawhale's
    cv-object-detect tutorial). Returns the conv4_3 and conv7 feature maps.
    Shape comments read as SSD300 | SSD512.
    """
    conv4_3_feats, net = VGG16BaseModel(inputs)

    # The tutorial uses padding + dilated convolution: pad first to enlarge
    # the feature map, then dilate to grow the receptive field. tf has no
    # automatic padding for this, so pad explicitly:
    net = layers.ZeroPadding2D([(6, 6), (6, 6)])(net)  # (N,31,31,512) | (N,44,44,512)

    # 31 - 3 - (3-1)*(6-1) + 1 = 19 | 44 - 3 - (3-1)*(6-1) + 1 = 32
    net = layers.Conv2D(1024, kernel_size=(3, 3), dilation_rate=(6, 6),
                        activation="relu", name="conv6")(net)   # (N,19,19,1024) | (N,32,32,1024)
    conv7_feats = layers.Conv2D(1024, kernel_size=(1, 1),
                                activation="relu", name="conv7")(net)  # (N,19,19,1024) | (N,32,32,1024)

    return conv4_3_feats, conv7_feats

def SSD300AuxConv(conv7_feats):
    """
    SSD300 auxiliary convolutions (after datawhale's cv-object-detect
    tutorial). Each stage squeezes channels with a 1x1 conv and then halves
    or shrinks the spatial size with a 3x3 conv.
    Returns four progressively smaller feature maps.
    """
    # conv8: (N,19,19,1024) -> 1x1 squeeze (N,19,19,256) -> strided 3x3 (N,10,10,512)
    net = layers.Conv2D(256, kernel_size=(1, 1), activation="relu", name="conv8_1")(conv7_feats)
    conv8_2_feats = layers.Conv2D(512, kernel_size=(3, 3), strides=(2, 2),
                                  padding="SAME", activation="relu", name="conv8_2")(net)

    # conv9: (N,10,10,128) -> (N,5,5,256)
    net = layers.Conv2D(128, kernel_size=(1, 1), activation="relu", name="conv9_1")(conv8_2_feats)
    conv9_2_feats = layers.Conv2D(256, kernel_size=(3, 3), strides=(2, 2),
                                  padding="SAME", activation="relu", name="conv9_2")(net)

    # conv10: valid (unpadded) 3x3, no stride: (N,5,5,128) -> (N,3,3,256)
    net = layers.Conv2D(128, kernel_size=(1, 1), activation="relu", name="conv10_1")(conv9_2_feats)
    conv10_2_feats = layers.Conv2D(256, kernel_size=(3, 3), activation="relu", name="conv10_2")(net)

    # conv11: valid 3x3 collapses the map: (N,3,3,128) -> (N,1,1,256)
    net = layers.Conv2D(128, kernel_size=(1, 1), activation="relu", name="conv11_1")(conv10_2_feats)
    conv11_2_feats = layers.Conv2D(256, kernel_size=(3, 3), activation="relu", name="conv11_2")(net)

    return conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats

def SSD300PredConv(n_classes, conv4_3_feats, conv7_feats,conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats):
    """
    SSD300 prediction heads (after datawhale's cv-object-detect tutorial).
    For each feature map, a 3x3 localization conv predicts 4 box offsets per
    prior and a 3x3 classification conv predicts n_classes scores per prior.
    Both outputs are raw logits, flattened over spatial positions and priors
    and concatenated across feature maps.
    Returns (locs, classes_scores): (N,8732,4) and (N,8732,n_classes).
    """
    # (layer name, feature map, priors per spatial position)
    head_specs = [
        ('conv4_3',  conv4_3_feats,  4),  # (N,38,38, 512)
        ('conv7',    conv7_feats,    6),  # (N,19,19,1024)
        ('conv8_2',  conv8_2_feats,  6),  # (N,10,10, 512)
        ('conv9_2',  conv9_2_feats,  6),  # (N, 5, 5, 256)
        ('conv10_2', conv10_2_feats, 4),  # (N, 3, 3, 256)
        ('conv11_2', conv11_2_feats, 4),  # (N, 1, 1, 256)
    ]

    loc_parts = []
    cls_parts = []
    for name, feat, boxes in head_specs:
        # localization head: (N,H,W,boxes*4) -> (N, H*W*boxes, 4)
        loc = layers.Conv2D(boxes * 4, kernel_size=(3, 3), padding="SAME",
                            name="loc_" + name)(feat)
        loc_parts.append(layers.Reshape([-1, 4], name="l_" + name)(loc))
        # classification head: (N,H,W,boxes*n_classes) -> (N, H*W*boxes, n_classes)
        cls = layers.Conv2D(boxes * n_classes, kernel_size=(3, 3), padding="SAME",
                            name="cl_" + name)(feat)
        cls_parts.append(layers.Reshape([-1, n_classes], name="c_" + name)(cls))

    # 5776 + 2166 + 600 + 150 + 36 + 4 = 8732 priors in total
    locs = layers.Concatenate(axis=1, name="locs")(loc_parts)                       # (N,8732,4)
    classes_scores = layers.Concatenate(axis=1, name="classes_scores")(cls_parts)   # (N,8732,n_classes)

    return locs, classes_scores

def SSD512AuxConv(conv7_feats):
    """
    SSD512 auxiliary convolutions
    (after: https://www.cnblogs.com/silence-cho/p/13977009.html).
    Each stage squeezes channels with a 1x1 conv, then halves the spatial
    size with a strided 3x3 conv; the final stage pads and uses a 4x4 conv
    to collapse to 1x1. Returns five progressively smaller feature maps.
    """
    # conv8: (N,32,32,1024) -> 1x1 squeeze (N,32,32,256) -> strided 3x3 (N,16,16,512)
    net = layers.Conv2D(256, kernel_size=(1, 1), activation="relu", name="conv8_1")(conv7_feats)
    conv8_2_feats = layers.Conv2D(512, kernel_size=(3, 3), strides=(2, 2),
                                  padding="SAME", activation="relu", name="conv8_2")(net)

    # conv9: (N,16,16,128) -> (N,8,8,256)
    net = layers.Conv2D(128, kernel_size=(1, 1), activation="relu", name="conv9_1")(conv8_2_feats)
    conv9_2_feats = layers.Conv2D(256, kernel_size=(3, 3), strides=(2, 2),
                                  padding="SAME", activation="relu", name="conv9_2")(net)

    # conv10: (N,8,8,128) -> (N,4,4,256)
    net = layers.Conv2D(128, kernel_size=(1, 1), activation="relu", name="conv10_1")(conv9_2_feats)
    conv10_2_feats = layers.Conv2D(256, kernel_size=(3, 3), strides=(2, 2),
                                   padding="SAME", activation="relu", name="conv10_2")(net)

    # conv11: (N,4,4,128) -> (N,2,2,256)
    net = layers.Conv2D(128, kernel_size=(1, 1), activation="relu", name="conv11_1")(conv10_2_feats)
    conv11_2_feats = layers.Conv2D(256, kernel_size=(3, 3), strides=(2, 2),
                                   padding="SAME", activation="relu", name="conv11_2")(net)

    # conv12: tf lacks auto-padding, so pad (N,2,2,128) to (N,4,4,128) by
    # hand, then a valid 4x4 conv collapses it to (N,1,1,256)
    net = layers.Conv2D(128, kernel_size=(1, 1), activation="relu", name="conv12_1")(conv11_2_feats)
    net = layers.ZeroPadding2D([(1, 1), (1, 1)])(net)
    conv12_2_feats = layers.Conv2D(256, kernel_size=(4, 4), activation="relu", name="conv12_2")(net)

    return conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats, conv12_2_feats

def SSD512PredConv(n_classes, conv4_3_feats, conv7_feats,conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats, conv12_2_feats):
    """
    SSD512 prediction heads
    (after: https://www.cnblogs.com/silence-cho/p/13977009.html).
    For each feature map, a 3x3 localization conv predicts 4 box offsets per
    prior and a 3x3 classification conv predicts n_classes scores per prior.
    Both outputs are raw logits, flattened over spatial positions and priors
    and concatenated across feature maps.
    Returns (locs, classes_scores): (N,24564,4) and (N,24564,n_classes).
    """
    # (layer name, feature map, priors per spatial position)
    head_specs = [
        ('conv4_3',  conv4_3_feats,  4),  # (N,64,64, 512)
        ('conv7',    conv7_feats,    6),  # (N,32,32,1024)
        ('conv8_2',  conv8_2_feats,  6),  # (N,16,16, 512)
        ('conv9_2',  conv9_2_feats,  6),  # (N, 8, 8, 256)
        ('conv10_2', conv10_2_feats, 6),  # (N, 4, 4, 256)
        ('conv11_2', conv11_2_feats, 4),  # (N, 2, 2, 256)
        ('conv12_2', conv12_2_feats, 4),  # (N, 1, 1, 256)
    ]

    loc_parts = []
    cls_parts = []
    for name, feat, boxes in head_specs:
        # localization head: (N,H,W,boxes*4) -> (N, H*W*boxes, 4)
        loc = layers.Conv2D(boxes * 4, kernel_size=(3, 3), padding="SAME",
                            name="loc_" + name)(feat)
        loc_parts.append(layers.Reshape([-1, 4], name="l_" + name)(loc))
        # classification head: (N,H,W,boxes*n_classes) -> (N, H*W*boxes, n_classes)
        cls = layers.Conv2D(boxes * n_classes, kernel_size=(3, 3), padding="SAME",
                            name="cl_" + name)(feat)
        cls_parts.append(layers.Reshape([-1, n_classes], name="c_" + name)(cls))

    # 16384 + 6144 + 1536 + 384 + 96 + 16 + 4 = 24564 priors in total
    locs = layers.Concatenate(axis=1, name="locs")(loc_parts)                       # (N,24564,4)
    classes_scores = layers.Concatenate(axis=1, name="classes_scores")(cls_parts)   # (N,24564,n_classes)

    return locs, classes_scores