#---------------------------------------------------------------------------#
#   End-to-end network: input layer + encoder_net + decoder_net
#   + output layer + model assembly
#---------------------------------------------------------------------------#
import tensorflow as tf 
import tensorflow.keras as keras

'''
SegNet-style semantic segmentation model: a frozen, ImageNet-pretrained
MobileNet encoder followed by an upsampling decoder and a softmax output.
'''

def self_defined_upSample():
    '''
        Placeholder for a custom upsampling layer.

        # Background:
            In the reference code this was based on, keras.layers.UpSampling2D()
            does not reuse the max-pooling indices (it uses nearest-neighbor
            interpolation). This hand-written upSample function is intended to
            reuse the max-pooling indices, as in the original SegNet design.
            Open question: in practice, how different are nearest-neighbor
            interpolation and pooling-index reuse?

        NOTE(review): not implemented yet — decoder(..., model='self_defined')
        will fail at call time until this returns a callable layer.
    '''
    pass

def decoder(feature_map, num_classes, num_upSample, model='keras'):
    '''
        SegNet-style decoder head.

        Upsamples `feature_map` back towards the input resolution through a
        sequence of (UpSampling -> ZeroPadding -> Conv -> BatchNorm) stages,
        then projects to per-pixel class scores.

        # Arguments
            feature_map     encoder output tensor (e.g. 26,26,512 for a 416x416
                            input with the default 16x-downsampling encoder)
            num_classes     number of foreground classes; one extra channel is
                            added for the background class
            num_upSample    number of 2x upsampling steps; must be >= 2 (one
                            leading step, num_upSample-2 middle steps, one
                            trailing step)
            model           'keras'        -> keras.layers.UpSampling2D
                            'self_defined' -> self_defined_upSample (stub; see
                                              NOTE below)

        # Returns
            tensor with num_classes+1 channels of raw (pre-softmax) scores;
            spatial dims are feature_map's scaled by 2**num_upSample.

        # Raises
            ValueError      if `model` is not a known mode or num_upSample < 2
    '''
    # Validate with raises, not asserts: asserts are stripped under `python -O`.
    if model not in ('keras', 'self_defined'):
        raise ValueError('upSample function in net_model decoder should be '
                         'choosed in "keras" or "self_defined"!')
    if num_upSample < 2:
        # The original code indexed past the end of the layer list here.
        raise ValueError('num_upSample must be >= 2, got %d' % num_upSample)

    # Build one upsampling layer per step up front, then consume them in order
    # via an iterator (replaces the manual `i` index bookkeeping).
    if model == 'keras':
        up_layers = [keras.layers.UpSampling2D((2, 2)) for _ in range(num_upSample)]
    else:
        # NOTE(review): self_defined_upSample is a zero-argument stub; calling
        # it as a layer below will raise TypeError until it is implemented.
        up_layers = [self_defined_upSample for _ in range(num_upSample)]
    up_iter = iter(up_layers)

    # Refine the encoder output at full depth.
    # 26,26,512
    o = keras.layers.ZeroPadding2D((1, 1))(feature_map)
    o = keras.layers.Conv2D(512, (3, 3), padding='valid')(o)
    o = keras.layers.BatchNormalization()(o)

    # First upsampling: hw becomes 1/8 of the input.
    # 52,52,256
    o = next(up_iter)(o)
    o = keras.layers.ZeroPadding2D((1, 1))(o)
    o = keras.layers.Conv2D(256, (3, 3), padding='valid')(o)
    o = keras.layers.BatchNormalization()(o)

    # Middle upsamplings: hw becomes 1/4 of the input.
    # 104,104,128
    for _ in range(num_upSample - 2):
        o = next(up_iter)(o)
        o = keras.layers.ZeroPadding2D((1, 1))(o)
        o = keras.layers.Conv2D(128, (3, 3), padding='valid')(o)
        o = keras.layers.BatchNormalization()(o)

    # Last upsampling: hw becomes 1/2 of the input.
    # 208,208,64
    o = next(up_iter)(o)
    o = keras.layers.ZeroPadding2D((1, 1))(o)
    o = keras.layers.Conv2D(64, (3, 3), padding='valid')(o)
    o = keras.layers.BatchNormalization()(o)

    # Final projection: h_input/2, w_input/2, num_classes+1
    # (the +1 channel carries the background-class score).
    decoded_map = keras.layers.Conv2D(num_classes + 1, (3, 3), padding='same')(o)

    return decoded_map

def get_net_model(net_input_shape, num_classes, num_upSample=3, encoder_level=3):
    '''
        Build the full SegNet-style model:
        input -> MobileNet encoder (frozen, ImageNet weights) -> decoder -> softmax.

        # Arguments
            net_input_shape  (h, w, c) of the network input, e.g. (416, 416, 3)
            num_classes      number of foreground classes (the decoder adds one
                             background channel internally)
            num_upSample     number of 2x upsampling steps in the decoder;
                             SegNet implementations typically pair n
                             downsampling steps with n/2 upsampling steps
            encoder_level    which MobileNet block's output feeds the decoder;
                             e.g. encoder_level=3 takes the 3rd block
                             (0-based), i.e. 4 downsamplings / 16x reduction

        # Returns
            an uncompiled keras.models.Model mapping the input image to
            per-pixel class probabilities

        # Raises
            ValueError       if encoder_level is outside the supported range
    '''
    # 1. Input layer
    i_put = keras.layers.Input(shape=net_input_shape)

    # 2. Encoder: MobileNet truncated at the end of the chosen block.
    m_net = keras.applications.mobilenet.MobileNet(include_top=False,
                                                   weights='imagenet',
                                                   input_shape=net_input_shape)
    # Index of the last layer of each MobileNet block:
    # blockIdx,multiple,layerIdx: 0,2x,10  1,4x,23  2,8x,36  3,16x,73  4,32x,86
    m_net_BlockendLayerIndex = [10, 23, 36, 73, 86]
    if not 0 <= encoder_level < len(m_net_BlockendLayerIndex):
        # Explicit check instead of a cryptic IndexError from the list lookup.
        raise ValueError('encoder_level must be in [0, %d], got %r'
                         % (len(m_net_BlockendLayerIndex) - 1, encoder_level))
    tmp_idx = m_net_BlockendLayerIndex[encoder_level]
    encoder = keras.models.Sequential(m_net.layers[0:tmp_idx + 1])
    encoder.trainable = False  # freeze the pretrained backbone

    feature_map = encoder(i_put)

    # 3. Decoder
    decoded_map = decoder(feature_map, num_classes, num_upSample)

    # 4. Output layer: per-pixel class probabilities
    o_put = keras.layers.Softmax()(decoded_map)

    # 5. Assemble the model
    net_model = keras.models.Model(i_put, o_put)

    return net_model

if __name__ == '__main__':
    # Smoke test: build the SegNet model and print its layer summary.
    print('TST MSG: 测试SegNet网络建模是否正确')
    net_model = get_net_model((416,416,3), 1)
    print('TST MSG: "keras" model')
    net_model.summary()
    # TODO: exercise the 'self_defined' upsampling mode once implemented:
    # print('TST MSG: "self_defined" model')
    # net_model.summary()