
import os, math
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
from tensorflow.keras import layers
import tensorflow as tf
from tensorflow import keras
import numpy as np
import  tensorflow.keras.backend as K
from tensorflow.keras.applications import ResNet50
from tensorflow.keras import initializers

def makeDarkNet(IMGSZ=512, GRIDSZ=16, classNum=2):
    """Build a DarkNet-19 / YOLOv2-style detection network (functional API).

    Args:
        IMGSZ: square input image size in pixels.
        GRIDSZ: output grid size; must equal IMGSZ / 32 (five 2x2 poolings).
        classNum: number of object classes. The head predicts 5 anchors per
            cell, each with 5 box values (x, y, w, h, objectness) plus
            classNum class scores.

    Returns:
        keras.Model mapping [b, IMGSZ, IMGSZ, 3] images to raw predictions of
        shape [b, GRIDSZ, GRIDSZ, 5, 5 + classNum]. The final conv layer is
        re-initialized with small random weights.
    """

    def conv_bn_leaky(inputs, filters, kernel_size, tag):
        # Conv2D (no bias) -> BatchNorm -> LeakyReLU(0.1).
        # Layer names keep the original conv_<tag>/norm_<tag> scheme so
        # pretrained DarkNet weights can still be loaded by name.
        y = layers.Conv2D(filters, (kernel_size, kernel_size), strides=(1, 1),
                          padding='same', name='conv_%s' % tag,
                          use_bias=False)(inputs)
        y = layers.BatchNormalization(name='norm_%s' % tag)(y)
        return layers.LeakyReLU(alpha=0.1)(y)

    input_image = layers.Input((IMGSZ, IMGSZ, 3), dtype='float32')

    # Backbone layers 1-13: (filters, kernel size) per layer, with a 2x2
    # max-pool after layers 1, 2, 5 and 8.
    backbone = [(32, 3), (64, 3), (128, 3), (64, 1), (128, 3), (256, 3),
                (128, 1), (256, 3), (512, 3), (256, 1), (512, 3), (256, 1),
                (512, 3)]
    pool_after = {1, 2, 5, 8}

    x = input_image
    for idx, (filters, ksize) in enumerate(backbone, start=1):
        x = conv_bn_leaky(x, filters, ksize, idx)
        if idx in pool_after:
            x = layers.MaxPooling2D(pool_size=(2, 2))(x)

    # Keep the [b,32,32,512] feature map for the passthrough connection.
    skip_x = x

    x = layers.MaxPooling2D(pool_size=(2, 2))(x)

    # Head layers 14-20.
    head = [(1024, 3), (512, 1), (1024, 3), (512, 1), (1024, 3), (1024, 3),
            (1024, 3)]
    for idx, (filters, ksize) in enumerate(head, start=14):
        x = conv_bn_leaky(x, filters, ksize, idx)

    # Layer 21: compress the skip features, then space-to-depth so they match
    # the head resolution. [b,32,32,64] -> [b,16,16,256]
    skip_x = conv_bn_leaky(skip_x, 64, 1, 21)
    skip_x = SpaceToDepth(block_size=2)(skip_x)

    # concat: [b,16,16,256] + [b,16,16,1024] -> [b,16,16,1280]
    x = tf.concat([skip_x, x], axis=-1)

    # Layer 22 plus dropout for regularization.
    x = conv_bn_leaky(x, 1024, 3, 22)
    x = layers.Dropout(0.5)(x)

    # Layer 23: 1x1 detection conv (with bias, no BN/activation).
    x = layers.Conv2D(5 * (5 + classNum), (1, 1), strides=(1, 1),
                      padding='same', name='conv_23')(x)

    # [b,GRIDSZ,GRIDSZ,5*(5+classNum)] -> [b,GRIDSZ,GRIDSZ,5,5+classNum]
    output = layers.Reshape((GRIDSZ, GRIDSZ, 5, (5 + classNum)))(x)

    # create model
    model = keras.models.Model(input_image, output)

    # Re-initialize the detection conv (layers[-2]; layers[-1] is the Reshape)
    # with small weights so a fresh head does not swamp the backbone.
    layer = model.layers[-2]
    layer.trainable = True

    weights = layer.get_weights()

    new_kernel = np.random.normal(size=weights[0].shape) / (GRIDSZ * GRIDSZ)
    new_bias = np.random.normal(size=weights[1].shape) / (GRIDSZ * GRIDSZ)

    layer.set_weights([new_kernel, new_bias])

    return model


def makeDarkNet2(IMGSZ=512, GRIDSZ=16, classNum=2):
    """Build the DarkNet-19 variant with two residual refinement branches.

    Same as makeDarkNet, but after layer 18 two bottleneck branches
    (conv_17_2/18_2 and conv_17_3/18_3) are added back onto the trunk.

    Args:
        IMGSZ: square input image size in pixels.
        GRIDSZ: output grid size; must equal IMGSZ / 32 (five 2x2 poolings).
        classNum: number of object classes. Defaults to 2, matching the
            previously hard-coded 5 * 7 output channels.

    Returns:
        keras.Model mapping [b, IMGSZ, IMGSZ, 3] images to raw predictions of
        shape [b, GRIDSZ, GRIDSZ, 5, 5 + classNum]. The final conv layer is
        re-initialized with small random weights.
    """

    def conv_bn_leaky(inputs, filters, kernel_size, tag):
        # Conv2D (no bias) -> BatchNorm -> LeakyReLU(0.1).
        # Layer names keep the original conv_<tag>/norm_<tag> scheme so
        # pretrained weights can still be loaded by name.
        y = layers.Conv2D(filters, (kernel_size, kernel_size), strides=(1, 1),
                          padding='same', name='conv_%s' % tag,
                          use_bias=False)(inputs)
        y = layers.BatchNormalization(name='norm_%s' % tag)(y)
        return layers.LeakyReLU(alpha=0.1)(y)

    input_image = layers.Input((IMGSZ, IMGSZ, 3), dtype='float32')

    # Backbone layers 1-13: (filters, kernel size) per layer, with a 2x2
    # max-pool after layers 1, 2, 5 and 8.
    backbone = [(32, 3), (64, 3), (128, 3), (64, 1), (128, 3), (256, 3),
                (128, 1), (256, 3), (512, 3), (256, 1), (512, 3), (256, 1),
                (512, 3)]
    pool_after = {1, 2, 5, 8}

    x = input_image
    for idx, (filters, ksize) in enumerate(backbone, start=1):
        x = conv_bn_leaky(x, filters, ksize, idx)
        if idx in pool_after:
            x = layers.MaxPooling2D(pool_size=(2, 2))(x)

    # Keep the [b,32,32,512] feature map for the passthrough connection.
    skip_x = x

    x = layers.MaxPooling2D(pool_size=(2, 2))(x)

    # Head layers 14-18.
    head = [(1024, 3), (512, 1), (1024, 3), (512, 1), (1024, 3)]
    for idx, (filters, ksize) in enumerate(head, start=14):
        x = conv_bn_leaky(x, filters, ksize, idx)

    # Residual refinement: two bottleneck branches (1x1 reduce then 3x3
    # expand) added back onto the trunk.
    for tag in ('2', '3'):
        branch = conv_bn_leaky(x, 512, 1, '17_' + tag)
        branch = conv_bn_leaky(branch, 1024, 3, '18_' + tag)
        x = tf.add(x, branch)

    # Layers 19-20.
    x = conv_bn_leaky(x, 1024, 3, 19)
    x = conv_bn_leaky(x, 1024, 3, 20)

    # Layer 21: compress the skip features, then space-to-depth so they match
    # the head resolution. [b,32,32,64] -> [b,16,16,256]
    skip_x = conv_bn_leaky(skip_x, 64, 1, 21)
    skip_x = SpaceToDepth(block_size=2)(skip_x)

    # concat: [b,16,16,256] + [b,16,16,1024] -> [b,16,16,1280]
    x = tf.concat([skip_x, x], axis=-1)

    # Layer 22 plus dropout for regularization.
    x = conv_bn_leaky(x, 1024, 3, 22)
    x = layers.Dropout(0.5)(x)

    # Layer 23: 1x1 detection conv (with bias, no BN/activation).
    x = layers.Conv2D(5 * (5 + classNum), (1, 1), strides=(1, 1),
                      padding='same', name='conv_23')(x)

    # [b,GRIDSZ,GRIDSZ,5*(5+classNum)] -> [b,GRIDSZ,GRIDSZ,5,5+classNum]
    output = layers.Reshape((GRIDSZ, GRIDSZ, 5, 5 + classNum))(x)

    # create model
    model = keras.models.Model(input_image, output)

    # Re-initialize the detection conv (layers[-2]; layers[-1] is the Reshape)
    # with small weights so a fresh head does not swamp the backbone.
    layer = model.layers[-2]
    layer.trainable = True

    weights = layer.get_weights()

    new_kernel = np.random.normal(size=weights[0].shape) / (GRIDSZ * GRIDSZ)
    new_bias = np.random.normal(size=weights[1].shape) / (GRIDSZ * GRIDSZ)

    layer.set_weights([new_kernel, new_bias])

    return model

# def makeDarkNet(IMGSZ=512, GRIDSZ=16):
#
#     input_image = layers.Input((IMGSZ, IMGSZ, 3), dtype='float32')
#
#     # unit1
#     x = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same', name='conv_1', use_bias=False)(input_image)
#     x = layers.BatchNormalization(name='norm_1')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     x = layers.MaxPooling2D(pool_size=(2, 2))(x)
#
#     # unit2
#     x = layers.Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='conv_2', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_2')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#     x = layers.MaxPooling2D(pool_size=(2, 2))(x)
#
#     # Layer 3
#     x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='conv_3', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_3')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # Layer 4
#     x = layers.Conv2D(64, (1, 1), strides=(1, 1), padding='same', name='conv_4', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_4')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # Layer 5
#     x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='conv_5', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_5')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#     x = layers.MaxPooling2D(pool_size=(2, 2))(x)
#
#     # Layer 6
#     x = layers.Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='conv_6', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_6')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # Layer 7
#     x = layers.Conv2D(128, (1, 1), strides=(1, 1), padding='same', name='conv_7', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_7')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # Layer 8
#     x = layers.Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='conv_8', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_8')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#     x = layers.MaxPooling2D(pool_size=(2, 2))(x)
#
#     # Layer 9
#     x = layers.Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv_9', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_9')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # Layer 10
#     x = layers.Conv2D(256, (1, 1), strides=(1, 1), padding='same', name='conv_10', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_10')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # Layer 11
#     x = layers.Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv_11', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_11')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # Layer 12
#     x = layers.Conv2D(256, (1, 1), strides=(1, 1), padding='same', name='conv_12', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_12')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # Layer 13
#     x = layers.Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv_13', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_13')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # for skip connection
#     skip_x = x  # [b,32,32,512]
#
#     x = layers.MaxPooling2D(pool_size=(2, 2))(x)
#
#     # Layer 14
#     x = layers.Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_14', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_14')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # Layer 15
#     x = layers.Conv2D(512, (1, 1), strides=(1, 1), padding='same', name='conv_15', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_15')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # Layer 16
#     x = layers.Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_16', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_16')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # Layer 17
#     x = layers.Conv2D(512, (1, 1), strides=(1, 1), padding='same', name='conv_17', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_17')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # Layer 18
#     x = layers.Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_18', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_18')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # Layer 19
#     x = layers.Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_19', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_19')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # Layer 20
#     x = layers.Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_20', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_20')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#
#     # Layer 21
#     skip_x = layers.Conv2D(64, (1, 1), strides=(1, 1), padding='same', name='conv_21', use_bias=False)(skip_x)
#     skip_x = layers.BatchNormalization(name='norm_21')(skip_x)
#     skip_x = layers.LeakyReLU(alpha=0.1)(skip_x)
#
#     skip_x = SpaceToDepth(block_size=2)(skip_x)
#
#     # concat
#     # [b,16,16,1024], [b,16,16,256],=> [b,16,16,1280]
#     x = tf.concat([skip_x, x], axis=-1)
#
#     # Layer 22
#     x = layers.Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_22', use_bias=False)(x)
#     x = layers.BatchNormalization(name='norm_22')(x)
#     x = layers.LeakyReLU(alpha=0.1)(x)
#     x = layers.Dropout(0.5)(x)  # add dropout
#     # [b,16,16,5,7] => [b,16,16,35]
#
#     x = layers.Conv2D(5 * 7, (1, 1), strides=(1, 1), padding='same', name='conv_23')(x)
#
#     output = layers.Reshape((GRIDSZ, GRIDSZ, 5, 6))(x)
#
#     # create model
#     model = keras.models.Model(input_image, output)
#
#     layer = model.layers[-2]  # last convolutional layer
#     # print(layer.name)
#     layer.trainable = True
#
#     weights = layer.get_weights()
#
#     new_kernel = np.random.normal(size=weights[0].shape) / (GRIDSZ * GRIDSZ)
#     new_bias = np.random.normal(size=weights[1].shape) / (GRIDSZ * GRIDSZ)
#
#     layer.set_weights([new_kernel, new_bias])
#
#     return model
#



class DBL(layers.Layer):
    """Darknet basic unit: Conv2D (no bias) -> BatchNorm -> LeakyReLU(0.1)."""

    def __init__(self, filters, kernel_size, strides, padding, **kwargs):
        """Create the three sublayers.

        Args:
            filters: number of convolution filters.
            kernel_size: convolution kernel size.
            strides: convolution strides.
            padding: 'same' or 'valid'.
        """
        super(DBL, self).__init__(**kwargs)

        # NOTE(review): a custom He-style initializer experiment was dead code
        # here (the scale was always None), so the branch was removed and the
        # default kernel initializer is used.
        self.conv = layers.Conv2D(filters=filters, kernel_size=kernel_size,
                                  strides=strides, padding=padding,
                                  use_bias=False)
        self.bn = layers.BatchNormalization()
        self.leakyRelu = layers.LeakyReLU(alpha=0.1)

    def call(self, inputs, training=None):
        """Apply conv -> batch-norm -> leaky-relu.

        Fix: the previous default `training=True` forced BatchNorm into
        training mode (batch statistics) even at inference; `None` lets Keras
        propagate the actual phase.
        """
        x = self.conv(inputs)
        x = self.bn(x, training=training)
        outputs = self.leakyRelu(x)

        return outputs



class ResBlock(layers.Layer):
    """Residual block: two 3x3 DBL units plus a projected shortcut.

    The shortcut always goes through a 1x1 conv so it matches the main
    branch's channel count and spatial size (needed whenever `filters` or
    `strides` changes the shape).
    """

    def __init__(self, filters, strides=1, **kwargs):
        super(ResBlock, self).__init__(**kwargs)

        # First DBL may downsample (strides > 1); second keeps stride 1.
        self.DBL1 = DBL(filters=filters, kernel_size=3, strides=strides, padding="same")
        self.DBL2 = DBL(filters=filters, kernel_size=3, strides=1, padding="same")

        # 1x1 projection for the shortcut path.
        self.matchInputsShape = layers.Conv2D(filters=filters, kernel_size=1, strides=strides, padding='same')

    def call(self, inputs, training=None):
        """Forward pass.

        Fix: `training` is now propagated to the DBL sublayers; previously it
        was dropped, so their BatchNorm layers ignored the inference phase.
        """
        x = self.DBL1(inputs, training=training)
        x = self.DBL2(x, training=training)

        inputsAfterMatch = self.matchInputsShape(inputs)

        # Residual addition.
        outputs = x + inputsAfterMatch

        return outputs


class DarkNet53(keras.Model):
    """DarkNet-53-style backbone built from ResBlock stages with a YOLO head.

    Maps [b, IMGSZ, IMGSZ, 3] images to raw predictions of shape
    [b, GRIDSZ, GRIDSZ, 5, 5 + classNum] (5 anchors per cell, each with
    x/y/w/h/objectness plus class scores).
    """

    def __init__(self, perResNum=None, IMGSZ=512, GRIDSZ=16, classNum=2):
        """Build all stages.

        Args:
            perResNum: number of residual blocks per stage;
                defaults to [1, 2, 8, 8, 4] (the DarkNet-53 layout).
            IMGSZ: square input size in pixels.
            GRIDSZ: output grid size; must equal IMGSZ / 32.
            classNum: number of classes. Defaults to 2, matching the
                previously hard-coded 35 = 5 * (5 + 2) output channels.
        """
        super(DarkNet53, self).__init__()
        if perResNum is None:
            perResNum = [1, 2, 8, 8, 4]
        self.perResNum = perResNum
        self.IMGSZ = IMGSZ
        self.GRIDSZ = GRIDSZ
        self.classNum = classNum

        # Stem: 512,512,3 -> 512,512,32
        self.primerDBL = DBL(filters=32, kernel_size=3, strides=1, padding="same")

        # Each stage halves the spatial size (strides=2 on its first block).
        # 512,512,32 -> 256,256,64
        self.layer1 = self.buildResBlock(64, perResNum[0], strides=2)
        # 256,256,64 -> 128,128,128
        self.layer2 = self.buildResBlock(128, perResNum[1], strides=2)
        # 128,128,128 -> 64,64,256
        self.layer3 = self.buildResBlock(256, perResNum[2], strides=2)
        # 64,64,256 -> 32,32,512
        self.layer4 = self.buildResBlock(512, perResNum[3], strides=2)
        # 32,32,512 -> 16,16,1024
        self.layer5 = self.buildResBlock(1024, perResNum[4], strides=2)
        # 16,16,1024 -> 16,16,512
        self.layer6 = ResBlock(filters=512, strides=1)
        # 16,16,512 -> 16,16,1024
        self.layer7 = DBL(filters=1024, kernel_size=3, strides=1, padding="same")

        # Detection head: 16,16,1024 -> 16,16,5*(5+classNum)
        self.finalLayer = layers.Conv2D(5 * (5 + classNum), (1, 1),
                                        strides=(1, 1), padding='same',
                                        trainable=True)
        self.finalReshape = layers.Reshape((GRIDSZ, GRIDSZ, 5, 5 + classNum))

    def call(self, inputs, training=None):
        """Forward pass; `training` is propagated to every stage so that
        BatchNorm layers see the correct phase."""
        x = self.primerDBL(inputs, training=training)
        x = self.layer1(x, training=training)
        x = self.layer2(x, training=training)
        x = self.layer3(x, training=training)
        x = self.layer4(x, training=training)
        x = self.layer5(x, training=training)
        x = self.layer6(x, training=training)
        x = self.layer7(x, training=training)
        x = self.finalLayer(x, training=training)
        outputs = self.finalReshape(x)

        return outputs

    def buildResBlock(self, filters, blockNum, strides=1):
        """Stack `blockNum` ResBlocks into a Sequential stage.

        The first block uses the given strides (downsampling); the remaining
        blocks keep stride 1.
        """
        resBlocks = keras.Sequential()

        resBlocks.add(ResBlock(filters=filters, strides=strides))

        for _ in range(1, blockNum):
            resBlocks.add(ResBlock(filters=filters, strides=1))

        return resBlocks


def makeDarkNet53(perResNum=None, IMGSZ=512, GRIDSZ=16):
    """Build a DarkNet53 model and re-initialize its detection conv.

    Args:
        perResNum: residual blocks per stage; defaults to [1, 2, 8, 8, 4].
        IMGSZ: square input size in pixels.
        GRIDSZ: output grid size; must equal IMGSZ / 32.

    Returns:
        A built DarkNet53 model whose final conv layer has been
        re-initialized with small random weights.
    """
    if perResNum is None:
        perResNum = [1, 2, 8, 8, 4]
    # create model
    model = DarkNet53(perResNum, IMGSZ, GRIDSZ)
    # Fix: build with the requested input size — this was hard-coded to 512
    # and silently ignored the IMGSZ argument.
    model.build((None, IMGSZ, IMGSZ, 3))

    # Re-initialize the detection conv (layers[-2]; layers[-1] is the Reshape)
    # with small weights scaled down by the grid area.
    layer = model.layers[-2]
    layer.trainable = True

    weights = layer.get_weights()

    new_kernel = np.random.normal(size=weights[0].shape) / (GRIDSZ * GRIDSZ)
    new_bias = np.random.normal(size=weights[1].shape) / (GRIDSZ * GRIDSZ)

    layer.set_weights([new_kernel, new_bias])

    return model




class SpaceToDepth(layers.Layer):
    """Rearrange spatial blocks of size block_size x block_size into channels.

    [b, H, W, C] -> [b, H // block_size, W // block_size, C * block_size**2].
    Used for the YOLOv2 passthrough connection.
    """

    def __init__(self, block_size, **kwargs):
        self.block_size = block_size
        super(SpaceToDepth, self).__init__(**kwargs)

    def call(self, inputs):
        bs = self.block_size
        # Static shape; batch dim is replaced with -1 so dynamic batch works.
        _, height, width, depth = K.int_shape(inputs)
        out_h = height // bs
        out_w = width // bs
        # Split each spatial axis into (cells, within-block offset) ...
        blocked = K.reshape(inputs, (-1, out_h, bs, out_w, bs, depth))
        # ... group the two offset axes next to the channel axis ...
        shuffled = K.permute_dimensions(blocked, (0, 1, 3, 2, 4, 5))
        # ... and fold offsets + channels into one channel axis.
        return K.reshape(shuffled, (-1, out_h, out_w, depth * bs * bs))

    def compute_output_shape(self, input_shape):
        bs = self.block_size
        return tf.TensorShape([input_shape[0],
                               input_shape[1] // bs,
                               input_shape[2] // bs,
                               input_shape[3] * bs * bs])

    def get_config(self):
        # Include block_size so the layer round-trips through serialization.
        base = super().get_config().copy()
        base['block_size'] = self.block_size
        return base



if __name__ == '__main__':

    # Smoke test: run a random batch through makeDarkNet2 and show the
    # output shape. (makeDarkNet53 can be swapped in for the same check.)
    dummy_batch = tf.random.normal((4, 512, 512, 3))
    net = makeDarkNet2()

    prediction = net(dummy_batch)
    print('out:', prediction.shape)