import tensorflow as tf
from .network import BottleNeck, h_swish, ConvBlock, AttentionConvolution


class MobileNetV3Large(tf.keras.Model):
    """MobileNetV3-Large classifier that also exposes intermediate features.

    ``call`` returns a tuple ``(scores, feats)`` where ``feats`` maps names of
    the form ``"feature_1_N"`` (feature map at 1/N of the input resolution) to
    the corresponding tensors.  The fixed 7x7 average pool assumes 224x224x3
    inputs so the final feature map is 7x7.
    """

    def __init__(self, num_classes=10, activation=tf.nn.softmax):
        """Build the layer stack.

        Args:
            num_classes: number of output classes (filters of the final 1x1 conv).
            activation: activation applied to the final class scores
                (default: softmax).
        """
        super(MobileNetV3Large, self).__init__()

        # Stem: 3x3 stride-2 conv -> 1/2 resolution, 16 channels.
        self.head_conv = ConvBlock(filters=16, kernel_size=(3, 3), strides=2,
                                   padding='same', use_bias=False, activation=h_swish, name='head')

        # Inverted-residual bottlenecks.  Named blocks mark where a feature
        # map at the given fraction of the input resolution is produced.
        self.bneck1 = BottleNeck(in_size=16, exp_size=16, out_size=16, s=1, is_se_existing=False, NL="RE", k=3)
        self.bneck2 = BottleNeck(in_size=16, exp_size=64, out_size=24, s=2, is_se_existing=False, NL="RE", k=3, name='feature_1_4')
        self.bneck3 = BottleNeck(in_size=24, exp_size=72, out_size=24, s=1, is_se_existing=False, NL="RE", k=3)
        self.bneck4 = BottleNeck(in_size=24, exp_size=72, out_size=40, s=2, is_se_existing=True, NL="RE", k=5)
        self.bneck5 = BottleNeck(in_size=40, exp_size=120, out_size=40, s=1, is_se_existing=True, NL="RE", k=5, name='feature_1_8')
        self.bneck6 = BottleNeck(in_size=40, exp_size=120, out_size=40, s=1, is_se_existing=True, NL="RE", k=5)
        self.bneck7 = BottleNeck(in_size=40, exp_size=240, out_size=80, s=2, is_se_existing=False, NL="HS", k=3)
        self.bneck8 = BottleNeck(in_size=80, exp_size=200, out_size=80, s=1, is_se_existing=False, NL="HS", k=3)
        self.bneck9 = BottleNeck(in_size=80, exp_size=184, out_size=80, s=1, is_se_existing=False, NL="HS", k=3)
        self.bneck10 = BottleNeck(in_size=80, exp_size=184, out_size=80, s=1, is_se_existing=False, NL="HS", k=3)
        self.bneck11 = BottleNeck(in_size=80, exp_size=480, out_size=112, s=1, is_se_existing=True, NL="HS", k=3, name='feature_1_16')
        self.bneck12 = BottleNeck(in_size=112, exp_size=672, out_size=112, s=1, is_se_existing=True, NL="HS", k=3)
        self.bneck13 = BottleNeck(in_size=112, exp_size=672, out_size=160, s=2, is_se_existing=True, NL="HS", k=5)
        self.bneck14 = BottleNeck(in_size=160, exp_size=960, out_size=160, s=1, is_se_existing=True, NL="HS", k=5)
        self.bneck15 = BottleNeck(in_size=160, exp_size=960, out_size=160, s=1, is_se_existing=True, NL="HS", k=5, name='feature_1_32')

        # Classification head: 1x1 convs around a global 7x7 average pool.
        # The two convs after the pool skip batch norm (use_bn=False).
        self.last_conv1 = ConvBlock(filters=960, kernel_size=(1, 1), use_bias=False, activation=h_swish)
        self.last_avg = tf.keras.layers.AveragePooling2D(pool_size=(7, 7), strides=1)
        self.last_conv2 = ConvBlock(filters=1280, kernel_size=(1, 1), use_bias=True, activation=h_swish, use_bn=False)
        self.last_conv3 = ConvBlock(filters=num_classes, kernel_size=(1, 1), use_bias=True, activation=activation, use_bn=False)
        self.dropout = tf.keras.layers.Dropout(
            rate=0.2,
            name="Dropout",
        )

    @staticmethod
    def get_feature_shape(name):
        """Return the (H, W, C) shape of the named feature map for 224x224
        inputs, or None if ``name`` is not a known feature."""
        shapes = {
            "feature_1_2": (112, 112, 16),
            "feature_1_4": (56, 56, 24),
            "feature_1_8": (28, 28, 40),
            "feature_1_16": (14, 14, 112),
            "feature_1_32": (7, 7, 160)
        }
        return shapes.get(name)

    def call(self, inputs, training=None):
        """Run the network.

        Args:
            inputs: image batch, presumably (batch, 224, 224, 3) — the 7x7
                average pool requires a 7x7 map at 1/32 resolution.
            training: forwarded to every sub-layer so batch norm / dropout
                switch modes correctly.

        Returns:
            Tuple of (class scores of shape (batch, num_classes), dict of
            intermediate feature maps keyed "feature_1_2" .. "feature_1_32").
        """
        feats = {}
        # stem
        x = self.head_conv(inputs, training=training)
        feats["feature_1_2"] = x

        # body
        x = self.bneck1(x, training=training)
        x = self.bneck2(x, training=training)
        feats["feature_1_4"] = x
        x = self.bneck3(x, training=training)
        x = self.bneck4(x, training=training)
        x = self.bneck5(x, training=training)
        feats["feature_1_8"] = x
        x = self.bneck6(x, training=training)
        x = self.bneck7(x, training=training)
        x = self.bneck8(x, training=training)
        x = self.bneck9(x, training=training)
        x = self.bneck10(x, training=training)
        x = self.bneck11(x, training=training)
        feats['feature_1_16'] = x
        x = self.bneck12(x, training=training)
        x = self.bneck13(x, training=training)
        x = self.bneck14(x, training=training)
        x = self.bneck15(x, training=training)
        feats['feature_1_32'] = x

        # head — `training` is now forwarded to last_conv2/last_conv3 as well,
        # for consistency with every other ConvBlock call (they have
        # use_bn=False, so this is a no-op today but future-proof).
        x = self.last_conv1(x, training=training)
        x = self.last_avg(x)
        x = self.last_conv2(x, training=training)
        x = self.dropout(x, training=training)
        x = self.last_conv3(x, training=training)
        # Collapse the 1x1 spatial dims -> (batch, num_classes).
        x = tf.squeeze(x, axis=[1, 2])
        return x, feats


class MobileNetV3Attention(tf.keras.Model):
    """MobileNetV3-Large variant with attention convolutions injected at the
    1/2, 1/4 and 1/8 resolution stages.

    Each AttentionConvolution takes the current feature map together with the
    raw input image; the bottleneck immediately following each injection point
    has its ``in_size`` widened by 3 (``+3``) to account for the extra image
    channels.  ``call`` returns ``(scores, feats)`` like MobileNetV3Large.
    """

    def __init__(self, num_classes=10, activation=tf.nn.softmax):
        """Build the layer stack.

        Args:
            num_classes: number of output classes (filters of the final 1x1 conv).
            activation: activation applied to the final class scores
                (default: softmax).
        """
        super(MobileNetV3Attention, self).__init__()

        # Stem: 3x3 stride-2 conv -> 1/2 resolution, 16 channels.
        self.head_conv = ConvBlock(filters=16, kernel_size=(3, 3), strides=2,
                                   padding='same', use_bias=False, activation=h_swish, name='head')

        # Bottlenecks; in_size carries a "+3" right after each attention
        # convolution (bneck1, bneck3, bneck6).
        self.bneck1 = BottleNeck(in_size=16+3, exp_size=16, out_size=16, s=1, is_se_existing=False, NL="RE", k=3)
        self.bneck2 = BottleNeck(in_size=16, exp_size=64, out_size=24, s=2, is_se_existing=False, NL="RE", k=3, name='feature_1_4')
        self.bneck3 = BottleNeck(in_size=24+3, exp_size=72, out_size=24, s=1, is_se_existing=False, NL="RE", k=3)
        self.bneck4 = BottleNeck(in_size=24, exp_size=72, out_size=40, s=2, is_se_existing=True, NL="RE", k=5)
        self.bneck5 = BottleNeck(in_size=40, exp_size=120, out_size=40, s=1, is_se_existing=True, NL="RE", k=5, name='feature_1_8')
        self.bneck6 = BottleNeck(in_size=40+3, exp_size=120, out_size=40, s=1, is_se_existing=True, NL="RE", k=5)
        self.bneck7 = BottleNeck(in_size=40, exp_size=240, out_size=80, s=2, is_se_existing=False, NL="HS", k=3)
        self.bneck8 = BottleNeck(in_size=80, exp_size=200, out_size=80, s=1, is_se_existing=False, NL="HS", k=3)
        self.bneck9 = BottleNeck(in_size=80, exp_size=184, out_size=80, s=1, is_se_existing=False, NL="HS", k=3)
        self.bneck10 = BottleNeck(in_size=80, exp_size=184, out_size=80, s=1, is_se_existing=False, NL="HS", k=3)
        self.bneck11 = BottleNeck(in_size=80, exp_size=480, out_size=112, s=1, is_se_existing=True, NL="HS", k=3, name='feature_1_16')
        self.bneck12 = BottleNeck(in_size=112, exp_size=672, out_size=112, s=1, is_se_existing=True, NL="HS", k=3)
        self.bneck13 = BottleNeck(in_size=112, exp_size=672, out_size=160, s=2, is_se_existing=True, NL="HS", k=5)
        self.bneck14 = BottleNeck(in_size=160, exp_size=960, out_size=160, s=1, is_se_existing=True, NL="HS", k=5)
        self.bneck15 = BottleNeck(in_size=160, exp_size=960, out_size=160, s=1, is_se_existing=True, NL="HS", k=5, name='feature_1_32')

        # Classification head: 1x1 convs around a global 7x7 average pool.
        self.last_conv1 = ConvBlock(filters=960, kernel_size=(1, 1), use_bias=False, activation=h_swish)
        self.last_avg = tf.keras.layers.AveragePooling2D(pool_size=(7, 7), strides=1)
        self.last_conv2 = ConvBlock(filters=1280, kernel_size=(1, 1), use_bias=True, activation=h_swish, use_bn=False)
        self.last_conv3 = ConvBlock(filters=num_classes, kernel_size=(1, 1), use_bias=True, activation=activation, use_bn=False)
        self.dropout = tf.keras.layers.Dropout(
            rate=0.2,
            name="Dropout",
        )

        # Attention convolutions at 1/2, 1/4 and 1/8 resolution.
        self.aconv1 = AttentionConvolution((112, 112), theta=0.5, name='attention_1_2')
        self.aconv2 = AttentionConvolution((56, 56), theta=0.3, name='attention_1_4')
        self.aconv3 = AttentionConvolution((28, 28), theta=0.3, name='attention_1_8')

    @staticmethod
    def get_feature_shape(name):
        """Return the (H, W, C) shape of the named feature map for 224x224
        inputs, or None if ``name`` is not a known feature."""
        shapes = {
            "feature_1_2": (112, 112, 16),
            "feature_1_4": (56, 56, 24),
            "feature_1_8": (28, 28, 40),
            "feature_1_16": (14, 14, 112),
            "feature_1_32": (7, 7, 160)
        }
        return shapes.get(name)

    def call(self, inputs, training=None):
        """Run the network.

        Args:
            inputs: image batch, presumably (batch, 224, 224, 3) — the 7x7
                average pool requires a 7x7 map at 1/32 resolution.
            training: forwarded to every sub-layer so batch norm / dropout
                switch modes correctly.

        Returns:
            Tuple of (class scores of shape (batch, num_classes), dict of
            intermediate feature maps keyed "feature_1_2" .. "feature_1_32").
        """
        feats = {}
        # stem
        x = self.head_conv(inputs, training=training)
        feats["feature_1_2"] = x
        # Fix: `training` is now forwarded to aconv1 and aconv3 as well —
        # previously only aconv2 received it, so any BN/dropout inside
        # AttentionConvolution would have run in inference mode.
        x = self.aconv1(x, inputs, training=training)  # attention convolution

        # body
        x = self.bneck1(x, training=training)
        x = self.bneck2(x, training=training)
        feats["feature_1_4"] = x
        x = self.aconv2(x, inputs, training=training)  # attention convolution
        x = self.bneck3(x, training=training)
        x = self.bneck4(x, training=training)
        x = self.bneck5(x, training=training)
        feats["feature_1_8"] = x
        x = self.aconv3(x, inputs, training=training)  # attention convolution
        x = self.bneck6(x, training=training)
        x = self.bneck7(x, training=training)
        x = self.bneck8(x, training=training)
        x = self.bneck9(x, training=training)
        x = self.bneck10(x, training=training)
        x = self.bneck11(x, training=training)
        feats['feature_1_16'] = x
        x = self.bneck12(x, training=training)
        x = self.bneck13(x, training=training)
        x = self.bneck14(x, training=training)
        x = self.bneck15(x, training=training)
        feats['feature_1_32'] = x

        # head — `training` forwarded to last_conv2/last_conv3 for consistency
        # with the other ConvBlock calls (no-op today since use_bn=False).
        x = self.last_conv1(x, training=training)
        x = self.last_avg(x)
        x = self.last_conv2(x, training=training)
        x = self.dropout(x, training=training)
        x = self.last_conv3(x, training=training)
        # Collapse the 1x1 spatial dims -> (batch, num_classes).
        x = tf.squeeze(x, axis=[1, 2], name='output')
        return x, feats

if __name__ == '__main__':
    # Smoke test: push a random batch through the attention model and
    # print the shapes of the outputs, feature maps, and all weights.
    model = MobileNetV3Attention(num_classes=24)

    dummy_batch = tf.random.normal((10, 224, 224, 3))
    outputs = model(dummy_batch)
    print(outputs[0].shape)

    feature_maps = outputs[1]
    for feature_name in feature_maps:
        print(feature_maps[feature_name].shape)

    for layer in model.layers:
        for weight in layer.weights:
            print(weight.name, weight.shape)

