# coding=utf-8
from .MobileNetV3Large import MobileNetV3Large
from .bap import BAP
import tensorflow as tf


class SceneNet(tf.keras.Model):
    """Scene classification network with attention-based part localization.

    Wraps a MobileNetV3Large backbone, predicts `num_atts` spatial attention
    maps from an intermediate backbone feature map, and pools that feature map
    through bilinear attention pooling (BAP) into a second set of logits.
    """

    def __init__(self, num_classes, num_atts=6, *args, **kwargs):
        """Build the backbone, attention head, BAP module and attention FC.

        Args:
            num_classes: number of output classes for both logit heads.
            num_atts: number of attention maps (parts) to predict.
        """
        super().__init__(*args, **kwargs)

        self.backbone = MobileNetV3Large(num_classes, activation=None)
        # 1x1 conv emitting one softmax-normalized map per attention part.
        self.attention = tf.keras.layers.Conv2D(filters=num_atts,
                                                kernel_size=(1, 1),
                                                strides=1,
                                                padding='same',
                                                activation=tf.keras.activations.softmax)

        # Intermediate backbone feature the attention head runs on
        # (presumably the 1/32-resolution stage — name comes from the backbone).
        self.feature_name = "feature_1_32"
        self.bap = BAP(self.backbone.get_feature_shape(self.feature_name), num_atts)
        self.att_fcs = tf.keras.layers.Dense(num_classes)

    def call(self, inputs, training=None, mask=None):
        """Run a forward pass.

        Returns:
            (logits, alogits, attentions): backbone logits, attention-pooled
            logits, and the raw attention maps.
        """
        # Pass `training` by keyword so Keras recognizes it as the
        # training-mode flag instead of an arbitrary positional argument.
        logits, features = self.backbone(inputs, training=training)

        feature = features[self.feature_name]
        attentions = self.attention(feature)

        pooled = self.bap(feature, attentions)
        alogits = self.att_fcs(pooled)

        return logits, alogits, attentions


@tf.function
def attention_crop(attention_maps):
    """Sample one attention part per image and return a crop box around it.

    A part is sampled with probability proportional to the sqrt-damped mean
    activation of its attention map; the bounding box of the pixels above a
    random fraction of that map's maximum, padded by 0.1 on each side, is
    returned.

    Args:
        attention_maps: (batch, height, width, num_parts) tensor with a
            statically known shape (the Python loop unrolls over the batch).

    Returns:
        (batch, 4) float32 tensor of [ymin, xmin, ymax, xmax] boxes in
        normalized coordinates; they may extend slightly outside [0, 1],
        which `tf.image.crop_and_resize` handles by extrapolation.
    """
    batch_size, height, width, num_parts = attention_maps.shape
    bboxes = []
    for i in range(batch_size):
        attention_map = attention_maps[i]
        part_weights = tf.reduce_mean(attention_map, axis=[0, 1])
        part_weights = tf.sqrt(part_weights)
        part_weights = part_weights / tf.reduce_sum(part_weights)
        # tf.random.categorical expects *log*-probabilities; passing the raw
        # probabilities would sample from softmax(p), i.e. almost uniformly.
        part_logits = tf.math.log(tf.expand_dims(part_weights, axis=0))
        selected_index = tf.random.categorical(part_logits, 1)[0][0]
        mask = attention_map[:, :, selected_index]

        # Random threshold varies the crop tightness between steps.
        threshold = tf.random.uniform((1,), minval=0.4, maxval=0.6)
        itemindex = tf.where(mask >= tf.reduce_max(mask) * threshold[0])

        # Tight box around the active region, padded by 10% of the image.
        ymin = tf.reduce_min(itemindex[:, 0]) / height - 0.1
        ymax = tf.reduce_max(itemindex[:, 0]) / height + 0.1
        xmin = tf.reduce_min(itemindex[:, 1]) / width - 0.1
        xmax = tf.reduce_max(itemindex[:, 1]) / width + 0.1

        bboxes.append(tf.stack([ymin, xmin, ymax, xmax], axis=0))
    return tf.cast(tf.stack(bboxes), dtype=tf.float32)


@tf.function
def attention_drop(attention_maps):
    """Sample one attention part per image and build a mask that erases it.

    Args:
        attention_maps: (batch, height, width, num_parts) tensor with a
            statically known shape (the Python loop unrolls over the batch).

    Returns:
        (batch, height, width, 1) tensor: 0 where the selected part's
        activation exceeds a random fraction of its maximum, 1 elsewhere,
        suitable for multiplying against the input images.
    """
    batch_size, height, width, num_parts = attention_maps.shape
    masks = []
    for i in range(batch_size):
        attention_map = attention_maps[i]
        part_weights = tf.reduce_mean(attention_map, axis=[0, 1])
        part_weights = tf.sqrt(part_weights)  # consistent with attention_crop
        part_weights = part_weights / tf.reduce_sum(part_weights)
        # tf.random.categorical expects *log*-probabilities; passing the raw
        # probabilities would sample from softmax(p), i.e. almost uniformly.
        part_logits = tf.math.log(tf.expand_dims(part_weights, axis=0))
        selected_index = tf.random.categorical(part_logits, 1)[0][0]
        # Slice keeps the channel dim so the mask broadcasts over RGB images.
        mask = attention_map[:, :, selected_index:selected_index + 1]

        # Binary mask: zero out the high-attention region, keep the rest.
        threshold = tf.random.uniform((1,), minval=0.2, maxval=0.5)
        mask = tf.where(mask < threshold[0] * tf.reduce_max(mask),
                        tf.ones_like(mask),
                        tf.zeros_like(mask))
        masks.append(mask)
    return tf.stack(masks)


@tf.function
def batch_acrop(attention_maps, images, image_size):
    """Crop each image around a sampled attention part and resize.

    Args:
        attention_maps: (batch, h, w, num_parts) attention maps.
        images: (batch, H, W, C) images to crop from.
        image_size: (height, width) of the resized crops.

    Returns:
        (batch, *image_size, C) tensor of attention-guided crops.
    """
    n = attention_maps.shape[0]
    boxes = attention_crop(attention_maps)
    # One box per image, in order: box i is applied to image i.
    box_indices = tf.range(n, dtype=tf.int32)
    return tf.image.crop_and_resize(images, boxes, box_indices, image_size)


@tf.function
def batch_adrop(attention_maps, images):
    """Erase a sampled attention region from each image.

    Args:
        attention_maps: (batch, h, w, num_parts) attention maps.
        images: (batch, h, w, C) images to mask.

    Returns:
        The images with the sampled high-attention region zeroed out.
    """
    drop_masks = attention_drop(attention_maps)
    return images * drop_masks


if __name__ == '__main__':
    # Smoke test: build the model, run a forward pass, export it, and
    # exercise the attention crop/drop utilities on the resulting maps.
    model = SceneNet(num_classes=24, num_atts=6, name='SceneNet')
    batch = tf.random.normal((2, 224, 224, 3))
    outputs = model(batch)
    tf.saved_model.save(model, './')
    model.summary()
    print(outputs[0].shape, outputs[1].shape, outputs[2].shape)

    att_maps = outputs[2]
    print(attention_crop(att_maps))
    print(attention_drop(att_maps).shape)
    print(batch_acrop(att_maps, batch, (224, 224)).shape)
