import tensorflow as tf

"""https://arxiv.org/pdf/1804.03999.pdf"""
"""Attention U-Net:Learning Where to Look for the Pancreas"""
class Encode(tf.keras.Model):
    """Encoder stage: two 3x3 convs + BN + ReLU, optionally followed by
    2x2 max-pooling and dropout for downsampling.

    call() returns ``(features, pooled)`` when ``pool=True``, otherwise
    just ``features``.
    """

    def __init__(self, filters, p=0.2, pool=True):
        """
        Args:
            filters: number of filters for both conv layers.
            p: dropout rate applied after pooling (only used when pool=True).
            pool: whether to append max-pooling + dropout.
        """
        super().__init__()
        self.c1 = tf.keras.layers.Conv2D(filters, 3, padding='same', activation='relu')
        self.c2 = tf.keras.layers.Conv2D(filters, 3, padding='same')
        self.bn = tf.keras.layers.BatchNormalization(axis=3)
        self.act = tf.keras.layers.ReLU()
        self.pty = pool
        if pool:
            self.pool = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
            self.dropout = tf.keras.layers.Dropout(p)

    def call(self, inputs, training=None, mask=None):
        x = self.c1(inputs)
        x = self.c2(x)
        # Fix: pass `training` explicitly so BN uses batch statistics only
        # while training and its moving averages at inference.
        x = self.bn(x, training=training)
        x = self.act(x)
        if self.pty:
            p = self.pool(x)
            # Dropout is active only when training=True.
            p = self.dropout(p, training=training)
            return x, p
        return x


class AttenBlock(tf.keras.Model):
    """Additive attention gate (Attention U-Net).

    call() takes ``inputs = [l, h]`` where ``l`` is the gating signal
    (coarser resolution, half the spatial size of ``h``) and ``h`` is the
    skip-connection feature map. Returns ``h`` scaled element-wise by a
    learned single-channel attention map, then batch-normalized.
    """

    def __init__(self, filters):
        """
        Args:
            filters: channel count of the intermediate 1x1 projections.
        """
        super(AttenBlock, self).__init__()
        # 1x1 projection of the gating signal.
        self.W_l = tf.keras.Sequential()
        self.W_l.add(tf.keras.layers.Conv2D(filters=filters, kernel_size=1, padding='same'))

        # Strided 1x1 projection brings the skip features down to the
        # gating signal's resolution so the two can be added.
        self.W_h = tf.keras.Sequential()
        self.W_h.add(tf.keras.layers.Conv2D(filters=filters, kernel_size=1, strides=2, padding='same'))

        # ReLU -> 1x1 conv -> sigmoid yields attention coefficients in [0, 1].
        self.phi = tf.keras.Sequential()
        self.phi.add(tf.keras.layers.ReLU())
        self.phi.add(tf.keras.layers.Conv2D(filters=1, kernel_size=1, padding='same'))
        self.phi.add(tf.keras.layers.Activation('sigmoid'))
        # Fix: build the upsampler once here instead of instantiating a new
        # layer object on every call() invocation (Keras subclassing requires
        # layers to be created in __init__).
        self.up = tf.keras.layers.UpSampling2D(size=(2, 2))
        self.bn = tf.keras.layers.BatchNormalization(axis=3)

    @tf.function
    def call(self, inputs, training=None, mask=None):
        l, h = inputs
        wl = self.W_l(l)
        wh = self.W_h(h)
        coef = self.phi(wl + wh)
        # Restore the attention map to the skip connection's resolution.
        coef = self.up(coef)
        coef = coef * h
        # Fix: propagate `training` so BN behaves correctly at inference.
        coef = self.bn(coef, training=training)
        return coef


class Decode(tf.keras.Model):
    """Decoder stage: upsample with a transposed conv, attention-gate the
    skip connection, concatenate, then refine with a two-conv block.

    call() takes ``inputs = [h_layer, l_layer]``: the deeper (coarser)
    features and the encoder skip connection, respectively.
    """

    def __init__(self, filters, p=0.2):
        """
        Args:
            filters: filter count for the transposed conv and conv block.
            p: dropout rate applied after concatenation (default 0.2,
               matching the previous hard-coded value).
        """
        super().__init__()
        # Transposed conv doubles the spatial resolution.
        self.trans = tf.keras.layers.Conv2DTranspose(filters, 2, strides=2, padding='same')
        self.atten = AttenBlock(filters)
        # Fix: create Dropout once here instead of constructing a fresh
        # layer object inside every call() invocation.
        self.dropout = tf.keras.layers.Dropout(p)

        self.conv_block = tf.keras.Sequential()
        self.conv_block.add(tf.keras.layers.Conv2D(filters, 3, padding='same', activation='relu'))
        self.conv_block.add(tf.keras.layers.Conv2D(filters, 3, padding='same'))
        self.conv_block.add(tf.keras.layers.BatchNormalization(axis=3))
        self.conv_block.add(tf.keras.layers.ReLU())

    @tf.function
    def call(self, inputs, training=None, mask=None):
        h_layer, l_layer = inputs
        x = self.trans(h_layer)
        # Attention-gate the skip connection, using the deeper features
        # as the gating signal.
        concat_layer = self.atten([h_layer, l_layer], training=training)
        x = tf.concat([x, concat_layer], axis=-1)
        # Fix: pass `training` so dropout is disabled at inference.
        x = self.dropout(x, training=training)
        x = self.conv_block(x, training=training)
        return x


class AttenUnet(tf.keras.Model):
    """Attention U-Net: a U-Net whose skip connections are filtered by
    additive attention gates before being concatenated in the decoder.

    Output is a single-channel sigmoid map with the same spatial size as
    the input (binary segmentation).
    """

    def __init__(self):
        super(AttenUnet, self).__init__()

        # Contracting path: each stage halves resolution, doubles channels.
        self.downsample01 = Encode(64)
        self.downsample02 = Encode(128)
        self.downsample03 = Encode(256)
        self.downsample04 = Encode(512)

        # Bottleneck: conv block only, no pooling.
        self.bottom = Encode(1024, pool=False)

        # Expanding path with attention-gated skip connections.
        self.upsample01 = Decode(512)
        self.upsample02 = Decode(256)
        self.upsample03 = Decode(128)
        self.upsample04 = Decode(64)

        # 1x1 conv + sigmoid for per-pixel binary prediction.
        self.output_conv = tf.keras.layers.Conv2D(1, 1, activation='sigmoid')

    @tf.function
    def call(self, inputs, training=None, mask=None):
        # Fix: forward the `training` flag to every sub-model so their
        # BatchNormalization/Dropout layers switch modes correctly.
        x1, p1 = self.downsample01(inputs, training=training)
        x2, p2 = self.downsample02(p1, training=training)
        x3, p3 = self.downsample03(p2, training=training)
        x4, p4 = self.downsample04(p3, training=training)
        y1 = self.bottom(p4, training=training)
        y2 = self.upsample01([y1, x4], training=training)
        y3 = self.upsample02([y2, x3], training=training)
        y4 = self.upsample03([y3, x2], training=training)
        y5 = self.upsample04([y4, x1], training=training)
        out = self.output_conv(y5)

        return out


if __name__ == '__main__':
    # Smoke test: push one image through the network and check the output shape.
    model = AttenUnet()

    # Load and preprocess a single RGB test image.
    raw = tf.io.read_file("../UNet/SKU130770.png")
    img = tf.image.decode_png(raw, channels=3)
    img = tf.image.resize(images=img, size=[256, 256])
    img = img / 127.5 - 1  # rescale pixel values to [-1, 1]
    batch = tf.expand_dims(img, 0)  # add the batch dimension

    prediction = model(batch)
    print(prediction.shape)

    model.summary()