import tensorflow as tf

"""Non-local U-Nets for Biomedical Image Segmentation.

Reference: https://arxiv.org/pdf/1812.04103.pdf
"""

class SelfAttention(tf.keras.Model):
    """Global (non-local) self-attention block.

    From "Non-local U-Nets for Biomedical Image Segmentation"
    (https://arxiv.org/pdf/1812.04103.pdf).

    The query projection selects the output spatial size:
      * ``type="same"``: 1x1 conv, resolution unchanged.
      * ``type="down"``: 3x3 stride-2 conv, resolution halved.
      * ``type="up"``:   3x3 stride-2 transposed conv, resolution doubled.
    Keys and values are always computed at the input resolution, so every
    output position attends over every input position.
    """

    def __init__(self, key_f, v_f, out_f, type="up"):
        """
        Args:
            key_f: channel count of the query and key projections (they must
                match so that Q·Kᵀ is well defined).
            v_f: channel count of the value projection.
            out_f: channel count of the final 1x1 output convolution.
            type: one of "same", "down", "up" — chooses the query projection.
                (Shadows the builtin ``type``; kept for caller compatibility.)
        """
        super(SelfAttention, self).__init__()
        if type == 'same':
            self.Q = tf.keras.layers.Conv2D(filters=key_f, activation='relu', kernel_size=1, strides=1, padding='same')
        if type == 'down':
            self.Q = tf.keras.layers.Conv2D(filters=key_f, activation='relu', kernel_size=3, strides=2, padding='same')
        if type == 'up':
            self.Q = tf.keras.layers.Conv2DTranspose(filters=key_f, activation='relu', kernel_size=3, strides=2,
                                                     padding='same')
        self.K = tf.keras.layers.Conv2D(filters=key_f, activation='relu', kernel_size=1, strides=1, padding='same')
        self.V = tf.keras.layers.Conv2D(filters=v_f, activation='relu', kernel_size=1, strides=1, padding='same')

        self.outputConv = tf.keras.layers.Conv2D(filters=out_f, activation='relu', kernel_size=1, strides=1,
                                                 padding='same')
        self.bn = tf.keras.layers.BatchNormalization()
        self._scale = key_f ** 0.5  # sqrt(d_k) dot-product scaling
        self.key_f = key_f
        self.v_f = v_f
        self.dropout = tf.keras.layers.Dropout(0.5)

    @tf.function
    def call(self, inputs, training=None, mask=None):
        """Apply global self-attention to an NHWC feature map.

        NOTE: memory-heavy (the attention matrix is O(N_q * N_k)); consider
        shrinking the feature map before entering this block.
        """
        shape = tf.shape(inputs)
        # Forward the training flag so BN uses the right statistics.
        inputs = self.bn(inputs, training=training)
        q = self.Q(inputs)  # (B, Hq, Wq, key_f) — Hq/Wq depend on `type`
        v = self.V(inputs)  # (B, H, W, v_f)
        k = self.K(inputs)  # (B, H, W, key_f)
        h, w = tf.shape(q)[1], tf.shape(q)[2]
        # Flatten spatial dims keeping channels LAST: Conv2D output is NHWC,
        # so (B, -1, C) is the layout-correct flattening. (The previous
        # (B, C, -1) reshape silently interleaved spatial and channel data.)
        k = tf.reshape(k, shape=(shape[0], -1, self.key_f))  # (B, Nk, key_f)
        q = tf.reshape(q, shape=(shape[0], -1, self.key_f))  # (B, Nq, key_f)
        v = tf.reshape(v, shape=(shape[0], -1, self.v_f))    # (B, Nk, v_f)

        # Scaled dot-product attention: scores over keys, (B, Nq, Nk).
        score = tf.matmul(q, k, transpose_b=True) / self._scale
        score = tf.keras.activations.softmax(score)  # softmax over the key axis
        score = self.dropout(score, training=training)  # attention dropout
        out = tf.matmul(score, v)  # (B, Nq, v_f)
        # Nq == h * w by construction, so this restores the NHWC map.
        out = tf.reshape(out, shape=(shape[0], h, w, self.v_f))

        return self.outputConv(out)

class NonLolcalAttenUnet(tf.keras.Model):
    """U-Net with non-local self-attention bottleneck and upsampling.

    Designed for input patches of size 32 (two stride-2 downsamples followed
    by two stride-2 upsamples return to the input resolution). Output is a
    single-channel sigmoid mask at input resolution.
    """

    @staticmethod
    def _pre_act_block(filters, first_stride=1):
        """Build a pre-activation residual body: BN→ReLU6→Conv→BN→ReLU6→Conv."""
        block = tf.keras.Sequential()
        block.add(tf.keras.layers.BatchNormalization())
        block.add(tf.keras.layers.ReLU(6.0))
        block.add(tf.keras.layers.Conv2D(filters=filters, kernel_size=3,
                                         strides=first_stride, padding='same'))
        block.add(tf.keras.layers.BatchNormalization())
        block.add(tf.keras.layers.ReLU(6.0))
        block.add(tf.keras.layers.Conv2D(filters=filters, kernel_size=3,
                                         padding='same'))
        return block

    def __init__(self, **kwargs):
        super(NonLolcalAttenUnet, self).__init__()

        # Patch embedding: lift the raw input to 32 channels.
        self.path_block = tf.keras.layers.Conv2D(filters=32, kernel_size=3, strides=1, padding='same')

        # Stride-1 residual bodies at input resolution (32 channels).
        self.inputsBlock = self._pre_act_block(32)
        self.outputsBlock = self._pre_act_block(32)

        # Encoder: two stride-2 stages, each with a 1x1 projection shortcut.
        self.downsampleBlock01 = self._pre_act_block(64, first_stride=2)
        self.downsampleBlock01_res = tf.keras.layers.Conv2D(filters=64, kernel_size=1, strides=2, padding='same')

        self.downsampleBlock02 = self._pre_act_block(128, first_stride=2)
        self.downsampleBlock02_res = tf.keras.layers.Conv2D(filters=128, kernel_size=1, strides=2, padding='same')

        # Bottleneck: global self-attention at the coarsest resolution.
        self.Bottom_block = SelfAttention(key_f=64, v_f=64, out_f=128, type="same")

        # Decoder: attention-based 2x upsampling with a transposed-conv shortcut.
        self.up01 = SelfAttention(key_f=32, v_f=32, out_f=64, type="up")
        self.up01_conv = tf.keras.layers.Conv2DTranspose(filters=64, activation='linear', kernel_size=3, strides=2,
                                                         padding='same')
        self.up02 = SelfAttention(key_f=16, v_f=16, out_f=32, type="up")
        self.up02_conv = tf.keras.layers.Conv2DTranspose(filters=32, activation='linear', kernel_size=3, strides=2,
                                                         padding='same')

        # 1x1 sigmoid head producing a single-channel segmentation mask.
        self.final_conv = tf.keras.layers.Conv2D(filters=1, kernel_size=1, activation='sigmoid', padding='same')

    @tf.function
    def call(self, inputs, training=None, mask=None):
        """Run the encoder → attention bottleneck → decoder pipeline.

        The `training` flag is forwarded explicitly so BatchNormalization and
        Dropout inside the sub-blocks switch modes correctly.
        """
        inputs = self.path_block(inputs)

        # Input residual block (32 channels, full resolution).
        conv1 = self.inputsBlock(inputs, training=training)
        conv1 += inputs

        # First downsample: 32 -> 64 channels, half resolution.
        conv2 = self.downsampleBlock01(conv1, training=training)
        conv2 += self.downsampleBlock01_res(conv1)

        # Second downsample: 64 -> 128 channels, quarter resolution.
        conv3 = self.downsampleBlock02(conv2, training=training)
        conv3 += self.downsampleBlock02_res(conv2)

        # Bottleneck residual block with global (non-local) context.
        bottom = self.Bottom_block(conv3, training=training)
        bottom += conv3

        # Decoder stage 1: up to 64 channels at half resolution.
        up1 = self.up01(bottom, training=training)
        up1 += self.up01_conv(bottom)

        # Decoder stage 2: merge the encoder skip, up to 32 channels.
        skip = up1 + conv2
        up2 = self.up02(skip, training=training)
        up2 += self.up02_conv(skip)

        # Output residual block and sigmoid head.
        out_merge = up2 + conv1
        output = self.outputsBlock(out_merge, training=training)
        output += out_merge

        # Debug print removed: it fired at @tf.function trace time and
        # contributed nothing at graph-execution time.
        return self.final_conv(output)
