import tensorflow as tf
from vit_keras import vit
import numpy as np

"""TransUNet: Transformers Make Strong Encoders for Medical Image Segmentation"""
"""https://arxiv.org/pdf/2102.04306.pdf"""


# tf.config.experimental_run_functions_eagerly(True)


class PatchEmbed(tf.keras.layers.Layer):
    """Split an image into non-overlapping patches and linearly embed each.

    Implemented as a Conv2D whose kernel size equals its stride (patch_size),
    which is equivalent to flattening every patch and pushing it through a
    shared Dense layer.
    """

    def __init__(self, IMG_SIZE, patch_size, embed_dim):
        super(PatchEmbed, self).__init__()
        # Number of patches per image: (IMG_SIZE / patch_size) ** 2.
        # Attribute name kept as `num_path` for backward compatibility.
        self.num_path = (IMG_SIZE // patch_size) ** 2
        self.IMG_SIZE = IMG_SIZE
        self.patch_size = patch_size
        # One output value per patch; kernel == stride == patch size.
        self.proj = tf.keras.layers.Conv2D(filters=embed_dim, kernel_size=patch_size, strides=patch_size)

    @tf.function
    def call(self, inputs, **kwargs):
        """Project [B, H, W, C] images to [B, num_patches, embed_dim] tokens."""
        B, H, W, C = inputs.shape
        assert H == self.IMG_SIZE and W == self.IMG_SIZE, "image shape not match"
        x = self.proj(inputs)  # [B, grid, grid, embed_dim]
        # Use -1 for the batch axis: under tf.function the static batch size
        # B may be None, and tf.reshape rejects None entries in `shape`
        # (the original passed B directly, which breaks for dynamic batches).
        x = tf.reshape(x, shape=[-1, self.num_path, x.shape[-1]])
        return x


class MSA(tf.keras.layers.Layer):
    """Pre-norm multi-head self-attention block with a residual MLP.

    Structure: LayerNorm -> multi-head attention -> residual add ->
    LayerNorm -> MLP (Dense-ReLU-Dropout-Dense) -> residual add.
    """

    def __init__(self, dim, hidden=None, num_head=12):
        super(MSA, self).__init__()

        hidden = hidden if hidden else dim
        self.Q = tf.keras.layers.Dense(dim)
        self.K = tf.keras.layers.Dense(dim)
        self.V = tf.keras.layers.Dense(dim)
        # Scale by sqrt(per-head dim), per "Attention Is All You Need";
        # the original divided by sqrt(dim), over-damping each head's logits.
        self.scale = tf.math.sqrt(tf.cast(dim // num_head, tf.float32))
        self.proj = tf.keras.Sequential()
        self.proj.add(tf.keras.layers.Dense(hidden, activation='relu'))
        self.proj.add(tf.keras.layers.Dropout(0.2))
        self.proj.add(tf.keras.layers.Dense(dim))
        self.OU = tf.keras.layers.LayerNormalization()
        self.IN = tf.keras.layers.LayerNormalization()
        # Build the attention dropout once here; the original instantiated a
        # fresh Dropout layer inside call() on every invocation.
        self.attn_drop = tf.keras.layers.Dropout(0.2)
        self.num_head = num_head

    @tf.function
    def call(self, inputs, **kwargs):
        """Apply self-attention + MLP to tokens of shape [B, N, C]."""
        # C must be statically known to split heads; N is taken from the
        # static shape as well (assumes a fixed sequence length per graph).
        B, N, C = inputs.shape
        head_dim = C // self.num_head
        x = self.IN(inputs)
        q = self.Q(x)
        k = self.K(x)
        v = self.V(x)
        # [B, N, C] -> [B, num_head, N, head_dim]. The original left the
        # tensors as [B, N, H, hd] and matmul'ed the last two axes, which
        # computed attention ACROSS HEADS instead of across tokens.
        q = tf.transpose(tf.reshape(q, [-1, N, self.num_head, head_dim]), perm=[0, 2, 1, 3])
        k = tf.transpose(tf.reshape(k, [-1, N, self.num_head, head_dim]), perm=[0, 2, 1, 3])
        v = tf.transpose(tf.reshape(v, [-1, N, self.num_head, head_dim]), perm=[0, 2, 1, 3])
        score = tf.matmul(q, k, transpose_b=True) / self.scale  # [B, H, N, N]
        score = tf.keras.activations.softmax(score)
        score = self.attn_drop(score)
        attn = tf.matmul(score, v)  # [B, H, N, head_dim]
        # Merge heads back: [B, H, N, hd] -> [B, N, C]; -1 keeps the batch
        # axis dynamic under tf.function.
        attn = tf.reshape(tf.transpose(attn, perm=[0, 2, 1, 3]), [-1, N, C])
        # Residual around attention (uses the normalized input, matching the
        # original wiring), then a full residual around the MLP — the
        # original added only the raw attention output `v` here, dropping
        # the skip-connection term from the MLP residual.
        out = attn + x
        out = self.proj(self.OU(out)) + out

        return out


class Conv2DReLu(tf.keras.layers.Layer):
    """Conv2D -> BatchNorm -> ReLU, with L2 weight decay on the conv kernel."""

    def __init__(self, filters, kernel_size, padding="same", strides=1, **kwargs):
        super().__init__(**kwargs)
        # Keep the constructor arguments around for introspection/serialization.
        self.filters = filters
        self.kernel_size = kernel_size
        self.padding = padding
        self.strides = strides
        self.conv = tf.keras.layers.Conv2D(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            kernel_regularizer=tf.keras.regularizers.l2(0.0001),
        )
        self.bn = tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5)

    @tf.function
    def call(self, inputs, **kwargs):
        """Run the conv-bn-relu pipeline on a [B, H, W, C] feature map."""
        return tf.nn.relu(self.bn(self.conv(inputs)))


class DecoderBlock(tf.keras.layers.Layer):
    """Decoder stage: 2x bilinear upsample, optional skip concat, two 3x3 conv-BN-ReLU."""

    def __init__(self, filters, **kwargs):
        super(DecoderBlock, self).__init__(**kwargs)
        # Output channel count for both conv stages.
        self.filters = filters

    def build(self, input_shape):
        self.conv1 = Conv2DReLu(filters=self.filters, kernel_size=3)
        self.conv2 = Conv2DReLu(filters=self.filters, kernel_size=3)
        self.upsampling = tf.keras.layers.UpSampling2D(size=2, interpolation="bilinear")
        # Mark the layer as built; the original omitted this call, which
        # Keras tolerates but the Layer API documents as required.
        super(DecoderBlock, self).build(input_shape)

    def call(self, inputs, skip=None):
        """Upsample `inputs` 2x and refine; concat `skip` (encoder feature) if given.

        Args:
            inputs: [B, H, W, C] feature map from the previous decoder stage.
            skip: optional [B, 2H, 2W, C'] encoder feature map to concatenate
                on the channel axis after upsampling.
        """
        x = self.upsampling(inputs)
        if skip is not None:
            x = tf.concat([x, skip], axis=-1)
        x = self.conv1(x)
        x = self.conv2(x)
        return x


class CUP(tf.keras.layers.Layer):
    """Cascaded Upsampler (CUP) decoder head from the TransUNet paper.

    NOTE(review): placeholder only — no sublayers or call() are implemented
    yet, so instantiating this yields an identity-less, no-op layer.
    """

    def __init__(self):
        super(CUP, self).__init__()


class TranUnet(tf.keras.Model):
    """TransUNet with a ViT-B/16 encoder and a naive 16x bilinear-upsample head.

    Encodes the image with a pretrained ViT, reshapes the patch tokens back
    into a 2-D grid, projects to a single sigmoid channel, and upsamples to
    the input resolution.
    """

    def __init__(self, IMG_SIZE):
        super(TranUnet, self).__init__()

        base = vit.vit_b16(
            image_size=IMG_SIZE,
            # activation='sigmoid',
            pretrained=True,
            include_top=False,
            pretrained_top=False
        )
        # Expose the final encoder LayerNorm output (token sequence).
        self.extra_feature = tf.keras.Model(base.inputs, base.get_layer("Transformer/encoder_norm").output)
        # 1x1 conv as a per-pixel classifier producing a sigmoid mask.
        self.decode = tf.keras.layers.Conv2D(filters=1, kernel_size=1, activation='sigmoid')
        # ViT-B/16 downsamples by the 16-pixel patch size; upsample back.
        self.upsampe = tf.keras.layers.UpSampling2D(size=16, interpolation='bilinear')

        self.patch_size = 16
        self.embed_dim = 768
        # Patches per side. The original reshaped with `patch_size` (16) as
        # the grid size, which only works when IMG_SIZE == 256; for e.g.
        # IMG_SIZE=224 the grid is 14x14 and that reshape fails.
        self.grid_size = IMG_SIZE // self.patch_size

    @tf.function
    def call(self, inputs, training=None, mask=None):
        """Segment [B, IMG_SIZE, IMG_SIZE, 3] images into [B, IMG_SIZE, IMG_SIZE, 1] masks."""
        # Drop the leading class token, keep only patch tokens.
        y = self.extra_feature(inputs)[:, 1:, :]
        # Tokens back to a spatial grid; -1 keeps the batch axis dynamic.
        y = tf.reshape(y, shape=[-1, self.grid_size, self.grid_size, self.embed_dim])
        y = self.decode(y)
        y = self.upsampe(y)
        return y


class TransUent_HYbirdT(tf.keras.Model):
    """Hybrid (CNN + Transformer) TransUNet variant.

    NOTE(review): placeholder only — call() is unimplemented and returns
    None; the class cannot be used for inference yet.
    """

    def __init__(self):
        super(TransUent_HYbirdT, self).__init__()

    def call(self, inputs, training=None, mask=None):
        pass


if __name__ == '__main__':
    IMG_SIZE = 256
    model = TranUnet(IMG_SIZE)
    # raw = tf.io.read_file("../UNet/SKU130770.png")
    # image = tf.image.decode_png(raw, channels=3)
    # image = tf.image.resize(images=image, size=[224, 224])

    # Fake a batch of RGB images with pixel values in [0, 255), then
    # normalize to [-1, 1]. The original sampled from [0, 1) and still
    # divided by 127.5, which collapsed every input to a sliver near -1.
    image = np.random.random_sample(size=[10, 256, 256, 3]) * 255.0
    image = image / 127.5 - 1
    # image = tf.expand_dims(image, 0)

    res = model(image)
    print(res.shape)  # expected: (10, 256, 256, 1)

    model.summary()
