import tensorflow as tf
from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, ZeroPadding2D, UpSampling2D, Activation
from tensorflow.keras import Model
from einops import rearrange
from math import sqrt
from functools import partial

# helpers

def exists(val):
    return val is not None

def cast_tuple(val, depth):
    return val if isinstance(val, tuple) else (val,) * depth

# classes

class DsConv2d(tf.keras.layers.Layer):
    # depthwise-separable convolution: pad, depthwise conv, then a pointwise (1x1) projection
    # (dim_in is implied by the input in Keras; kept for parity with the PyTorch signature)
    def __init__(self, dim_in, dim_out, kernel_size, padding, stride = 1, bias = True):
        super().__init__()
        self.net = tf.keras.Sequential([
            ZeroPadding2D(padding),
            DepthwiseConv2D(kernel_size, strides = stride, use_bias = bias),
            Conv2D(dim_out, 1, use_bias = bias)
        ])

    def call(self, x):
        return self.net(x)

class LayerNorm(tf.keras.layers.Layer):
    # layer norm over the channel axis (channels-last) with learnable gain and bias
    def __init__(self, dim, eps = 1e-5):
        super().__init__()
        self.eps = eps
        self.g = self.add_weight(name = 'g', shape = (1, 1, 1, dim), initializer = tf.keras.initializers.Ones(), trainable = True)
        self.b = self.add_weight(name = 'b', shape = (1, 1, 1, dim), initializer = tf.keras.initializers.Zeros(), trainable = True)

    def call(self, x):
        std = tf.math.sqrt(tf.math.reduce_variance(x, axis = -1, keepdims = True))
        mean = tf.reduce_mean(x, axis = -1, keepdims = True)
        return (x - mean) / (std + self.eps) * self.g + self.b

class PreNorm(tf.keras.layers.Layer):
    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = LayerNorm(dim)

    def call(self, x):
        return self.fn(self.norm(x))

class EfficientSelfAttention(tf.keras.layers.Layer):
    # self-attention whose keys/values are spatially downsampled by reduction_ratio
    def __init__(self, dim, heads, reduction_ratio):
        super().__init__()
        self.scale = (dim // heads) ** -0.5
        self.heads = heads

        self.to_q = Conv2D(dim, 1, use_bias = False)
        self.to_kv = Conv2D(dim * 2, reduction_ratio, strides = reduction_ratio, use_bias = False)
        self.to_out = Conv2D(dim, 1, use_bias = False)

    def call(self, x):
        h, w = x.shape[1], x.shape[2]
        heads = self.heads

        q, k, v = (self.to_q(x), *tf.split(self.to_kv(x), num_or_size_splits = 2, axis = -1))
        q, k, v = map(lambda t: rearrange(t, 'b x y (h c) -> (b h) (x y) c', h = heads), (q, k, v))

        sim = tf.einsum('b i d, b j d -> b i j', q, k) * self.scale
        attn = tf.nn.softmax(sim, axis = -1)

        out = tf.einsum('b i j, b j d -> b i d', attn, v)
        out = rearrange(out, '(b h) (x y) c -> b x y (h c)', h = heads, x = h, y = w)
        return self.to_out(out)

class MixFeedForward(tf.keras.layers.Layer):
    def __init__(self, dim, expansion_factor):
        super().__init__()
        hidden_dim = dim * expansion_factor
        self.net = tf.keras.Sequential([
            Conv2D(hidden_dim, 1),
            DsConv2d(hidden_dim, hidden_dim, 3, padding = 1),
            Activation('gelu'),
            Conv2D(dim, 1)
        ])

    def call(self, x):
        return self.net(x)

class Unfold(tf.keras.layers.Layer):
    # extracts overlapping patches, mirroring torch.nn.Unfold: (b, h, w, c) -> (b, num_patches, kernel * kernel * c)
    def __init__(self, kernel, stride, padding):
        super().__init__()
        self.kernel = kernel
        self.stride = stride
        self.zeropadding2d = ZeroPadding2D(padding)

    def call(self, x):
        x = self.zeropadding2d(x)
        x = tf.image.extract_patches(
            x,
            sizes = [1, self.kernel, self.kernel, 1],
            strides = [1, self.stride, self.stride, 1],
            rates = [1, 1, 1, 1],
            padding = 'VALID'
        )
        # flatten the spatial grid of patches, keeping the batch dimension dynamic
        return tf.reshape(x, (-1, x.shape[1] * x.shape[2], x.shape[-1]))
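# shape sanity check (a sketch, not part of the model; the 64x64 RGB input is an arbitrary
# assumed example): Unfold(kernel = 7, stride = 4, padding = 3) pads 64x64 to 70x70, then
# extracts (70 - 7) // 4 + 1 = 16 patches per side, each flattened to 7 * 7 * 3 = 147 values:
#
#   patches = Unfold(7, 4, 3)(tf.random.normal((1, 64, 64, 3)))  # -> shape (1, 256, 147)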
class MiT(tf.keras.layers.Layer):
    # Mix Transformer encoder: four stages of overlapped patch merging, each followed by
    # (efficient self-attention, mix feedforward) blocks
    def __init__(self, channels, dims, heads, ff_expansion, reduction_ratio, num_layers):
        super().__init__()
        stage_kernel_stride_pad = ((7, 4, 3), (3, 2, 1), (3, 2, 1), (3, 2, 1))

        dims = (channels, *dims)
        dim_pairs = list(zip(dims[:-1], dims[1:]))

        self.stages = []

        for (dim_in, dim_out), (kernel, stride, padding), num_layers, ff_expansion, heads, reduction_ratio in zip(dim_pairs, stage_kernel_stride_pad, num_layers, ff_expansion, heads, reduction_ratio):
            get_overlap_patches = Unfold(kernel, stride, padding)
            overlap_patch_embed = Conv2D(dim_out, 1)

            layers = []
            for _ in range(num_layers):
                layers.append([
                    PreNorm(dim_out, EfficientSelfAttention(dim = dim_out, heads = heads, reduction_ratio = reduction_ratio)),
                    PreNorm(dim_out, MixFeedForward(dim = dim_out, expansion_factor = ff_expansion)),
                ])

            self.stages.append([get_overlap_patches, overlap_patch_embed, layers])

    def call(self, x, return_layer_outputs = False):
        h, w = x.shape[1], x.shape[2]

        layer_outputs = []
        for (get_overlap_patches, overlap_embed, layers) in self.stages:
            x = get_overlap_patches(x)

            # recover the patch grid; ratio is the total downsampling relative to the input
            num_patches = x.shape[-2]
            ratio = int(sqrt((h * w) / num_patches))
            x = rearrange(x, 'b (h w) c -> b h w c', h = h // ratio)

            x = overlap_embed(x)
            for (attn, ff) in layers:
                x = attn(x) + x
                x = ff(x) + x

            layer_outputs.append(x)

        return layer_outputs if return_layer_outputs else x

class Segformer(Model):
    def __init__(
        self,
        dims = (32, 64, 160, 256),
        heads = (1, 2, 5, 8),
        ff_expansion = (8, 8, 4, 4),
        reduction_ratio = (8, 4, 2, 1),
        num_layers = 2,
        channels = 3,
        decoder_dim = 256,
        num_classes = 4
    ):
        super().__init__()
        dims, heads, ff_expansion, reduction_ratio, num_layers = map(partial(cast_tuple, depth = 4), (dims, heads, ff_expansion, reduction_ratio, num_layers))
        assert all(map(lambda t: len(t) == 4, (dims, heads, ff_expansion, reduction_ratio, num_layers))), 'only four stages are allowed, all keyword arguments must be either a single value or a tuple of 4 values'

        self.mit = MiT(
            channels = channels,
            dims = dims,
            heads = heads,
            ff_expansion = ff_expansion,
            reduction_ratio = reduction_ratio,
            num_layers = num_layers
        )

        # project each stage output to decoder_dim and upsample back to the stage-1 resolution
        self.to_fused = []
        for i, dim in enumerate(dims):
            self.to_fused.append(tf.keras.Sequential([
                Conv2D(decoder_dim, 1),
                UpSampling2D(2 ** i)
            ]))

        self.to_segmentation = tf.keras.Sequential([
            Conv2D(decoder_dim, 1),
            Conv2D(num_classes, 1)
        ])

    def call(self, x):
        layer_outputs = self.mit(x, return_layer_outputs = True)

        fused = [to_fused(output) for output, to_fused in zip(layer_outputs, self.to_fused)]
        fused = tf.concat(fused, axis = -1)
        return self.to_segmentation(fused)
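# minimal usage sketch (the hyperparameters below are simply the defaults above; the
# 256x256 input size is an arbitrary choice for illustration):
if __name__ == '__main__':
    segformer = Segformer(
        dims = (32, 64, 160, 256),      # dimensions of each stage
        heads = (1, 2, 5, 8),           # heads of each stage
        ff_expansion = (8, 8, 4, 4),    # feedforward expansion factor of each stage
        reduction_ratio = (8, 4, 2, 1), # reduction ratio of each stage for efficient attention
        num_layers = 2,                 # num layers of each stage
        decoder_dim = 256,              # decoder dimension
        num_classes = 4                 # number of segmentation classes
    )

    x = tf.random.normal((1, 256, 256, 3))  # channels-last input
    pred = segformer(x)                     # -> (1, 64, 64, 4), logits at H/4 x W/4
    print(pred.shape)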