from typing import List, Optional, Union
import numpy as np

import mindspore as ms
from mindspore import Tensor, nn
from mindspore import ops
from mindspore import ops as P
from mindspore.common.initializer import Normal, initializer, TruncatedNormal, XavierUniform
from mindspore.common.parameter import Parameter


class Identity(nn.Cell):
    """No-op cell that returns its input unchanged.

    Used as a placeholder where an optional layer (LayerScale, DropPath,
    classifier head, fc-norm) is disabled.
    """

    def construct(self, x):
        return x

class DropPath(nn.Cell):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of
    residual blocks.

    During training each sample in the batch is zeroed with probability
    ``drop_prob`` and survivors are rescaled by ``1 / keep_prob`` so the
    expected activation is unchanged. At inference the input passes through
    untouched.
    """

    def __init__(self, drop_prob=0):
        super().__init__()
        self.keep_prob = 1 - drop_prob
        # Bug fix: the original had `seed = min(seed, 0)`, which references an
        # undefined local `seed` and raised NameError on construction.
        self.rand = P.UniformReal()
        self.shape = P.Shape()
        self.floor = P.Floor()

    def construct(self, x):
        # Skip the mask entirely when nothing can be dropped; with
        # keep_prob == 1, floor(rand + 1) is always 1 so this is equivalent.
        if self.training and self.keep_prob < 1.0:
            x_shape = self.shape(x)
            # Per-sample Bernoulli mask, broadcast over (tokens, channels):
            # rand in [0, 1) + keep_prob, floored, gives 1 w.p. keep_prob.
            random_tensor = self.rand((x_shape[0], 1, 1))
            random_tensor = self.floor(random_tensor + self.keep_prob)
            x = x / self.keep_prob
            x = x * random_tensor
        return x

class Mlp(nn.Cell):
    """Transformer feed-forward block: Dense -> activation -> Dense, with
    dropout after each projection.

    Hidden and output widths default to ``in_features`` when not given.
    """

    def __init__(
        self,
        in_features: int,
        hidden_features: Optional[int] = None,
        out_features: Optional[int] = None,
        act_layer: nn.Cell = nn.GELU,
        drop: float = 0.,
    ):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Dense(in_features, hidden_features, weight_init=TruncatedNormal(sigma=0.02))
        self.act = act_layer()
        self.fc2 = nn.Dense(hidden_features, out_features, weight_init=TruncatedNormal(sigma=0.02))
        # nn.Dropout here takes the keep probability, matching the rest of the file.
        self.drop = nn.Dropout(1 - drop)

    def construct(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))


class Attention(nn.Cell):
    """Multi-head self-attention over token sequences of shape (B, N, C).

    Queries, keys and values come from a single fused Dense projection;
    attention weights are softmax(q @ k^T * scale) with optional dropout.
    """

    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = True,
        attn_drop: float = 0.,
        proj_drop: float = 0.
    ):
        super().__init__()
        self.num_heads = num_heads
        # 1/sqrt(head_dim) keeps softmax logits in a stable range.
        self.scale = Tensor((dim // num_heads) ** -0.5)

        self.qkv = nn.Dense(dim, dim * 3, weight_init=TruncatedNormal(sigma=0.02), has_bias=qkv_bias)
        self.attn_drop = nn.Dropout(1 - attn_drop)
        self.proj = nn.Dense(dim, dim, weight_init=TruncatedNormal(sigma=0.02))
        self.proj_drop = nn.Dropout(1 - proj_drop)

        self.mul = ops.Mul()
        self.reshape = ops.Reshape()
        self.transpose = ops.Transpose()
        self.attn_matmul_v = ops.BatchMatMul()
        self.q_matmul_k = ops.BatchMatMul(transpose_b=True)
        self.softmax = nn.Softmax(axis=-1)

    def construct(self, x):
        batch, tokens, channels = x.shape
        head_dim = channels // self.num_heads

        # Fused projection, then split into per-head q/k/v:
        # (B, N, 3C) -> (3, B, heads, N, head_dim)
        packed = self.reshape(self.qkv(x), (batch, tokens, 3, self.num_heads, head_dim))
        packed = self.transpose(packed, (2, 0, 3, 1, 4))
        query, key, value = packed[0], packed[1], packed[2]

        scores = self.mul(self.q_matmul_k(query, key), self.scale)
        weights = self.attn_drop(self.softmax(scores))

        # Weighted sum of values, heads merged back into the channel axis.
        context = self.attn_matmul_v(weights, value)
        context = self.transpose(context, (0, 2, 1, 3))
        context = self.reshape(context, (batch, tokens, channels))
        return self.proj_drop(self.proj(context))


class LayerScale(nn.Cell):
    """Learnable per-channel scaling of residual branches (CaiT-style).

    Multiplies the input by a trainable vector ``gamma`` of length ``dim``,
    initialized to ``init_values``.
    """

    def __init__(self, dim, init_values=1e-5):
        super().__init__()
        self.gamma = Parameter(init_values * P.ones((dim,)), name='gamma')

    def construct(self, x):
        # Bug fix: this was named `forward` (PyTorch convention). MindSpore
        # Cells dispatch __call__ to `construct`, so the scaling was never
        # executed and calling the cell raised from the nn.Cell base class.
        return self.gamma * x

class Block(nn.Cell):
    """Transformer encoder block: pre-norm attention and MLP with residuals.

    Each residual branch is optionally scaled by LayerScale (when
    ``init_values`` is given) and stochastically dropped by DropPath
    (when ``drop_path`` > 0).
    """

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, init_values=None):
        super().__init__()
        # Bug fix: LayerScale requires `dim` as its first argument; the
        # original called LayerScale(init_values=...) which raised TypeError
        # whenever init_values was set.
        self.ls1 = LayerScale(dim, init_values=init_values) if init_values else Identity()
        self.norm1 = norm_layer((dim,))
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity()

        self.ls2 = LayerScale(dim, init_values=init_values) if init_values else Identity()
        self.norm2 = norm_layer((dim,))
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def construct(self, x):
        x = x + self.drop_path(self.ls1(self.attn(self.norm1(x))))
        x = x + self.drop_path(self.ls2(self.mlp(self.norm2(x))))
        return x


class PatchEmbed(nn.Cell):
    """3D image-to-patch embedding.

    Splits a volume of shape (B, C, H, W, D) into non-overlapping patches and
    projects each patch to ``embed_dim``, producing (B, num_patches, embed_dim).
    Two equivalent projection modes:
      - 'conv': strided nn.Conv3d whose weight is re-initialized to behave
        like a linear layer on flattened patches.
      - 'mlp': explicit reshape/transpose patchify followed by nn.Dense.
    """

    def __init__(self, img_size=(96, 96, 96), patch_size=(16, 16, 16), in_chans=1, embed_dim=768, embed_type='mlp'):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        assert np.all([s % p == 0 for s, p in zip(img_size, patch_size)]), 'image size must be divisible by patch size'
        # Total patch count = prod over the three spatial axes of (size // patch).
        self.num_patches = int(np.prod([s // p for s, p in zip(img_size, patch_size)]))
        
        assert embed_type in ('conv', 'mlp')
        self.embed_type = embed_type
        self.embed_dim = embed_dim
        
        self.reshape = ops.Reshape()
        self.transpose = ops.Transpose()
        if embed_type == 'conv':
            self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, has_bias=True)
            # initialize as linear
            # Xavier-init a 2D (out, in*k*k*k) matrix, then fold it back into
            # the 5D conv weight so the conv matches an xavier Dense on patches.
            weight_shape = self.proj.weight.shape
            weight = initializer('xavier_uniform', (weight_shape[0], int(np.prod(weight_shape[1:]))))
            self.proj.weight.set_data(self.reshape(weight, weight_shape))
            
        else:
            # Flattened patch length: channels * patch volume.
            self.patch_dim = in_chans * int(np.prod(self.patch_size))
            self.proj = nn.Dense(self.patch_dim, embed_dim, weight_init='xavier_uniform')

    def construct(self, x):
        if self.embed_type == 'conv':
            # Conv stride == kernel == patch size: output spatial dims are the
            # patch grid; flatten it and move channels last.
            x = self.proj(x)
            b, c, h, w, d = x.shape 
            x = self.reshape(x, (b, c, h * w * d))
            x = self.transpose(x, (0, 2, 1))
        else:
            # Patchify: split each spatial axis into (grid, patch) pairs ...
            b, c, h, w, d = x.shape
            x = self.reshape(x, (
                b, c, 
                h//self.patch_size[0], self.patch_size[0],
                w//self.patch_size[1], self.patch_size[1],
                d//self.patch_size[2], self.patch_size[2]))
            # ... gather grid axes before (channel, patch) axes so each row of
            # the flattened tensor is one contiguous patch of length patch_dim.
            x = self.transpose(x, (0, 2, 4, 6, 1, 3, 5, 7))
            x = self.reshape(x, (b * self.num_patches, self.patch_dim))
            x = self.proj(x)
            x = self.reshape(x, (b, self.num_patches, self.embed_dim))
        return x


def init(init_type, shape, dtype, name, requires_grad):
    """Build a named ``Parameter`` whose data comes from ``init_type``.

    ``init_type`` may be a string (e.g. 'xavier_uniform') or an initializer
    instance accepted by ``mindspore.common.initializer.initializer``.
    """
    data = initializer(init_type, shape, dtype).init_data()
    return Parameter(data, name=name, requires_grad=requires_grad)


class VisionTransformer(nn.Cell):
    """ Vision Transformer for 3D volumes.

    Pipeline: PatchEmbed -> prepend CLS token -> add learned position
    embedding -> `depth` Transformer Blocks -> LayerNorm -> head.
    With ``global_pool`` the head consumes the mean of the patch tokens
    (fc-normed); otherwise it consumes the CLS token.
    ``num_classes=0`` replaces the classifier head with Identity, so the
    model returns features.
    """
    def __init__(self, img_size=(96, 96, 96), patch_size=(16, 16, 16), in_chans=1, num_classes=0, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=False,  drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, global_pool=False, init_values=None, embed_type='mlp', **kwargs):
        super().__init__()
        assert len(img_size) == 3 and len(patch_size) == 3
        
        self.num_features = self.embed_dim = embed_dim
        self.global_pool = global_pool

        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, embed_type=embed_type)
        self.num_patches = self.patch_embed.num_patches

        # Learnable CLS token, tiled across the batch in forward_features.
        self.cls_token = init(init_type=TruncatedNormal(sigma=.02),
                                  shape=(1, 1, embed_dim),
                                  dtype=ms.float32,
                                  name="cls_token",
                                  requires_grad=True)
        self.num_prefix_tokens = 1
        # Position embedding covers num_patches + 1 slots (CLS token first).
        self.pos_embed = init(init_type=TruncatedNormal(sigma=.02),
                                      shape=(1, self.num_patches + 1, embed_dim),
                                      dtype=ms.float32,
                                      name="pos_embed",
                                      requires_grad=True)
        # nn.Dropout takes the keep probability here.
        self.pos_drop = nn.Dropout(1-drop_rate)

        dpr = np.linspace(0, drop_path_rate, depth)  # stochastic depth decay rule

        self.blocks = nn.SequentialCell([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values)
            for i in range(depth)])
        self.norm = norm_layer((embed_dim,))
        # Extra norm applied after pooling only in global-pool mode.
        self.fc_norm = norm_layer((embed_dim,)) if self.global_pool else Identity()

        # Classifier head
        self.head = nn.Dense(embed_dim, num_classes, weight_init=TruncatedNormal(sigma=0.02)) if num_classes > 0 else Identity()
        self.tile = ops.Tile()
        self.concat = ops.Concat(axis=1)
        self.mean = ops.ReduceMean(keep_dims=False)


    def forward_features(self, x):
        """Encode a volume into normed token features (B, 1 + num_patches, C)."""
        x = self.patch_embed(x)
        
        # Broadcast the single CLS token to every sample in the batch.
        cls_tokens = self.tile(self.cls_token, (x.shape[0], 1, 1))
        x = self.concat((cls_tokens, x))
        x = x + self.pos_embed
        x = self.pos_drop(x)

        x = self.blocks(x)
        x = self.norm(x)
        return x

    def forward_head(self, x):
        """Pool token features and apply the classifier head."""
        if self.global_pool:
            # Mean over patch tokens only (skip the CLS prefix).
            x = self.mean(x[:, self.num_prefix_tokens:], (1))
        else:
            # Standard ViT: classify from the CLS token.
            x = x[:, 0]
        x = self.fc_norm(x)
        return self.head(x)

    def construct(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x


def vit_tiny(**kwargs):
    """ViT-Tiny: embed_dim 192, 12 layers, 3 heads, MLP ratio 4."""
    return VisionTransformer(
        embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True, **kwargs)

def vit_small(**kwargs):
    """ViT-Small: embed_dim 384, 12 layers, 6 heads, MLP ratio 4."""
    return VisionTransformer(
        embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True, **kwargs)


def vit_base(**kwargs):
    """ViT-Base: embed_dim 768, 12 layers, 12 heads, MLP ratio 4."""
    return VisionTransformer(
        embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, **kwargs)

def vit_large(**kwargs):
    """ViT-Large: embed_dim 1024, 24 layers, 16 heads, MLP ratio 4."""
    return VisionTransformer(
        embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, **kwargs)

