import mindspore as ms
import mindspore as np  # NOTE(review): almost certainly meant numpy; kept, but shadowed by the numpy import below
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F

# Probe for the optional fused FlashAttention kernel; Attention falls back to
# the manual softmax(QK^T)V path when it is unavailable.
try:
    from mindspore.nn.layer.flash_attention import FlashAttention

    FLASHATTENTION_VALID = True
    print('Flash Attn Valid!')
except ImportError:
    FLASHATTENTION_VALID = False
    print('Flash Attn Invalid!')


class PatchRouter(nn.Cell):
    """Scores coarse patches of a low-resolution input for sparse routing.

    A lightweight Conv+ReLU extractor is adaptively pooled to an
    (H//16, W//16) grid, and a two-layer MLP head maps each grid cell to a
    sigmoid score in [0, 1]. Output is [B, N] with N = (H//16) * (W//16).
    """

    def __init__(self, in_ch, hidden=64, H=512, W=512):
        super(PatchRouter, self).__init__()
        # Conv + ReLU to extract lightweight features
        self.conv = nn.Conv2d(
            in_channels=in_ch,
            out_channels=hidden,
            kernel_size=3,
            pad_mode="pad",
            padding=1,
            has_bias=True
        )
        self.relu = nn.ReLU()
        # Adaptive pooling down to a fixed (H//16, W//16) grid of patch cells
        self.pool = ops.AdaptiveAvgPool2D((H // 16, W // 16))
        # Linear heads for scoring each pooled cell
        self.dense1 = nn.Dense(hidden, hidden // 2)
        self.relu2 = nn.ReLU()
        self.dense2 = nn.Dense(hidden // 2, 1)
        self.sigmoid = ops.Sigmoid()
        # Helpers for reshape / transpose
        self.reshape = ops.Reshape()
        self.transpose = ops.Transpose()

    # FIX: the annotation previously referenced the bare name `Tensor`, which
    # was never imported and raised NameError when the class was defined.
    def construct(self, x_lowres: ms.Tensor) -> ms.Tensor:
        """
        x_lowres: [B, in_ch, H/4, W/4] (example downsampled by 4x)
        returns scores: [B, N] where N = (H//16)*(W//16)
        """
        # 1) feature extraction
        feat = self.relu(self.conv(x_lowres))               # [B, hidden, H/4, W/4]
        pooled = self.pool(feat)                            # [B, hidden, P_h, P_w]
        b, hidden_dim, p_h, p_w = pooled.shape
        # 2) flatten spatial dims -> tokens
        flat = self.reshape(pooled, (b, hidden_dim, p_h * p_w))  # [B, hidden, N]
        tokens = self.transpose(flat, (0, 2, 1))                 # [B, N, hidden]
        # 3) score head (apply Dense to last dim)
        tokens_flat = tokens.reshape((-1, hidden_dim))           # [B*N, hidden]
        h1 = self.relu2(self.dense1(tokens_flat))                # [B*N, hidden/2]
        s_flat = self.dense2(h1)                                 # [B*N, 1]
        scores = self.sigmoid(s_flat).reshape((b, p_h*p_w))      # [B, N]
        return scores

class LayerNorm(nn.LayerNorm):
    """nn.LayerNorm specialized to a fixed epsilon of 1e-6 (ViT convention)."""

    def __init__(self, normalized_shape):
        # Only the epsilon differs from the parent's defaults.
        super().__init__(normalized_shape=normalized_shape, epsilon=1e-6)


class Linear(nn.Dense):
    """Custom Linear Layer

    A Dense layer forced to run in float16: inputs are cast via to_float and
    the weight/bias are cast explicitly in construct.

    NOTE(review): construct() relies on attributes created by
    nn.Dense.__init__ (shape_op, reshape, matmul, cast, bias_add) — verify
    these exist in the installed MindSpore version.
    """

    def __init__(self, in_channels, out_channels, has_bias=True):
        super().__init__(in_channels, out_channels, has_bias=has_bias, weight_init="xavier_uniform")
        # Target compute dtype for the weight/bias casts below.
        self.dtype = ms.float16
        self.to_float(ms.float16)

    def construct(self, x):
        # Flatten any leading dims to 2-D for the matmul, then restore them.
        x_shape = self.shape_op(x)
        if len(x_shape) != 2:
            x = self.reshape(x, (-1, x_shape[-1]))
        x = self.matmul(x, self.cast(self.weight, self.dtype))
        if self.has_bias:
            x = self.bias_add(x, self.cast(self.bias, self.dtype))
        if len(x_shape) != 2:
            # Rebuild the original leading dims with the new output dim last.
            out_shape = x_shape[:-1] + (F.shape(x)[-1],)
            x = self.reshape(x, out_shape)
        return x


class DropPath(nn.Cell):
    """Stochastic depth: randomly zeroes the whole residual branch per sample.

    A (B, 1, ..., 1) mask of ones is passed through nn.Dropout, which zeroes
    each sample's entry with probability drop_prob and scales survivors by
    1 / (1 - drop_prob); multiplying x by that mask drops or rescales the
    entire branch per sample. Identity in eval mode.
    """

    def __init__(self, drop_prob):
        super().__init__()
        self.drop = nn.Dropout(p=drop_prob)
        # Base mask of ones, tiled per batch element in construct.
        # FIX: was ms.Tensor(np.ones(1,), ...) with `np` bound to mindspore
        # (bad alias at the top of the file); an ms-native constant is used.
        self.mask = ms.Tensor([1.0], dtype=ms.float32)
        self.tile = P.Tile()
        # FIX: was P.mul(), which does not exist — the operator class is P.Mul.
        self.mul = P.Mul()

    def construct(self, x):
        # No dropping at inference time.
        if not self.training:
            return x
        # One Bernoulli draw per sample, broadcast over all other dims.
        mask = self.tile(self.mask, (x.shape[0],) + (1,) * (x.ndim - 1))
        out = self.drop(mask)
        out = self.mul(out, x)
        return out


class LayerScale(nn.Cell):
    """Learnable per-channel scaling (CaiT-style): y = x * gamma.

    gamma is a trainable vector of shape (dim,) initialized to init_values.
    """

    def __init__(self, dim, init_values=1e-5):
        super().__init__()
        # FIX: previously built with np.ones while `np` was mis-bound to
        # mindspore (bad alias at the top of the file); construct the same
        # constant vector with ms-native code instead.
        self.gamma = ms.Parameter(
            ms.Tensor([float(init_values)] * dim, ms.float32), requires_grad=True)
        self.mul = P.Mul()

    def construct(self, x):
        # Broadcast multiply over the trailing channel dimension.
        return self.mul(x, self.gamma)


class Attention(nn.Cell):
    """Multi-head self-attention with an optional FlashAttention fast path.

    QKV and output projections run in fp16 (custom Linear). The manual path
    computes softmax(q.k^T * scale + mask_bias).v, doing the softmax in fp32
    for numerical stability and casting back to fp16 for the value matmul.

    attn_mask: 1 = attend, 0 = masked; converted to an additive -10000 bias.
    """

    def __init__(self,
                 dim,
                 num_heads=8,
                 qkv_bias=False,
                 qk_norm=False,
                 attn_drop=0.,
                 proj_drop=0.,
                 use_flash_attn=True
                 ):
        super().__init__()
        assert dim % num_heads == 0, 'dim should be divisible by num_heads'
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        # 1/sqrt(head_dim) scaling, fp16 to match the fp16 q tensor.
        self.scale = ms.Tensor(self.head_dim ** -0.5, dtype=ms.float16)

        self.qkv = Linear(dim, dim * 3, has_bias=qkv_bias)
        self.q_norm = LayerNorm((self.head_dim,)) if qk_norm else nn.Identity()
        self.k_norm = LayerNorm((self.head_dim,)) if qk_norm else nn.Identity()
        self.attn_drop = nn.Dropout(p=attn_drop)
        self.proj = Linear(dim, dim)
        self.proj_drop = nn.Dropout(p=proj_drop)

        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.mul = P.Mul()
        # FIX: construct() calls self.cast, but it was never defined here
        # (nn.Dense defines its own; nn.Cell does not provide one).
        self.cast = P.Cast()
        self.batch_matmul_trans = P.BatchMatMul(transpose_b=True)
        self.batch_matmul = P.BatchMatMul(transpose_b=False)
        self.softmax = nn.Softmax()

        if FLASHATTENTION_VALID and use_flash_attn:
            # NOTE(review): head_dim=dim passes the full model width; most
            # FlashAttention APIs expect the per-head size (dim // num_heads).
            # Preserved as-is — confirm against the installed FlashAttention.
            self.flash_attn = FlashAttention(head_dim=dim, head_num=num_heads,
                                             dropout_rate=attn_drop, next_block_num=0,
                                             high_precision=True)
        else:
            self.flash_attn = None
        # for attention mask: additive bias = (1 - mask) * -10000
        self.add = P.Add()
        self.sub = P.Sub()
        self.expand_dims = P.ExpandDims()
        self.sub_one = ms.Tensor(1, dtype=ms.float32)
        self.multiply_data = ms.Tensor(-10000.0, dtype=ms.float32)

    def construct(self, x, attn_mask):
        """x: [B, L, C]; returns [B, L, C]."""
        orig_shape = x.shape

        qkv = self.qkv(x)  # [B, L, 3*C]
        qkv = self.transpose(
            self.reshape(qkv, (x.shape[0], x.shape[1], 3, self.num_heads, self.head_dim)),
            (2, 0, 3, 1, 4)
        )  # (3, B, H, L, D)
        q, k, v = ops.unbind(qkv, dim=0)  # (B, H, L, D)
        q, k = self.q_norm(q), self.k_norm(k)

        if self.flash_attn is not None:
            x = self.flash_attn(q, k, v, attn_mask)
        else:
            q = self.mul(q, self.scale)
            attn = self.batch_matmul_trans(q, k)  # [B, H, L, L]
            # Softmax computed in fp32 for numerical stability.
            attn = self.cast(attn, ms.float32)
            attn_mask = self.expand_dims(attn_mask, 1)
            adder = self.mul(
                self.sub(self.sub_one, attn_mask),
                self.multiply_data
            )
            attn = self.add(adder, attn)
            attn = self.softmax(attn)
            attn = self.attn_drop(attn)
            attn = self.cast(attn, ms.float16)
            x = self.batch_matmul(attn, v)  # [B, H, L, D]

        # Merge heads: (B, H, L, D) -> (B, L, H*D) == orig_shape.
        x = self.reshape(self.transpose(x, (0, 2, 1, 3)), orig_shape)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class Mlp(nn.Cell):
    """Transformer feed-forward block (Linear -> GELU -> Linear), run in fp16.

    hidden_features / out_features default to in_features when not given.
    """

    def __init__(self,
                 in_features,
                 hidden_features=None,
                 out_features=None,
                 drop=0.,
                 ):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = Linear(in_features, hidden_features)
        self.act = nn.GELU()
        self.drop1 = nn.Dropout(p=drop)
        self.fc2 = Linear(hidden_features, out_features)
        self.drop2 = nn.Dropout(p=drop)
        # FIX: construct() calls self.cast, but it was never defined here
        # (nn.Cell does not provide one).
        self.cast = P.Cast()

    def construct(self, x):
        # Cast input to fp16 to match the fp16 Linear layers.
        x = self.cast(x, ms.float16)
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop1(x)
        x = self.fc2(x)
        x = self.drop2(x)
        return x


class Block(nn.Cell):
    """Pre-norm transformer encoder block.

    Two residual sub-blocks — multi-head attention and an MLP — each wrapped
    as: LayerNorm -> fp16 cast -> sub-layer -> optional LayerScale ->
    optional DropPath -> residual add.
    """

    def __init__(self,
                 dim,
                 num_heads,
                 mlp_ratio=4.,
                 qkv_bias=False,
                 qk_norm=False,
                 drop=0.,
                 attn_drop=0.,
                 init_values=None,
                 drop_path=0.,
                 use_flash_attn=True
                 ):
        super().__init__()
        # Attention sub-block pieces.
        self.norm1 = LayerNorm((dim,))
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_norm=qk_norm,
            attn_drop=attn_drop,
            proj_drop=drop,
            use_flash_attn=use_flash_attn,
        )
        self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        # MLP sub-block pieces.
        self.norm2 = LayerNorm((dim,))
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=int(dim * mlp_ratio),
            drop=drop,
        )
        self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        self.add = P.Add()
        # Sub-layers compute in fp16; residual stream stays in the input dtype.
        self.dtype = ms.float16

    def construct(self, x, attn_mask):
        # Attention sub-block with residual connection.
        branch = F.cast(self.norm1(x), self.dtype)
        branch = self.drop_path1(self.ls1(self.attn(branch, attn_mask)))
        x = self.add(x, branch)

        # MLP sub-block with residual connection.
        branch = F.cast(self.norm2(x), self.dtype)
        branch = self.drop_path2(self.ls2(self.mlp(branch)))
        x = self.add(x, branch)
        return x

if __name__ == '__main__':
    # --------------------
    # Inference example (MindSpore)
    # --------------------
    # NOTE(review): this example references names never defined in this file
    # (router, x_lowres, tau, embedder, x_full_res, vit_encoder, sam_decoder,
    # prompts) — it raises NameError if actually executed; it documents the
    # intended pipeline only.
    # Assumed to already exist:
    #  - router: a PatchRouter instance
    #  - embedder(tokens): splits the original image into patches and encodes them as [B, N, C] tokens
    #  - vit_encoder: accepts sparse tokens of shape [B_selected, C] or [total_selected, C]
    #  - sam_decoder: the SAM decoder interface

    # 1) low-resolution routing scores
    scores = router(x_lowres)              # [B, N]
    # 2) binary gating against the threshold tau
    mask = scores > tau                    # [B, N] (bool)
    # 3) gather high-score patches from the original tokens
    tokens = embedder(x_full_res)          # [B, N, C]
    mask_expanded = mask.expand_dims(-1)   # [B, N, 1]
    # MaskedSelect returns a flat tensor; reshape back to [total_selected, C]
    selected_tokens = ops.MaskedSelect()(tokens, mask_expanded).reshape(-1, tokens.shape[-1])
    # 4) sparse ViT encoding
    encoded = vit_encoder(selected_tokens) # [total_selected, E]
    # 5) SAM decoding to the full image
    mask_out = sam_decoder(encoded, prompts)
