import numpy as np
from mindspore import Tensor, Parameter, nn
from mindspore import dtype as mstype
from mindspore import ops as P
import mindspore.common.initializer as weight_init

from data_utils.data_config import DataManager
from .vit_modules_mm import PosEmbedMM
from .vit_modules_native import Block, LayerNorm, Linear

class ViTConfigMM:
    """Configuration holder for the multi-modal ViT encoder.

    Bundles the data-layout description (via a ``DataManager``) together
    with the transformer architecture hyper-parameters consumed by
    ``ViTModelMM``.
    """

    def __init__(self,
                 mask_ratio: float = 0.75,
                 modality_list = ('3d_thick', '2d'),
                 modality_embed = False,
                 num_channels: int = 4096,
                 seq_len: int = 2048,
                 initializer_range: float = 0.02,
                 embed_dim: int = 384,
                 depth: int = 12,
                 num_heads: int = 6,
                 mlp_ratio: float = 4.0,
                 qkv_bias: bool = True,
                 attn_drop_rate: float = 0.0,
                 drop_rate: float = 0.0,
                 drop_path_rate: float = 0.,
                 recompute: bool = False,
                 ):
        # --- data layout / masking -------------------------------------
        # DataManager resolves how the raw channel dimension is split
        # across the requested modalities.
        self.manager = DataManager(modality_list, total_dim=num_channels, seq_len=seq_len)
        self.mask_ratio = mask_ratio
        self.in_chans = num_channels
        self.seq_len = seq_len
        self.modality_embed = modality_embed

        # --- transformer architecture ----------------------------------
        self.initializer_range = initializer_range
        self.embed_dim = embed_dim
        self.depth = depth
        self.num_heads = num_heads
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias

        # --- regularisation / training options -------------------------
        self.attn_drop_rate = attn_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.recompute = recompute



class ViTModelMM(nn.Cell):
    """Multi-modal Vision Transformer encoder.

    Projects raw patch features into the embedding space, adds
    modality-aware positional embeddings, prepends a learnable [CLS]
    token and runs the sequence through a stack of transformer blocks.

    Args:
        config (ViTConfigMM, optional): model hyper-parameters.
            A default ``ViTConfigMM()`` is built when omitted.
    """

    def __init__(self, config=None):
        super().__init__()
        config = config if config else ViTConfigMM()
        self.manager = config.manager
        self.embed_dim = config.embed_dim
        self.seq_len = config.seq_len
        # --------------------------------------------------------------------------
        # MAE encoder specifics
        self.patch_embed = Linear(config.in_chans, self.embed_dim)
        self.pos_embed = PosEmbedMM(
            self.manager,
            self.embed_dim,
        )
        # Optional learned per-modality embedding (not applied in construct()
        # here; presumably consumed by a subclass or caller — TODO confirm).
        if config.modality_embed:
            self.modality_embed = nn.Embedding(vocab_size=config.manager.n_modality,
                                               embedding_size=config.embed_dim)
        else:
            self.modality_embed = None

        self.cls_token = Parameter(
            weight_init.initializer(weight_init.Normal(sigma=config.initializer_range), (1, 1, config.embed_dim)),
            requires_grad=True)

        # Stochastic depth decay rule: drop-path rate grows linearly from 0
        # at the first block to config.drop_path_rate at the last block.
        hdr = [x.item() for x in np.linspace(0, config.drop_path_rate, config.depth)]
        self.blocks = nn.CellList([
            Block(
                dim=config.embed_dim,
                num_heads=config.num_heads,
                mlp_ratio=config.mlp_ratio,
                qkv_bias=config.qkv_bias,
                drop_path=hdr[i]
            )
            for i in range(config.depth)])
        if config.recompute:
            # Trade compute for memory: re-run each block's forward during
            # backprop instead of storing its activations.
            for b in self.blocks:
                b.recompute()
        self.norm = LayerNorm((config.embed_dim,))
        # --------------------------------------------------------------------------

        self.stride_slice = P.StridedSlice()
        self.add = P.Add()
        self.expand_dim = P.ExpandDims()
        self.tile = P.Tile()
        self.gather = P.GatherD()
        self.gather_new = P.Gather()
        self.cat = P.Concat(axis=1)
        self.transpose = P.Transpose()
        self.equal = P.Equal()
        # BUGFIX: construct() calls self.cast(), but the operator was never
        # created, raising AttributeError the first time the net is built.
        self.cast = P.Cast()
        self.init_weights()

    def init_weights(self):
        """ViT weight initialization: Xavier-uniform for dense weights,
        zeros for biases, ones/zeros for LayerNorm gamma/beta."""
        for _, cell in self.cells_and_names():
            if isinstance(cell, (Linear, nn.Dense)):
                cell.weight.set_data(weight_init.initializer(weight_init.XavierUniform(),
                                                             cell.weight.shape,
                                                             cell.weight.dtype))
                # (redundant re-check of the isinstance guard removed)
                if cell.bias is not None:
                    cell.bias.set_data(weight_init.initializer(weight_init.Zero(),
                                                               cell.bias.shape,
                                                               cell.bias.dtype))
            elif isinstance(cell, (LayerNorm, nn.LayerNorm)):
                cell.gamma.set_data(weight_init.initializer(weight_init.One(),
                                                            cell.gamma.shape,
                                                            cell.gamma.dtype))
                cell.beta.set_data(weight_init.initializer(weight_init.Zero(),
                                                           cell.beta.shape,
                                                           cell.beta.dtype))

    def construct(self, image, m_idx, instance_ids):
        """Forward pass of the VisionTransformer encoder.

        Args:
            image: patch features, assumed (batch, seq_len, in_chans) — TODO confirm.
            m_idx: per-token modality indices consumed by the positional embedding.
            instance_ids: per-token instance labels; tokens attend only to
                tokens sharing the same instance id.

        Returns:
            Normalized encoded tokens including the prepended [CLS] token.
        """
        # Pairwise mask: entry (i, j) is 1.0 iff tokens i and j belong to
        # the same instance.
        attention_mask = self.equal(self.expand_dim(instance_ids, 1), self.expand_dim(instance_ids, 2))
        attention_mask = self.cast(attention_mask, mstype.float32)

        batch_size = image.shape[0]
        # Patch projection + modality-aware positional embedding.
        tokens = self.patch_embed(image)
        encoder_pos_embedding = self.pos_embed(m_idx)
        tokens = self.add(tokens, encoder_pos_embedding)
        cls_tokens = self.tile(self.cls_token, (batch_size, 1, 1))
        # NOTE(review): the mask is built from instance_ids only, while the
        # sequence gains one extra [CLS] position here — verify that Block
        # (or the instance_ids layout) accounts for the length mismatch.
        encoded_tokens = self.cat((cls_tokens, tokens))

        for block in self.blocks:
            encoded_tokens = block(encoded_tokens, attention_mask)

        encoded_tokens = self.norm(encoded_tokens)
        return encoded_tokens

