import numpy as np
from mindspore import Tensor, Parameter, nn
from mindspore import dtype as mstype
from mindspore import ops as P
import mindspore.common.initializer as weight_init

from data_utils.data_config import DataManager
from .vit_modules_mm import PosEmbedMM, MAELossMM
from .vit_modules_native import Block, LayerNorm, Linear


class ViTMAEConfigMM:
    """Hyper-parameter container for the multi-modal MAE ViT.

    Builds a `DataManager` from the modality list and records every knob the
    encoder (`ViTMAEModelMM`) and the pre-training wrapper
    (`ViTMAEForPreTrainingMM`) read back as attributes.
    """

    def __init__(self,
                 mask_ratio: float = 0.75,
                 modality_list = ('3d_thick', '2d'),
                 modality_embed = False,
                 num_channels: int = 4096,
                 seq_len: int = 2048,
                 initializer_range: float = 0.02,
                 embed_dim: int = 384,
                 depth: int = 12,
                 num_heads: int = 6,
                 mlp_ratio: float = 4.0,
                 qkv_bias: bool = True,
                 attn_drop_rate: float = 0.0,
                 drop_rate: float = 0.0,
                 drop_path_rate: float = 0.,
                 decoder_embed_dim: int = 216,
                 decoder_depth: int = 8,
                 decoder_num_heads: int = 8,
                 norm_pix_loss: bool = True,
                 loss_type: str = 'l2',
                 recompute: bool = False,
                 num_prefix_tokens: int = 1,
                 use_flash_attn: bool = True,
                 ):
        super().__init__()
        # Data layout / masking.
        self.manager = DataManager(modality_list, total_dim=num_channels, seq_len=seq_len)
        self.in_chans = num_channels
        self.seq_len = seq_len
        self.mask_ratio = mask_ratio
        self.modality_embed = modality_embed
        # Encoder trunk.
        self.initializer_range = initializer_range
        self.embed_dim = embed_dim
        self.depth = depth
        self.num_heads = num_heads
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.num_prefix_tokens = num_prefix_tokens
        self.use_flash_attn = use_flash_attn
        # Decoder head.
        self.decoder_embed_dim = decoder_embed_dim
        self.decoder_depth = decoder_depth
        self.decoder_num_heads = decoder_num_heads
        # Loss / training options.  NOTE: the attribute is intentionally named
        # `norm_pixel_loss` (not `norm_pix_loss`) — downstream code reads it.
        self.norm_pixel_loss = norm_pix_loss
        self.loss_type = loss_type
        self.recompute = recompute


class ViTMAEModelMM(nn.Cell):
    """Multi-modal MAE encoder.

    Projects per-position channel vectors into the embedding space, adds
    modality-aware positional embeddings, drops the masked positions
    (keeping only the tokens selected by `unmask_index`), prepends learnable
    cls token(s), and runs the result through a stack of transformer blocks.

    NOTE(review): `construct` prepends a single zero index for the cls token
    when augmenting `unmask_index`, so the masking path appears to assume
    num_prefix_tokens == 1 — confirm before configuring a larger prefix.
    """

    def __init__(self, config=None):
        super().__init__()
        # Fall back to the default configuration when none is supplied.
        config = config if config else ViTMAEConfigMM()
        self.manager = config.manager
        self.embed_dim = config.embed_dim
        self.seq_len = config.seq_len
        # --------------------------------------------------------------------------
        # MAE encoder specifics
        # Linear patch embedding: maps in_chans -> embed_dim per sequence position.
        self.patch_embed = Linear(config.in_chans, self.embed_dim)
        # Positional embedding conditioned on the modality index of each position.
        self.pos_embed = PosEmbedMM(
            self.manager,
            self.embed_dim,
        )
        if config.modality_embed:
            # Optional learned per-modality embedding (unused in construct here;
            # presumably consumed by subclasses or external code — verify).
            self.modality_embed = nn.Embedding(vocab_size=config.manager.n_modality, embedding_size=config.embed_dim)
        else:
            self.modality_embed = None
        self.num_prefix_tokens = config.num_prefix_tokens
        # Learnable cls token(s), broadcast over the batch in construct.
        self.cls_token = Parameter(
            weight_init.initializer(weight_init.Normal(sigma=config.initializer_range), (1, self.num_prefix_tokens, config.embed_dim)),
            requires_grad=True)

        # stochastic depth decay rule
        # hdr = [x.item() for x in np.linspace(0, config.drop_path_rate, config.depth)]
        self.blocks = nn.CellList([
            Block(
                dim=config.embed_dim,
                num_heads=config.num_heads,
                mlp_ratio=config.mlp_ratio,
                qkv_bias=config.qkv_bias,
                use_flash_attn=config.use_flash_attn
            )
            for i in range(config.depth)])
        if config.recompute:
            # Trade compute for memory: re-run each block's forward in backprop.
            for b in self.blocks:
                b.recompute()
        self.norm = LayerNorm((config.embed_dim,))
        # --------------------------------------------------------------------------

        # Graph-mode op handles.
        self.stride_slice = P.StridedSlice()
        self.add = P.Add()
        self.expand_dim = P.ExpandDims()
        self.tile = P.Tile()
        self.gather = P.GatherD()
        self.gather_new = P.Gather()  # NOTE(review): defined but not used in this class.
        self.cat = P.Concat(axis=1)
        self.transpose = P.Transpose()
        self.init_weights()

    def init_weights(self):
        """ ViT weight initialization."""
        # Xavier-uniform for dense weights, zeros for biases; LayerNorm gets
        # gamma=1 / beta=0.
        for name, cell in self.cells_and_names():
            if isinstance(cell, (Linear, nn.Dense)):
                cell.weight.set_data(weight_init.initializer(weight_init.XavierUniform(),
                                                             cell.weight.shape,
                                                             cell.weight.dtype))
                if isinstance(cell, (Linear, nn.Dense)) and cell.bias is not None:
                    cell.bias.set_data(weight_init.initializer(weight_init.Zero(),
                                                               cell.bias.shape,
                                                               cell.bias.dtype))
            elif isinstance(cell, (LayerNorm, nn.LayerNorm)):
                cell.gamma.set_data(weight_init.initializer(weight_init.One(),
                                                            cell.gamma.shape,
                                                            cell.gamma.dtype))
                cell.beta.set_data(weight_init.initializer(weight_init.Zero(),
                                                           cell.beta.shape,
                                                           cell.beta.dtype))

    def gather_unmasked(self, tensor, unmask_index):
        """Select rows of `tensor` along axis 1 at the positions in `unmask_index`.

        `unmask_index` (batch, n_keep) is expanded and tiled to match the last
        dimension of `tensor` so GatherD can pick whole token vectors.
        """
        unmask_index = self.expand_dim(unmask_index, -1)
        unmask_index = self.tile(unmask_index, (1, 1, tensor.shape[2]))
        return self.gather(tensor, 1, unmask_index)

    def construct(self, image, m_idx, unmask_index, attention_mask):
        """construct of VisionTransformerForMae Encoder

        Args:
            image: input features — assumed (batch, seq_len, in_chans); TODO confirm.
            m_idx: modality index per position, consumed by `pos_embed`.
            unmask_index: (batch, n_keep) indices of positions kept (not masked).
            attention_mask: pairwise mask over the cls-augmented sequence;
                assumed symmetric (built from instance-id equality) — the
                double gather below relies on that. TODO confirm.

        Returns:
            Encoded, LayerNorm-ed tokens of shape (batch, n_keep + prefix, embed_dim).
        """
        batch_size = image.shape[0]
        # patch to encoder tokens and add positions
        tokens = self.patch_embed(image)
        encoder_pos_embedding = self.pos_embed(m_idx)
        tokens = self.add(tokens, encoder_pos_embedding)

        # per-instance mask
        unmask_tokens = self.gather_unmasked(tokens, unmask_index)

        # Broadcast the learnable cls token(s) over the batch and prepend.
        cls_tokens = self.tile(self.cls_token, (batch_size, 1, 1))
        encoded_tokens = self.cat((cls_tokens, unmask_tokens))

        # unmask_index with cls token
        # Index 0 addresses the cls slot of the attention mask; remaining kept
        # indices shift by num_prefix_tokens. NOTE(review): only one zero is
        # prepended — assumes num_prefix_tokens == 1.
        unmask_index_aug = self.cat((
            P.Zeros()((batch_size, 1), unmask_index.dtype),
            unmask_index + self.num_prefix_tokens
        ))
        # apply token mask to attn mask
        # Gather rows, transpose, gather again: restricts the mask to kept
        # positions on both axes (valid because the mask is symmetric; the
        # final transpose back is therefore unnecessary).
        encoder_input_mask = self.gather_unmasked(attention_mask, unmask_index_aug) # gather at dim 1
        encoder_input_mask = self.transpose(encoder_input_mask, (0, 2, 1))
        encoder_input_mask = self.gather_unmasked(encoder_input_mask, unmask_index_aug) # gather at dim 2

        for block in self.blocks:
            encoded_tokens = block(encoded_tokens, encoder_input_mask)

        encoded_tokens = self.norm(encoded_tokens)
        return encoded_tokens



class ViTMAEForPreTrainingMM(nn.Cell):
    """Multi-modal MAE pre-training model: `ViTMAEModelMM` encoder plus a
    lightweight transformer decoder that reconstructs the masked positions.

    `construct` returns the (scalar) masked-reconstruction loss computed by
    `MAELossMM`.
    """

    def __init__(self, config=None):
        super().__init__()
        config = config if config else ViTMAEConfigMM()
        self.vit = ViTMAEModelMM(config)
        self.manager = config.manager
        self.embed_dim = config.embed_dim
        self.seq_len = config.seq_len
        # --------------------------------------------------------------------------
        # MAE decoder specifics
        self.decoder_embed_dim = config.decoder_embed_dim
        # Projects encoder tokens into the (smaller) decoder width.
        self.decoder_embed = Linear(config.embed_dim, config.decoder_embed_dim)
        self.num_prefix_tokens = config.num_prefix_tokens
        # Learnable placeholder inserted at every masked position before decoding.
        self.mask_token = Parameter(
            weight_init.initializer(weight_init.Normal(sigma=config.initializer_range),
                                    (1, 1, config.decoder_embed_dim)),
            name='mask_token', requires_grad=True)

        # Decoder-side modality-aware positional embedding.
        self.decoder_pos_embed = PosEmbedMM(
            manager=config.manager,
            embed_dim=config.decoder_embed_dim,
        )

        self.decoder_blocks = nn.CellList([
            Block(
                dim=self.decoder_embed_dim,
                num_heads=config.decoder_num_heads,
                mlp_ratio=config.mlp_ratio,
                qkv_bias=config.qkv_bias,
                use_flash_attn=config.use_flash_attn
            )
            for _ in range(config.decoder_depth)])

        self.decoder_norm = LayerNorm((config.decoder_embed_dim,))

        # Maps decoder tokens back to the input channel dimension for reconstruction.
        self.decoder_pred = Linear(
            config.decoder_embed_dim, config.in_chans
        )

        # Graph-mode op handles (gather/gather1 kept for interface stability
        # even though only gather2 is used below).
        self.stride_slice = P.StridedSlice()
        self.add = P.Add()
        self.expand_dim = P.ExpandDims()
        self.tile = P.Tile()
        self.gather = P.GatherD()
        self.cat = P.Concat(axis=1)
        self.gather1 = P.GatherD()
        self.gather2 = P.GatherD()
        self.equal = P.Equal()
        self.print = P.Print()
        self.mse_loss = MAELossMM(config.manager, config.norm_pixel_loss, loss_type=config.loss_type)
        self.init_weights()

    def init_weights(self):
        """ ViT weight initialization: Xavier-uniform dense weights, zero
        biases, and gamma=1 / beta=0 for LayerNorm."""
        for _, cell in self.cells_and_names():
            if isinstance(cell, (Linear, nn.Dense)):
                cell.weight.set_data(weight_init.initializer(weight_init.XavierUniform(),
                                                             cell.weight.shape,
                                                             cell.weight.dtype))
                # The outer isinstance already guarantees a dense-like cell.
                if cell.bias is not None:
                    cell.bias.set_data(weight_init.initializer(weight_init.Zero(),
                                                               cell.bias.shape,
                                                               cell.bias.dtype))
            elif isinstance(cell, (LayerNorm, nn.LayerNorm)):
                cell.gamma.set_data(weight_init.initializer(weight_init.One(),
                                                            cell.gamma.shape,
                                                            cell.gamma.dtype))
                cell.beta.set_data(weight_init.initializer(weight_init.Zero(),
                                                           cell.beta.shape,
                                                           cell.beta.dtype))

    def construct(self, image, modality_idx, instance_ids, target_mask, rand_indices, unmask_index):
        """construct of VisionTransformerForMae

        Args:
            image: input features — assumed (batch, seq_len, in_chans); TODO confirm.
            modality_idx: per-position modality index for both pos-embed layers.
            instance_ids: per-position instance id; equality defines who may
                attend to whom (block-diagonal attention mask).
            target_mask: mask passed through to the reconstruction loss.
            rand_indices: the random permutation used to pick masked positions;
                its argsort restores the original ordering.
            unmask_index: (batch, n_keep) indices of kept (unmasked) positions.

        Returns:
            The masked-reconstruction loss from `MAELossMM`.
        """
        # Inverse permutation of the shuffle that chose the masked positions.
        ids_restore = P.argsort(rand_indices, axis=-1)
        # Pairwise same-instance mask; cast to float for the attention blocks.
        attention_mask = self.equal(self.expand_dim(instance_ids, 1), self.expand_dim(instance_ids, 2))
        attention_mask = self.cast(attention_mask, mstype.float32)

        # Call the encoder cell (not .construct directly) so Cell.__call__
        # machinery — hooks, mixed precision — is applied.
        encoder_tokens = self.vit(image, modality_idx, unmask_index, attention_mask)

        unmask_tokens = self.decoder_embed(encoder_tokens)
        unmask_tokens = self.cast(unmask_tokens, mstype.float32)

        # encoder_tokens holds n_keep unmasked tokens plus num_prefix_tokens
        # cls tokens, so the number of masked positions is
        # seq_len - (shape[1] - num_prefix_tokens).  (Was hard-coded "+ 1",
        # which is only correct when num_prefix_tokens == 1.)
        batch_size = encoder_tokens.shape[0]
        num_masked = self.seq_len - encoder_tokens.shape[1] + self.num_prefix_tokens
        mask_tokens = self.tile(self.mask_token, (batch_size, num_masked, 1))

        # Strip the prefix (cls) tokens before re-assembling the full sequence.
        img_tokens = self.stride_slice(
            unmask_tokens, (0, self.num_prefix_tokens, 0),
            (unmask_tokens.shape[0], unmask_tokens.shape[1], unmask_tokens.shape[2]), (1, 1, 1))
        # concat the masked tokens to the decoder tokens and attend with decoder
        full_tokens_ = self.cat((img_tokens, mask_tokens))
        # Un-shuffle: GatherD with the tiled inverse permutation puts every
        # token (real or mask) back in its original sequence slot.
        ids_restore_ = self.expand_dim(ids_restore, -1)
        ids_restore_ = self.tile(ids_restore_, (1, 1, self.decoder_embed_dim))
        full_tokens_ = self.gather2(full_tokens_, 1, ids_restore_)

        # add pos embed to image tokens
        full_tokens = self.add(full_tokens_, self.decoder_pos_embed(modality_idx))

        # Re-attach cls tokens (no positional embedding added to them).
        cls_tokens = self.stride_slice(
            unmask_tokens, (0, 0, 0),
            (unmask_tokens.shape[0], self.num_prefix_tokens, unmask_tokens.shape[2]), (1, 1, 1))
        decoder_tokens = self.cat((cls_tokens, full_tokens))

        # decoder
        for block in self.decoder_blocks:
            decoder_tokens = block(decoder_tokens, attention_mask)

        # normalize decoder tokens
        decoder_tokens = self.decoder_norm(decoder_tokens)

        # project to pixel values
        pred = self.decoder_pred(decoder_tokens)
        pred = self.cast(pred, mstype.float32)
        # remove cls tokens: the loss is computed on sequence positions only
        pred = self.stride_slice(pred, (0, self.num_prefix_tokens, 0), (pred.shape[0], pred.shape[1], pred.shape[2]), (1, 1, 1))

        mae_loss = self.mse_loss(pred, image, target_mask)
        return mae_loss
