import numpy as np
import math
from mindspore import Tensor, Parameter, nn
from mindspore import dtype as mstype
from mindspore import ops as P
import mindspore.common.initializer as weight_init
from mindformers.models.base_model import BaseModel
from mindformers.models.mae.mae_modules import Block, LayerNorm, Linear, Dropout
from mindformers.core.loss import MSELoss
from mindformers.models.base_config import BaseConfig
from mindformers.models.vit.vit_config import default_parallel_config, default_moe_config

from .vit_modules_3d import PatchEmbed3D, PosEmbed3D


class ViT3DConfig(BaseConfig):
    """Configuration for a 3-D Vision Transformer (:class:`ViT3DModel`).

    Mirrors the standard ViT config but with volumetric (D, H, W) image and
    patch sizes. Note that several constructor arguments are stored under
    different attribute names (e.g. ``hidden_size`` -> ``embed_dim``,
    ``num_hidden_layers`` -> ``depth``) to match the backbone's expectations.

    Args:
        image_size (tuple): input volume size as (D, H, W).
        patch_size (tuple): 3-D patch size as (D, H, W).
        num_channels (int): number of input channels (stored as ``in_chans``).
        initializer_range (float): sigma for TruncatedNormal weight init.
        hidden_size (int): transformer embedding dim (stored as ``embed_dim``).
        num_hidden_layers (int): number of transformer blocks (stored as ``depth``).
        num_attention_heads (int): attention heads per block (stored as ``num_heads``).
        intermediate_size (int): FFN hidden size.
        qkv_bias (bool): whether QKV projections carry a bias.
        hidden_act (str): FFN activation name.
        post_layernorm_residual (bool): use post-LN residual ordering if True.
        layer_norm_eps (float): LayerNorm epsilon.
        attention_probs_dropout_prob (float): attention dropout rate.
        hidden_dropout_prob (float): embedding/FFN dropout rate (stored as ``drop_rate``).
        drop_path_rate (float): max stochastic-depth rate (linearly scaled per block).
        use_mean_pooling (bool): mean-pool patch tokens instead of using the CLS token.
        return_hidden (bool): return all blocks' hidden states instead of pooled output.
        sin_cos_embed (bool): use fixed sin-cos positional embeddings.
        layernorm_compute_type (mstype): dtype for LayerNorm computation.
        softmax_compute_type (mstype): dtype for attention softmax.
        param_init_type (mstype): dtype for parameter initialization.
        recompute (bool): enable gradient recomputation on each block.
        parallel_config: mindformers parallel configuration.
        moe_config: mindformers mixture-of-experts configuration.
        init_values (float, optional): layer-scale init value passed to each
            Block; ``None`` disables layer scale. Added because
            ``ViT3DModel`` reads ``config.init_values``.
    """

    def __init__(self,
                 image_size=(96, 96, 96),
                 patch_size=(16, 16, 16),
                 num_channels: int = 1,
                 initializer_range: float = 0.02,
                 hidden_size: int = 384,
                 num_hidden_layers: int = 12,
                 num_attention_heads: int = 6,
                 intermediate_size: int = 1536,
                 qkv_bias: bool = True,
                 hidden_act: str = "gelu",
                 post_layernorm_residual: bool = False,
                 layer_norm_eps: float = 1e-6,
                 attention_probs_dropout_prob: float = 0.0,
                 hidden_dropout_prob: float = 0.0,
                 drop_path_rate: float = 0.1,
                 use_mean_pooling: bool = True,
                 return_hidden: bool = False,
                 sin_cos_embed: bool = False,
                 layernorm_compute_type: mstype = mstype.float32,
                 softmax_compute_type: mstype = mstype.float32,
                 param_init_type: mstype = mstype.float32,
                 recompute: bool = False,
                 parallel_config = default_parallel_config,
                 moe_config = default_moe_config,
                 init_values=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.in_chans = num_channels
        self.initializer_range = initializer_range
        self.embed_dim = hidden_size
        self.depth = num_hidden_layers
        self.num_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.qkv_bias = qkv_bias
        self.hidden_act = hidden_act
        self.post_layernorm_residual = post_layernorm_residual
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout_rate = attention_probs_dropout_prob
        self.drop_rate = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        self.return_hidden = return_hidden
        self.sin_cos_embed = sin_cos_embed
        self.layernorm_compute_type = layernorm_compute_type
        self.softmax_compute_type = softmax_compute_type
        self.param_init_type = param_init_type
        self.recompute = recompute
        self.parallel_config = parallel_config
        self.moe_config = moe_config
        # Consumed by ViT3DModel when building Blocks; previously missing,
        # which made ViT3DModel(config) raise AttributeError.
        self.init_values = init_values


class ViT3DModel(BaseModel):
    """3-D Vision Transformer backbone.

    Splits a volumetric image into 3-D patches (``PatchEmbed3D``), prepends a
    learnable class token, adds positional embeddings (``PosEmbed3D``, either
    learnable or fixed sin-cos) and runs the token sequence through
    ``config.depth`` transformer ``Block``s. Output is either all hidden
    states (``return_hidden``), the mean-pooled patch tokens
    (``use_mean_pooling``) or the normalized CLS token.

    Args:
        config (ViT3DConfig, optional): model configuration; a default
            ``ViT3DConfig()`` is used when ``None``.
    """

    def __init__(self, config=None):
        config = config if config else ViT3DConfig()
        super().__init__(config)
        self.use_moe = (config.moe_config.expert_num > 1)
        parallel_config = config.parallel_config
        dp = parallel_config.data_parallel
        self.global_pool = config.use_mean_pooling
        self.return_hidden = config.return_hidden

        self.img_size = config.image_size
        self.patch_size = config.patch_size
        # Patches per spatial axis (assumes image_size divisible by patch_size).
        self.grid_size = [s // p for s, p in zip(self.img_size, self.patch_size)]
        self.embed_dim = config.embed_dim

        self.patch_embed = PatchEmbed3D(img_size=config.image_size, patch_size=config.patch_size,
                                        in_chans=config.in_chans, embed_dim=config.embed_dim,
                                        parallel_config=config.parallel_config)
        self.cls_tokens = Parameter(
            weight_init.initializer(weight_init.TruncatedNormal(sigma=config.initializer_range),
                                    (1, 1, config.embed_dim)), requires_grad=True)
        self.num_patches = num_patches = self.patch_embed.num_patches
        # Sequence length includes the prepended class token.
        self.seq_length = seq_length = num_patches + 1

        self.pos_embed = PosEmbed3D(
            embed_dim=self.embed_dim,
            grid_size=self.grid_size,
            sincos=config.sin_cos_embed,
            initializer_range=config.initializer_range
        )
        # Stochastic-depth decay rule: drop-path rate grows linearly per block.
        hdr = [x.item() for x in np.linspace(0, config.drop_path_rate, config.depth)]
        parallel_config_args = parallel_config.moe_parallel_config if self.use_moe else parallel_config.dp_mp_config
        # FIX: reading `config.init_values` directly raised AttributeError for
        # configs that do not define it; fall back to None (no layer scale).
        init_values = getattr(config, "init_values", None)
        self.blocks = nn.CellList([
            Block(hidden_size=config.embed_dim,
                  ffn_hidden_size=config.intermediate_size,
                  seq_length=seq_length,
                  drop_rate=config.drop_rate,
                  attention_dropout_rate=config.attention_dropout_rate,
                  hidden_dropout_rate=hdr[i],
                  layer_norm_eps=config.layer_norm_eps,
                  qkv_bias=config.qkv_bias,
                  init_values=init_values,
                  weight_init='XavierUniform',
                  layernorm_compute_type=config.layernorm_compute_type,
                  softmax_compute_type=config.softmax_compute_type,
                  window_size=None,
                  num_heads=config.num_heads,
                  hidden_act=config.hidden_act,
                  post_layernorm_residual=config.post_layernorm_residual,
                  param_init_type=config.param_init_type,
                  parallel_config=parallel_config_args)
            for i in range(config.depth)])

        if config.recompute:
            for b in self.blocks:
                b.recompute()

        self.add = P.Add().shard(((dp, 1, 1), (1, 1, 1)))
        self.cast = P.Cast()
        self.tile = P.Tile().shard(((dp, 1, 1),))
        self.cat = P.Concat(axis=1)
        # fc_norm is applied after mean pooling; norm when using the CLS token.
        self.fc_norm = LayerNorm((config.embed_dim,), eps=1e-6).shard(((dp, 1, 1),))
        self.norm = LayerNorm((config.embed_dim,), eps=1e-6).shard(((dp, 1, 1),))

        self.reduce_mean = P.ReduceMean().shard(((dp, 1, 1),))
        self.dropout = Dropout(keep_prob=(1. - config.drop_rate))
        self.dropout.shard(((dp, 1, 1),))

        self.stride_slice = P.StridedSlice().shard(((dp, 1, 1),))

        self.init_weights_vit()
        self.fix_init_weight()

    def fix_init_weight(self):
        """Rescale projection weights by 1/sqrt(2*layer_id) (BEiT-style init fix)."""

        def rescale(param, layer_id):
            values = param.data / (math.sqrt(2.0 * layer_id))
            param.set_data(values)

        for layer_id, block in enumerate(self.blocks):
            if self.use_moe:
                rescale(block.attention.projection.weight, layer_id + 1)
                rescale(block.output.ffn.projection.weight, layer_id + 1)
            else:
                rescale(block.attention.projection.weight, layer_id + 1)
                rescale(block.output.projection.weight, layer_id + 1)

    def init_weights_vit(self):
        """ViT weight initialization, original timm impl (for reproducibility).

        Linear weights -> TruncatedNormal(initializer_range), biases -> 0;
        LayerNorm gamma -> 1, beta -> 0.
        """
        for _, cell in self.cells_and_names():
            if isinstance(cell, Linear):
                cell.weight.set_data(weight_init.initializer(
                    weight_init.TruncatedNormal(sigma=self.config.initializer_range),
                    cell.weight.shape,
                    cell.weight.dtype))
                # (redundant re-isinstance check removed; outer branch guarantees it)
                if cell.bias is not None:
                    cell.bias.set_data(weight_init.initializer(weight_init.Zero(),
                                                               cell.bias.shape,
                                                               cell.bias.dtype))
            elif isinstance(cell, (LayerNorm, nn.LayerNorm)):
                cell.gamma.set_data(weight_init.initializer(weight_init.One(),
                                                            cell.gamma.shape,
                                                            cell.gamma.dtype))
                cell.beta.set_data(weight_init.initializer(weight_init.Zero(),
                                                           cell.beta.shape,
                                                           cell.beta.dtype))

    def no_weight_decay(self):
        """Parameter names excluded from weight decay."""
        return {'pos_embed', 'cls_tokens'}

    def construct_without_pool(self, image):
        """Run the encoder and return the hidden states of every block.

        Args:
            image: input volume batch; first dim is batch size.

        Returns:
            list of per-block hidden states, each (batch, seq_length, embed_dim).
        """
        tokens = self.patch_embed(image)
        batch_size = image.shape[0]
        # Broadcast the single learnable CLS token across the batch.
        cls_tokens = self.tile(self.cls_tokens, (batch_size, 1, 1))
        tokens = self.cat((cls_tokens, tokens))
        tokens = self.add(tokens, self.pos_embed())

        x = self.dropout(tokens)
        # Full (all-ones) attention mask: every token attends to every token.
        encoder_input_mask = P.Ones()((batch_size, self.seq_length, self.seq_length), mstype.float32)
        all_x = []
        for block in self.blocks:
            x = block(x, encoder_input_mask)
            all_x.append(x)
        return all_x

    def construct(self, image):
        """Forward pass.

        Returns all hidden states if ``return_hidden``; otherwise the pooled
        feature: mean of patch tokens (global pool) or the normalized CLS token.
        """
        all_x = self.construct_without_pool(image)
        if self.return_hidden:
            return all_x
        x = all_x[-1]
        b, s, c = x.shape

        if self.global_pool:
            # Drop the CLS token (index 0) and average the patch tokens.
            x = self.stride_slice(
                x, (0, 1, 0), (b, s, c), (1, 1, 1)
            )
            x = self.reduce_mean(x, 1)
            out = self.fc_norm(x)
        else:
            # Normalize, then keep only the CLS token.
            out = self.norm(x)
            out = self.stride_slice(
                out, (0, 0, 0), (b, 1, c), (1, 1, 1)
            )
        return out
