# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file was adapted from the project:
# https://github.com/facebookresearch/mae
# ============================================================================
"""Mae Model."""
import copy
import numpy as np
from mindspore import Tensor, Parameter, nn
from mindspore import dtype as mstype
from mindspore import ops as P
import mindspore as ms
import mindspore.common.initializer as weight_init
from mindformers.models.base_model import BaseModel
from mindformers.models.mae.mae_modules import Block, LayerNorm, Linear
# from mindformers.core.loss import MSELoss

from mindformers.models.base_config import BaseConfig
from mindformers.models.mae.mae_config import default_parallel_config, default_moe_config

from .vit_modules_3d import PatchEmbed3D, get_3d_sincos_pos_embed, Patchify3D, UnPatchify3D
from .mae_3d import ViTMAE3DConfig, ViTMAE3DModel

class IJEPA3DConfig(BaseConfig):
    """Configuration for 3D I-JEPA pre-training.

    Bundles the volume/patch geometry, the block-masking hyper-parameters
    and the transformer architecture settings consumed by ``Predictor`` and
    ``IJEPA3DForPreTraining``.

    Args:
        image_size (tuple): Input volume size (D, H, W).
        patch_size (tuple): 3D patch size (d, h, w); must divide ``image_size``.
        M (int): Number of target blocks sampled per image.
        target_aspect_ratio (tuple): Aspect-ratio range for target blocks.
        target_scale (tuple): Area-scale range for target blocks.
        context_aspect_ratio: Aspect ratio of the context block.
        context_scale (tuple): Area-scale range of the context block.
        num_channels (int): Number of input channels.
        initializer_range (float): Std-dev for truncated-normal parameter init.
        hidden_size (int): Transformer embedding dimension.
        num_hidden_layers (int): Encoder depth.
        num_attention_heads (int): Attention heads per block.
        intermediate_size (int): FFN hidden dimension.
        qkv_bias (bool): Whether QKV projections carry a bias.
        hidden_act (str): Activation used in the FFN.
        post_layernorm_residual (bool): Residual-connection layout flag.
        layer_norm_eps (float): LayerNorm epsilon.
        attention_probs_dropout_prob (float): Attention dropout rate.
        hidden_dropout_prob (float): Hidden-state dropout rate.
        drop_path_rate (float): Max stochastic-depth rate (linear schedule).
        decoder_num_hidden_layers (int): Predictor (decoder) depth.
        sin_cos_embed (bool): Use fixed sin-cos positional embeddings.
        layernorm_compute_type: Compute dtype for LayerNorm.
        softmax_compute_type: Compute dtype for attention softmax.
        param_init_type: Parameter initialization dtype.
        parallel_config: mindformers parallel configuration.
        moe_config: mindformers MoE configuration.
        init_values: Layer-scale init value forwarded to transformer blocks
            (``None`` disables layer scale). Required by ``Predictor``.
        window_size: Attention window size forwarded to transformer blocks
            (``None`` means global attention). Required by ``Predictor``.
    """

    def __init__(self,
                 image_size=(96, 96, 96),
                 patch_size=(16, 16, 16),
                 M=4,
                 target_aspect_ratio=(0.75, 1.5),
                 target_scale=(0.15, .2),
                 context_aspect_ratio=1,
                 context_scale=(0.85, 1.0),
                 num_channels: int = 1,
                 initializer_range: float = 0.02,
                 hidden_size: int = 384,
                 num_hidden_layers: int = 12,
                 num_attention_heads: int = 6,
                 intermediate_size: int = 1536,
                 qkv_bias: bool = True,
                 hidden_act: str = "gelu",
                 post_layernorm_residual: bool = False,
                 layer_norm_eps: float = 1e-6,
                 attention_probs_dropout_prob: float = 0.0,
                 hidden_dropout_prob: float = 0.0,
                 drop_path_rate: float = 0.,
                 decoder_num_hidden_layers: int = 8,
                 sin_cos_embed: bool = False,
                 layernorm_compute_type: mstype = mstype.float32,
                 softmax_compute_type: mstype = mstype.float32,
                 param_init_type: mstype = mstype.float32,
                 parallel_config=default_parallel_config,
                 moe_config=default_moe_config,
                 init_values=None,
                 window_size=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.M = M
        self.target_aspect_ratio = target_aspect_ratio
        self.target_scale = target_scale
        self.context_aspect_ratio = context_aspect_ratio
        self.context_scale = context_scale
        self.in_chans = num_channels
        self.initializer_range = initializer_range
        self.embed_dim = hidden_size
        self.depth = num_hidden_layers
        self.num_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.qkv_bias = qkv_bias
        self.hidden_act = hidden_act
        self.post_layernorm_residual = post_layernorm_residual
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout_rate = attention_probs_dropout_prob
        self.drop_rate = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.decoder_depth = decoder_num_hidden_layers
        self.sin_cos_embed = sin_cos_embed
        self.layernorm_compute_type = layernorm_compute_type
        self.softmax_compute_type = softmax_compute_type
        self.param_init_type = param_init_type
        self.parallel_config = parallel_config
        self.moe_config = moe_config
        # NOTE(review): Predictor.__init__ reads config.init_values and
        # config.window_size; previously these were never set here, so
        # building a Predictor from a plain IJEPA3DConfig raised
        # AttributeError. Defaulting both to None is backward-compatible.
        self.init_values = init_values
        self.window_size = window_size





class Predictor(BaseModel):
    """Narrow ViT predictor for I-JEPA.

    Concatenates the context encoding with positional mask tokens, runs the
    result through ``decoder_depth`` transformer blocks, and returns only the
    slice corresponding to the mask tokens, i.e. the predicted target-patch
    representations.
    """

    def __init__(self, config):
        config = config if config else IJEPA3DConfig()
        super().__init__(config)
        self.use_moe = False
        parallel_config = config.parallel_config
        dp = parallel_config.data_parallel

        self.img_size = config.image_size
        self.patch_size = config.patch_size
        # Patches per spatial axis; assumes image_size divisible by patch_size.
        self.grid_size = [s // p for s, p in zip(self.img_size, self.patch_size)]
        self.num_patches = np.prod(self.grid_size)
        # Stochastic-depth schedule over the predictor's OWN depth.
        # Previously this was linspace over config.depth (the encoder depth),
        # which mis-scales the schedule and indexes out of range whenever
        # decoder_depth > depth.
        hdr = [x.item() for x in np.linspace(0, config.drop_path_rate, config.decoder_depth)]
        parallel_config_args = parallel_config.moe_parallel_config if self.use_moe else parallel_config.dp_mp_config

        self.blocks = nn.CellList([
            Block(hidden_size=config.embed_dim,
                  ffn_hidden_size=config.intermediate_size,
                  seq_length=self.num_patches,
                  drop_rate=config.drop_rate,
                  attention_dropout_rate=config.attention_dropout_rate,
                  hidden_dropout_rate=hdr[i],
                  layer_norm_eps=config.layer_norm_eps,
                  qkv_bias=config.qkv_bias,
                  init_values=config.init_values,
                  weight_init='XavierUniform',
                  layernorm_compute_type=config.layernorm_compute_type,
                  softmax_compute_type=config.softmax_compute_type,
                  window_size=config.window_size,
                  num_heads=config.num_heads,
                  hidden_act=config.hidden_act,
                  moe_config=config.moe_config,
                  post_layernorm_residual=config.post_layernorm_residual,
                  param_init_type=config.param_init_type,
                  parallel_config=parallel_config_args)
            for i in range(config.decoder_depth)])

        # Concat along the sequence axis; sharded over data-parallel dim only.
        self.cat = P.Concat(axis=1).shard(((dp, 1, 1), (dp, 1, 1)))
        self.stride_slice = P.StridedSlice().shard(((1, 1, 1),))
        self.init_weights()

    def init_weights(self):
        """ViT weight initialization: Xavier-uniform for Linear weights and
        the patch-embedding projection, zeros for biases, ones/zeros for
        LayerNorm gamma/beta."""
        for name, cell in self.cells_and_names():
            if isinstance(cell, Linear):
                cell.weight.set_data(weight_init.initializer(weight_init.XavierUniform(),
                                                             cell.weight.shape,
                                                             cell.weight.dtype))
                if cell.bias is not None:
                    cell.bias.set_data(weight_init.initializer(weight_init.Zero(),
                                                               cell.bias.shape,
                                                               cell.bias.dtype))
            elif isinstance(cell, (LayerNorm, nn.LayerNorm)):
                cell.gamma.set_data(weight_init.initializer(weight_init.One(),
                                                            cell.gamma.shape,
                                                            cell.gamma.dtype))
                cell.beta.set_data(weight_init.initializer(weight_init.Zero(),
                                                           cell.beta.shape,
                                                           cell.beta.dtype))
            if name == "patch_embed.proj":
                cell.weight.set_data(weight_init.initializer(weight_init.XavierUniform(),
                                                             cell.weight.shape,
                                                             cell.weight.dtype))

    def construct(self, context_encoding, target_masks):
        """Predict target representations.

        Args:
            context_encoding: encoded context patches, (B, Nc, D).
            target_masks: positional mask tokens for targets, (B, Nt, D).

        Returns:
            Predicted target tokens, (B, Nt, D).
        """
        encoded_tokens = self.cat((context_encoding, target_masks))
        batch_size, seq_length = encoded_tokens.shape[0], encoded_tokens.shape[1]
        # Full (all-ones) attention mask: every token attends to every other.
        encoder_input_mask = P.Ones()((batch_size, seq_length, seq_length), mstype.float32)
        for block in self.blocks:
            encoded_tokens = block(encoded_tokens, encoder_input_mask)
        # Keep only the trailing target-token slice of the sequence.
        pred_target_tokens = self.stride_slice(
            encoded_tokens,
            (0, context_encoding.shape[1], 0),
            (encoded_tokens.shape[0], seq_length, encoded_tokens.shape[2]),
            (1, 1, 1)
        )
        return pred_target_tokens



class IJEPA3DForPreTraining(BaseModel):
    """3D I-JEPA pre-training network.

    Holds a trainable base (context) encoder, a frozen momentum (target)
    encoder updated by EMA via ``update_momentum_encoder``, and a
    ``Predictor`` that regresses target-block representations from the
    context encoding. ``construct`` returns the MSE loss between predicted
    and momentum-encoded target blocks.
    """

    def __init__(self, config=None):
        config = config if config else IJEPA3DConfig()
        super().__init__(config)
        self.M = config.M  # number of target blocks per image
        self.use_moe = False
        parallel_config = config.parallel_config
        dp = parallel_config.data_parallel
        mp = parallel_config.model_parallel
        # NOTE(review): unpacking the config assumes BaseConfig is dict-like
        # (supports **); verify against mindformers' BaseConfig.
        vit_cfg = ViTMAE3DConfig(**config)

        self.base_encoder = ViTMAE3DModel(vit_cfg)
        self.momentum_encoder = ViTMAE3DModel(vit_cfg)
        # m=0 copies the base encoder's weights into the momentum encoder.
        self.update_momentum_encoder(m=0)
        # The momentum encoder is never trained directly; it only receives
        # EMA updates.
        for n, p in self.momentum_encoder.parameters_and_names():
            p.requires_grad = False

        self.num_patches = num_patches = self.base_encoder.patch_embed.num_patches
        self.grid_size = self.base_encoder.grid_size

        # Learnable token broadcast over every masked target position.
        self.mask_tokens = Parameter(
            weight_init.initializer(weight_init.TruncatedNormal(sigma=config.initializer_range),
                                    (1, 1, config.embed_dim)),
            name='mask_tokens', requires_grad=True)

        self.predictor = Predictor(config)

        self.stride_slice = P.StridedSlice().shard(((1, 1, 1),))
        self.add = P.Add().shard(((dp, 1, 1), (1, 1, 1)))
        self.expand_dim = P.ExpandDims().shard(((dp, 1),))
        self.tile = P.Tile().shard(((dp, 1, 1),))
        self.cat = P.Concat(axis=1).shard(((dp, 1, 1), (dp, 1, 1)))
        self.stack = P.Stack(axis=0)
        self.gather = P.Gather()
        self.mse_loss = nn.MSELoss()

        # self.images_summary = P.ImageSummary().shard(((dp, 1, 1, 1),))

        self.init_weights()


    def init_weights(self):
        """ViT weight initialization: Xavier-uniform for Linear weights and
        the patch-embedding projection, zeros for biases, ones/zeros for
        LayerNorm gamma/beta."""
        for name, cell in self.cells_and_names():
            if isinstance(cell, Linear):
                cell.weight.set_data(weight_init.initializer(weight_init.XavierUniform(),
                                                             cell.weight.shape,
                                                             cell.weight.dtype))
                if isinstance(cell, Linear) and cell.bias is not None:
                    cell.bias.set_data(weight_init.initializer(weight_init.Zero(),
                                                               cell.bias.shape,
                                                               cell.bias.dtype))
            elif isinstance(cell, (LayerNorm, nn.LayerNorm)):
                cell.gamma.set_data(weight_init.initializer(weight_init.One(),
                                                            cell.gamma.shape,
                                                            cell.gamma.dtype))
                cell.beta.set_data(weight_init.initializer(weight_init.Zero(),
                                                           cell.beta.shape,
                                                           cell.beta.dtype))
            if name == "patch_embed.proj":
                cell.weight.set_data(weight_init.initializer(weight_init.XavierUniform(),
                                                             cell.weight.shape,
                                                             cell.weight.dtype))

    def update_momentum_encoder(self, m=0.999):
        """EMA-update the momentum encoder from the base encoder.

        Transformer-block parameters ('block' in the name) get the EMA
        ``m * momentum + (1 - m) * base``; all other parameters (embeddings,
        norms, etc.) are copied directly from the base encoder.
        """
        for (name1, param1), (name2, param2) in zip(self.base_encoder.parameters_and_names(), self.momentum_encoder.parameters_and_names()):
            assert (name1 == name2)
            if 'block' in name1:
                param2.set_data(m * param2.data + (1-m)*param1.data)
            else:
                param2.set_data(param1.data)


    def construct(self, image, target_patches, context_patches):
        """Compute the I-JEPA training loss for one batch.

        Args:
            image: input volume batch.
            target_patches: per-sample target patch indices; only the first
                sample's indices are used (same masks across the batch).
            context_patches: context patch indices (same convention).

        Returns:
            Scalar MSE loss between predicted and target representations.
        """
        # same patches in the batch
        target_patches, context_patches = target_patches[0], context_patches[0]

        # Targets come from the frozen momentum encoder; stop_gradient makes
        # sure no gradient flows into it through the loss.
        targets = self.momentum_encoder.construct(image)
        targets = P.stop_gradient(targets)
        # Gather the M target blocks along the sequence axis -> (M, B, Nt, D).
        target_blocks = self.stack([self.gather(targets, target_patches[i], 1) for i in range(self.M)])

        context_block = self.base_encoder(image, context_patches)
        prediction_blocks = P.zeros_like(target_blocks)

        for i in range(self.M):
            # Broadcast the learnable mask token to (B, Nt, D) and add the
            # target positions' positional embeddings.
            target_masks = self.tile(self.mask_tokens, (target_blocks.shape[1], target_blocks.shape[2], 1))
            target_pos_embedding = self.gather(self.base_encoder.pos_embed, target_patches[i], 1)
            target_masks = target_masks + target_pos_embedding
            # NOTE(review): in-place tensor item assignment inside construct —
            # confirm this is supported under the intended (graph/pynative)
            # execution mode.
            prediction_blocks[i] = self.predictor(context_block, target_masks)
        loss = self.mse_loss(prediction_blocks, target_blocks)
        return loss


from mindspore.train.callback._callback import Callback


class EMA_Callback(Callback):
    """Anneals the momentum-encoder EMA coefficient over training.

    Builds a per-epoch schedule from ``initial_ema`` up to 1.0 and, after
    every training step, applies the current epoch's coefficient via
    ``update_momentum_encoder`` on the wrapped network.
    """

    def __init__(self, epochs, initial_ema=0.999):
        super().__init__()
        # One momentum value per epoch, linearly annealed to 1.0.
        self.ema = np.linspace(initial_ema, 1.0, epochs)

    def step_end(self, run_context):
        """Apply the current epoch's EMA coefficient after each step."""
        cb_params = run_context.original_args()
        # cur_epoch_num is 1-based; convert to a 0-based index and clamp so
        # the final epoch cannot index past the end of the schedule
        # (previously `self.ema[cur_epoch]` skipped index 0 and raised
        # IndexError on the last epoch).
        idx = min(cb_params.cur_epoch_num - 1, len(self.ema) - 1)
        cb_params.train_network.network.update_momentum_encoder(m=float(self.ema[idx]))

