# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""nerf engine utils"""

import os
import warnings

import mindspore as md

from models import NeRFMLP
from nerf_utils import Embedder
import torch
from mindspore import Tensor, Parameter

__all__ = ["get_embedder", "create_nerf", "context_setup", "create_nerf_from_torch"]


def get_embedder(multi_res, i=0):
    """
    Get embedder function.

    Args:
        multi_res (int): Log2 of max freq for positional encoding.
        i (int, optional): Set 0 for default positional encoding, -1 for none. Default: 0.

    Returns:
        Tuple of nn.Cell and int, embedder and the output dimensions.

        - **embedder** (nn.Cell) - The embedder.
        - **out_dims** (int) - The output dimensions.
    """
    if i == -1:
        # No positional encoding: pass the raw 3-D input through unchanged.
        return md.ops.Identity(), 3

    embed_kwargs = {
        "include_input": True,
        "input_dims": 3,
        "max_freq_pow": multi_res - 1,
        "num_freqs": multi_res,
        "log_sampling": True,
        "periodic_fns": [md.ops.Sin(), md.ops.Cos()],
    }

    embedder_obj = Embedder(**embed_kwargs)
    # Return the embedder object itself, not the kwargs dict: callers expect
    # a callable nn.Cell as documented above, and the previous code returned
    # `embed_kwargs` by mistake.
    return embedder_obj, embedder_obj.out_dims


def create_nerf(config, out_dir):
    """
    Create nerf model and load weights.

    Args:
        config (Config): The config object.
        out_dir (str): The output directory.

    Returns:
        Tuple of 6 items. The network items.

        - **start_iter** (int) - The start iteration number.
        - **optimizer** (Cell) - The MLP optimizer.
        - **model_coarse** (Cell) - The coarse MLP.
        - **model_fine** (Cell) - The fine MLP.
        - **embed_fn** (Cell) - The positional embedder functions for location.
        - **embed_dirs_fn** (Cell) - The positional embedder functions for direction.
    """
    # Positional encoder for 3-D sample locations.
    embed_fn, input_ch = get_embedder(config.multi_res, config.i_embed)

    # Optional positional encoder for viewing directions.
    input_ch_views = 0
    embed_dirs_fn = None
    if config.use_view_dirs:
        embed_dirs_fn, input_ch_views = get_embedder(config.multi_res_views, config.i_embed)

    # Create networks. output_ch=4 and a skip connection at layer 4 —
    # presumably RGB + density outputs; confirm against NeRFMLP.
    output_ch = 4
    skips = [4]
    model_coarse = NeRFMLP(
        cap_d=config.net_depth,
        cap_w=config.net_width,
        input_ch=input_ch,
        output_ch=output_ch,
        skips=skips,
        input_ch_views=input_ch_views,
        use_view_dirs=config.use_view_dirs,
    )

    # The fine network is only built when hierarchical (importance) sampling
    # is enabled via cap_n_importance.
    model_fine = None
    if config.cap_n_importance > 0:
        model_fine = NeRFMLP(
            cap_d=config.net_depth_fine,
            cap_w=config.net_width_fine,
            input_ch=input_ch,
            output_ch=output_ch,
            skips=skips,
            input_ch_views=input_ch_views,
            use_view_dirs=config.use_view_dirs,
        )

    # NOTE(review): no optimizer is constructed here; callers are expected to
    # build one from the models' trainable_params(). A previously collected
    # `grad_vars` list was never used and has been removed.
    optimizer = None

    # Load checkpoints: an explicit config.ckpt wins; otherwise pick the
    # newest ".tar" checkpoint found in the output directory.
    start_iter = 0
    if config.ckpt is not None:
        ckpts = [config.ckpt]
    else:
        ckpts = [os.path.join(out_dir, f) for f in sorted(os.listdir(out_dir)) if ".tar" in f]

    print("Found ckpts", ckpts)
    if (ckpts and not config.no_reload) or config.ckpt is not None:
        # Reload the latest ckpt
        ckpt_path = ckpts[-1]
        print("Reloading from", ckpt_path)
        ckpt = md.load_checkpoint(ckpt_path)

        # Load training steps; resume one step after the recorded step.
        start_iter = int(ckpt["global_steps"]) + 1

        # Load network weights, routing parameters to the right sub-network
        # by name prefix.
        md.load_param_into_net(
            model_coarse,
            {key: value for key, value in ckpt.items() if ".model_coarse." in key},
        )
        if model_fine is not None:
            md.load_param_into_net(
                model_fine,
                {key: value for key, value in ckpt.items() if ".model_fine." in key},
            )
    else:
        print("No ckpt reloaded")

    return start_iter, optimizer, model_coarse, model_fine, embed_fn, embed_dirs_fn


def create_nerf_from_torch(config, out_dir):
    """
    Create nerf model and load weights from a torch checkpoint.

    Args:
        config (Config): The config object.
        out_dir (str): The output directory.

    Returns:
        Tuple of 6 items. The network items.

        - **start_iter** (int) - The start iteration number.
        - **optimizer** (Cell) - The MLP optimizer.
        - **model_coarse** (Cell) - The coarse MLP.
        - **model_fine** (Cell) - The fine MLP.
        - **embed_fn** (Cell) - The positional embedder functions for location.
        - **embed_dirs_fn** (Cell) - The positional embedder functions for direction.
    """
    # Positional encoder for 3-D sample locations.
    embed_fn, input_ch = get_embedder(config.multi_res, config.i_embed)

    # Optional positional encoder for viewing directions.
    input_ch_views = 0
    embed_dirs_fn = None
    if config.use_view_dirs:
        embed_dirs_fn, input_ch_views = get_embedder(config.multi_res_views, config.i_embed)

    # Create networks. output_ch=4 and a skip connection at layer 4 —
    # presumably RGB + density outputs; confirm against NeRFMLP.
    output_ch = 4
    skips = [4]
    model_coarse = NeRFMLP(
        cap_d=config.net_depth,
        cap_w=config.net_width,
        input_ch=input_ch,
        output_ch=output_ch,
        skips=skips,
        input_ch_views=input_ch_views,
        use_view_dirs=config.use_view_dirs,
    )

    # The fine network is only built when hierarchical (importance) sampling
    # is enabled via cap_n_importance.
    model_fine = None
    if config.cap_n_importance > 0:
        model_fine = NeRFMLP(
            cap_d=config.net_depth_fine,
            cap_w=config.net_width_fine,
            input_ch=input_ch,
            output_ch=output_ch,
            skips=skips,
            input_ch_views=input_ch_views,
            use_view_dirs=config.use_view_dirs,
        )

    # NOTE(review): no optimizer is constructed here; callers are expected to
    # build one from the models' trainable_params(). A previously collected
    # `grad_vars` list was never used and has been removed.
    optimizer = None

    # Load checkpoints: an explicit config.ckpt wins; otherwise pick the
    # newest ".tar" checkpoint found in the output directory.
    start_iter = 0
    if config.ckpt is not None:
        ckpts = [config.ckpt]
    else:
        ckpts = [os.path.join(out_dir, f) for f in sorted(os.listdir(out_dir)) if ".tar" in f]

    print("Found ckpts", ckpts)
    if (ckpts and not config.no_reload) or config.ckpt is not None:
        # Reload the latest ckpt, converting torch weights into mindspore.
        ckpt_path = ckpts[-1]
        print("Reloading from", ckpt_path)

        model_coarse, model_fine, start_iter = convert_torch_to_mindspore(ckpt_path, model_coarse, model_fine)

    return start_iter, optimizer, model_coarse, model_fine, embed_fn, embed_dirs_fn


def context_setup(idx, device, mode):
    """
    Set up running context.

    Args:
        idx (int): The device index.
        device (str): The target platforms. ``Ascend`` or ``GPU``.
        mode (str): The running mode. `PYNATIVE_MODE` or `GRAPH_MODE`.

    Raises:
        NotImplementedError: `cumprod` ops does not support CPU.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """
    # Guard clause: CPU is rejected up front because a required op is missing.
    if device == "CPU":
        raise NotImplementedError("`cumprod` ops does not support CPU.")

    # Apply the base execution settings for the selected device.
    base_settings = {"mode": mode, "device_target": device, "device_id": idx}
    md.context.set_context(**base_settings)

    # Raise the call-depth limit as a workaround for the missing N-D
    # searchsorted op, then warn the user about it.
    warnings.warn("Not support N-D searchsorted, set `max_call_depth=20000` to prevent `for loop` collapse.")
    md.context.set_context(max_call_depth=20000)


def convert_torch_to_mindspore(ckpt_path, model_coarse, model_fine):
    """
    Convert a torch NeRF checkpoint into the mindspore models in place.

    Args:
        ckpt_path (str): Path of the torch checkpoint file.
        model_coarse (Cell): The coarse mindspore MLP to load into, or None.
        model_fine (Cell): The fine mindspore MLP to load into, or None.

    Returns:
        Tuple of (model_coarse, model_fine, start_iter): the updated models
        and the iteration number to resume from.

    Raises:
        ValueError: If `ckpt_path` does not exist.
    """
    if not os.path.exists(ckpt_path):
        # Include the offending path so the failure is actionable.
        raise ValueError("torch model not found: {}".format(ckpt_path))
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    ckpt = torch.load(ckpt_path, map_location='cpu')

    # Resume one step after the recorded global step.
    start_iter = int(ckpt["global_step"]) + 1

    if 'network_fn_state_dict' in ckpt and model_coarse is not None:
        # First load the coarse network; torch and mindspore parameter names
        # are assumed to match one-to-one here — confirm against the models.
        coarse_params = ckpt['network_fn_state_dict']
        md_coarse_params = model_coarse.parameters_dict()
        for key in md_coarse_params.keys():
            update_torch_to_ms(coarse_params, md_coarse_params, key, key)

    if 'network_fine_state_dict' in ckpt and model_fine is not None:
        # Then load the fine network the same way.
        fine_params = ckpt['network_fine_state_dict']
        md_fine_params = model_fine.parameters_dict()
        for key in md_fine_params.keys():
            update_torch_to_ms(fine_params, md_fine_params, key, key)

    return model_coarse, model_fine, start_iter

def update_torch_to_ms(torch_param_dict, ms_param_dict, torch_key, ms_key):
    """Updates mindspore param's data from torch param's data.

    Args:
        torch_param_dict (dict): Torch state dict mapping names to tensors.
        ms_param_dict (dict): Mindspore parameter dict to update in place.
        torch_key (str): Key of the source tensor in `torch_param_dict`.
        ms_key (str): Key of the target parameter in `ms_param_dict`.

    Raises:
        KeyError: If `torch_key` is missing from `torch_param_dict`.
    """
    # `assert` is stripped under `python -O`; raise explicitly instead so the
    # check survives optimized runs.
    if torch_key not in torch_param_dict:
        raise KeyError("Parameter '{}' not found in the torch state dict.".format(torch_key))
    value = torch_param_dict[torch_key].cpu().numpy()
    value = Parameter(Tensor(value), name=ms_key)
    _update_param(ms_param_dict[ms_key], value)

def _special_process_par(par, new_par):
    """
    Processes the special condition.
    Like (12,2048,1,1)->(12,2048), this case is caused by GE 4 dimensions tensor.
    """
    par_shape_len = len(par.data.shape)
    new_par_shape_len = len(new_par.data.shape)
    delta_len = new_par_shape_len - par_shape_len
    delta_i = 0
    for delta_i in range(delta_len):
        if new_par.data.shape[par_shape_len + delta_i] != 1:
            break
    if delta_i == delta_len - 1:
        new_val = new_par.data.asnumpy()
        new_val = new_val.reshape(par.data.shape)
        par.set_data(Tensor(new_val, par.data.dtype))
        return True
    return False

def _update_param(param, new_param):
    """Updates param's data from new_param's data.

    Args:
        param (Parameter): The net parameter to update in place.
        new_param (Parameter): The parameter carrying the new data.

    Raises:
        RuntimeError: If dtypes, shapes, or data kinds are incompatible.
    """
    if isinstance(param.data, Tensor) and isinstance(new_param.data, Tensor):
        if param.data.dtype != new_param.data.dtype:
            # print("... %s.", name) printed a literal "%s"; use an f-string
            # so the parameter name is actually interpolated.
            print(f"Failed to combine the net and the parameters for param {param.name}.")
            msg = ("Net parameters {} type({}) different from parameter_dict's({})"
                   .format(param.name, param.data.dtype, new_param.data.dtype))
            raise RuntimeError(msg)

        if param.data.shape != new_param.data.shape:
            # Shapes differ: allow only the trailing-ones squeeze case.
            if not _special_process_par(param, new_param):
                print(f"Failed to combine the net and the parameters for param {param.name}.")
                msg = ("Net parameters {} shape({}) different from parameter_dict's({})"
                       .format(param.name, param.data.shape, new_param.data.shape))
                raise RuntimeError(msg)
            return

        param.set_data(new_param.data)
        return

    if isinstance(param.data, Tensor) and not isinstance(new_param.data, Tensor):
        # Scalar source: the target must be scalar-shaped too.
        if param.data.shape != (1,) and param.data.shape != ():
            print(f"Failed to combine the net and the parameters for param {param.name}.")
            msg = ("Net parameters {} shape({}) is not (1,), inconsistent with parameter_dict's(scalar)."
                   .format(param.name, param.data.shape))
            raise RuntimeError(msg)
        # `initializer` was referenced without ever being imported (NameError
        # on this branch); import it locally from the already-used mindspore
        # package.
        from mindspore.common.initializer import initializer
        param.set_data(initializer(new_param.data, param.data.shape, param.data.dtype))

    elif isinstance(new_param.data, Tensor) and not isinstance(param.data, Tensor):
        print(f"Failed to combine the net and the parameters for param {param.name}.")
        msg = ("Net parameters {} type({}) different from parameter_dict's({})"
               .format(param.name, type(param.data), type(new_param.data)))
        raise RuntimeError(msg)

    else:
        param.set_data(type(param.data)(new_param.data))
