"""SynergyNet"""

import mindspore.numpy as np
import scipy.io as sio
import mindspore
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor

from mind3d.models.blocks.i2p import I2P
from mind3d.models.blocks.synergynet_mlp import ForwardProcess, ReverseProcess
from mindspore import context, Tensor, load_checkpoint, load_param_into_net

# Import of the shared 3DMM data parameters (mean/std, basis matrices)
from mind3d.utils.synergynet_util import ParamsPack

__all__ = ["SynergyNet"]

# Module-level 3DMM statistics pack, loaded once at import time.
# Presumably bundles the mean/std normalization vectors and the shape/expression
# basis matrices (param_mean, param_std, u_base, w_shp_base, w_exp_base, ...)
# consumed by SynergyNet.reconstruct_vertex_62 — confirm against ParamsPack.
param_pack = ParamsPack()


def parse_param_62(param):
    """Split a batch of 62-dim 3DMM parameter vectors into its components.

    Args:
        param (Tensor): Tensor of shape ``(N, 62)`` laid out as
            ``[12 pose | 40 shape | 10 expression]``.

    Returns:
        Tuple of Tensors:
            - rotation: ``(N, 3, 3)`` — left 3x3 of the 3x4 pose matrix.
            - translation: ``(N, 3, 1)`` — last column of the pose matrix.
            - shape_coeff: ``(N, 40, 1)`` — shape (identity) coefficients.
            - exp_coeff: ``(N, 10, 1)`` — expression coefficients.
    """
    reshape = ops.Reshape()
    # The first 12 entries form a 3x4 camera/pose matrix [R | t].
    pose = reshape(param[:, :12], (-1, 3, 4))
    rotation = pose[:, :, :3]
    translation = reshape(pose[:, :, -1], (-1, 3, 1))
    shape_coeff = reshape(param[:, 12:52], (-1, 40, 1))
    exp_coeff = reshape(param[:, 52:62], (-1, 10, 1))
    return rotation, translation, shape_coeff, exp_coeff


class SynergyNet(nn.Cell):
    """
    Constructs the SynergyNet architecture from
    `Synergy between 3DMM and 3D Landmarks for Accurate 3D Facial Geometry
    <https://arxiv.org/abs/2110.09772>`_.

    Args:
        img_size (int): The input images's size. Default: 120.
        mode (str): "train" or "test". Default: "train".

    Inputs:
        - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.

    Outputs:
        Tensor of shape :math:`(N, CLASSES_{out})`.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> from mind3d.models.synergynet import SynergyNet
        >>> model = SynergyNet(img_size, mode)


    About SynergyNet:

    The SynergyNet pipeline contains two stages. The first stage includes a preliminary 3DMM regression from images
    and a multi-attribute feature aggregation (MAFA) for landmark refinement. The second stage contains a landmark-to
    3DMM regressor to reveal the embedded facial geometry in sparse landmarks.

    Citation:

    .. code-block::

        @inproceedings{9665823,
         author={Wu, Cho-Ying and Xu, Qiangeng and Neumann, Ulrich},
         booktitle={2021 International Conference on 3D Vision (3DV)},
         title={Synergy between 3DMM and 3D Landmarks for Accurate 3D Facial Geometry},
         year={2021},
         pages={453-463},
         doi={10.1109/3DV53792.2021.00055}
         }
    """
    def __init__(self, img_size=120, mode="train"):
        super(SynergyNet, self).__init__()

        self.mode = mode
        self.img_size = img_size
        # Stage 1: image-to-parameter regression backbone.
        self.I2P = I2P()
        # MAFA landmark refinement (forward direction, 68 landmarks).
        self.forwardDirection = ForwardProcess(68)
        # Stage 2: landmark-to-3DMM regressor (reverse direction).
        self.reverseDirection = ReverseProcess(68)

        # NOTE(review): self.matmul is never used below; kept so the public
        # attribute set stays backward-compatible.
        self.matmul = ops.MatMul()
        self.mul = ops.Mul()
        self.transpose = ops.Transpose()

        # The 3DMM statistics never change between calls, so build the
        # constant tensors once here instead of on every
        # reconstruct_vertex_62 invocation.
        self.param_mean = Tensor(param_pack.param_mean, dtype=mindspore.float32)
        self.param_std = Tensor(param_pack.param_std, dtype=mindspore.float32)
        self.u_base = Tensor(param_pack.u_base, dtype=mindspore.float32)
        self.w_shp_base = Tensor(param_pack.w_shp_base, dtype=mindspore.float32)
        self.w_exp_base = Tensor(param_pack.w_exp_base, dtype=mindspore.float32)

    def reconstruct_vertex_62(self, param, lmk_pts=68):
        """Decode normalized 62-dim 3DMM parameters into sparse 3D landmarks.

        Args:
            param (Tensor): Normalized parameters of shape ``(N, 62)``
                (``[12 pose | 40 shape | 10 expression]``).
            lmk_pts (int): Number of landmark points. Default: 68.

        Returns:
            Tensor of shape ``(N, 3, lmk_pts)`` — landmark coordinates in
            image space (y axis flipped to image convention).
        """
        # Undo training-time normalization: param * std + mean.
        param_ = self.mul(param, self.param_std[:62]) + self.param_mean[:62]
        p, offset, alpha_shp, alpha_exp = parse_param_62(param_)

        # Sparse reconstruction over the landmark sub-basis:
        # mean face + shape deformation + expression deformation.
        vertex = self.u_base + np.matmul(self.w_shp_base, alpha_shp) + np.matmul(self.w_exp_base, alpha_exp)
        vertex = vertex.view(-1, lmk_pts, 3)
        vertex = self.transpose(vertex, (0, 2, 1))
        # Apply rigid pose: rotation/scale then translation.
        vertex = np.matmul(p, vertex) + offset

        # Transform to image coordinate space (flip the y axis).
        vertex[:, 1, :] = param_pack.std_size + 1 - vertex[:, 1, :]

        return vertex

    def construct(self, x, target=None):
        """Run the two-stage SynergyNet pipeline.

        In "test" mode only the stage-1 3DMM regression is returned; in
        training mode the full set of intermediate outputs is returned for
        the multi-task losses.
        """
        if self.mode == "test":
            return self.I2P.construct_test(x)

        _3D_attr, _3D_attr_GT, avgpool = self.I2P(x, target)

        # Landmarks decoded from the predicted and ground-truth parameters.
        vertex_lmk1 = self.reconstruct_vertex_62(_3D_attr)
        vertex_GT_lmk = self.reconstruct_vertex_62(_3D_attr_GT)

        # MAFA: predict a per-landmark residual from the global image
        # feature plus the shape/expression coefficients.
        point_residual = self.forwardDirection(vertex_lmk1, avgpool, _3D_attr[:, 12:52], _3D_attr[:, 52:62])
        vertex_lmk2 = vertex_lmk1 + 0.05 * point_residual

        # Stage 2: regress 3DMM parameters back from the refined landmarks.
        _3D_attr_S2 = self.reverseDirection(vertex_lmk2)

        return _3D_attr, _3D_attr_GT, vertex_lmk1, vertex_GT_lmk, vertex_lmk2, _3D_attr_S2

