import mindspore
import mindspore.nn
from nequip.data import AtomicDataDict
from nequip.nn import GraphModuleMixin
from typing import Optional
import mindspore.ops as ops
import mindspore.numpy as np
from mindmaterial.e3.nn import Scatter

class SaveForOutput(mindspore.nn.Cell, GraphModuleMixin):
    """Copy a field and disconnect it from the autograd graph.

    Copy a field and disconnect it from the autograd graph, storing it under
    another key for inspection as part of the model's output.

    Args:
        field: the field to save
        out_field: the key to put the saved copy in
        irreps_in: optional irreps mapping forwarded to ``_init_irreps``
    """

    field: str
    out_field: str

    def __init__(self, field: str, out_field: str, irreps_in=None):
        super().__init__()
        self._init_irreps(irreps_in=irreps_in)
        # The saved copy carries the same irreps as the field it mirrors.
        self.irreps_out[out_field] = self.irreps_in[field]
        self.field = field
        self.out_field = out_field

    def construct(self, data: AtomicDataDict.Type) -> AtomicDataDict.Type:
        # Bug fix: `.detach().clone()` are torch Tensor methods; MindSpore
        # tensors do not provide them.  `ops.stop_gradient` is the MindSpore
        # way to cut the value out of the autograd graph.
        data[self.out_field] = ops.stop_gradient(data[self.field])
        return data


def _broadcast(src: mindspore.Tensor, other: mindspore.Tensor, dim: int):
    """Broadcast ``src`` to be shape-compatible with ``other`` along ``dim``.

    Port of ``torch_scatter``'s ``broadcast`` helper: a 1-D ``src`` is first
    aligned so its single axis sits at position ``dim`` of ``other``, then
    trailing singleton axes are appended until the ranks match, and finally
    ``src`` is expanded to ``other``'s full shape.

    Args:
        src: tensor to broadcast (typically 1-D, e.g. per-group counts).
        other: tensor whose rank and shape ``src`` must match.
        dim: axis of ``other`` that ``src``'s data runs along (may be negative).

    Returns:
        ``src`` viewed/broadcast to ``other.shape``.
    """
    if dim < 0:
        dim = other.dim() + dim
    if src.dim() == 1:
        # Move the lone axis of src to position `dim`.
        for _ in range(0, dim):
            src = src.unsqueeze(0)
    # Append singleton axes until src and other have the same rank.
    for _ in range(src.dim(), other.dim()):
        src = src.unsqueeze(-1)

    # Bug fix: the original appended ONE MORE trailing axis here
    # (ops.expand_dims(src, -1)), leaving src with rank other.dim() + 1 and
    # never expanding to other's shape.  The torch_scatter reference does
    # `src.expand(other.size())`, whose MindSpore equivalent is broadcast_to.
    src = ops.broadcast_to(src, other.shape)
    return src


def scatter(
    src: mindspore.Tensor,
    index: mindspore.Tensor,
    dim: int = -1,
    out: Optional[mindspore.Tensor] = None,
    dim_size: Optional[int] = None,
    reduce: str = "sum",
) -> mindspore.Tensor:
    """Scatter-add ``src`` into ``out`` along ``dim`` as directed by ``index``.

    Only ``reduce == "sum"`` is supported.  When ``out`` is None, a zero
    tensor is allocated whose scattered dimension is ``dim_size`` if given,
    ``0`` for an empty ``index``, and ``index.max() + 1`` otherwise.

    Args:
        src: source values to reduce.
        index: destination indices along ``dim``.
        dim: axis to scatter along (may be negative).
        out: optional pre-allocated destination tensor.
        dim_size: optional size of the scattered dimension of a fresh ``out``.
        reduce: reduction mode; only ``"sum"`` is implemented.

    Returns:
        The reduced tensor.
    """
    assert reduce == "sum"  # for now only sum; TODO: do we need _broadcast? what does "out" mean here?
    if out is None:
        size = list(src.shape)
        if dim_size is not None:
            size[dim] = dim_size
        elif index.numel() == 0:
            size[dim] = 0
        else:
            # NOTE(review): assumes ops.max returns (argmax, max_value), so
            # [1] picks the maximum index — confirm against the MindSpore
            # version in use (ArgMaxWithValue ordering).
            size[dim] = int(ops.max(index)[1]) + 1
        out = ops.Zeros()(tuple(size), src.dtype)
    # The original duplicated this return in both branches of an if/else;
    # the fresh-out and caller-supplied-out paths are the same call.
    return Scatter('add')(src, index, out)

def scatter_std(
    src: mindspore.Tensor,
    index: mindspore.Tensor,
    dim: int = -1,
    out: Optional[mindspore.Tensor] = None,
    dim_size: Optional[int] = None,
    unbiased: bool = True,
) -> mindspore.Tensor:
    """Standard deviation of ``src`` grouped by ``index`` along ``dim``.

    Port of ``torch_scatter.scatter_std``: per-group mean via scatter-sum /
    count, then sqrt of the scatter-summed squared deviations divided by the
    (optionally Bessel-corrected) count.

    Args:
        src: source values.
        index: group index of each element along ``dim``.
        dim: axis to reduce over (may be negative).
        out: optional pre-allocated destination; fixes ``dim_size``.
        dim_size: optional number of groups.
        unbiased: if True, apply Bessel's correction (divide by count - 1).

    Returns:
        Per-group standard deviations.
    """
    if out is not None:
        dim_size = list(out.shape)[dim]

    if dim < 0:
        dim = src.dim() + dim

    # If `index` has fewer dims than `dim` addresses, count along its last axis.
    count_dim = dim
    if index.dim() <= dim:
        count_dim = index.dim() - 1

    # Per-group element counts.
    ones = ops.Ones()(index.shape, src.dtype)
    count = scatter(ones, index, count_dim, dim_size=dim_size)

    # NOTE(review): assumes src has exactly one more trailing dim than index
    # (e.g. per-node feature vectors) — confirm against callers.
    index = ops.expand_dims(index, -1)
    tmp = scatter(src, index, dim, dim_size=dim_size)
    count = _broadcast(count, tmp, dim)

    # Floor counts at 1 so empty groups do not divide by zero.
    count = ops.clip_by_value(count, clip_value_min=mindspore.Tensor(1, mindspore.int32))
    mean = ops.Div()(tmp, count)

    var = src - ops.GatherD()(mean, dim, index)  # TODO: double check needed
    var = var * var
    out = scatter(var, index, dim, out, dim_size)

    if unbiased:
        # Bessel's correction: divide by (count - 1), floored at 1.
        # Bug fix: the original chained torch's in-place `.clamp_(1)` onto the
        # Sub result — MindSpore tensors have no `.clamp_`, so that raised
        # AttributeError; the clip_by_value below already enforces the floor.
        count = ops.Sub()(count, 1)
        count = ops.clip_by_value(count, clip_value_min=mindspore.Tensor(1, mindspore.int32))
    out = ops.Div()(out, count + 1e-6)  # epsilon guards all-equal groups
    out = ops.Sqrt()(out)

    return out


def scatter_mean(
    src: mindspore.Tensor,
    index: mindspore.Tensor,
    dim: int = -1,
    out: Optional[mindspore.Tensor] = None,
    dim_size: Optional[int] = None,
) -> mindspore.Tensor:
    """Mean of ``src`` grouped by ``index`` along ``dim`` (scatter-sum / count).

    Args:
        src: source values.
        index: group index of each element along ``dim``.
        dim: axis to reduce over (may be negative).
        out: optional pre-allocated destination for the sum.
        dim_size: optional number of groups.

    Returns:
        Per-group means (floor-divided for integer ``src``).
    """
    out = scatter(src, index, dim, out, dim_size)
    # Bug fix: the torch_scatter reference uses out.size(dim); the original
    # stored the whole shape tuple, which scatter() would then assign into
    # size[dim] when allocating the count tensor.
    dim_size = out.shape[dim]

    index_dim = dim
    if index_dim < 0:
        index_dim = index_dim + src.dim()
    if index.dim() <= index_dim:
        index_dim = index.dim() - 1

    # Bug fix: `index.shap` -> `index.shape` (AttributeError in the original).
    ones = ops.Ones()(index.shape, src.dtype)
    count = scatter(ones, index, index_dim, None, dim_size)
    count[count < 1] = 1  # empty groups: avoid division by zero
    count = _broadcast(count, out, dim)
    if out.is_floating_point():
        out = np.true_divide(out, count)
    else:
        out = np.floor_divide(out, count)
    return out
