import os, sys
import math
from functools import reduce
from collections import OrderedDict
from typing import *

import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common import initializer as init

from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops.operations import _inner_ops as inner
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import initializer, Initializer
from mindspore.common.tensor import Tensor
from mindspore.common._decorator import deprecated
from mindspore.ops.primitive import constexpr
import mindspore.context as context
from mindspore._checkparam import Rel
from mindspore._checkparam import Validator as validator
from mindspore._extends import cell_attr_register
from mindspore.communication.management import get_group_size, get_rank
from mindspore.communication import management
from mindspore.common import dtype as mstype
from mindspore.parallel._utils import _is_in_auto_parallel_mode
# from mindspore.nn.cell import Cell


def check_divisible(number, divide):
    """Return `divide` if it evenly divides `number`, otherwise fall back to 1.

    Used to pick a safe group count for GroupNorm: if the channel count is
    not divisible by the requested number of groups, degrade to a single group.
    """
    return divide if number % divide == 0 else 1


class MaxPool3d(nn.Cell):
    """3D max pooling layer: a thin Cell wrapper around the ``P.MaxPool3D`` primitive.

    Args:
        kernel_size: size of the pooling window.
        stride: stride of the pooling window (forwarded as ``strides``).
        pad_mode: padding mode forwarded to the primitive.
        pad_list: explicit padding amounts (relevant when padding is explicit).
        ceil_mode: whether to use ceil when computing output size; ``None``
            defers to the primitive's default.
        data_format: input layout, ``"NCDHW"``.
    """

    def __init__(self, kernel_size=1, stride=1, pad_mode="valid", pad_list=0, ceil_mode=None, data_format="NCDHW",
                 auto_prefix=True, flags=None):
        """Initialize MaxPool3d."""  # fixed: docstring previously said "MaxPool2d"
        super().__init__(auto_prefix=auto_prefix, flags=flags)
        # Keep the configuration on the instance for introspection/repr.
        self.kernel_size = kernel_size
        self.stride = stride
        self.pad_mode = pad_mode
        self.pad_list = pad_list
        self.ceil_mode = ceil_mode
        self.data_format = data_format
        self.max_pool = P.MaxPool3D(kernel_size=self.kernel_size,
                                    strides=self.stride,
                                    pad_mode=self.pad_mode,
                                    pad_list=self.pad_list,
                                    ceil_mode=self.ceil_mode,
                                    data_format=self.data_format)

    def construct(self, x):
        """Apply 3D max pooling to `x` (expected NCDHW — TODO confirm with callers)."""
        out = self.max_pool(x)
        return out


class AvgPool3d(nn.Cell):
    """3D average pooling layer: a thin Cell wrapper around the ``P.AvgPool3D`` primitive.

    All constructor arguments are stored on the instance and forwarded to the
    underlying primitive unchanged.
    """

    def __init__(self, kernel_size=1, stride=1, pad_mode="valid", pad=0, ceil_mode=False,
                 count_include_pad: bool = True, divisor_override: float = 0, data_format: str = "NCDHW",
                 auto_prefix=True, flags=None):
        """Initialize AvgPool3d."""
        super().__init__(auto_prefix=auto_prefix, flags=flags)
        # Keep the configuration on the instance for introspection/repr.
        self.kernel_size = kernel_size
        self.stride = stride
        self.pad_mode = pad_mode
        self.pad = pad
        self.ceil_mode = ceil_mode
        self.count_include_pad = count_include_pad
        self.divisor_override = divisor_override
        self.data_format = data_format
        # Build the underlying primitive from the same configuration
        # (`stride` maps to the primitive's `strides` argument).
        self.avgpool = P.AvgPool3D(
            kernel_size=kernel_size,
            strides=stride,
            pad_mode=pad_mode,
            pad=pad,
            ceil_mode=ceil_mode,
            count_include_pad=count_include_pad,
            divisor_override=divisor_override,
            data_format=data_format,
        )

    def construct(self, x):
        """Apply 3D average pooling to `x`."""
        return self.avgpool(x)


class GroupNorm3d(nn.Cell):
    """Group normalization for 5D inputs.

    This version avoids transpose ops to save memory and time: the input is
    reshaped to (batch, num_groups, -1), normalized along the last axis, then
    reshaped back before the affine transform.

    Args:
        num_groups (int): number of groups to split the channels into; must
            divide `num_channels` evenly.
        num_channels (int): number of input channels.
        eps (float): small constant added to the variance for numerical stability.
        affine (bool): whether `gamma`/`beta` are trainable.
        gamma_init: initializer for the per-channel scale.
        beta_init: initializer for the per-channel shift.
    """

    def __init__(self, num_groups, num_channels, eps=1e-05, affine=True, gamma_init='ones', beta_init='zeros'):
        """Initialize GroupNorm."""
        super(GroupNorm3d, self).__init__()
        self.num_groups = validator.check_positive_int(num_groups, "num_groups", self.cls_name)
        self.num_channels = validator.check_positive_int(num_channels, "num_channels", self.cls_name)
        if num_channels % num_groups != 0:
            raise ValueError(f"For '{self.cls_name}', the 'num_channels' must be divided by 'num_groups', "
                             f"but got 'num_channels': {num_channels}, 'num_groups': {num_groups}.")
        self.eps = validator.check_value_type('eps', eps, (float,), type(self).__name__)
        self.affine = validator.check_bool(affine, arg_name="affine", prim_name=self.cls_name)

        # Per-channel affine parameters; frozen (requires_grad=False) when affine=False.
        self.gamma = Parameter(initializer(
            gamma_init, num_channels), name="gamma", requires_grad=affine)
        self.beta = Parameter(initializer(
            beta_init, num_channels), name="beta", requires_grad=affine)
        self.shape = F.shape
        self.reshape = F.reshape
        self.reduce_mean = P.ReduceMean(keep_dims=True)
        self.square = F.square
        self.reduce_sum = P.ReduceSum(keep_dims=True)
        self.sqrt = P.Sqrt()

    def _cal_output(self, x):
        """calculate groupnorm output"""
        # Expects a 5D NCDHW tensor; the unpack below fixes the rank to 5.
        batch, channel, depth, height, width = self.shape(x)
        # _channel_check(channel, self.num_channels, self.cls_name)
        # Collapse (C, D, H, W) into num_groups flat slices so mean/var are
        # computed per (batch, group) along axis 2 without any transpose.
        x = self.reshape(x, (batch, self.num_groups, -1))
        mean = self.reduce_mean(x, 2)
        # Biased variance: sum of squared deviations over the number of
        # elements per group (C*D*H*W / num_groups).
        var = self.reduce_sum(self.square(x - mean), 2) / (channel * height * width * depth / self.num_groups)
        std = self.sqrt(var + self.eps)
        x = (x - mean) / std
        x = self.reshape(x, (batch, channel, depth, height, width))
        # gamma/beta are per-channel; reshape to (C,1,1,1) so they broadcast
        # over the batch and spatial dimensions.
        output = x * self.reshape(self.gamma, (-1, 1, 1, 1)) + self.reshape(self.beta, (-1, 1, 1, 1))
        return output

    def construct(self, x):
        """Normalize `x` group-wise and apply the per-channel affine transform."""
        # _shape_check(self.shape(x), self.cls_name)
        # _check_dtype(x.dtype, [mstype.float16, mstype.float32], "input", self.cls_name)
        output = self._cal_output(x)
        return output

    def extend_repr(self):
        """Extra repr string shown for this cell."""
        return 'num_groups={}, num_channels={}'.format(self.num_groups, self.num_channels)


class ForwardConv(nn.Cell):
    """A module containing three convolutional layers, each followed by GroupNorm and ReLU activation.

    Args:
        in_channels: number of channels of the input tensor.
        out_channels: number of channels produced by each conv layer.
        num_groups: requested GroupNorm group count; reduced to 1 when it does
            not divide the channel count (see `check_divisible`).
        kernel: convolution kernel size (pad_mode='same' keeps spatial dims).
    """

    def __init__(self, in_channels, out_channels, num_groups=4, kernel=3):
        super(ForwardConv, self).__init__()
        self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=kernel, pad_mode='same')
        # BUGFIX: gn1 normalizes conv1's OUTPUT (out_channels channels), so the
        # group count must be derived from out_channels, not in_channels —
        # otherwise GroupNorm3d can raise ValueError when num_groups divides
        # in_channels but not out_channels. Matches gn2/gn3 below.
        self.gn1 = GroupNorm3d(check_divisible(out_channels, num_groups), out_channels)
        self.relu1 = nn.ReLU()

        self.conv2 = nn.Conv3d(out_channels, out_channels, kernel_size=kernel, pad_mode='same')
        self.gn2 = GroupNorm3d(check_divisible(out_channels, num_groups), out_channels)
        self.relu2 = nn.ReLU()

        self.conv3 = nn.Conv3d(out_channels, out_channels, kernel_size=kernel, pad_mode='same')
        self.gn3 = GroupNorm3d(check_divisible(out_channels, num_groups), out_channels)
        self.relu3 = nn.ReLU()

    def construct(self, features):
        """Apply three conv -> GroupNorm -> ReLU stages to `features`."""
        x = self.conv1(features)
        x = self.gn1(x)
        x = self.relu1(x)

        x = self.conv2(x)
        x = self.gn2(x)
        x = self.relu2(x)

        x = self.conv3(x)
        x = self.gn3(x)
        x = self.relu3(x)
        return x
