import numpy as np
import datetime
import logging
from collections import defaultdict
import mindspore
from mindspore import nn, ops, Tensor, numpy, Parameter
from mindspore import numpy as mnp
from mindspore.common import dtype as mstype
from .heads.sim_head import CenterHeadV2
from mind3d.utils.sim_center_utils import get_paddings_indicator


class PFNLayer(nn.Cell):
    """Pillar Feature Net layer.

    The Pillar Feature Net may be composed of a series of these layers, but the
    PointPillars paper only used a single PFNLayer. This layer performs a
    similar role to second.pytorch.voxelnet.VFELayer.

    Args:
        in_channels (int): Input channel count.
        out_channels (int): Output channel count. Halved internally when this
            is not the last layer, because the pillar-wise max feature is
            concatenated back onto the per-point features.
        norm_cfg: Kept for interface compatibility; BatchNorm2d is always
            applied after the linear projection.
        last_layer (bool): If True, no concatenation of features is performed
            and only the pillar-wise max feature is returned.

    Returns:
        Tensor, output tensor.
    """

    def __init__(self, in_channels, out_channels, norm_cfg=None, last_layer=False):
        super(PFNLayer, self).__init__()

        self.last_vfe = last_layer
        if not self.last_vfe:
            # Halve here: the tiled max feature is concatenated back on,
            # restoring the requested out_channels.
            out_channels = out_channels // 2
        self.units = out_channels

        self.linear = nn.Dense(in_channels, self.units, has_bias=False)
        # BUG FIX: the norm layer used to be created only when
        # norm_cfg == 'None' (string comparison), yet construct() always calls
        # self.norm, so any other norm_cfg value raised AttributeError at
        # forward time. Create it unconditionally; behavior is unchanged for
        # the previously-working norm_cfg == 'None' configuration.
        self.norm = nn.BatchNorm2d(self.units)  # , eps=1e-3, momentum=0.99

        self.relu = ops.ReLU()  # hoisted: avoid constructing a new op per forward
        self.tile = ops.Tile()
        self.concat = ops.Concat(axis=2)
        self.expand_dims = ops.ExpandDims()
        self.argmax_w_value = ops.ArgMaxWithValue(axis=1, keep_dims=True)

    def construct(self, inputs):
        """Forward graph: linear -> BatchNorm -> ReLU -> max over points."""
        x = self.linear(inputs)
        # BatchNorm2d expects NCHW; move the feature axis into the channel slot.
        x = self.expand_dims(x, 3)
        x = x.transpose((0, 2, 1, 3))
        x = self.norm(x)
        x = x.transpose((0, 2, 1, 3)).squeeze(axis=3)
        x = self.relu(x)
        # Max over the points-per-pillar axis; index [1] selects the values
        # (index [0] would be the argmax indices).
        x_max = self.argmax_w_value(x)[1]
        if self.last_vfe:
            return x_max
        # Broadcast the pillar-level max back to every point and concatenate
        # it with the per-point features.
        x_repeat = self.tile(x_max, (1, inputs.shape[1], 1))
        x_concatenated = self.concat([x, x_repeat])
        return x_concatenated


class PillarFeatureNet(nn.Cell):
    """Pillar feature net.

    Prepares the decorated pillar features and performs the forward pass
    through the PFNLayers. This net performs a similar role to SECOND's
    second.pytorch.voxelnet.VoxelFeatureExtractor.

    :param num_input_features: <int>. Number of raw point features, either
        x, y, z or x, y, z, r. Five decoration channels are added internally
        (3 cluster-offset + 2 pillar-center-offset features).
    :param norm_cfg: normalization config forwarded to each PFNLayer.
    :param num_filters: (<int>: N). Number of features in each of the N PFNLayers.
    :param with_distance: <bool>. Whether to include each point's Euclidean
        distance to the origin as an extra feature.
    :param voxel_size: (<float>: 3). Size of voxels, only x and y size are used.
    :param pc_range: (<float>: 6). Point cloud range, only x and y min are used.
    """

    def __init__(
            self,
            num_input_features,
            norm_cfg,
            num_filters,
            with_distance,
            voxel_size,
            pc_range
    ):
        super().__init__()
        self.name = "PillarFeatureNet"
        assert len(num_filters) > 0
        # +5 decoration channels: f_cluster (3) + f_center (2), see construct().
        num_input_features += 5

        if with_distance:
            num_input_features += 1
        self._with_distance = with_distance

        # Create PillarFeatureNet layers; channel i feeds channel i+1, and only
        # the final PFNLayer is marked last (returns pooled features only).
        num_filters = [num_input_features] + list(num_filters)
        pfn_layers = []

        for i in range(len(num_filters) - 1):
            in_filters = num_filters[i]
            out_filters = num_filters[i + 1]
            if i < len(num_filters) - 2:
                last_layer = False
            else:
                last_layer = True

            pfn_layers.append(
                PFNLayer(in_filters, out_filters, norm_cfg=norm_cfg, last_layer=last_layer)
            )
        self.pfn_layers = nn.SequentialCell(pfn_layers)

        # Need pillar (voxel) size and x/y offset in order to calculate the
        # offset of each point from its pillar's center.
        self.vx = voxel_size[0]
        self.vy = voxel_size[1]
        self.x_offset = self.vx / 2 + pc_range[0]
        self.y_offset = self.vy / 2 + pc_range[1]
        self.expand_dims = ops.ExpandDims()

    def construct(self, features, num_points, coors):
        """Forward graph.

        Decorates the raw per-point features with cluster and pillar-center
        offsets, zeroes out padded points, and runs the PFN layers.
        NOTE(review): `features` appears to be (num_pillars, max_points, C)
        and `coors` (num_pillars, 4) with layout (batch, z, y, x) — confirm
        against the voxelization producer.
        """

        # Mean of the real points in each pillar; Maximum guards against
        # division by zero for empty pillars.
        points_mean = (features[:, :, :3].sum(axis=1, keepdims=True) /
                       ops.Maximum()(num_points, 1).view(-1, 1, 1))
        # Offset of each point from its pillar's points-mean (cluster center).
        f_cluster = features[:, :, :3] - points_mean

        # Find distance of x and y from the pillar's geometric center
        # (grid index * voxel size + half-voxel offset).
        f_center = ops.ZerosLike()(features[:, :, :2])
        f_center[:, :, 0] = features[:, :, 0] - (
                self.expand_dims(coors[:, 3].astype(mstype.float32), 1) * self.vx + self.x_offset)
        f_center[:, :, 1] = features[:, :, 1] - (
                self.expand_dims(coors[:, 2].astype(mstype.float32), 1) * self.vy + self.y_offset)

        # Combine feature decorations along the channel axis.
        features_ls = [features, f_cluster, f_center]
        if self._with_distance:
            # L2 norm of (x, y, z) per point, kept as a trailing channel.
            points_dist = mnp.norm(features[:, :, :3], 2, 2, keepdims=True)
            features_ls.append(points_dist)
        features = ops.Concat(axis=-1)(features_ls)

        # The feature decorations were calculated without regard to whether
        # a pillar slot was padding. Mask ensures empty slots stay zero.
        voxel_count = features.shape[1]
        mask = get_paddings_indicator(num_points, voxel_count, axis=0)
        mask = self.expand_dims(mask, -1).astype(features.dtype)
        features *= mask

        # Forward pass through the stacked PFNLayers.
        features = self.pfn_layers(features)
        # Drop the singleton points axis left by the final (pooling) layer.
        return features.squeeze()


class PointPillarsScatter(nn.Cell):
    """PointPillars scatter.

    Converts learned features from a dense tensor into a sparse pseudo image.
    This replaces SECOND's second.pytorch.voxelnet.SparseMiddleExtractor.

    :param num_input_features: <int>. Number of input feature channels.
    :param norm_cfg: unused; kept for interface compatibility with the config.
    :param name: <str>. Name of this cell.
    """

    def __init__(self, num_input_features, norm_cfg, name="PointPillarsScatter", **kwargs):
        super().__init__()
        # BUG FIX: the `name` argument used to be ignored in favour of a
        # hard-coded string; honour it (the default preserves old behavior).
        self.name = name
        self.nchannels = num_input_features
        self.zero = ops.Zeros()
        # Grid extents; placeholders that are overwritten from input_shape
        # on every construct() call.
        self.ny = Parameter(Tensor(1, mindspore.int32))
        self.nx = Parameter(Tensor(1, mindspore.int32))

    def construct(self, voxel_features, coords, batch_size, input_shape):
        """Scatter per-pillar features onto a (C, ny, nx) canvas per sample."""
        self.ny = input_shape[0]
        self.nx = input_shape[1]
        # Batch_canvas will be the final output.

        # z coordinate is not used; coords column 0 holds the batch index.
        batch_canvas = []
        for batch_itt in range(batch_size):  # [bs, v, p, 64]
            # Flat canvas (C, ny*nx) to scatter this sample's pillars into.
            canvas = numpy.zeros((int(self.nchannels), int(self.nx * self.ny)), mindspore.float32)
            # Row indices of the voxels belonging to this batch element.
            batch_mask = (coords[:, 0] == batch_itt) * 1
            batch_mask = batch_mask.nonzero().squeeze(axis=1)
            this_coords = coords.gather(batch_mask, axis=0)
            # Flattened spatial index: row (y) * nx + column (x).
            indices = (this_coords[:, 2] * self.nx + this_coords[:, 3]).astype('int')
            voxels = voxel_features.gather(batch_mask, axis=0)
            voxels = numpy.transpose(voxels)

            # Now scatter the blob back to the canvas. Index on the transposed
            # view so advanced indexing writes whole feature rows at once.
            canvas = canvas.T
            canvas[indices] = voxels.T
            canvas = canvas.T

            # Append to a list for later stacking.
            batch_canvas.append(canvas)

        # Stack to 3-dim tensor (batch_size, nchannels, nrows*ncols).
        stack = ops.Stack()
        batch_canvas = stack(batch_canvas)

        # Undo the column stacking to the final 4-dim tensor.
        batch_canvas = batch_canvas.view(batch_size, self.nchannels, int(self.ny), int(self.nx))
        return batch_canvas


class RPN(nn.Cell):
    """Region Proposal Network backbone.

    Builds a sequence of downsampling convolution blocks, then upsamples the
    tapped block outputs with transposed (or strided) convolutions and
    concatenates them along the channel axis.

    Args:
        layer_nums (list[int]): number of extra conv layers in each block.
        ds_layer_strides (list[int]): stride of each downsampling block.
        ds_num_filters (list[int]): output channels of each downsampling block.
        us_layer_strides (list[float]): upsample factor of each deblock;
            values < 1 are realized as a strided Conv2d (further downsample).
        us_num_filters (list[int]): output channels of each deblock.
        num_input_features (int): input channel count.
        norm_cfg: unused; kept for interface compatibility.
        name (str): name used for the fallback logger.
        logger (logging.Logger): optional; created from `name` when None.
    """

    def __init__(
            self,
            layer_nums,
            ds_layer_strides,
            ds_num_filters,
            us_layer_strides,
            us_num_filters,
            num_input_features,
            norm_cfg=None,
            name="rpn",
            logger=None,
    ):
        super(RPN, self).__init__()
        self._layer_strides = ds_layer_strides
        self._num_filters = ds_num_filters
        self._layer_nums = layer_nums
        self._upsample_strides = us_layer_strides
        self._num_upsample_filters = us_num_filters
        self._num_input_features = num_input_features

        assert len(self._layer_strides) == len(self._layer_nums)
        assert len(self._num_filters) == len(self._layer_nums)
        assert len(self._num_upsample_filters) == len(self._upsample_strides)

        # Index of the first block whose output is upsampled.
        self._upsample_start_idx = len(self._layer_nums) - len(self._upsample_strides)

        # Sanity check: every deblock must undo exactly the cumulative
        # downsampling applied before it, so all branches end up at the same
        # spatial resolution before concatenation.
        must_equal_list = []
        for i in range(len(self._upsample_strides)):
            must_equal_list.append(
                self._upsample_strides[i]
                / np.prod(self._layer_strides[: i + self._upsample_start_idx + 1])
            )
        for val in must_equal_list:
            assert val == must_equal_list[0]

        in_filters = [self._num_input_features, *self._num_filters[:-1]]
        blocks = nn.CellList([])
        deblocks = nn.CellList([])

        for i, layer_num in enumerate(self._layer_nums):
            block, num_out_filters = self._make_layer(
                in_filters[i],
                self._num_filters[i],
                layer_num,
                stride=self._layer_strides[i],
            )
            blocks.append(block)
            if i - self._upsample_start_idx >= 0:
                stride = (self._upsample_strides[i - self._upsample_start_idx])
                if stride >= 1:
                    # Genuine upsampling via transposed convolution.
                    deblock = nn.SequentialCell(
                        nn.Conv2dTranspose(
                            num_out_filters,
                            self._num_upsample_filters[i - self._upsample_start_idx],
                            stride,
                            pad_mode='same',
                            stride=stride,
                            has_bias=False, weight_init="XavierUniform"),
                        nn.BatchNorm2d(
                            self._num_upsample_filters[i - self._upsample_start_idx],
                            eps=1e-3, momentum=0.99
                        ),
                        nn.ReLU(),
                    )
                else:
                    # Fractional "upsample" stride < 1 means further
                    # downsampling via a strided convolution.
                    stride = int(np.round(1 / stride))
                    deblock = nn.SequentialCell(
                        nn.Conv2d(
                            num_out_filters,
                            self._num_upsample_filters[i - self._upsample_start_idx],
                            kernel_size=stride,
                            pad_mode='valid',
                            stride=stride,
                            has_bias=False, weight_init="XavierUniform"),
                        nn.BatchNorm2d(
                            self._num_upsample_filters[i - self._upsample_start_idx],
                            eps=1e-3, momentum=0.99
                        ),
                        nn.ReLU(),
                    )
                deblocks.append(deblock)
        self.blocks = blocks
        self.deblocks = deblocks

        # BUG FIX: logger defaults to None and logger.info used to be called
        # unconditionally, crashing with the default arguments. Fall back to a
        # named module logger (this also makes the `name` parameter useful).
        if logger is None:
            logger = logging.getLogger(name)
        logger.info("Finish RPN Initialization")

    @property
    def downsample_factor(self):
        """Net spatial reduction factor of the whole RPN."""
        factor = np.prod(self._layer_strides)
        if len(self._upsample_strides) > 0:
            factor /= self._upsample_strides[-1]
        return factor

    def _make_layer(self, inplanes, planes, num_blocks, stride=1):
        """Build one downsampling block: strided conv + num_blocks convs.

        Returns the block and its output channel count.
        """
        block = nn.SequentialCell(
            nn.ZeroPad2d(padding=1),
            nn.Conv2d(int(inplanes), planes, 3, stride=stride, pad_mode='valid', has_bias=False, weight_init="XavierUniform"),
            nn.BatchNorm2d(planes, eps=1e-3, momentum=0.99),
            nn.ReLU(),
        )

        for j in range(num_blocks):
            block.append(nn.Conv2d(planes, planes, 3, padding=1, pad_mode='pad', has_bias=False, weight_init="XavierUniform"))
            block.append(nn.BatchNorm2d(planes, eps=1e-3, momentum=0.99))
            block.append(nn.ReLU())

        return block, planes

    def construct(self, x):
        """Forward graph: run blocks, upsample tapped outputs, concatenate."""
        ups = []
        op = ops.Concat(1)
        for i in range(len(self.blocks)):
            x = self.blocks[i](x)
            if i - self._upsample_start_idx >= 0:
                ups.append(self.deblocks[i - self._upsample_start_idx](x))
        if len(ups) > 0:
            # BUG FIX: previously hard-coded to (ups[0], ups[1], ups[2]),
            # which crashed for fewer than 3 deblocks and silently dropped
            # extras. Concatenate every upsampled output.
            x = op(tuple(ups))
        return x


class SimtrackNet(nn.Cell):
    """Simtrack network.

    Pipeline: PillarFeatureNet (reader) -> PointPillarsScatter (backbone)
    -> RPN (neck) -> CenterHeadV2 (bbox head).

    Args:
        PFN_num_input_features(int): the input channel of PFN.
        PPS_num_input_features(int): the input channel of PointPillarsScatter.
        RPN_num_input_features(int): the input channel of RPN.
        in_channels(int): Input channel of the head (tied to the neck's
            us_num_filters).
    Returns:
        Tensor, output tensor.
    """

    def __init__(
            self,
            PFN_num_input_features,
            num_filters,
            with_distance,
            voxel_size,
            pc_range,
            norm_cfg,
            PPS_num_input_features,
            layer_nums,
            ds_layer_strides,
            ds_num_filters,
            us_layer_strides,
            us_num_filters,
            RPN_num_input_features,
            in_channels,
            tasks,
            weight,
            code_weights,
            common_heads,
            train_cfg=None,
            test_cfg=None,
            pretrained=None,
    ):
        super().__init__()

        self.test = test_cfg

        # Per-pillar feature extraction from raw points.
        self.reader = PillarFeatureNet(
            PFN_num_input_features,
            norm_cfg,
            num_filters,
            with_distance,
            voxel_size,
            pc_range,
        )

        # Scatter pillar features onto a dense BEV pseudo-image.
        self.backbone = PointPillarsScatter(PPS_num_input_features, norm_cfg)

        # 2D convolutional backbone with multi-scale concatenation.
        self.neck = RPN(
            layer_nums,
            ds_layer_strides,
            ds_num_filters,
            us_layer_strides,
            us_num_filters,
            RPN_num_input_features,
            norm_cfg,
            logger=logging.getLogger("RPN"),
        )

        # Detection head; in_channels must match the neck's concatenated output.
        self.bbox_head = CenterHeadV2(
            in_channels,
            tasks,
            weight,
            code_weights,
            common_heads,
        )

    def extract_feat(self, data):
        """Run reader -> scatter -> neck and return the BEV feature map."""
        pillar_feats = self.reader(
            data["features"], data["num_voxels"], data["coors"])
        bev = self.backbone(
            pillar_feats, data["coors"], data["batch_size"], data["input_shape"])
        return self.neck(bev)

    def construct(self, example, return_loss=False, return_feature=True):
        """Forward graph: returns the head predictions for the batch.

        NOTE: the data dict's "num_voxels" entry intentionally carries the
        per-voxel point counts (example["num_points"]) — the reader consumes
        it as points-per-pillar, while the batch size comes from the length
        of example["num_voxels"].
        """
        batch = dict(
            features=example["voxels"],
            num_voxels=example["num_points"],
            coors=example["coordinates"],
            batch_size=len(example["num_voxels"]),
            input_shape=example["shape"][0],
        )
        return self.bbox_head(self.extract_feat(batch))

    def post_process(self, example, preds):
        """Decode raw head outputs into final boxes using the test config."""
        return self.bbox_head.predict(example, preds, self.test)

    def get_loss(self, example, preds):
        """Compute the head loss against the ground truth in `example`."""
        return self.bbox_head.loss(example, preds)
