"""Groupfree 3D Detection Model"""
import time
import sys
import os
import numpy as np
import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore.ops.functional import stop_gradient

from mind3d.models.backbones.groupfree_3d_backbone import Pointnet2Backbone
from mind3d.models.blocks.fps_module import FPSModule
from mind3d.models.blocks.kps_module import KPSModule
from mind3d.models.heads.groupfree_3d_predict_head import PredictHead
from mind3d.models.blocks.transformer_decoder import TransformerDecoderLayer
from mindspore import Tensor

class Groupfree3DModel(nn.Cell):
    """
    Group-Free 3D object detection model.

    Implements "Group-Free 3D Object Detection via Transformers"
    https://openaccess.thecvf.com/content/ICCV2021/papers/Liu_Group-Free_3D_Object_Detection_via_Transformers_ICCV_2021_paper.pdf

    Pipeline: a PointNet++ backbone extracts seed points, an FPS or KPS
    module samples object candidates, an initial prediction head produces
    proposals, and six transformer decoder layers (each with its own
    prediction head) iteratively refine them.

    Args:
        num_class: number of semantic classes.
        num_heading_bin: number of heading bins for orientation prediction.
        num_size_cluster: number of size clusters.
        mean_size_arr: array of shape (num_size_cluster, 3) holding the mean
            box size of each cluster.
        input_feature_dim: extra per-point feature channels beyond xyz.
        width: backbone width multiplier.
        bn_momentum: batch-norm momentum (stored for external use).
        sync_bn: whether synchronized batch norm was requested (stored).
        num_proposal: number of object proposals / decoder queries.
        sampling: 'fps' selects farthest point sampling; any other value
            selects the KPS sampling module.
        dropout, activation, nhead, num_decoder_layers, dim_feedforward,
            self_position_embedding, cross_position_embedding: transformer
            configuration values (stored; the decoder layers below are
            constructed with their own defaults — confirm against
            TransformerDecoderLayer if these are meant to be forwarded).
        size_cls_agnostic: whether size prediction is class-agnostic (stored).

    Returns:
        construct() returns a tuple, see its docstring.

    Supported Platforms:
        GPU
    """
    def __init__(self, num_class, num_heading_bin, num_size_cluster, mean_size_arr,
                 input_feature_dim=0, width=2, bn_momentum=0.1, sync_bn=False, num_proposal=128, sampling='kps',
                 dropout=0.1, activation="relu", nhead=8, num_decoder_layers=0, dim_feedforward=2048,
                 self_position_embedding='xyz_learned', cross_position_embedding='xyz_learned',
                 size_cls_agnostic=False):
        super(Groupfree3DModel, self).__init__()
        if mean_size_arr.shape[0] != num_size_cluster:
            raise ValueError(
                'mean_size_arr must have num_size_cluster rows, got '
                f'{mean_size_arr.shape[0]} != {num_size_cluster}')
        self.num_class = num_class
        self.num_heading_bin = num_heading_bin
        self.num_size_cluster = num_size_cluster
        self.mean_size_arr = mean_size_arr
        self.input_feature_dim = input_feature_dim
        self.num_proposal = num_proposal
        self.bn_momentum = bn_momentum
        self.sync_bn = sync_bn
        self.width = width
        self.nhead = nhead
        self.sampling = sampling
        self.num_decoder_layers = num_decoder_layers
        self.dim_feedforward = dim_feedforward
        self.self_position_embedding = self_position_embedding
        self.cross_position_embedding = cross_position_embedding
        self.size_cls_agnostic = size_cls_agnostic

        # Backbone: PointNet++ feature extractor.
        self.backbone_net = Pointnet2Backbone(input_feature_dim=self.input_feature_dim, width=self.width)
        # Candidate sampling: farthest point sampling or key-point sampling.
        if self.sampling == 'fps':
            self.fps_module = FPSModule(num_proposal)
        else:
            self.kps_module = KPSModule()
        # Initial proposal head (feature dim 288 matches the backbone output).
        self.proposal_head = PredictHead(num_class, num_heading_bin, num_size_cluster,
                                         mean_size_arr, num_proposal, 288)
        # 1x1 conv projections for the transformer decoder keys/queries.
        self.decoder_key_proj = nn.Conv1d(288, 288, kernel_size=1, has_bias=True,
                                          weight_init="he_uniform", bias_init="normal")
        self.decoder_query_proj = nn.Conv1d(288, 288, kernel_size=1, has_bias=True,
                                            weight_init="he_uniform", bias_init="normal")

        # Six refinement stages, each a decoder layer plus its own head.
        # Kept as individual attributes (decoder1..decoder6) so parameter
        # names — and therefore existing checkpoints — stay unchanged.
        self.decoder1 = TransformerDecoderLayer()
        self.decoder1_head = PredictHead(num_class, num_heading_bin, num_size_cluster, mean_size_arr, num_proposal, 288)
        self.decoder2 = TransformerDecoderLayer()
        self.decoder2_head = PredictHead(num_class, num_heading_bin, num_size_cluster, mean_size_arr, num_proposal, 288)
        self.decoder3 = TransformerDecoderLayer()
        self.decoder3_head = PredictHead(num_class, num_heading_bin, num_size_cluster, mean_size_arr, num_proposal, 288)
        self.decoder4 = TransformerDecoderLayer()
        self.decoder4_head = PredictHead(num_class, num_heading_bin, num_size_cluster, mean_size_arr, num_proposal, 288)
        self.decoder5 = TransformerDecoderLayer()
        self.decoder5_head = PredictHead(num_class, num_heading_bin, num_size_cluster, mean_size_arr, num_proposal, 288)
        self.decoder6 = TransformerDecoderLayer()
        self.decoder6_head = PredictHead(num_class, num_heading_bin, num_size_cluster, mean_size_arr, num_proposal, 288)

    def trans(self, x):
        """Swap the last two axes of a rank-3 tensor, (B, C, N) <-> (B, N, C)."""
        return ops.Transpose()(x, (0, 2, 1))

    def construct(self, inputs):
        r"""
        Forward pass of the network.

        Args:
            inputs: point cloud tensor of shape (B, N, 3 + input_feature_dim);
                each point is formatted as (x, y, z, features...).

        Returns:
            Tuple of:
                backbone_xyz, backbone_sample_idx: backbone seed coordinates
                    and their sample indices.
                fps_sample_inx: indices of the sampled object candidates.
                points_obj_cls_logits: per-point objectness logits from KPS,
                    or None when sampling == 'fps'.
                proposal_output: output tuple of the initial proposal head.
                decoder1_output .. decoder6_output: output tuples of the six
                    refinement heads.
        """
        point_clouds = inputs  # (B, N, 3 + input_feature_dim)
        backbone_features, backbone_xyz, backbone_sample_idx = self.backbone_net(point_clouds)

        # Sample object candidates.
        if self.sampling == 'fps':
            fps_xyz, fps_features, fps_sample_inx = self.fps_module(backbone_xyz, backbone_features)
            points_obj_cls_logits = None
        else:
            fps_xyz, fps_features, fps_sample_inx, points_obj_cls_logits = self.kps_module(backbone_xyz, backbone_features)

        # Initial proposals; index 2 is the predicted center, index -2 the
        # predicted size (per PredictHead's output layout).
        proposal_output = self.proposal_head(fps_features, fps_xyz, prefix='proposal_')
        center, pred_size = proposal_output[2], proposal_output[-2]

        query = self.decoder_query_proj(fps_features)
        key = self.decoder_key_proj(backbone_features)
        key_pos = backbone_xyz
        concat = ops.Concat(-1)
        # The query positional embedding is built from the previous stage's
        # box prediction; stop_gradient detaches it so refinement gradients
        # do not flow back through earlier heads. (Replaces the original
        # Tensor(x.asnumpy()) round-trip, which copies through host memory
        # and is not supported in graph mode.)
        query_pos = stop_gradient(concat((center, pred_size)))

        # Stage 1
        query = self.decoder1(query, key, query_pos, key_pos)
        decoder1_output = self.decoder1_head(query, fps_xyz, prefix='decoder1_')
        center, pred_size = decoder1_output[2], decoder1_output[-2]
        query_pos = stop_gradient(concat((center, pred_size)))

        # Stage 2
        query = self.decoder2(query, key, query_pos, key_pos)
        decoder2_output = self.decoder2_head(query, fps_xyz, prefix='decoder2_')
        center, pred_size = decoder2_output[2], decoder2_output[-2]
        query_pos = stop_gradient(concat((center, pred_size)))

        # Stage 3
        query = self.decoder3(query, key, query_pos, key_pos)
        decoder3_output = self.decoder3_head(query, fps_xyz, prefix='decoder3_')
        center, pred_size = decoder3_output[2], decoder3_output[-2]
        query_pos = stop_gradient(concat((center, pred_size)))

        # Stage 4
        query = self.decoder4(query, key, query_pos, key_pos)
        decoder4_output = self.decoder4_head(query, fps_xyz, prefix='decoder4_')
        center, pred_size = decoder4_output[2], decoder4_output[-2]
        query_pos = stop_gradient(concat((center, pred_size)))

        # Stage 5
        query = self.decoder5(query, key, query_pos, key_pos)
        decoder5_output = self.decoder5_head(query, fps_xyz, prefix='decoder5_')
        center, pred_size = decoder5_output[2], decoder5_output[-2]
        query_pos = stop_gradient(concat((center, pred_size)))

        # Stage 6 (final): no further query_pos is needed after this head.
        query = self.decoder6(query, key, query_pos, key_pos)
        decoder6_output = self.decoder6_head(query, fps_xyz, prefix='decoder6_')

        return (backbone_xyz, backbone_sample_idx, fps_sample_inx, points_obj_cls_logits,
                proposal_output, decoder1_output, decoder2_output, decoder3_output,
                decoder4_output, decoder5_output, decoder6_output)
