from collections import namedtuple

import torch
import torch.nn as nn
#from pointnet2_ops.pointnet2_modules import PointnetFPModule, PointnetSAModuleMSG
import sys
sys.path.append('../')
from utils.pointnet2_modules import PointnetFPModule, PointnetSAModuleMSG
from common.lib.loss_utils.focal_loss import SigmoidFocalClassificationLoss
from common.lib.loss_utils.bin_reg_loss import get_bin_reg_loss
from common.lib.loss_utils.bin_reg_loss_approach import get_bin_reg_loss_approach
from common.lib.loss_utils.cosine_loss import get_cosine_loss
from common.lib.loss_utils.score_loss import get_score_loss
from common.lib.loss_utils.weightedsmoothl1loss import WeightedSmoothL1Loss
from common.lib.loss_utils.objectness_loss import get_objectness_loss
from functools import partial
import numpy as np
from models.roi_head import RoI_Head




class PointNet2SemSegMSG(nn.Module):
    """Stage-one PointNet++ backbone (multi-scale grouping) with per-point heads.

    Encodes a point cloud with four set-abstraction (SA) MSG modules,
    decodes per-point features with four feature-propagation (FP) modules,
    and runs four per-point heads: classification, grasp pose, suction
    pose / normals, and distance.  The loss functions used in train and
    validate modes are selected from ``config`` flags at construction time.
    """

    def __init__(self, config):
        """Select loss functions from *config* and build the network layers.

        Args:
            config: experiment configuration namespace.
                NOTE(review): this constructor mutates *config* in place
                (``points_feature_dim``, ``head_pose_grasp_channels``) —
                other holders of the same config object will observe
                those changes.
        """
        super().__init__()

        self.use_xyz = config.use_xyz
        self.use_normal = config.use_normal
        self.config = config

        if config.use_focalloss:
            # Sigmoid focal loss is binary, so a single foreground logit
            # is enough regardless of config.num_classes.
            self.num_classes = 1
        else:
            self.num_classes = config.num_classes

        if not self.use_normal:
            # Without input normals, points carry no features beyond xyz
            # (in-place config mutation — see docstring).
            config.points_feature_dim = 0

        if config.use_focalloss:
            self.stage_one_cls_loss_func = SigmoidFocalClassificationLoss(gamma=config.focal_loss_gamma, alpha=config.focal_loss_alpha)

            self.stage_one_score_loss_func = partial(get_score_loss)
            # self.stage_one_objectness_loss_func = partial(get_objectness_loss)

        else:
            # Hard-coded class weighting: background 1.0, foreground 5.0.
            weights = torch.tensor([1., 5.])
            self.stage_one_cls_loss_func = nn.CrossEntropyLoss(weight=weights)
        self.head_pose_suction_channels = config.head_pose_suction_channels
        if config.train_grasp:
            if config.use_bin_reg_loss:
                if config.train_grasp_approach:
                    self.stage_one_grasp_pose_loss_func = partial(get_bin_reg_loss_approach,
                                                                  azimuth_scope=config.azimuth_scope, azimuth_bin_size=config.azimuth_bin_size,
                                                                  elevation_scope=config.elevation_scope, elevation_bin_size=config.elevation_bin_size,
                                                                  width_scope=config.width_scope, width_bin_size=config.width_bin_size,
                                                                  grasp_angle_scope=config.grasp_angle_scope, grasp_angle_bin_size=config.grasp_angle_bin_size)
                else:
                    self.stage_one_grasp_pose_loss_func = partial(get_bin_reg_loss,
                        loc_scope=config.loc_scope, loc_bin_size=config.loc_bin_size,
                        azimuth_scope=config.azimuth_scope, azimuth_bin_size=config.azimuth_bin_size,
                        elevation_scope=config.elevation_scope,
                        elevation_bin_size=config.elevation_bin_size,
                        width_scope=config.width_scope, width_bin_size=config.width_bin_size,
                        grasp_angle_scope=config.grasp_angle_scope, grasp_angle_bin_size=config.grasp_angle_bin_size)
                # Size the grasp head output to match the bin/residual layout
                # of the chosen loss: each angular term contributes
                # n_bins classification + n_bins residual channels (the "* 2").
                if config.train_grasp_approach:
                    # NOTE(review): '+=' extends the configured base channel
                    # count here, whereas the branch below overwrites it with
                    # '=' — confirm this asymmetry is intended.
                    config.head_pose_grasp_channels += int(config.azimuth_scope / config.azimuth_bin_size) * 2
                    config.head_pose_grasp_channels += int(config.elevation_scope / config.elevation_bin_size) * 2
                    config.head_pose_grasp_channels += int(config.width_scope / config.width_bin_size) * 2
                    config.head_pose_grasp_channels += int(config.grasp_angle_scope/config.grasp_angle_bin_size) * 2
                else:
                    # Location channels: presumably (bin + residual) pairs per
                    # axis over 3 axes — verify against get_bin_reg_loss.
                    config.head_pose_grasp_channels = int(config.loc_scope / config.loc_bin_size) * 4 * 3
                    config.head_pose_grasp_channels += int(config.azimuth_scope / config.azimuth_bin_size) * 2
                    config.head_pose_grasp_channels += int(config.elevation_scope / config.elevation_bin_size) * 2
                    config.head_pose_grasp_channels += int(config.width_scope / config.width_bin_size) * 2
                    config.head_pose_grasp_channels += int(config.grasp_angle_scope/config.grasp_angle_bin_size) * 2
            else:
                # Direct (non-binned) pose regression is not implemented;
                # fail fast if the config requests it.
                #self.stage_one_pose_loss_func = nn.SmoothL1Loss()
                #config.head_pose_grasp_channels = 3
                print('not implementation error!')
                assert False

        if config.train_suction:
            self.stage_one_suction_pose_loss_func = partial(get_cosine_loss, reduction=True)

        if config.train_grasp_approach:
            self.stage_one_normal_loss_func = partial(get_cosine_loss, reduction=True)
            self.stage_one_distance_loss_func = WeightedSmoothL1Loss()

        self.points_feature_dim = config.points_feature_dim
        self.head_pose_grasp_channels = config.head_pose_grasp_channels

        self._build_model()


    def _build_model(self):
        """Instantiate the SA encoder, FP decoder, and per-point heads."""

        # Encoder: four multi-scale-grouping SA modules, downsampling the
        # cloud 4096 -> 1024 -> 256 -> 64 points with growing radii.
        self.stage_one_SA_modules = nn.ModuleList()
        self.stage_one_SA_modules.append(
            PointnetSAModuleMSG(
                npoint=4096,
                radii=[0.005, 0.01, 0.015],
                #radii=[0.015, 0.02, 0.03],
                nsamples=[8, 16, 32],
                #nsamples=[6, 12, 24],
                #mlps=[[self.points_feature_dim, 16, 16, 32], [self.points_feature_dim, 32, 32, 64]],
                #mlps=[[self.points_feature_dim, 12, 24], [self.points_feature_dim, 12, 24], [self.points_feature_dim, 12, 24]],
                #mlps=[[self.points_feature_dim, 12], [self.points_feature_dim, 12], [self.points_feature_dim, 12]],
                mlps=[[self.points_feature_dim, 16, 32], [self.points_feature_dim, 16, 32], [self.points_feature_dim, 32, 64]],
                use_xyz=self.use_xyz,
            )
        )
        # Output channels = sum of each scale's final MLP width.
        c_out_0 = 32 + 32 + 64

        c_in = c_out_0
        self.stage_one_SA_modules.append(
            PointnetSAModuleMSG(
                npoint=1024,
                radii=[0.02, 0.03, 0.04],
                nsamples=[8, 16, 32],
                #nsamples=[6, 12, 24],
                #mlps=[[c_in, 64, 64, 128], [c_in, 64, 96, 128]],
                #mlps=[[c_in, 64, 128], [c_in, 64, 128], [c_in, 96, 128]],
                #mlps=[[c_in, 64], [c_in, 64], [c_in, 64]],
                mlps=[[c_in, 64, 96], [c_in, 64, 128], [c_in, 96, 128]],
                use_xyz=self.use_xyz,
            )
        )
        c_out_1 = 96 + 128 + 128

        c_in = c_out_1
        self.stage_one_SA_modules.append(
            PointnetSAModuleMSG(
                npoint=256,
                radii=[0.05, 0.1],
                nsamples=[16, 32],
                #nsamples=[12, 24],
                #mlps=[[c_in, 128, 196, 256], [c_in, 128, 196, 256]],
                #mlps=[[c_in, 128, 256], [c_in, 196, 256]],
                #mlps=[[c_in, 128], [c_in, 128]],
                mlps=[[c_in, 196, 256], [c_in, 196, 256]],
                use_xyz=self.use_xyz,
            )
        )
        c_out_2 = 256 + 256

        c_in = c_out_2
        self.stage_one_SA_modules.append(
            PointnetSAModuleMSG(
                npoint=64,
                radii=[0.1, 0.15],
                nsamples=[16, 32],
                #nsamples=[12, 24],
                #mlps=[[c_in, 256, 256, 512], [c_in, 256, 384, 512]],
                #mlps=[[c_in, 256, 512], [c_in, 384, 512]],
                mlps=[[c_in, 256, 512], [c_in, 384, 512]],
                use_xyz=self.use_xyz,
            )
        )
        c_out_3 = 512 + 512

        # Decoder: FP modules are applied deepest-first in forward(); each
        # MLP's input width is (channels from the deeper level's upsampled
        # features + channels of the skip connection at that level).
        self.stage_one_FP_modules = nn.ModuleList()
        #self.FP_modules.append(PointnetFPModule(mlp=[256 + self.points_feature_dim, 128]))
        self.stage_one_FP_modules.append(PointnetFPModule(mlp=[256 + self.points_feature_dim, 128, 128]))
        #self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_0, 256]))
        self.stage_one_FP_modules.append(PointnetFPModule(mlp=[512 + c_out_0, 256, 256]))
        #self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_1, 512]))
        self.stage_one_FP_modules.append(PointnetFPModule(mlp=[512 + c_out_1, 512, 512]))
        self.stage_one_FP_modules.append(PointnetFPModule(mlp=[c_out_3 + c_out_2, 512, 512]))

        # Per-point classification head.  Outputs num_classes + 1 channels;
        # forward() uses channel 0 for the classification loss and channel 1
        # as the per-point score prediction.
        self.stage_one_head_classification = nn.Sequential(
            #nn.Conv1d(128, 64, kernel_size=1, bias=False),
            nn.Conv1d(128, 32, kernel_size=1, bias=False),
            #nn.BatchNorm1d(64),
            nn.BatchNorm1d(32),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Conv1d(32, self.num_classes+1, kernel_size=1),
        )
        # Per-point grasp-pose head; output width matches the bin/residual
        # channel count computed in __init__.
        self.stage_one_head_pose = nn.Sequential(
            #nn.Conv1d(128, 64, kernel_size=1, bias=False),
            nn.Conv1d(128, 128, kernel_size=1, bias=False),
            #nn.BatchNorm1d(64),
            nn.BatchNorm1d(128),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Conv1d(128, self.head_pose_grasp_channels, kernel_size=1),
        )

        # Per-point suction-pose head; forward() also reuses its output as
        # the normals prediction.
        self.stage_one_head_pose_suction =nn.Sequential(
            #nn.Conv1d(128, 64, kernel_size=1, bias=False),
            nn.Conv1d(128, 128, kernel_size=1, bias=False),
            #nn.BatchNorm1d(64),
            nn.BatchNorm1d(128),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Conv1d(128, self.head_pose_suction_channels, kernel_size=1),
        )

        # Per-point scalar distance regression head.
        self.stage_one_head_distance =nn.Sequential(
            nn.Conv1d(128, 128, kernel_size=1, bias=False),
            nn.BatchNorm1d(128),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Conv1d(128, 1, kernel_size=1),
        )

    def _break_up_pc(self, pc):
        """Split a (B, N, 3 + C) cloud into xyz (B, N, 3) and channel-first
        features (B, C, N), or None when only coordinates are present."""
        xyz = pc[..., 0:3].contiguous()
        features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None

        return xyz, features

    def forward(self, pointcloud, **kwargs):
        r"""
            Forward pass of the network.

            Parameters
            ----------
            pointcloud: Variable(torch.cuda.FloatTensor)
                (B, N, 3 + input_channels) tensor
                Point cloud to run predicts on
                Each point in the point-cloud MUST
                be formated as (x, y, z, features...)
            kwargs
                Must contain ``mode`` in {'train', 'validate', 'test'}.
                In train/validate mode, label tensors ``cls_label``,
                ``score_label``, ``pose_label``, ``normals_label`` and
                ``distance_label`` are also required.

            Returns
            -------
            train/validate:
                (loss_dict, cls_pred, grasp_pose_pred, normals_pred,
                 distance_pred, per-point features (B, 128, N))
            test:
                (cls_pred, grasp_pose_pred, suction_pose_pred, features)
                when config.train_grasp, else
                (cls_pred, suction_pose_pred, features).
        """
        xyz, features = self._break_up_pc(pointcloud)
        # print(pointcloud.size())

        # Encoder: collect per-level xyz/features for the FP skip links.
        l_xyz, l_features = [xyz], [features]
        for i in range(len(self.stage_one_SA_modules)):
            li_xyz, li_features = self.stage_one_SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)

        # Decoder: propagate features back from the deepest level so that
        # l_features[0] ends up with per-point (B, 128, N) features.
        for i in range(-1, -(len(self.stage_one_FP_modules) + 1), -1):
            l_features[i - 1] = self.stage_one_FP_modules[i](
                l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
            )
        # cls_pred: (B, C=1, N)
        cls_pred = self.stage_one_head_classification(l_features[0])

        # pose_red: (B, N, C)
        grasp_pose_pred = self.stage_one_head_pose(l_features[0]).transpose(1, 2).contiguous()

        # NOTE(review): the suction head is run twice on the same features;
        # with Dropout(0.5) active in training mode the two outputs can
        # differ — confirm whether a single shared call was intended.
        normals_pred = self.stage_one_head_pose_suction(l_features[0]).transpose(1, 2).contiguous()
        suction_pose_pred = self.stage_one_head_pose_suction(l_features[0]).transpose(1, 2).contiguous()
        distance_pred = self.stage_one_head_distance(l_features[0]).transpose(1, 2).contiguous()


        if kwargs['mode'] in ['train', 'validate']:
            # stage_one_loss_dict = self.get_loss(cls_pred, kwargs['cls_label'], grasp_pose_pred, kwargs['pose_label'])
            # Foreground mask: points whose class label is positive.
            fg_mask = (kwargs['cls_label'] > 0)
            # print(fg_mask.size())
            if kwargs['mode'] == 'train':
                # cls_pred[:, 1] is (B, N); squeeze(dim=1) is a no-op unless
                # N == 1 — presumably kept for the single-point edge case.
                score_pred = torch.squeeze(cls_pred[:, 1], dim=1)
                # objectness_pred = torch.squeeze(cls_pred[:, 2], dim=1)
            if kwargs['mode'] == 'validate':
                score_pred = torch.squeeze(cls_pred[:, 1], dim=1)
                # objectness_pred = torch.squeeze(cls_pred[:, 2], dim=1)
            # score_pred = cls_pred[:,1:2,:]
            # Score and distance losses are computed on foreground points only.
            stage_one_loss_dict = self.get_loss(cls_pred[:,0:1,:], kwargs['cls_label'],
                                                score_pred[fg_mask], kwargs['score_label'][fg_mask],
                                                # objectness_pred, kwargs['objectness_label'],
                                                grasp_pose_pred,kwargs['pose_label'],
                                                normals_pred, kwargs['normals_label'],
                                                distance_pred[fg_mask],kwargs['distance_label'][fg_mask])
            # return stage_one_loss_dict, cls_pred, grasp_pose_pred, l_features[0]
            return stage_one_loss_dict, cls_pred, grasp_pose_pred, normals_pred, distance_pred,l_features[0]
        elif kwargs['mode'] == 'test':
            if self.config.train_grasp:
                return cls_pred, grasp_pose_pred, suction_pose_pred,l_features[0]
            else:
                return cls_pred, suction_pose_pred, l_features[0]


    def get_loss(self, cls_pred, cls_label, score_pred, score_label, grasp_pose_pred, grasp_pose_label, normal_pred,normal_label,
                 distance_pred, distance_label, loss_dict=None):
        """Compute the weighted stage-one loss and its components.

        Args:
            cls_pred: (B, 1, N) classification logits.
            cls_label: (B, N) per-point class labels (> 0 means foreground).
            score_pred/score_label: score values for foreground points only.
            grasp_pose_pred/grasp_pose_label: (B, N, C) pose tensors.
            normal_pred/normal_label: (B, N, C) normal tensors.
            distance_pred/distance_label: foreground-point distance tensors.
            loss_dict: optional dict to extend; a new one is created if None.

        Returns:
            loss_dict with detached component losses plus
            ``'stage_one_loss'`` (graph-attached total for backprop).
        """
        if loss_dict is None:
            loss_dict = {}
        # get the loss
        stage_one_cls_loss = self.config.loss_weight[0] * self.stage_one_cls_loss_func(cls_pred, cls_label)

        stage_one_score_loss = self.config.loss_weight[3] * self.stage_one_score_loss_func(score_pred, score_label)
        # stage_one_objectness_loss = self.config.loss_weight[4] * self.stage_one_objectness_loss_func(objectness_pred,
        #                                                                                              objectness_label)


        stage_one_cls_total_loss = stage_one_cls_loss + stage_one_score_loss #+ stage_one_objectness_loss

        # Zero tensor with the right device/dtype to accumulate into.
        stage_one_loss = stage_one_cls_total_loss * 0


        stage_one_loss += stage_one_cls_total_loss

        loss_dict['stage_one_cls_loss'] = stage_one_cls_loss.clone().detach()
        loss_dict['stage_one_score_loss'] = stage_one_score_loss.clone().detach()
        # loss_dict['stage_one_objectness_loss'] = stage_one_objectness_loss.clone().detach()

        # Flatten batch and point dims so the foreground mask can index rows.
        B, N, C = grasp_pose_pred.size()
        grasp_pose_pred = grasp_pose_pred.view(B*N, C)
        _, _, C = grasp_pose_label.size()
        grasp_pose_label = grasp_pose_label.view(B*N, C)


        B_, N_, C_ = normal_pred.size()
        normal_pred = normal_pred.view(B_ * N_, C_)
        _, _, C_ = normal_label.size()
        normal_label = normal_label.view(B_*N_, C_)



        fg_mask = (cls_label.view(-1) >0)
        fg_sum = fg_mask.long().sum().item()
        # Requires at least one foreground point in the batch.
        assert fg_sum > 0

        if self.config.train_grasp:
            if self.config.train_grasp_approach:
                # Grasp pose loss on foreground points only.
                stage_one_grasp_pose_loss, reg_grasp_loss_dict = self.stage_one_grasp_pose_loss_func(grasp_pose_pred[fg_mask], grasp_pose_label[fg_mask])
                stage_one_grasp_pose_loss *= self.config.loss_weight[1]
                stage_one_normal_loss, reg_normal_loss_dict = self.stage_one_normal_loss_func(normal_pred,normal_label)
                stage_one_normal_loss *= self.config.loss_weight[2]
                stage_one_distance_loss = self.config.loss_weight[4] * self.stage_one_distance_loss_func(distance_pred,
                                                                                                         distance_label.unsqueeze(dim=1))

                # Normalize the summed distance loss by the foreground count.
                stage_one_distance_loss = stage_one_distance_loss.sum() / fg_sum


                loss_dict['stage_one_grasp_pose_loss'] = stage_one_grasp_pose_loss.clone().detach()
                loss_dict['stage_one_normal_loss'] = stage_one_normal_loss.clone().detach()
                loss_dict['stage_one_distance_loss'] = stage_one_distance_loss.clone().detach()

                total_pose_loss = stage_one_grasp_pose_loss + stage_one_normal_loss + stage_one_distance_loss
                stage_one_loss += total_pose_loss
            else:
                stage_one_grasp_pose_loss, reg_grasp_loss_dict = self.stage_one_grasp_pose_loss_func(grasp_pose_pred[fg_mask], grasp_pose_label[fg_mask])
                stage_one_grasp_pose_loss *= self.config.loss_weight[1]
                loss_dict['stage_one_grasp_pose_loss'] = stage_one_grasp_pose_loss.clone().detach()
                stage_one_loss += stage_one_grasp_pose_loss
        if self.config.train_suction:
            stage_one_suction_pose_loss, reg_suction_loss_dict = self.stage_one_suction_pose_loss_func(normal_pred, normal_label)
            stage_one_suction_pose_loss *= self.config.loss_weight[2]
            loss_dict['stage_one_suction_pose_loss'] = stage_one_suction_pose_loss.clone().detach()
            stage_one_loss += stage_one_suction_pose_loss

        loss_dict['stage_one_loss'] = stage_one_loss

        return loss_dict
        


if __name__ == "__main__":
    # Smoke test: build the model from the experiment configs and run a
    # single forward pass on random input.
    import sys
    sys.path.append('../')
    from common.utils.config_ import cfg, cfg_from_yaml_file, merge_new_config_file
    config = cfg_from_yaml_file('../experiments/base_config.yaml', cfg)
    config = merge_new_config_file(config, '../experiments/local_msg_classification_config.yaml')
    model = PointNet2SemSegMSG(config).cuda()
    model.eval()  # disable dropout / batch-norm updates for the smoke test
    pointcloud = torch.randn(2, 16384, 3).cuda()
    # Bug fix: forward() indexes kwargs['mode'], so calling model(pointcloud)
    # raised KeyError; it also returns 3 or 4 tensors in 'test' mode, so the
    # original two-value unpack would fail.  Pass the mode and unpack
    # positionally (cls_pred first, a pose prediction second in both layouts).
    with torch.no_grad():
        outputs = model(pointcloud, mode='test')
    cls_pred, pose_pred = outputs[0], outputs[1]
    print(cls_pred.shape)
    print(pose_pred.shape)
