import torch
import torch.nn as nn
from models.pointnet2_msg_sem import PointNet2SemSegMSG
from models.roi_head import RoI_Head
from common.lib.bbox import decode_bbox_target, decode_bbox_target_approach
import torch.nn.functional as F

class Two_Stage_GraspNet(nn.Module):
    """Two-stage grasp detection network.

    Stage one: a PointNet++ MSG backbone (``PointNet2SemSegMSG``) predicts
    per-point classification scores and, depending on config flags, grasp
    poses, normals and approach distances. Stage two (optional, enabled by
    ``config.train_two_stage``): an ``RoI_Head`` refines the decoded grasp
    candidates and contributes a second loss term.
    """

    def __init__(self, config):
        """Build the backbone and (optionally) the RoI refinement head.

        Args:
            config: experiment configuration object. Flags read here:
                ``fix_back_bone`` — freeze all backbone weights;
                ``train_two_stage`` — construct the second-stage RoI head
                (with ``config.ROI_HEAD`` as its model cfg).
        """
        super().__init__()
        self.config = config
        self.back_bone = PointNet2SemSegMSG(self.config)
        # Freeze the backbone so only later components receive gradients.
        if config.fix_back_bone:
            for p in self.back_bone.parameters():
                p.requires_grad = False
        if config.train_two_stage:
            self.roi_head = RoI_Head(input_channels=128, model_cfg=self.config.ROI_HEAD, num_class=1)

    def forward(self, data, **kwargs):
        """Run the backbone (and optionally the RoI head) on a point cloud.

        Args:
            data: input point cloud batch; ``data.shape[0]`` is used as the
                batch size and the tensor is later reshaped to ``(-1, 3)``
                and added to per-point center residuals, so it is assumed to
                be ``(B, N, 3)`` — TODO confirm against the caller.
            **kwargs: must contain ``mode`` in ``{'train','validate','test'}``.
                In train/validate mode the following labels are also read:
                ``cls_label``, ``pose_label``, ``matrix_label``,
                ``score_label``.

        Returns:
            batch_dict: dict of predictions (and, in train/validate mode,
                loss terms under ``stage_one_loss`` / ``stage_two_loss`` /
                ``total_loss``).
        """
        batch_size = data.shape[0]

        assert kwargs['mode'] in ['train', 'validate', 'test']
        # forward the back_bone
        if kwargs['mode'] in ['train', 'validate']:
            stage_one_loss_dict, cls_pred, grasp_pose_pred, normals_pred, distance_pred, stage_one_feature = self.back_bone(data, **kwargs)
        elif kwargs['mode'] == 'test':
            # NOTE(review): in this branch the backbone returns only three
            # values — grasp_pose_pred / normals_pred / distance_pred are
            # never bound, yet they are read below when config.train_grasp
            # is set. That path would raise NameError in test mode — confirm
            # test configs never enable train_grasp, or fix upstream.
            cls_pred, pose_pred, stage_one_feature = self.back_bone(data, **kwargs)
        else:
            # NOTE(review): unreachable given the assert above; also
            # ``self.mode`` is never set on this module, so this line would
            # raise AttributeError (not the intended message) if it ever ran.
            assert False, 'No mode {}'.format(self.mode)


        # cls_pred channels: 0 = foreground/objectness logit, 1 = score.
        cls_pred_ = cls_pred[:,:1,:]
        score_pred = cls_pred[:,1:2,:]
        # objectness_pred = cls_pred[:,2:3,:]

        if self.config.train_grasp:
            B, N, C = grasp_pose_pred.size()
            # Flatten per-point pose predictions to (B*N, C) for decoding.
            pose_pred = grasp_pose_pred.view(B*N, C)

            # Flattened point coordinates, aligned with the flattened labels.
            xyz = data.reshape(-1, 3)

            if kwargs['mode'] in ['train', 'validate']:
                # get the mask for calculate loss
                # fg_mask: (B*N*C, )
                fg_mask = (kwargs['cls_label'].view(-1) > 0)
                fg_sum = fg_mask.long().sum()
                # make sure fg_sum larger than 0
                assert fg_sum > 0
                _, _, C = kwargs['pose_label'].size()
                # pose_label: (B*N, C=8) (x,y,z,azimuth,elevation,contact_width,virtual_width,grasp_angle)
                pose_label = kwargs['pose_label'].view(B*N, C)
                if self.config.train_grasp_approach:
                    # NOTE(review): column 0 of pose_label is x per the layout
                    # comment above, yet it is used as width here — confirm
                    # the approach-mode label layout really differs.
                    width_label = pose_label[fg_mask, 0].unsqueeze(dim=-1)
                else:
                    width_label = pose_label[fg_mask, 6].unsqueeze(dim=-1)
                    # center; (fg_sum, 3)
                    # Labels store center as a residual from the point coords.
                    center = pose_label[fg_mask, :3] + xyz[fg_mask, :3]
                # matrix_label: (fg_sum, 3, 3)
                # NOTE(review): matrix_label is only bound when train_two_stage
                # or train_grasp_approach is set, but it is read unconditionally
                # in batch_dict below — other flag combinations would NameError.
                if self.config.train_two_stage:
                    matrix_label = kwargs['matrix_label'].view(-1, 3, 3)[fg_mask]
                elif self.config.train_grasp_approach:
                    matrix_label = kwargs['matrix_label'].view(-1, 3, 3)
                # score_label: (fg_sum, 1)
                score_label = kwargs['score_label'].view(-1, 1)[fg_mask]
                # gt_grasp: (fg_sum, 14)
                # Layout: center(3) | R col0(3) | R col1(3) | width(1) | R col2(3) | score(1).
                # NOTE(review): ``center`` is bound only on the
                # non-train_grasp_approach branch above; enabling both
                # train_two_stage and train_grasp_approach would NameError here.
                if self.config.train_two_stage:
                    gt_grasp = torch.cat((center, matrix_label[:, :, 0] , matrix_label[:, :, 1], width_label, matrix_label[:, :, 2], score_label), dim=-1)
                # Per-sample foreground counts; squeeze() drops the batch dim
                # when batch_size == 1, hence the different sum axis.
                if batch_size > 1:
                    fg_list = (kwargs['cls_label'].squeeze()>0).long().sum(dim=1)
                else:
                    fg_list = (kwargs['cls_label'].squeeze()>0).long().sum(dim=0)
            else:
                gt_grasp = None
                fg_list = None



            # decode the pose_pred to center_residual_pred, grasp_width and Rotation matrix
            if self.config.train_grasp_approach:
                # Approach-mode decoding yields only width and rotation; the
                # center is recovered from the approach vector and distance.
                width, R = decode_bbox_target_approach(
                    pose_pred, azimuth_scope=self.config.azimuth_scope,
                    azimuth_bin_size=self.config.azimuth_bin_size,
                    elevation_scope=self.config.elevation_scope,
                    elevation_bin_size=self.config.elevation_bin_size,
                    width_scope=self.config.width_scope,
                    width_bin_size=self.config.width_bin_size,
                    grasp_angle_scope=self.config.grasp_angle_scope,
                    grasp_angle_bin_size=self.config.grasp_angle_bin_size)
            else:
                center_residual_pred, width, R = decode_bbox_target(
                    pose_pred, self.config.loc_scope, self.config.loc_bin_size,
                    azimuth_scope=self.config.azimuth_scope,
                    azimuth_bin_size=self.config.azimuth_bin_size,
                    elevation_scope=self.config.elevation_scope,
                    elevation_bin_size=self.config.elevation_bin_size,
                    width_scope=self.config.width_scope,
                    width_bin_size=self.config.width_bin_size,
                    grasp_angle_scope=self.config.grasp_angle_scope,
                    grasp_angle_bin_size=self.config.grasp_angle_bin_size)

            # get the grasp predict approach_vector, closing_vector, grasp_o_vecter
            # center_residual_pred: (B, N, 3)
            if self.config.train_grasp_approach:
                # print('shape is:', distance_pred.shape)
                # print(normals_pred.shape)
                # print(distance_pred.view(-1).shape)
                # Grasp center = point + approach_vector * predicted distance.
                # The transpose dance broadcasts the scalar distance over the
                # 3 vector components.
                approach_vector = R[:, :, 0].reshape(-1, self.config.num_points, 3)
                center_residual_pred = approach_vector.view(-1, 3).transpose(1, 0).contiguous() * distance_pred.view(-1)
                center_residual_pred = center_residual_pred.transpose(1, 0).contiguous().view(-1, self.config.num_points, 3)
                center_residual_pred += data
            else:
                # Residuals are relative to the input points; add them back to
                # obtain absolute grasp centers.
                center_residual_pred = center_residual_pred.reshape(-1, self.config.num_points, 3)
                center_residual_pred += data
            # width: (B, N, 1)
            width = width.reshape(-1, self.config.num_points, 1)
            # approach_vector: (B, N, 3)
            approach_vector = R[:, :, 0].reshape(-1, self.config.num_points, 3)
            # closing_vector: (B, N, 3)
            closing_vector = R[:, :, 1].reshape(-1, self.config.num_points, 3)
            # grasp_o_vector: (B, N, 3)
            grasp_o_vector = R[:, :, 2].reshape(-1, self.config.num_points, 3)
            # grasp: (B, N, 13) = center(3)|approach(3)|closing(3)|width(1)|o_vector(3)
            grasp = torch.cat((center_residual_pred, approach_vector, closing_vector, width, grasp_o_vector), dim=-1)
            # input to the roi_head
            # NOTE(review): normals_pred / distance_pred / matrix_label are
            # only bound on some mode/flag paths (see notes above) — this dict
            # construction assumes train/validate with the matching flags.
            batch_dict = {
                            'batch_size':batch_size, # B
                            'batch_cls_preds': torch.sigmoid(cls_pred_.transpose(1, 2)), # (B, N, 1)
                            'batch_score_preds': score_pred.transpose(1, 2),
                            'batch_grasp_preds': grasp, # (B, N, 13)
                            'batch_normals_preds': normals_pred,
                            'batch_distance_preds': distance_pred,
                            'batch_grasp_pose_label': matrix_label,
                            # 'gt_grasps': gt_grasp, # (fg_sum, 14)
                            # 'point_coords': xyz.reshape(-1, self.config.num_points, 3), # (B, N, 3)
                            # 'point_features': stage_one_feature.transpose(1, 2).contiguous(), #(B, N, C=128)
                            'fg_list': fg_list, # list len(list) = B
                        }

            # update loss in mode train and validate
            if kwargs['mode'] in ['train', 'validate']:
                batch_dict.update(stage_one_loss_dict)
                batch_dict['total_loss'] = batch_dict['stage_one_loss']

            if self.config.train_two_stage:
                # roi_head forward
                loss, batch_dict, targets_dict = self.roi_head(batch_dict)

                batch_dict['stage_two_loss'] = loss

                if batch_dict['stage_two_loss'] is None:
                    # RoI head produced no loss (e.g. nothing to refine);
                    # fall back to the stage-one loss alone.
                    if kwargs['mode'] in ['train', 'validate']:
                        batch_dict['total_loss'] = batch_dict['stage_one_loss']
                else:
                    if kwargs['mode'] in ['train', 'validate']:
                        # Weighted sum of the two stage losses
                        # (weights from config.stage_loss_weight).
                        batch_dict['total_loss'] = self.config.stage_loss_weight[1] * batch_dict['stage_two_loss'] + self.config.stage_loss_weight[0] * batch_dict['stage_one_loss']
                        ### add by weiwei 2020.10.12.18:47
                        batch_dict['target_dict'] = targets_dict
                        ### add finished
        else:
            # Grasp head disabled: return only classification/score/suction
            # outputs from stage one.
            batch_dict = {
                'batch_size': batch_size,  # B
                'batch_cls_preds': torch.sigmoid(cls_pred_.transpose(1, 2)),  # (B, N, 1)
                'batch_score_preds': score_pred.transpose(1,2),
                'batch_suction_pose_preds': normals_pred,
                # 'batch_objectness_preds': torch.sigmoid(objectness_pred.transpose(1,2))
                # 'batch_grasp_preds': grasp,  # (B, N, 13)
                # 'gt_grasps': gt_grasp,  # (fg_sum, 14)
                # 'point_coords': xyz.reshape(-1, self.config.num_points, 3),  # (B, N, 3)
                # 'point_features': stage_one_feature.transpose(1, 2).contiguous(),  # (B, N, C=128)
                # 'fg_list': fg_list,  # list len(list) = B
            }
            if kwargs['mode'] in ['train', 'validate']:
                batch_dict.update(stage_one_loss_dict)
                batch_dict['total_loss'] = batch_dict['stage_one_loss']
        return batch_dict
       

