import os
import sys
import pickle
from typing import Iterator
import numpy as np
import mindspore.dataset as ds


class ScannetDatasetConfig(object):
    """
    Construct ScanNet V2 dataset from
    ScanNet: Richly-annotated 3D Reconstructions of Indoor Scenes
    https://arxiv.org/abs/1702.04405

    Introduction:
        ScanNet is an RGB-D video dataset containing 2.5 million views in more than 1500 scans, annotated with 3D camera poses, surface reconstructions, and instance-level semantic segmentations.

    Supported Platforms:
        GPU
    """
    def __init__(self, mean_size_path='/home/amax/tyolm/datasets/scannet/meta_data/scannet_means.npz'):
        """Build the class/label maps and load the per-class mean box sizes.

        Args:
            mean_size_path (str): .npz file whose 'arr_0' entry is the
                (num_size_cluster, 3) array of mean box sizes per class.
                Defaults to the original hard-coded location.
        """
        self.num_class = 18
        self.num_heading_bin = 1  # ScanNet boxes are axis-aligned: one heading bin
        self.num_size_cluster = 18

        # Detection class name -> class index (18 classes).
        self.type2class = {'cabinet': 0, 'bed': 1, 'chair': 2, 'sofa': 3, 'table': 4, 'door': 5,
                           'window': 6, 'bookshelf': 7, 'picture': 8, 'counter': 9, 'desk': 10, 'curtain': 11,
                           'refrigerator': 12, 'showercurtrain': 13, 'toilet': 14, 'sink': 15, 'bathtub': 16,
                           'garbagebin': 17}
        self.class2type = {cls_idx: name for name, cls_idx in self.type2class.items()}
        # NYU40 label ids of the 18 detection classes, and the inverse map.
        self.nyu40ids = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
        self.nyu40id2class = {nyu40id: i for i, nyu40id in enumerate(list(self.nyu40ids))}
        # Mean (l, w, h) per size cluster, used to compute size residuals.
        self.mean_size_arr = np.load(mean_size_path)['arr_0']
        self.type_mean_size = {self.class2type[i]: self.mean_size_arr[i, :]
                               for i in range(self.num_size_cluster)}

    def angle2class(self, angle):
        """Convert a continuous angle to a discrete class (NOT USED).

        ScanNet boxes are axis-aligned so heading is never discretized.
        Raises:
            NotImplementedError: always. (The original used `assert False`,
            which is silently stripped under `python -O`.)
        """
        raise NotImplementedError('angle2class is not used for ScanNet (axis-aligned boxes)')

    def class2angle(self, pred_cls, residual, to_label_format=True):
        """Inverse of angle2class.

        ScanNet only has axis-aligned boxes, so the angle is always 0.
        """
        return 0

    def size2class(self, size, type_name):
        """Convert a 3D box size (l, w, h) to a size class and size residual."""
        size_class = self.type2class[type_name]
        size_residual = size - self.type_mean_size[type_name]
        return size_class, size_residual

    def class2size(self, pred_cls, residual):
        """Inverse of size2class: mean size of the class plus the residual."""
        return self.mean_size_arr[pred_cls, :] + residual

    def param2obb(self, center, heading_class, heading_residual, size_class, size_residual, pred_size=None):
        """Assemble a 7-dof box (cx, cy, cz, l, w, h, angle) from parameters.

        Args:
            center: (3,) box center.
            heading_class, heading_residual: heading parameters (always yield 0).
            size_class, size_residual: size-cluster parameters.
            pred_size: optional directly-predicted (l, w, h); overrides the
                class/residual decoding when given.
        """
        heading_angle = self.class2angle(heading_class, heading_residual)
        # `is not None`: pred_size is typically a numpy array, for which
        # `!= None` is an elementwise comparison whose truth value raises.
        if pred_size is not None:
            box_size = pred_size
        else:
            box_size = self.class2size(int(size_class), size_residual)
        obb = np.zeros((7,))
        obb[0:3] = center
        obb[3:6] = box_size
        obb[6] = heading_angle * -1
        return obb


def rotate_aligned_boxes(input_boxes, rot_mat):
    """Rotate axis-aligned boxes and return new axis-aligned boxes.

    Args:
        input_boxes: (N, 6) array of (cx, cy, cz, l, w, h).
        rot_mat: (3, 3) rotation matrix (rotation about z in practice).

    Returns:
        (N, 6) array whose centers are rotated and whose x/y extents are the
        axis-aligned enclosure of the rotated box footprint; z extent is kept.
    """
    centers = input_boxes[:, 0:3]
    lengths = input_boxes[:, 3:6]
    rot_t = np.transpose(rot_mat)
    new_centers = centers.dot(rot_t)

    half_x = lengths[:, 0] / 2.0
    half_y = lengths[:, 1] / 2.0
    n_boxes = half_x.shape[0]
    corner_x = np.zeros((n_boxes, 4))
    corner_y = np.zeros((n_boxes, 4))

    # Rotate the four footprint corners and track their x/y coordinates.
    corner_signs = ((-1, -1), (1, -1), (1, 1), (-1, 1))
    for col, (sign_x, sign_y) in enumerate(corner_signs):
        corners = np.zeros((n_boxes, 3))
        corners[:, 0] = sign_x * half_x
        corners[:, 1] = sign_y * half_y
        rotated = corners.dot(rot_t)
        corner_x[:, col] = rotated[:, 0]
        corner_y[:, col] = rotated[:, 1]

    # New extent = twice the largest corner offset along each axis.
    new_lengths = np.stack((2.0 * corner_x.max(axis=1),
                            2.0 * corner_y.max(axis=1),
                            lengths[:, 2]), axis=1)

    return np.concatenate([new_centers, new_lengths], axis=1)


def rotz(t):
    """Return the 3x3 rotation matrix for angle *t* (radians) about the z-axis."""
    cos_t, sin_t = np.cos(t), np.sin(t)
    return np.array([
        [cos_t, -sin_t, 0],
        [sin_t, cos_t, 0],
        [0, 0, 1],
    ])


def random_sampling(pc, num_sample, replace=None, return_choices=False):
    """Randomly sample rows from an (N, C) point array.

    Args:
        pc: (N, C) array.
        num_sample: number of rows to draw.
        replace: sample with replacement; when None, replacement is enabled
            only if the cloud has fewer rows than requested.
        return_choices: also return the chosen row indices.

    Returns:
        (num_sample, C) array, optionally with the (num_sample,) index array.
    """
    if replace is None:
        replace = pc.shape[0] < num_sample
    # np.random.seed(1234)
    indices = np.random.choice(pc.shape[0], num_sample, replace=replace)
    sampled = pc[indices]
    if return_choices:
        return sampled, indices
    return sampled

# Shared dataset configuration: class maps, NYU40 ids and mean box sizes.
DC = ScannetDatasetConfig()
# Maximum number of ground-truth boxes per scene; label arrays are padded to this.
MAX_NUM_OBJ = 64
# Per-channel mean RGB subtracted when normalizing point colors.
MEAN_COLOR_RGB = np.array([109.8, 97.2, 83.8])


class ScannetDetectionDataset():
    """
    ScanNet v2 dataset for Group-Free-3D detection.

    On first use, per-scene .npy files are loaded from
    '<data_root>/scannet_train_detection_data' and cached into a single
    '<split>_data.pkl' under the data root; later constructions load that
    pickle directly.
    """
    def __init__(self, split_set='train', num_points=50000,
                 use_color=False, use_height=False, augment=False,
                 data_root=None):
        """
        Args:
            split_set (str): 'train', 'val', 'test' or 'all'.
            num_points (int): number of points sampled from each scene.
            use_color (bool): keep (normalized) RGB channels in the cloud.
            use_height (bool): append height above the floor as a channel.
            augment (bool): apply random flip / scale / z-rotation.
            data_root (str): directory containing
                'scannet_train_detection_data'; defaults to this file's
                directory when None.
        """
        # The original referenced undefined BASE_DIR/ROOT_DIR globals and
        # joined a possibly-None data_root, both of which raised at runtime;
        # fall back to this file's own directory instead.
        if data_root is None:
            data_root = os.path.dirname(os.path.abspath(__file__))
        self.data_path = os.path.join(data_root, 'scannet_train_detection_data')

        self.num_points = num_points
        self.use_color = use_color
        self.use_height = use_height
        self.augment = augment

        filename = os.path.join(data_root, f'{split_set}_data.pkl')
        if not os.path.exists(filename):
            # Scene names look like 'scene0000_00' (12 characters).
            all_scan_names = list(set([os.path.basename(x)[0:12] \
                                       for x in os.listdir(self.data_path) if x.startswith('scene')]))
            if split_set == 'all':
                self.scan_names = all_scan_names
            elif split_set in ['train', 'val', 'test']:
                # NOTE(review): the meta_data path is absolute, so the
                # previous os.path.join(ROOT_DIR, ...) discarded its (undefined)
                # first component anyway; the join is kept without it.
                split_filenames = os.path.join('/home/tyolm/codes/ms3d/data/meta_data',
                                               'scannetv2_{}.txt'.format(split_set))
                with open(split_filenames, 'r') as f:
                    self.scan_names = f.read().splitlines()
                # Remove scans that are not available on disk.
                num_scans = len(self.scan_names)
                self.scan_names = [sname for sname in self.scan_names \
                                   if sname in all_scan_names]
                print('kept {} scans out of {}'.format(len(self.scan_names), num_scans))
            else:
                print('illegal split name')
                return

            mesh_vertices_list = []
            instance_labels_list = []
            semantic_labels_list = []
            instance_bboxes_list = []
            for scan_name in self.scan_names:
                mesh_vertices = np.load(os.path.join(self.data_path, scan_name) + '_vert.npy')
                instance_labels = np.load(os.path.join(self.data_path, scan_name) + '_ins_label.npy')
                semantic_labels = np.load(os.path.join(self.data_path, scan_name) + '_sem_label.npy')
                instance_bboxes = np.load(os.path.join(self.data_path, scan_name) + '_bbox.npy')
                mesh_vertices_list.append(mesh_vertices)
                instance_labels_list.append(instance_labels)
                semantic_labels_list.append(semantic_labels)
                instance_bboxes_list.append(instance_bboxes)

            self.mesh_vertices_list = mesh_vertices_list
            self.instance_labels_list = instance_labels_list
            self.semantic_labels_list = semantic_labels_list
            self.instance_bboxes_list = instance_bboxes_list

            # Cache everything so later runs skip the per-scene loads.
            with open(filename, 'wb') as f:
                pickle.dump((self.mesh_vertices_list, self.instance_labels_list,
                             self.semantic_labels_list, self.instance_bboxes_list), f)
            print(f"{filename} saved successfully")
            assert len(self.scan_names) == len(self.mesh_vertices_list)
        else:
            with open(filename, 'rb') as f:
                self.mesh_vertices_list, self.instance_labels_list, \
                self.semantic_labels_list, self.instance_bboxes_list = pickle.load(f)
            print(f"{filename} loaded successfully")

    def __len__(self):
        """Number of scenes in this split."""
        return len(self.mesh_vertices_list)

    def __getitem__(self, idx):
        """
        Returns a tuple whose elements correspond to the following keys:
            point_clouds: (N,3+C)
            center_label: (MAX_NUM_OBJ,3) for GT box center XYZ
            heading_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_HEADING_BIN-1
            heading_residual_label: (MAX_NUM_OBJ,)
            size_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_SIZE_CLUSTER
            size_residual_label: (MAX_NUM_OBJ,3)
            size_gts: (MAX_NUM_OBJ,3) GT box sizes
            sem_cls_label: (MAX_NUM_OBJ,) semantic class index
            box_label_mask: (MAX_NUM_OBJ) as 0/1 with 1 indicating a unique box
            point_obj_mask: (N,) with 0/1 with 1 indicating the point is in one of the object's OBB.
            point_instance_label: (N,) with int values in -1,...,num_box, indicating which object the point belongs to, -1 means a backgound point.
            scan_idx: int scan index in scan_names list
            pcl_color: raw RGB of the sampled points (unused downstream)
        """
        mesh_vertices = self.mesh_vertices_list[idx]
        instance_labels = self.instance_labels_list[idx]
        semantic_labels = self.semantic_labels_list[idx]
        instance_bboxes = self.instance_bboxes_list[idx]

        # Always keep the raw colors: the original only assigned pcl_color in
        # the `not use_color` branch, so use_color=True crashed with a
        # NameError at pcl_color[choices].
        pcl_color = mesh_vertices[:, 3:6]
        if not self.use_color:
            point_cloud = mesh_vertices[:, 0:3]  # do not use color for now
        else:
            point_cloud = mesh_vertices[:, 0:6]
            point_cloud[:, 3:] = (point_cloud[:, 3:] - MEAN_COLOR_RGB) / 256.0

        if self.use_height:
            # NOTE(review): percentile(..., 0.99) is the 0.99th percentile
            # (near the minimum z); this matches the reference implementation.
            floor_height = np.percentile(point_cloud[:, 2], 0.99)
            height = point_cloud[:, 2] - floor_height
            point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)], 1)

        # ------------------------------- LABELS ------------------------------
        target_bboxes = np.zeros((MAX_NUM_OBJ, 6))
        target_bboxes_mask = np.zeros((MAX_NUM_OBJ))
        angle_classes = np.zeros((MAX_NUM_OBJ,))   # always 0: axis-aligned boxes
        angle_residuals = np.zeros((MAX_NUM_OBJ,))
        size_classes = np.zeros((MAX_NUM_OBJ,))
        size_residuals = np.zeros((MAX_NUM_OBJ, 3))
        size_gts = np.zeros((MAX_NUM_OBJ, 3))

        point_cloud, choices = random_sampling(point_cloud, self.num_points, return_choices=True)
        instance_labels = instance_labels[choices]
        semantic_labels = semantic_labels[choices]
        pcl_color = pcl_color[choices]

        target_bboxes_mask[0:instance_bboxes.shape[0]] = 1
        target_bboxes[0:instance_bboxes.shape[0], :] = instance_bboxes[:, 0:6]

        # ------------------------------- DATA AUGMENTATION ------------------------------
        if self.augment:
            if np.random.random() > 0.5:
                # Flipping along the YZ plane
                point_cloud[:, 0] = -1 * point_cloud[:, 0]
                target_bboxes[:, 0] = -1 * target_bboxes[:, 0]

            if np.random.random() > 0.5:
                # Flipping along the XZ plane
                point_cloud[:, 1] = -1 * point_cloud[:, 1]
                target_bboxes[:, 1] = -1 * target_bboxes[:, 1]

            # Random anisotropic scale in [0.8, 1.2] per axis.
            k = np.random.uniform(0.8, 1.2, (1, 3))
            point_cloud[:, 0:3] = k * point_cloud[:, 0:3]
            target_bboxes[:, 0:3] = k * target_bboxes[:, 0:3]
            target_bboxes[:, 3:6] = k * target_bboxes[:, 3:6]

            # Rotation along up-axis/Z-axis
            rot_angle = (np.random.random() * np.pi / 18) - np.pi / 36  # -5 ~ +5 degree
            rot_mat = rotz(rot_angle)
            point_cloud[:, 0:3] = np.dot(point_cloud[:, 0:3], np.transpose(rot_mat))
            target_bboxes = rotate_aligned_boxes(target_bboxes, rot_mat)

        # Compute GT centers *AFTER* augmentation; pad unused slots with a
        # large value so nearest-center matching never selects them.
        gt_centers = target_bboxes[:, 0:3]
        gt_centers[instance_bboxes.shape[0]:, :] += 1000.0  # padding centers with a large number
        # Note: since there's no map between bbox instance labels and
        # pc instance_labels (it had been filtered
        # in the data preparation step) we'll compute the instance bbox
        # from the points sharing the same instance label.
        point_obj_mask = np.zeros(self.num_points)
        point_instance_label = np.zeros(self.num_points) - 1
        for i_instance in np.unique(instance_labels):
            # find all points belong to that instance
            ind = np.where(instance_labels == i_instance)[0]
            # only keep instances of the 18 detection classes
            if semantic_labels[ind[0]] in DC.nyu40ids:
                x = point_cloud[ind, :3]
                center = 0.5 * (x.min(0) + x.max(0))
                ilabel = np.argmin(((center - gt_centers) ** 2).sum(-1))
                point_instance_label[ind] = ilabel
                point_obj_mask[ind] = 1.0

        class_ind = [np.where(DC.nyu40ids == x)[0][0] for x in instance_bboxes[:, -1]]
        # NOTE: set size class as semantic class. Consider use size2class.
        size_classes[0:instance_bboxes.shape[0]] = class_ind
        size_residuals[0:instance_bboxes.shape[0], :] = \
            target_bboxes[0:instance_bboxes.shape[0], 3:6] - DC.mean_size_arr[class_ind, :]
        size_gts[0:instance_bboxes.shape[0], :] = target_bboxes[0:instance_bboxes.shape[0], 3:6]

        target_bboxes_semcls = np.zeros((MAX_NUM_OBJ))
        target_bboxes_semcls[0:instance_bboxes.shape[0]] = \
            [DC.nyu40id2class[x] for x in instance_bboxes[:, -1][0:instance_bboxes.shape[0]]]

        return point_cloud.astype(np.float32), gt_centers.astype(np.float32), \
            angle_classes.astype(np.int64), angle_residuals.astype(np.float32), \
            size_classes.astype(np.int64), size_residuals.astype(np.float32), \
            size_gts.astype(np.float32), target_bboxes_semcls.astype(np.int64), \
            target_bboxes_mask.astype(np.float32), point_obj_mask.astype(np.int64), \
            point_instance_label.astype(np.int64), np.array(idx).astype(np.int64), pcl_color


if __name__ == '__main__':
    # Smoke test: build the dataset from a hard-coded root and wrap it in a
    # MindSpore GeneratorDataset.
    data_root = '/home/amax/tyolm/datasets/scannet'
    dataset = ScannetDetectionDataset(data_root=data_root)
    # Column names must match the tuple order returned by __getitem__.
    dataset_generator = ds.GeneratorDataset(dataset, ["point_clouds", "center_label", "heading_class_label", "heading_residual_label",\
    "size_class_label", "size_residual_label", "size_gts", "sem_cls_label", "box_label_mask", "point_obj_mask", "point_instance_label", "scan_idx", "pcl_color"], shuffle=False)

    # Fetch one raw sample directly (bypasses MindSpore) and print it.
    data = dataset[0]
    print(data)

    #dataset_generator_batch = dataset_generator.batch(batch_size=1)
    #iterator = dataset_generator_batch.create_dict_iterator()
    
    # for data in iterator:
    #     center_label = data["center_label"].asnumpy()
    #     print(center_label.shape)
    # print("data size:", dataset_generator_batch.get_dataset_size())
    #for data in iterator:        
    #   print(data["point_clouds"].shape)