import sapien.core as sapien
import numpy as np
import pdb
import numpy as np
from PIL import Image, ImageColor
import open3d as o3d
import json
import transforms3d as t3d
import cv2
import torch
import yaml
import trimesh
import math
from .._GLOBAL_CONFIGS import CONFIGS_PATH
import os
from sapien.sensor import StereoDepthSensor, StereoDepthSensorConfig

import pytorch3d.ops as torch3d_ops

def fps(points, num_points=1024, use_cuda=True):
    """Farthest point sampling over an (N, 3) numpy point array.

    Args:
        points: (N, 3) numpy array of xyz coordinates.
        num_points: number of points to keep.
        use_cuda: run the sampling on the GPU when True.

    Returns:
        tuple: (sampled_points, indices) where sampled_points is a
        (num_points, 3) numpy array and indices is the raw torch tensor of
        shape (1, num_points) returned by pytorch3d (callers index it with
        ``indices.detach().cpu().numpy()[0]``).
    """
    K = [num_points]
    points = torch.from_numpy(points)
    if use_cuda:
        points = points.cuda()
    # pytorch3d expects a batch dimension; sample on the single-cloud batch.
    sampled_points, indices = torch3d_ops.sample_farthest_points(points=points.unsqueeze(0), K=K)
    # .cpu() is a no-op for CPU tensors, so one path serves both devices.
    sampled_points = sampled_points.squeeze(0).cpu().numpy()

    return sampled_points, indices



class Camera:

    def __init__(self, bias=0, random_head_camera_dis=0, **kwargs):
        """Parse camera-collection settings from the task configuration.

        Args:
            bias: z offset of the table; also raises the crop bbox floor.
            random_head_camera_dis: max random displacement applied to the
                head camera position when it is created.
            **kwargs: task configuration; must contain a 'camera' dict and a
                'left_embodiment_config' dict with 'static_camera_list'.
        """
        self.pcd_crop = kwargs.get("pcd_crop", False)
        self.pcd_down_sample_num = kwargs.get("pcd_down_sample_num", 0)
        self.pcd_crop_bbox = kwargs.get("bbox", [[-0.6, -0.35, 0.7401], [0.6, 0.35, 2]])
        # Raise the bbox floor together with the biased table height.
        self.pcd_crop_bbox[0][2] += bias
        self.table_z_bias = bias
        self.random_head_camera_dis = random_head_camera_dis

        self.static_camera_config = []

        camera_cfg = kwargs["camera"]
        self.head_camera_type = camera_cfg.get("head_camera_type", "D435")
        self.wrist_camera_type = camera_cfg.get("wrist_camera_type", "D435")
        self.custom_camera_type = kwargs.get('custom_camera_type', 'D435')
        self.collect_head_camera = camera_cfg.get("collect_head_camera", True)
        self.collect_wrist_camera = camera_cfg.get("collect_wrist_camera", True)
        self.collect_global_camera = kwargs.get('collect_global_camera', True)

        # Populated by load_camera(); indices into all_cameras match
        # camera_names one-to-one.
        self.all_cameras = []
        self.camera_names = []

        # TODO: embodiment-specific camera configuration loading.
        self.static_camera_info_list = kwargs["left_embodiment_config"]["static_camera_list"]
        self.static_camera_num = len(self.static_camera_info_list)

    def load_camera(self, scene):
        """
        Add cameras and set camera parameters
            - Including four cameras: left, right, front, head.

        Depending on the collect_* flags this creates:
          * two wrist cameras ("left_camera"/"right_camera"), posed later by
            update_wrist_camera();
          * three hard-coded global cameras (observer_camera, world_camera1,
            world_camera2), always created;
          * the static cameras described by self.static_camera_info_list
            (the entry named "head_camera" uses self.head_camera_type and an
            optional random position jitter).
        Every created camera is appended to self.all_cameras /
        self.camera_names in that order.

        Args:
            scene: SAPIEN scene the cameras are added to.
        """
        # Near/far clipping planes shared by every camera created below.
        near, far = 0.1, 100
        camera_config_path = os.path.join(CONFIGS_PATH, "_camera_config.yml")

        assert os.path.isfile(camera_config_path), "task config file is missing"

        with open(camera_config_path, "r", encoding="utf-8") as f:
            camera_args = yaml.load(f.read(), Loader=yaml.FullLoader)

        # sensor_mount_actor = scene.create_actor_builder().build_kinematic()

        # camera_args = get_camera_config()
        
        def _create_camera_with_pose(scene, camera_name, cam_pos, cam_forward, cam_left, camera_type='D435', near=0.1, far=100):
            """
            Helper function to create a camera with specified pose parameters.
            
            Args:
                scene: The SAPIEN scene
                camera_name: Name of the camera
                cam_pos: Camera position as numpy array (or list)
                cam_forward: Camera forward direction as numpy array (or list)
                cam_left: Camera left direction as numpy array (or list)
                camera_type: Type of camera configuration to use
                near: Near clipping plane
                far: Far clipping plane
            
            Returns:
                tuple: (camera, camera_config)

            Raises:
                ValueError: if camera_type is missing from the config file.
            """
            if (type(cam_pos) == list):
                cam_pos = np.array(cam_pos)
            if (type(cam_forward) == list):
                cam_forward = np.array(cam_forward)
            if (type(cam_left) == list):
                cam_left = np.array(cam_left)
            if camera_type not in camera_args.keys():
                raise ValueError(f"Camera type {camera_type} not supported")
            
            camera_config = camera_args[camera_type]
            
            # Normalize direction vectors
            cam_forward = cam_forward / np.linalg.norm(cam_forward)
            cam_left = cam_left / np.linalg.norm(cam_left)
            up = np.cross(cam_forward, cam_left)
            
            # Create transformation matrix: rotation columns are
            # (forward, left, up); translation is the camera position.
            mat44 = np.eye(4)
            mat44[:3, :3] = np.stack([cam_forward, cam_left, up], axis=1)
            mat44[:3, 3] = cam_pos
            
            # Create camera
            camera = scene.add_camera(
                name=camera_name,
                width=camera_config['w'],
                height=camera_config['h'],
                fovy=np.deg2rad(camera_config['fovy']),
                near=near,
                far=far,
            )
            camera.entity.set_pose(sapien.Pose(mat44))
            
            return camera, camera_config

        def create_camera(camera_info, random_head_camera_dis=0):
            """Create a scene camera from a static-camera info dict.

            camera_info must provide 'name', 'type', 'position', 'forward'
            and 'left'. The position is jittered by up to
            random_head_camera_dis along a random unit direction (0 disables
            the jitter).

            Returns:
                tuple: (camera, camera_config)

            Raises:
                ValueError: if camera_info['type'] is not in the config file.
            """
            if camera_info["type"] not in camera_args.keys():
                raise ValueError(f"Camera type {camera_info['type']} not supported")

            camera_config = camera_args[camera_info["type"]]
            cam_pos = np.array(camera_info["position"])
            # Random uniform offset along a random unit direction — pose
            # augmentation used for the head camera.
            vector = np.random.randn(3)
            random_dir = vector / np.linalg.norm(vector)
            cam_pos = cam_pos + random_dir * np.random.uniform(low=0, high=random_head_camera_dis)
            cam_forward = np.array(camera_info["forward"]) / np.linalg.norm(np.array(camera_info["forward"]))
            cam_left = np.array(camera_info["left"]) / np.linalg.norm(np.array(camera_info["left"]))
            up = np.cross(cam_forward, cam_left)
            # Same column layout as _create_camera_with_pose: (forward, left, up).
            mat44 = np.eye(4)
            mat44[:3, :3] = np.stack([cam_forward, cam_left, up], axis=1)
            mat44[:3, 3] = cam_pos

            # ========================= sensor camera =========================
            # sensor_config = StereoDepthSensorConfig()
            # sensor_config.rgb_resolution = (camera_config['w'], camera_config['h'])

            camera = scene.add_camera(
                name=camera_info["name"],
                width=camera_config["w"],
                height=camera_config["h"],
                fovy=np.deg2rad(camera_config["fovy"]),
                near=near,
                far=far,
            )
            camera.entity.set_pose(sapien.Pose(mat44))

            # ========================= sensor camera =========================
            # sensor_camera = StereoDepthSensor(
            #     sensor_config,
            #     sensor_mount_actor,
            #     sapien.Pose(mat44)
            # )
            # camera.entity.set_pose(sapien.Pose(camera_info['position']))
            # return camera, sensor_camera, camera_config
            return camera, camera_config

        # Clear all_cameras list for fresh assignment
        self.all_cameras = []
        self.camera_names = []

        # ================================= wrist camera =================================
        if self.collect_wrist_camera:
            wrist_camera_config = camera_args[self.wrist_camera_type]
            # Wrist cameras are created unposed; update_wrist_camera() moves
            # them each step to follow the end-effectors.
            self.left_camera = scene.add_camera(
                name="left_camera",
                width=wrist_camera_config["w"],
                height=wrist_camera_config["h"],
                fovy=np.deg2rad(wrist_camera_config["fovy"]),
                near=near,
                far=far,
            )

            self.right_camera = scene.add_camera(
                name="right_camera",
                width=wrist_camera_config["w"],
                height=wrist_camera_config["h"],
                fovy=np.deg2rad(wrist_camera_config["fovy"]),
                near=near,
                far=far,
            )
            
            # Add wrist cameras to all_cameras list
            self.all_cameras.append(self.left_camera)
            self.camera_names.append("left_camera")
            self.all_cameras.append(self.right_camera) 
            self.camera_names.append("right_camera")

        # ================================= sensor camera =================================
        # sensor_config = StereoDepthSensorConfig()
        # sensor_config.rgb_resolution = (wrist_camera_config['w'], wrist_camera_config['h'])
        # self.left_sensor_camera = StereoDepthSensor(
        #     sensor_config,
        #     sensor_mount_actor,
        #     sapien.Pose([0,0,0],[1,0,0,0])
        # )

        # self.right_sensor_camera = StereoDepthSensor(
        #     sensor_config,
        #     sensor_mount_actor,
        #     sapien.Pose([0,0,0],[1,0,0,0])
        # )

        # ================================= global cameras =================================
        # These three are always created regardless of the collect_* flags;
        # get_world_pcd() and get_observer_rgb()/rgba() rely on them.
        self.observer_camera, _ = _create_camera_with_pose(
            scene, "observer_camera", [0.4, 0.22, 1.42],[-1,-1,-1] ,[1,-1, 0], 
            camera_type='D435', near=near, far=far
        )
        self.world_camera1, _ = _create_camera_with_pose(
            scene, "world_camera1", [0.4, -0.4, 1.6],[-1, 1, -1.4] ,[1,-1, 0],
            camera_type='D435', near=near, far=far
        )
        self.world_camera2, _ = _create_camera_with_pose(
            scene, "world_camera2", [-0.4, -0.4, 1.6], [1, 1, -1.4], [-1, 1, 0],
            camera_type='D435', near=near, far=far
        )
        
        # Add global cameras to all_cameras list
        self.all_cameras.extend([self.observer_camera,self.world_camera1,self.world_camera2])
        self.camera_names.extend(["observer_camera","world_camera1","world_camera2"])

        # ================================= static camera =================================
        self.head_camera_id = None
        self.static_camera_list = []
        # self.static_sensor_camera_list = []
        self.static_camera_name = []
        # static camera list
        # print(self.static_camera_info_list)
        # print("collect_global_camera:",self.collect_global_camera)
        for i, camera_info in enumerate(self.static_camera_info_list):
            # print(camera_info)
            # Default forward: point at the world origin; default left: the
            # horizontal direction perpendicular to forward.
            if camera_info.get("forward") == None:
                camera_info["forward"] = (-1 * np.array(camera_info["position"])).tolist()
            if camera_info.get("left") == None:
                camera_info["left"] = [
                    -camera_info["forward"][1],
                    camera_info["forward"][0],
                ] + [0]

            if camera_info["name"] == "head_camera":
                if self.collect_head_camera:
                    self.head_camera_id = i
                    # The head camera's type is overridden by the task-level
                    # setting, and its pose may be randomly jittered.
                    camera_info["type"] = self.head_camera_type
                    # camera, sensor_camera, camera_config = create_camera(camera_info)
                    camera, camera_config = create_camera(camera_info,
                                                          random_head_camera_dis=self.random_head_camera_dis)
                    self.static_camera_list.append(camera)
                    self.static_camera_name.append(camera_info["name"])
                    # self.static_sensor_camera_list.append(sensor_camera)
                    self.static_camera_config.append(camera_config)
                    self.all_cameras.append(camera)
                    self.camera_names.append(camera_info["name"])
                    # ================================= sensor camera =================================
                    # camera_config = get_camera_config(camera_info['type'])
                    # cam_pos = np.array(camera_info['position'])
                    # cam_forward = np.array(camera_info['forward']) / np.linalg.norm(np.array(camera_info['forward']))
                    # cam_left = np.array(camera_info['left']) / np.linalg.norm(np.array(camera_info['left']))
                    # up = np.cross(cam_forward, cam_left)
                    # mat44 = np.eye(4)
                    # mat44[:3, :3] = np.stack([cam_forward, cam_left, up], axis=1)
                    # mat44[:3, 3] = cam_pos
                    # sensor_config = StereoDepthSensorConfig()
                    # sensor_config.rgb_resolution = (camera_config['w'], camera_config['h'])

                    # self.head_sensor = StereoDepthSensor(
                    #     sensor_config,
                    #     sensor_mount_actor,
                    #     sapien.Pose(mat44)
                    # )
            else:
                # Set default camera type if not specified
                if "type" not in camera_info:
                    camera_info["type"] = "D435"  # Default camera type
                
                if self.collect_global_camera:
                    # camera, sensor_camera, camera_config = create_camera(camera_info)
                    camera, camera_config = create_camera(camera_info)
                    self.static_camera_list.append(camera)
                    self.static_camera_name.append(camera_info["name"])
                    # self.static_sensor_camera_list.append(sensor_camera)
                    self.static_camera_config.append(camera_config)
                    self.all_cameras.append(camera)
                    self.camera_names.append(camera_info["name"])

        
        # Print camera information for debugging
        # print(f"Total cameras: {len(self.all_cameras)}")
        # for i, camera in enumerate(self.all_cameras):
        #     print(f"Camera {i}: {camera.name}")

    def update_picture(self):
        """Trigger a render capture on every registered camera."""
        for cam in self.all_cameras:
            cam.take_picture()

    def update_wrist_camera(self, left_pose, right_pose):
        """
        Update rendering to refresh the camera's RGBD information
        (rendering must be updated even when disabled, otherwise data cannot be collected).
        """
        if not self.collect_wrist_camera:
            return
        self.left_camera.entity.set_pose(left_pose)
        self.right_camera.entity.set_pose(right_pose)

    def _get_config(self, camera):
        """Return one camera's intrinsic, extrinsic and model matrices."""
        return {
            "intrinsic_cv": camera.get_intrinsic_matrix(),
            "extrinsic_cv": camera.get_extrinsic_matrix(),
            "cam2world_gl": camera.get_model_matrix(),
        }

    def get_config(self) -> dict:
        """Collect camera matrix dicts for every enabled camera, keyed by name."""
        res = {}
        if self.collect_wrist_camera:
            res['left_camera'] = self._get_config(self.left_camera)
            res['right_camera'] = self._get_config(self.right_camera)

        for cam, name in zip(self.static_camera_list, self.static_camera_name):
            # Head camera is gated by collect_head_camera, every other
            # static camera by collect_global_camera.
            enabled = (self.collect_head_camera
                       if name == 'head_camera'
                       else self.collect_global_camera)
            if enabled:
                res[name] = self._get_config(cam)
        return res

    def get_camera_config(self, camera_type='D435') -> dict:
        """
        Get camera configuration for a specific camera type.

        Args:
            camera_type: Type of camera (e.g., 'D435')

        Returns:
            Camera configuration dictionary containing width, height, fovy, etc.

        Raises:
            FileNotFoundError: if the shared camera config file is absent.
            ValueError: if camera_type is not defined in the config file.
        """
        config_path = os.path.join(CONFIGS_PATH, "_camera_config.yml")
        if not os.path.isfile(config_path):
            raise FileNotFoundError("Camera config file is missing")

        with open(config_path, "r", encoding="utf-8") as f:
            all_configs = yaml.load(f.read(), Loader=yaml.FullLoader)

        if camera_type not in all_configs:
            raise ValueError(f"Camera type {camera_type} not supported")
        return all_configs[camera_type]

    def _get_rgba(self, camera):
        camera_rgba = camera.get_picture("Color")
        # SAPIEN returns color in BGRA/BGR order; convert to standard RGB order
        camera_rgb_img = (camera_rgba * 255).clip(0, 255).astype("uint8")[:, :, [2, 1, 0]]
        return camera_rgb_img

    def get_rgb(self) -> dict:
        rgba = self.get_rgba()
        rgb = {}
        for camera_name, camera_data in rgba.items():
            rgb[camera_name] = {}
            rgb[camera_name]["rgb"] = camera_data["rgba"][:, :, :3]  # Exclude alpha channel
        return rgb
    
    # Get Camera RGBA
    def get_rgba(self) -> dict:
        # ================================= sensor camera =================================
        # def _get_sensor_rgba(sensor):
        #     camera_rgba = sensor.get_rgb()
        #     camera_rgba_img = (camera_rgba * 255).clip(0, 255).astype("uint8")[:,:,:3]
        #     return camera_rgba_img

        res = {}

        if self.collect_wrist_camera:
            res['left_camera'] = {}
            res['right_camera'] = {}
            res['left_camera']['rgba'] = self._get_rgba(self.left_camera)
            res['right_camera']['rgba'] = self._get_rgba(self.right_camera)

        for camera, camera_name in zip(self.static_camera_list, self.static_camera_name):
            if camera_name == 'head_camera':
                if self.collect_head_camera:
                    res[camera_name]={}
                    res[camera_name]['rgba'] = self._get_rgba(camera)
            else:
                if self.collect_global_camera:
                    res[camera_name]={}
                    res[camera_name]['rgba'] = self._get_rgba(camera)
        # ================================= sensor camera =================================
        # res['head_sensor']['rgb'] = _get_sensor_rgba(self.head_sensor)

        return res

    def get_observer_rgba(self) -> dict:
        self.observer_camera.take_picture()
        return self._get_rgba(self.observer_camera)

    def get_observer_rgb(self) -> dict:
        self.observer_camera.take_picture()

        def _get_rgb(camera):
            camera_rgba = camera.get_picture("Color")
            # Convert BGRA/BGR to RGB as done in _get_rgba
            camera_rgb_img = (camera_rgba * 255).clip(0, 255).astype("uint8")[:, :, [2, 1, 0]]
            return camera_rgb_img

        return _get_rgb(self.observer_camera)

    def _get_segmentation(self, camera, level="actor"):
        """Render a colorized segmentation mask for one camera.

        Labels are mapped through PIL's sorted named-color palette so each
        id gets a distinct RGB color.

        Args:
            camera: camera exposing get_picture("Segmentation") -> [H, W, 4].
            level: "mesh" (label channel 0) or "actor" (label channel 1).

        Returns:
            HxWx3 uint8 colorized label image.

        Raises:
            ValueError: for an unknown level (previously this path crashed
                with an UnboundLocalError on label0_image).
        """
        seg_labels = camera.get_picture("Segmentation")  # [H, W, 4]
        colormap = sorted(set(ImageColor.colormap.values()))
        color_palette = np.array(
            [ImageColor.getrgb(color) for color in colormap], dtype=np.uint8
        )
        if level == "mesh":
            label0_image = seg_labels[..., 0].astype(np.uint8)  # mesh-level ids
        elif level == "actor":
            label0_image = seg_labels[..., 1].astype(np.uint8)  # actor-level ids
        else:
            raise ValueError(f"Unknown segmentation level: {level}")
        # NOTE(review): label ids above len(color_palette) would wrap/IndexError;
        # assumed to stay within the palette size — confirm.
        return color_palette[label0_image]

    # Get Camera Segmentation
    def get_segmentation(self, level="mesh") -> dict:
 
        res = {
            # 'left_camera':{},
            # 'right_camera':{}
        }

        if self.collect_wrist_camera:
            res['left_camera'] = {}
            res['right_camera'] = {}
            res['left_camera'][f'{level}_segmentation'] = self._get_segmentation(self.left_camera, level=level)
            res['right_camera'][f'{level}_segmentation'] = self._get_segmentation(self.right_camera, level=level)
        
        for camera, camera_name in zip(self.static_camera_list, self.static_camera_name):
            if camera_name == 'head_camera':
                if self.collect_head_camera:
                    res[camera_name]={}
                    res[camera_name][f'{level}_segmentation'] = self._get_segmentation(camera, level=level)
            else:
                res[camera_name]={}
                res[camera_name][f'{level}_segmentation'] = self._get_segmentation(camera, level=level)
        return res

    def _get_depth(self, camera):
        position = camera.get_picture("Position")
        depth = -position[..., 2]
        depth_image = (depth * 1000.0).astype(np.float64)
        return depth_image

    def _get_sensor_depth(self, sensor):
        depth = sensor.get_depth()
        depth = (depth * 1000.0).astype(np.float64)
        return depth

    # Get Camera Depth
    def get_depth(self)->dict:
        res = {
            # 'left_camera':{},
            # 'right_camera':{},
            # 'head_sensor':{}
        }
        
        if self.collect_wrist_camera:
            res['left_camera'] = {}
            res['right_camera'] = {}
            res['left_camera']['depth'] = self._get_depth(self.left_camera)
            res['right_camera']['depth'] = self._get_depth(self.right_camera)
        for camera, camera_name in zip(self.static_camera_list, self.static_camera_name):
            if camera_name == 'head_camera':
                if self.collect_head_camera:
                    res[camera_name]={}
                    res[camera_name]['depth'] = self._get_depth(camera)
            else:
                res[camera_name]={}
                res[camera_name]['depth'] = self._get_depth(camera)
        # res['head_sensor']['depth'] = _get_sensor_depth(self.head_sensor)

        return res

    def _get_pcd(self, camera, point_num=0):
        """Back-project one camera's capture into a world-frame point cloud.

        Args:
            camera: camera exposing get_picture_cuda("Color"/"Position") and
                get_model_matrix().
            point_num: if > 0, FPS-downsample the cloud to this many points.

        Returns:
            (N, 6) numpy array of [x, y, z, r, g, b] with colors clamped to
            [0, 1]; an empty (0, 6) array when no valid points survive the
            validity mask or the optional bbox crop.
        """
        rgba = camera.get_picture_cuda("Color").torch() # [H, W, 4]
        position = camera.get_picture_cuda("Position").torch()
        model_matrix = camera.get_model_matrix()
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model_matrix = torch.tensor(model_matrix, dtype=torch.float32).to(device)

        # Extract valid three-dimensional points and corresponding color data.
        # position[..., 3] < 1 presumably marks pixels that hit geometry —
        # confirm against SAPIEN's Position buffer semantics.
        valid_mask = position[..., 3] < 1
        points_opengl = position[..., :3][valid_mask]
        points_color = rgba[valid_mask][:,:3]
        
        # Check if we have any valid points
        if points_opengl.shape[0] == 0:
            return np.zeros((0, 6))  # Return empty 2D D array with 6 columns
            
        # Transform into the world coordinate system.
        # NOTE(review): .squeeze(1) on the (1, N, 3) bmm result is a no-op
        # unless N == 1, where it collapses the point axis — verify the
        # single-point case behaves as intended.
        points_world = torch.bmm(points_opengl.view(1, -1, 3), model_matrix[:3, :3].transpose(0,1).view(-1, 3, 3)).squeeze(1) + model_matrix[:3, 3]

        # Format color data.
        points_color = torch.clamp(points_color, 0, 1)

        points_world = points_world.squeeze(0)
        
        # If crop is needed: keep only points inside the axis-aligned bbox
        # configured in __init__ (self.pcd_crop_bbox).
        if self.pcd_crop:
            min_bound = torch.tensor(self.pcd_crop_bbox[0], dtype=torch.float32).to(device)
            max_bound = torch.tensor(self.pcd_crop_bbox[1], dtype=torch.float32).to(device)
            inside_bounds_mask = (points_world.squeeze(0) >= min_bound).all(dim=1) & (points_world.squeeze(0)  <= max_bound).all(dim=1)
            points_world = points_world[inside_bounds_mask]
            points_color = points_color[inside_bounds_mask]
            
            # Check if we still have points after cropping
            if points_world.shape[0] == 0:
                return np.zeros((0, 6))  # Return empty 2D array with 6 columns
        
        # Convert the tensor back to a NumPy array for use with Open3D.
        points_world_np = points_world.cpu().numpy()
        points_color_np = points_color.cpu().numpy()

        # Optional farthest-point downsampling; fps returns indices shaped
        # (1, point_num), hence the [0].
        if point_num > 0:
            points_world_np, index = fps(points_world_np, point_num)
            index = index.detach().cpu().numpy()[0]
            points_color_np = points_color_np[index,:]

        return np.hstack((points_world_np, points_color_np))

    # Get Camera PointCloud
    def get_pcd(self, is_combine=True, camera_indices=[6,7,8,9,10]):
        """
        Get point cloud from cameras specified by indices.
        
        Args:
            camera_indices: List of indices corresponding to cameras in self.all_cameras.
                          If None, uses all available cameras.
        
        Returns:
            Combined point cloud from specified cameras.
        """
        if camera_indices is None:
            cam_list = self.all_cameras.copy()
        else:
            cam_list = []
            for idx in camera_indices:
                if 0 <= idx < len(self.all_cameras):
                    cam_list.append(self.all_cameras[idx])
                else:
                    print(f"Warning: Camera index {idx} is out of range. Valid range: 0-{len(self.all_cameras)-1}")
        
     
        if len(cam_list) == 0:
            print('No cameras available for point cloud generation!')
            return np.zeros((0, 6))  # Return empty 2D array with 6 columns
        
        pcd_list = []
        for camera in cam_list:
            try:
                pcd = self._get_pcd(camera)
                if pcd.shape[0] > 0:
                    pcd_list.append(pcd)
            except Exception as e:
                print(f"Warning: Failed to generate point cloud from camera: {e}")
        
        if len(pcd_list) == 0:
            return np.zeros((0, 6))  # Return empty 2D array with 6 columns
        
        # Combine all point clouds
        conbine_pcd = np.vstack(pcd_list)
        
        pcd_array = conbine_pcd[:,:3]
        index = np.array(range(len(conbine_pcd)))

        if self.pcd_down_sample_num > 0:
            pcd_array, index = fps(conbine_pcd[:,:3], self.pcd_down_sample_num)
            index = index.detach().cpu().numpy()[0]

        return conbine_pcd[index]

    # Get World PointCloud
    def get_world_pcd(self):
        """Fused world-frame point cloud from the two world cameras.

        Takes fresh captures from world_camera1/world_camera2, back-projects
        both into world coordinates, stacks them, and FPS-downsamples the
        result to (at most) 2000 points.

        Returns:
            (K, 6) numpy array of [x, y, z, r, g, b] with colors in [0, 1]
            (K chosen by fps with num_points=2000); empty (0, 6) array if
            neither camera produced valid points.
        """
        self.world_camera1.take_picture()
        self.world_camera2.take_picture()

        def _get_camera_pcd(camera, color=True):
            """Back-project one camera: (N, 6) with color, else (N, 3)."""
            rgba = camera.get_picture_cuda("Color").torch()  # [H, W, 4]
            position = camera.get_picture_cuda("Position").torch()
            model_matrix = camera.get_model_matrix()

            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            model_matrix = torch.tensor(model_matrix, dtype=torch.float32).to(device)

            # Extract valid three-dimensional points and corresponding color data.
            # position[..., 3] < 1 presumably marks pixels that hit geometry —
            # confirm against SAPIEN's Position buffer semantics.
            valid_mask = position[..., 3] < 1
            points_opengl = position[..., :3][valid_mask]
            points_color = rgba[valid_mask][:, :3]
            
            # Check if we have any valid points
            if points_opengl.shape[0] == 0:
                return np.zeros((0, 6))  # Return empty 2D array with 6 columns
                
            # Transform into the world coordinate system.
            points_world = (torch.bmm(
                points_opengl.view(1, -1, 3),
                model_matrix[:3, :3].transpose(0, 1).view(-1, 3, 3),
            ).squeeze(1) + model_matrix[:3, 3])

            # Format color data.
            points_color = torch.clamp(points_color, 0, 1)
            points_world = points_world.squeeze(0)

            # Convert the tensor back to a NumPy array for use with Open3D.
            points_world_np = points_world.cpu().numpy()
            points_color_np = points_color.cpu().numpy()
            # print(points_world_np.shape, points_color_np.shape)

            res_pcd = (np.hstack((points_world_np, points_color_np)) if color else points_world_np)
            return res_pcd

        pcd1 = _get_camera_pcd(self.world_camera1, color=True)
        pcd2 = _get_camera_pcd(self.world_camera2, color=True)
        res_pcd = np.vstack((pcd1, pcd2))

        # Apply downsampling if needed: FPS on xyz only; fps returns indices
        # shaped (1, K), hence the [0].
        if res_pcd.shape[0] > 0:
            pcd_array, index = fps(res_pcd[:, :3], 2000)
            index = index.detach().cpu().numpy()[0]
            return res_pcd[index]
        else:
            return np.zeros((0, 6))  # Return empty 2D array with 6 columns

    def _get_from_cam(self, info_name, camera, level="actor"):
        try:
            if info_name == 'rgba':
                return self._get_rgba(camera)
            elif info_name == 'depth':
                return self._get_depth(camera)
            elif info_name == 'seg':
                return self._get_segmentation(camera, level=level)
            elif info_name == 'pcd':
                return self._get_pcd(camera)
            elif info_name == 'config':
                return self._get_config(camera)
            else:
                print(f"Warning: Unknown info_name '{info_name}' requested from camera")
                return None
        except Exception as e:
            print(f"Error getting {info_name} from camera {camera.name}: {e}")
            return None
