import sys
from pathlib import Path
# Fix for Python 3.10 compatibility - ensure path is string
current_dir = str(Path(__file__).parent)
if current_dir not in sys.path:
    sys.path.append(current_dir)

import argparse
import json
import zarr
import numpy as np
import os
import shutil
import time
import subprocess
import re
from pathlib import Path
from termcolor import colored
import traceback
from time_it import timeit
from scalePcd import scalePcdD,scalePcd
from pathlib import Path
from loadJson import _load_pcd_manip

"""
UNIFIED DATA INTERFACE:

unifiedData: dict[str, np.ndarray | dict] = {
    "task_name": str,           # Task name extracted from zarr path before "-demo"
    "assets": list[str],        # List of asset strings containing '/base' from scene_info.json, same for all timesteps in an episode
    "rgb": np.ndarray,          # Shape: (H, W, 3) - RGB image data (observer RGB)
    "pointcloud": np.ndarray,   # Shape: (Np, 6) - Point cloud [x, y, z, r, g, b] from robotwin->rgbd->pcd
    "segpointcloud": np.ndarray, # Shape: (Np, 6) - Segmented point cloud [x, y, z, r, g, b] colored by segmentation rgb+seg->pcd
    "repointcloud": np.ndarray, # Shape: (Np, 6) - Reconstructed point cloud [x, y, z, r, g, b] from rgbPcd pipeline (rgbd->pcd reconstruction)
    "injpointcloud": np.ndarray, # Shape: (Np, 6) - Injected point cloud [x, y, z, r, g, b] from injPcd pipeline
    "vggtpointcloud2": np.ndarray, # Shape: (Np, 6) - VGGT mode 2 generated point cloud [x, y, z, r, g, b] vggt->rgbd->pcd
    "vggtpointcloud3": np.ndarray, # Shape: (Np, 6) - VGGT mode 3 generated point cloud [x, y, z, r, g, b] vggt->pcdhead->pcd
    "foundationPoses": np.ndarray, # Shape: (N, numofobj, 6) - Foundation poses [x, y, z, rx, ry, rz] for objects
    
    # Robot state data (single frame)
    "state": np.ndarray,        # Shape: varies - Robot state vector
    "action": np.ndarray,       # Shape: varies - Robot action vector
    "js_action": np.ndarray,    # Shape: varies - Joint space action vector
    "js_state": np.ndarray,     # Shape: varies - Joint space state vector
    
    # Pose data (single frame)
    "key_poses": np.ndarray,    # Shape: (48,) - Combined key poses [contact_poses(12) + gripper_poses(36)]
    
    # Camera data (nested dict, single frame)
    "cameras": dict[str, dict[str, np.ndarray]] = {
        "camera_name": {
            "rgb": np.ndarray,          # Shape: (H, W, 3) - RGB image
            "depth": np.ndarray,        # Shape: (H, W) - Depth image
            "segmentation": np.ndarray, # Shape: (H, W, 3) - Segmentation mask
            "intrinsic_cv": np.ndarray, # Shape: (3, 3) - Camera intrinsic matrix
            "extrinsic_cv": np.ndarray, # Shape: (3, 4) - Camera extrinsic matrix
            "cam2world_gl": np.ndarray, # Shape: (4, 4) - Camera to world transform
        }
    },
    
    # Gaussian splatting data (nested dict, single frame)
    "gaussian": dict[str, np.ndarray] = {
        "positions": np.ndarray,    # Shape: (Ng, 3) - 3D positions of Gaussians
        "scaling": np.ndarray,      # Shape: (Ng, 3) - Scaling factors
        "rotation": np.ndarray,     # Shape: (Ng, 4) - Quaternion rotations [w,x,y,z]
        "opacity": np.ndarray,      # Shape: (Ng,) - Opacity values
        "colors": np.ndarray,       # Shape: (Ng, 3) or (Ng, 3, SH_coeffs) - Colors/SH coeffs
        "use_sh": bool,             # Whether using spherical harmonics
        "sh_degree": int            # Degree of spherical harmonics
    }
}

Note: 
- All arrays are for SINGLE FRAME only
- Zarr storage has separate 'data', 'cameras', and 'gaussian' groups
- UnifiedData merges them with 'cameras' and 'gaussian' as nested dicts
- Np = number of points, Ng = number of Gaussians, H/W = image dimensions
- segpointcloud is initially None/empty and gets filled by segPcd pipeline step
- repointcloud is initially None/empty and gets filled by rgbPcd pipeline step
- vggtpointcloud2 is initially None/empty and gets filled by vggtPcd pipeline step with mode=2
- vggtpointcloud3 is initially None/empty and gets filled by vggtPcd pipeline step with mode=3
- key_poses (48D) is loaded from data/key_poses in zarr, combining contact_poses (12D) and gripper_poses (36D)
"""
# Short pipeline-token suffix -> unifiedData key. Resolves the suffix of
# steps such as 'ln<key>' (soft link), 'ld<key>' (load only) and
# 'save<key>' (export to .pcd).
pipeMap = {
    'p': 'pointcloud',
    's': 'segpointcloud',
    'r': 'repointcloud',
    'i': 'injpointcloud',
    'v2': 'vggtpointcloud2',
    'v3': 'vggtpointcloud3',
    'fp': 'foundationPoses',
    'c': 'cameras',
    'g': 'gaussian',
    'ec': 'ec',
    'gc': 'gc',
    'ee': 'ee',
    'qpos':'qpos',
    'kp': 'key_poses',
    'ps': 'paintpointcloud',
    'js': 'state',
    'ja': 'action',
    'jjs': 'js_state',
    'jja': 'js_action',
    'ees': 'ee_state',
    'eea': 'ee_action',
    'qps': 'qpos_state',
    'qpa': 'qpos_action'
}
# pipeMap extended with single-char camera selectors (b/f/l/r/h), used where
# a token suffix may also name a camera (e.g. multi-single-char key parsing
# in PipelineAnalyzer.get_key_from_pipeline).
full_pipeMap={
    'b': 'midBack_camera',
    'f': 'midFront_camera',
    'l': 'midLeft_camera',
    'r': 'midRight_camera',
    'h': 'midHead_camera',
    **pipeMap
}
# unifiedData key -> [zarr group, dataset name] location in the source zarr.
# 'ee' and 'qpos' map to a PAIR of locations ([state, action]) — consumers
# must handle the nested-list form for those two keys.
zarrMap = {
    'pointcloud': ['pcd', 'pointcloud'],
    'segpointcloud': ['pcd', 'segpointcloud'],
    'repointcloud': ['pcd', 'repointcloud'],
    'injpointcloud': ['pcd', 'injpointcloud'],
    'vggtpointcloud2': ['pcd', 'vggtpointcloud2'],
    'vggtpointcloud3': ['pcd', 'vggtpointcloud3'],
    'foundationPoses': ['data', 'foundationPoses'],
    'gaussian_positions': ['gaussian', 'positions'],
    'gaussian_scaling': ['gaussian', 'scaling'],
    'gaussian_rotation': ['gaussian', 'rotation'],
    'gaussian_opacity': ['gaussian', 'opacity'],
    'gaussian_colors': ['gaussian', 'colors'],
    'ec': ['data', 'ec'],
    'gc': ['data', 'gc'],
    'ee': [['ee','state'],['ee','action']],
    'qpos': [['qpos','state'],['qpos','action']],
    'ee_state': ['ee','state'],
    'ee_action': ['ee','action'],
    'qpos_state': ['qpos','state'],
    'qpos_action': ['qpos','action'],
    'key_poses': ['data', 'key_poses'],
    'paintpointcloud': ['pcd', 'paintpointcloud'],
    'state': ['data', 'state'],
    'action': ['data', 'action'],
    'js_state': ['data', 'js_state'],
    'js_action': ['data', 'js_action'],
    
}

class PipelineAnalyzer:
    """Analyzes pipeline to determine what data needs to be loaded and written.

    A pipeline string is a '-'-separated list of step tokens, e.g.
    'seg%3-pcd%3-eng0%-f1024-lnr'. All methods are static; the class acts as
    a namespace for pipeline-string parsing helpers.
    """
    @staticmethod
    def get_match_from_pipeline(pipeline_str: str, match: str) -> str:
        """Return the LAST '-'-separated component of pipeline_str that
        starts with `match`, or "" when no component matches."""
        components = pipeline_str.split('-')
        for component in reversed(components):
            if component.startswith(match):
                return component
        return ""

    @staticmethod
    def get_key_from_pipeline(pipeline_str: str, get_full_key: bool = False, match="ln", multi_single_char: bool = False):
        """
        Automatically determine output key from pipeline string.

        Scans components from the end and picks the last one starting with
        `match` whose suffix is valid:
          - multi_single_char=False: the whole suffix must be a full_pipeMap key.
          - multi_single_char=True: EVERY character of the suffix must be a
            full_pipeMap key.

        Args:
            pipeline_str: Pipeline string like 'seg%3-pcd%3-eng0%-f1024-lnr'
            get_full_key: If True, map the suffix (or each suffix char) through
                full_pipeMap instead of returning it raw.
            match: Component prefix to search for (default 'ln').
            multi_single_char: Treat the suffix as multiple single-char keys.

        Returns:
            Suffix string (or its full_pipeMap mapping) when
            multi_single_char=False; a list of chars (or mappings) when
            multi_single_char=True. Returns None (or [] in multi mode) when
            no matching component exists. Exits the process on an empty
            pipeline string.

        Examples:
            'seg%3-pcd%3-eng0%-f1024-lnr' -> 'repointcloud'
            'ldp-k-js-ja-savep-lnp' -> 'pointcloud'
        """
        if not pipeline_str:
            print(colored("empty pipeline string", 'red'))
            exit()
        components = pipeline_str.split('-')
        target_component = None
        for component in reversed(components):
            if component.startswith(match):
                rest = component[len(match):]
                if not multi_single_char and (rest in full_pipeMap.keys()):
                    target_component = component
                    break
                if multi_single_char:
                    # BUG FIX: the original loop only `continue`d on valid
                    # chars, so its for-else ALWAYS accepted the component —
                    # even when the suffix held characters with no
                    # full_pipeMap entry. Reject the component as soon as an
                    # unmapped char is found; the for-else fires only when
                    # every char validated.
                    for char in rest:
                        if char not in full_pipeMap:
                            break
                    else:
                        target_component = component
                        break
        if target_component is None:
            print(colored(f"no {match} component found in pipeline string: {pipeline_str}", 'red'))
            return [] if multi_single_char else None
        rest = target_component[len(match):]
        if not multi_single_char:
            return rest if not get_full_key else full_pipeMap[rest]
        else:
            return [(char if not get_full_key else full_pipeMap[char]) for char in rest]

    @staticmethod
    def test_to_real(pipeline_str: str):
        """Strip test-only steps from a pipeline string.

        Drops 'k' (keypcd), every 'j*' (trajectory) step and every 'save*'
        step, returning the remaining steps re-joined with '-'.
        """
        pipe_list = pipeline_str.split('-')
        pipe_list_new = []
        for pipe in pipe_list:
            if pipe == 'k' or pipe.startswith('j') or pipe.startswith('save'):
                continue
            pipe_list_new.append(pipe)
        return '-'.join(pipe_list_new)

    @staticmethod
    def analyze_pipeline(pipeline_str: str):
        """
        Analyze pipeline string to determine what data will be:
        1. Loaded (read from disk)
        2. Written (saved to disk - either created or modified)

        Returns:
            dict with keys: 'load_keys', 'write_keys', 'pipeline_steps'
        """
        if not pipeline_str:
            return {
                'load_keys': set(),
                'write_keys': set(),
                'pipeline_steps': []
            }

        load_keys = set()
        write_keys = set()

        def write_pcd_if_loaded():
            # Geometric steps (fps/trim/key/...) modify every loaded
            # pointcloud variant in place, so each must be written back.
            for loaded_key in load_keys:
                if "pointcloud" in loaded_key:
                    write_keys.add(loaded_key)

        pipeline_steps = []

        parts = pipeline_str.split('-')

        # NOTE: branches use prefix matching and are therefore order
        # sensitive — multi-char prefixes ('seg', 'pcd', 'ln', 'save', ...)
        # must be tested before any single-char prefix that would shadow
        # them (e.g. the bare 'c' camera-filter branch comes after 'save').
        for part in parts:
            if part.startswith('f'):
                # f{N[shortcut]}: farthest-point sampling
                if not len(part) > 1:
                    print(colored(f"Invalid fps parameter: {part}, want a number and an optional included pcd shortcut", 'red'))
                else:
                    pipeline_steps.append(('fps', part[1:]))
                    write_pcd_if_loaded()
            elif part.startswith('r'):
                # r{N[shortcut]}: random downsampling
                if not len(part) > 1:
                    print(colored(f"Invalid fps parameter: {part}, want a number and an optional included pcd shortcut", 'red'))
                else:
                    pipeline_steps.append(('rands', part[1:]))
                    write_pcd_if_loaded()

            elif part.startswith('t'):
                # t[param]: bounding-box trim
                pipeline_steps.append(('trim', part[1:] if len(part) > 1 else None))
                write_pcd_if_loaded()

            elif part.startswith('a'):
                # Arm trimming - ac{number} for cylinder or am{number} for mesh
                if len(part) < 3:
                    print(colored(f"Invalid arm trimming parameter: {part}, want ac + number or am + number", 'red'))
                else:
                    arm_param = part[1:]  # Remove 'a' prefix
                    if len(arm_param) >= 2 and arm_param[0] in ['c', 'm'] and arm_param[1:].isdigit():
                        # Valid format: ac{number} or am{number}
                        pipeline_steps.append(('arm_trim', arm_param))
                        write_pcd_if_loaded()
                        # Joint state is needed to place the arm geometry.
                        load_keys.add('js_state')
                    else:
                        print(colored(f"Invalid arm trimming parameter: {arm_param}, want c + number or m + number", 'red'))

            elif part.startswith('seg'):
                # seg[param]: segmentation-colored pointcloud from cameras
                if len(part) > 3:
                    param = part[3:]
                else:
                    param = None
                pipeline_steps.append(('segmentation', param))
                load_keys.add('cameras')
                write_keys.add('segpointcloud')

            elif part == 'g':
                # g: convert pointcloud to Gaussian splatting representation
                pipeline_steps.append(('gaussian', None))
                load_keys.add('pointcloud')
                write_keys.add('gaussian')

            elif part.startswith('k'):
                # k[dist_cm]: key-pose pointcloud augmentation
                if len(part) == 1:
                    pipeline_steps.append(('keypcd', 0.0))  # Default distance 0.0
                    load_keys.add('key_poses')
                    write_pcd_if_loaded()
                    write_keys.add('key_poses')
                else:
                    if not part[1:].isdigit():
                        print(colored(f"Invalid keypcd parameter: {part}, want a number", 'red'))
                    else:
                        dist = float(part[1:]) / 100.0  # Convert to meters (assuming input is in cm)
                        pipeline_steps.append(('keypcd', dist))
                        load_keys.add('key_poses')
                        write_pcd_if_loaded()
                        write_keys.add('key_poses')
            elif part.startswith('pcd'):
                # pcd[param]: RGB-D -> pointcloud reconstruction
                if len(part) > 3:
                    param = part[3:]
                else:
                    param = None
                # RGB point cloud generation - needs cameras, creates repointcloud
                pipeline_steps.append(('pcd_generation', param))
                load_keys.add('cameras')
                write_keys.add('repointcloud')
                write_keys.add('injpointcloud')

            elif part.startswith('vt'):
                # vt{mode}: VGGT-based pointcloud generation (mode 2 or 3)
                if not len(part) > 2 or not part[2:].isdigit():
                    print(colored(f"Invalid vggt parameter: {part}", 'red'))
                else:
                    # VGGT - needs cameras, creates vggtpointcloud
                    mode = int(part[2:])
                    pipeline_steps.append(('vggt', mode))
                    load_keys.add('cameras')
                    write_keys.add(f'vggtpointcloud{mode}')

            elif part.startswith('pose'):
                # Pose generation - needs cameras, creates foundationPoses
                pipeline_steps.append(('pose', part[len('pose'):]))
                load_keys.add('cameras')
                write_keys.add('foundationPoses')

            elif part.startswith('ln'):
                # Soft link creation - ln{pcd_type}
                if not len(part) > 2:
                    print(colored(f"Invalid ln parameter: {part}, want ln + one of {list(pipeMap.keys())}", 'red'))
                else:
                    ln_param = part[2:]
                    if ln_param in pipeMap.keys() and (ln_param not in ['ee', 'qpos']):
                        load_keys.add(pipeMap[ln_param])
                        write_keys.add(pipeMap[ln_param])
                        pipeline_steps.append(('softlink', ln_param))
                    elif ln_param in ['ee', 'qpos']:
                        # 'ee'/'qpos' link a pair of datasets; no load/write keys.
                        pipeline_steps.append(('softlink', ln_param))
                    else:
                        print(colored(f"Invalid ln parameter: {ln_param}, want one of {pipeMap.keys()}", 'red'))

            elif part.startswith('ld'):
                # Load data - ld{pcd_type} - only loads data without processing
                if not len(part) > 2:
                    print(colored(f"Invalid ld parameter: {part}, want ld + one of {list(pipeMap.keys())}", 'red'))
                else:
                    ld_param = part[2:]
                    if ld_param in pipeMap.keys():
                        load_keys.add(pipeMap[ld_param])
                        print(colored(f"Added {pipeMap[ld_param]} to load_keys (load only, no processing)", 'blue'))
                    else:
                        print(colored(f"Invalid ld parameter: {ld_param}, want one of {pipeMap.keys()}", 'red'))

            elif part.startswith('save'):
                # Save operations - need the respective pointclouds to .pcd but don't modify zarr data
                if not len(part) > 4:
                    print(colored(f"Too few characters for save parameter: {part}, want one of {pipeMap.keys()}", 'red'))
                else:
                    save_param = part[4:]
                    if save_param in pipeMap.keys():
                        load_keys.add(pipeMap[save_param])
                        pipeline_steps.append(('save', save_param))
                    else:
                        print(colored(f"Invalid save parameter: {save_param}, want one of {pipeMap.keys()}", 'red'))

            elif part.startswith('c'):
                # Camera filter - c{chars} where chars are 1-5 characters from 'bflrh'
                if len(part) == 1:
                    print(colored(f"Invalid camera filter parameter: {part}, need 1-5 chars from 'bflrh'", 'red'))
                else:
                    camera_filter_param = part[1:]
                    # Validate the parameter
                    valid_chars = set('bflrh')
                    if len(camera_filter_param) > 5:
                        print(colored(f"Invalid camera filter parameter: {camera_filter_param}, max 5 chars allowed", 'red'))
                    elif not all(c in valid_chars for c in camera_filter_param):
                        print(colored(f"Invalid camera filter parameter: {camera_filter_param}, valid chars are: bflrh", 'red'))
                    else:
                        pipeline_steps.append(('camera_filter', camera_filter_param))
                        load_keys.add('cameras')
                        write_keys.add('cameras')

            elif part.startswith('vi') and not part[2:].isdigit():
                # Video visualization with format (non-digit suffix)
                video_format = part[2:]
                pipeline_steps.append(('video', video_format))
            elif part.startswith('gpr') and len(part) == 4:
                # Gripper pipeline: gprX where X is a single char robot key
                gripper_key = part[3]
                pipeline_steps.append(('gripper', gripper_key))
                load_keys.add('gc')
                load_keys.add('state')
                write_keys.add('repointcloud')
            elif part.startswith('paint'):
                # paint[param]: recolor segmented pointcloud
                if len(part) > 5:
                    param = part[5:]
                else:
                    param = None
                pipeline_steps.append(('paint', param))
                load_keys.add('segpointcloud')
                write_keys.add('paintpointcloud')
            elif part.startswith('eng'):
                # eng[param]: gripper-state engineering on state/action vectors
                if len(part) > 3:
                    param = part[3:]
                else:
                    param = None
                pipeline_steps.append(('engrip', param))
                load_keys.update(set(['segpointcloud', 'state', 'action', 'key_poses']))
                write_keys.update(set(["state", "action"]))
                write_pcd_if_loaded()
            elif part.startswith('j'):
                # j[param]: trajectory pointcloud — touches all ee/qpos pairs
                param = part[1:]
                pipeline_steps.append(('trajpcd', param))
                for key in ['ee_state', 'ee_action', 'qpos_state', 'qpos_action', 'key_poses']:
                    load_keys.add(key)
                    write_keys.add(key)
                write_pcd_if_loaded()
            else:
                print(colored(f"Invalid pipeline step: {part}", 'red'))

        resultDict = {
            'load_keys': load_keys,
            'write_keys': write_keys,
            'pipeline_steps': pipeline_steps
        }
        for key, val in resultDict.items():
            print(colored(f"{key}: {str(val)}", 'green'))
        return resultDict

class DataTraverser:
    def __init__(self,seg_source, zarr_path=None,mem_run=False, output_path=None, episode_range=None, frame_range=None, pipeline=None,task_name=None):
        """
        Initialize the data traverser with optimized loading based on pipeline analysis.
        
        Args:
            seg_source (str): Data-source mode. "traverse" derives task_name
                from the zarr path (which must contain "-demo"); any other
                value requires an explicit task_name.
            zarr_path (str): Path to the input .zarr directory
            mem_run (bool): If True, stop after pipeline analysis and module
                loading — no output setup, no zarr opening, no data loading.
            output_path (str): Path to output .zarr directory (if None, no saving)
            episode_range (str): Episode range like "0-10-2" (start-end-step)
            frame_range (str): Frame range like "0-100-5" (start-end-step)
            pipeline (str): Processing pipeline to analyze for selective loading
            task_name (str): Task name, required when seg_source != "traverse".
        """
        self.episode_range = episode_range
        self.frame_range = frame_range
        self.input_zarr_path = Path(zarr_path) if zarr_path else None
        if seg_source == "traverse":
            # Task name is everything before "-demo" in the zarr basename.
            # NOTE: asserts are stripped under `python -O`; these checks
            # would then silently pass — confirm that's acceptable.
            assert zarr_path is not None
            path_str = str(zarr_path)
            assert "-demo" in path_str, f"zarr_path {path_str} should contain -demo"
            self.task_name = os.path.basename(path_str).split("-demo")[0]
        else:
            assert task_name is not None
            self.task_name = task_name
        self.output_zarr_path = Path(output_path) if output_path else None
        self.pipeline = pipeline
        self.seg_source = seg_source
        
        # Timing variables for tracking intervals
        self.last_print_time = time.time()
        self.frame_count = 0

        # Analyze pipeline to determine what data to load
        self.pipeline_analysis = PipelineAnalyzer.analyze_pipeline(pipeline or "")
        
        # Lazy-load pipeline modules based on analysis
        self._load_pipeline_modules()

        # Focused bounding box from the shared pcd-manipulation config.
        pcd_manip = _load_pcd_manip()
        self.bbox_min = pcd_manip["focused_bbox"][0]
        self.bbox_max = pcd_manip["focused_bbox"][1]
        
        if mem_run:
            # In-memory mode: attributes below (source_zarr, target_zarr,
            # metadata, episodes) are intentionally never set.
            return
        # Handle output path logic
        self._setup_output_path()
        
        # Open source zarr for reading
        # NOTE(review): if zarr_path is None here (non-"traverse" source
        # without mem_run), this opens the literal path "None" — presumably
        # callers always pass zarr_path in that case; verify.
        self.source_zarr = zarr.open(str(self.input_zarr_path), mode='r')
        
        # Open target zarr for modification (if saving)
        if self.target_zarr_path:
            self.target_zarr = zarr.open(str(self.target_zarr_path), mode='r+')
        else:
            self.target_zarr = None
        
        # Load metadata
        self._load_metadata()
        
        # Load only necessary episodes and frames into memory
        self._load_selective_data()
    
    def _load_pipeline_modules(self):
        """Import only the processing modules required by the analyzed pipeline.

        Populates self.pipeline_modules with {module_name: imported object}.
        A module that fails to import is reported in red and the ImportError
        is re-raised.
        """
        self.pipeline_modules = {}

        # One processing module per pipeline step type. 'fps' and 'rands'
        # share fpsPcd; 'pcd_generation' and 'segmentation' share rgbPcd.
        step_to_module = {
            'fps': 'fpsPcd',
            'rands': 'fpsPcd',
            'trim': 'trimPcd',
            'arm_trim': 'trimArmPcd',
            'gaussian': 'pcd2gs',
            'pcd_generation': 'rgbPcd',
            'segmentation': 'rgbPcd',
            'vggt': 'vggtPcd',
            'pose': 'posePcd',
            'save': 'savePcd',
            'video': 'VideoVisualizer',
            'camera_filter': 'cameraFilter',
            'gripper': 'gripper',
            'keypcd': 'keyPcd',
            'paint': 'paintPcd',
            'engrip': 'enGprPcd',
            'trajpcd': 'trajPcd',
        }
        needed_modules = {
            step_to_module[step_type]
            for step_type, _ in self.pipeline_analysis['pipeline_steps']
            if step_type in step_to_module
        }

        for module_name in needed_modules:
            try:
                # Each module file exports an object with the same name as
                # the file, except VideoVisualizer which lives in the
                # lowercase 'videovisualizer' module.
                source = 'videovisualizer' if module_name == 'VideoVisualizer' else module_name
                module = __import__(source)
                try:
                    self.pipeline_modules[module_name] = getattr(module, module_name)
                except AttributeError as err:
                    # Mirror `from source import module_name` semantics.
                    raise ImportError(
                        f"cannot import name '{module_name}' from '{source}'"
                    ) from err
                print(colored(f"  ✓ Loaded {module_name}", 'green'))
            except ImportError as e:
                print(colored(f"  ✗ Failed to load {module_name}: {e}", 'red'))
                raise

    def _setup_output_path(self):
        """Decide where results are written (copy-on-write semantics).

        Sets self.target_zarr_path to None (no saving), to the input path
        (in-place modification), or to a fresh copy of the input zarr.
        """
        if not self.output_zarr_path:
            # No output requested: nothing will be saved.
            self.target_zarr_path = None
            return
        if self.output_zarr_path == self.input_zarr_path:
            # Output equals input: mutate the source zarr directly.
            self.target_zarr_path = self.input_zarr_path
            print(f"Modifying in place: {self.target_zarr_path}")
            return
        # Distinct output path: duplicate the source zarr, then modify the copy.
        self.target_zarr_path = self.output_zarr_path
        if self.target_zarr_path.exists():
            answer = input(f"Remove existing zarr {self.target_zarr_path}? (y/n)")
            if answer == 'y':
                print(f"Removing existing {self.target_zarr_path}")
                shutil.rmtree(self.target_zarr_path)
        # NOTE(review): if the target still exists here (user answered 'n'),
        # copytree raises FileExistsError — confirm abort-by-exception is the
        # intended outcome.
        print(f"Copying {self.input_zarr_path} -> {self.target_zarr_path}")
        shutil.copytree(self.input_zarr_path, self.target_zarr_path, symlinks=True)
    
    def _load_metadata(self):
        """Load metadata from source zarr.

        Sets: meta, episode_ends, data, has_cameras, has_gaussian,
        scene_info (or None), cameras / camera_names, and total_timesteps.
        """
        self.meta = self.source_zarr['meta']
        # episode_ends[i] marks where episode i ends in the flat frame axis.
        self.episode_ends = np.array(self.meta['episode_ends'][:])
        # Determine data availability
        self.data = self.source_zarr['data']
        self.has_cameras = 'cameras' in self.source_zarr
        self.has_gaussian = 'gaussian' in self.source_zarr
        
        # Load scene_info.json if it exists
        self.scene_info = None
        scene_info_path = self.input_zarr_path / 'scene_info.json'
        if scene_info_path.exists():
            with open(scene_info_path, 'r') as f:
                self.scene_info = json.load(f)
        
        if self.has_cameras:
            self.cameras = self.source_zarr['cameras']
            self.camera_names = list(self.cameras.keys())
        else:
            self.cameras = None
            self.camera_names = []
        
        # Get total timesteps: prefer data-group arrays, fall back to the
        # first camera's first available stream.
        # NOTE(review): if neither branch matches, total_timesteps is never
        # assigned and later accesses raise AttributeError — confirm that
        # zarrs always carry at least one of these datasets.
        for key in ['state', 'observer_rgb', 'point_cloud']:
            if key in self.data:
                self.total_timesteps = self.data[key].shape[0]
                break
        else:
            if self.has_cameras and self.camera_names and self.cameras:
                first_camera = self.cameras[self.camera_names[0]]
                for data_type in ['rgb', 'depth', 'segmentation']:
                    if data_type in first_camera:
                        self.total_timesteps = first_camera[data_type].shape[0]
                        break
    
    def _get_assets_for_timestep(self, timestep):
        """Get asset strings for a given timestep based on scene_info.json.

        Finds the episode containing `timestep` and returns every string
        value in that episode's 'info' section that contains '/base'.

        Args:
            timestep: Global frame index into the concatenated episodes.

        Returns:
            list[str]: Matching asset strings. Always a list — empty when
            scene_info.json is absent, the episode key is missing, or no
            '/base' values exist. (The original returned None on the
            missing-episode path and [] elsewhere; normalized to [] for a
            consistent, docstring-matching contract — both are falsy.)
        """
        if self.scene_info is None:
            return []
        
        # episode_ends[i] is the exclusive end of episode i, so the first
        # end greater than `timestep` identifies the owning episode.
        episode_idx = 0
        for i, episode_end in enumerate(self.episode_ends):
            if timestep < episode_end:
                episode_idx = i
                break
        else:
            # Past the last recorded end: attribute to the final episode.
            episode_idx = len(self.episode_ends) - 1
        
        episode_key = f"episode_{episode_idx}"
        if episode_key in self.scene_info and 'info' in self.scene_info[episode_key]:
            info_dict = self.scene_info[episode_key]['info']
            # Collect all values that contain '/base'
            return [
                value for value in info_dict.values()
                if isinstance(value, str) and '/base' in value
            ]
        
        return []
    @timeit(force=True)
    def _load_selective_data(self):
        """Load the selected episodes/frames into self.episodes.

        Runs of strictly consecutive timesteps are fetched with one batched
        zarr read; otherwise frames are loaded one timestep at a time.
        """
        self.episodes_info = self.parse_ef()
        first_info, last_info = self.episodes_info[0], self.episodes_info[-1]
        print(f"Loading episodes {first_info['ep_idx']} to {last_info['ep_idx']} (step {first_info['fr_step']})")
        self.episodes = []
        for info in self.episodes_info:
            timesteps = info['timesteps']
            print(f"Loading episode {info['ep_idx']}: {len(timesteps)} frames (fr_start={info['fr_start']} fr_step={info['fr_step']})")
            is_consecutive = len(timesteps) > 1 and all(
                later == earlier + 1 for earlier, later in zip(timesteps, timesteps[1:])
            )
            if is_consecutive:
                print(f"  ↳ batch loading {len(timesteps)} consecutive frames", end="\r", flush=True)
                frames = self._load_batch_frames(timesteps)
            else:
                frames = [None] * len(timesteps)
                for frame_pos, timestep in enumerate(timesteps):
                    print(f"  ↳ frame {frame_pos} | timestep {timestep}", end="\r", flush=True)
                    frames[frame_pos] = self._load_selective_frame(timestep)
            self.episodes.append(frames)
        total_frames_loaded = sum(len(episode) for episode in self.episodes)
        print(f"\n📦 Loaded {len(self.episodes)} episodes with total {total_frames_loaded} frames")
    
    def _load_selective_frame(self, timestep):
        """Load a single frame, populating only the keys the pipeline needs.

        Keys not listed in ``self.pipeline_analysis['load_keys']`` (or missing
        from the source zarr) are set to ``None`` — except ``state``/``action``
        which default to ``[]`` and ``ec``/``gc`` which default to ``0``,
        matching the historical contract of this loader.

        Changes vs. previous version: the dead ``elif 'gaussian' in
        write_keys`` branch was removed (both branches assigned ``None``), and
        the repeated "group/dataset present and long enough?" boilerplate is
        factored into a local helper.

        Parameters
        ----------
        timestep : int
            Absolute timestep index into the zarr datasets.

        Returns
        -------
        dict
            A unified-data dict (see UNIFIED DATA INTERFACE at file top).
        """
        load_keys = self.pipeline_analysis['load_keys']
        unified_data = {'task_name': self.task_name}

        def _read(group_name, dataset_name):
            # Return source_zarr[group][dataset][timestep], or None when the
            # group/dataset is absent or too short for this timestep.
            if group_name in self.source_zarr and dataset_name in self.source_zarr[group_name]:
                dataset = self.source_zarr[group_name][dataset_name]
                if len(dataset) > timestep:
                    return dataset[timestep]
            return None

        # Observer RGB image.
        if 'rgb' in load_keys and 'observer_rgb' in self.data:
            unified_data['rgb'] = self.data['observer_rgb'][timestep]
        else:
            unified_data['rgb'] = None

        # Pointcloud-family datasets, driven generically by zarrMap.
        # These keys are derived/optional, so a missing dataset is expected
        # and does not warrant a warning.
        optional_keys = {'segpointcloud', 'repointcloud', 'injpointcloud',
                         'vggtpointcloud2', 'vggtpointcloud3', 'gaussian', 'paintpointcloud'}
        for pcd_key, (group_name, dataset_name) in zarrMap.items():
            if pcd_key not in load_keys:
                unified_data[pcd_key] = None
                continue
            if not (group_name in self.source_zarr and dataset_name in self.source_zarr[group_name]):
                if pcd_key not in optional_keys:
                    print(colored(f"WARNING: want {pcd_key}, but source_zarr doesn't have it: {group_name}/{dataset_name}", 'yellow'))
                unified_data[pcd_key] = None
                continue
            pointcloud_data = _read(group_name, dataset_name)
            # Auto-scale actual pointcloud fields (not poses/gaussian scalars).
            if 'pointcloud' in pcd_key and pointcloud_data is not None and pointcloud_data.shape[0] > 0:
                pointcloud_data = scalePcd(pointcloud_data, tag=f"auto_scale_{pcd_key}")
            unified_data[pcd_key] = pointcloud_data

        # Robot state/action vectors (historical default is an empty list).
        for key in ('state', 'action'):
            if key in load_keys and key in self.data:
                unified_data[key] = self.data[key][timestep]
            else:
                unified_data[key] = []

        # Joint-space data lives under the 'data' group of the source zarr.
        for key in ('js_state', 'js_action'):
            if key in load_keys and 'data' in self.source_zarr and key in self.source_zarr['data']:
                unified_data[key] = self.source_zarr['data'][key][timestep]
            else:
                unified_data[key] = None

        # End-effector and qpos data, located via zarrMap.
        for key in ('ee_state', 'ee_action', 'qpos_state', 'qpos_action'):
            if key in load_keys:
                group_name, dataset_name = zarrMap[key]
                unified_data[key] = _read(group_name, dataset_name)
            else:
                unified_data[key] = None

        # Per-camera data (rgb/depth/segmentation plus calibration matrices).
        unified_data['cameras'] = {}
        if 'cameras' in load_keys and self.has_cameras and self.cameras:
            for camera_name in self.camera_names:
                camera_data = self.cameras[camera_name]
                unified_data['cameras'][camera_name] = {
                    data_type: (camera_data[data_type][timestep] if data_type in camera_data else None)
                    for data_type in ('rgb', 'depth', 'segmentation', 'intrinsic_cv', 'extrinsic_cv', 'cam2world_gl')
                }

        # Gaussian splat fields: stored flat as 'gaussian_<field>' entries in
        # zarrMap, reconstructed here into one nested dict.
        if 'gaussian' in load_keys:
            gaussian_keys = [key.replace('gaussian_', '') for key in zarrMap if key.startswith('gaussian_')]
            unified_data['gaussian'] = {
                key: (self.source_zarr['gaussian'][key][timestep]
                      if 'gaussian' in self.source_zarr and key in self.source_zarr['gaussian']
                      else None)
                for key in gaussian_keys
            }
        else:
            unified_data['gaussian'] = None

        # Keypose (48D combined format) and foundation-pose data.
        for key in ('key_poses', 'foundationPoses'):
            if key in load_keys and 'data' in self.source_zarr and key in self.source_zarr['data']:
                unified_data[key] = self.source_zarr['data'][key][timestep]
            else:
                unified_data[key] = None

        # ec/gc counters: unwrap numpy scalars to plain Python via .item().
        for key in ('ec', 'gc'):
            if key in load_keys and key in self.data:
                value = self.data[key][timestep]
                unified_data[key] = value.item() if hasattr(value, 'item') else value
            else:
                unified_data[key] = 0

        # Assets information from scene_info.json.
        unified_data['assets'] = self._get_assets_for_timestep(timestep)

        return unified_data
    @timeit(force=True)
    def _load_batch_frames(self, timesteps):
        """Batch-load a run of consecutive timesteps (one zarr slice per dataset).

        Fixes vs. previous version:
        - removed the redundant ``x[i] if isinstance(x, list) else x[i]``
          ternaries (both branches were identical);
        - ``ec``/``gc`` now call ``.item()`` whenever the element supports it,
          matching ``_load_selective_frame`` (previously raw numpy scalars
          leaked through on the batch path because the ``isinstance(..., list)``
          guard excluded numpy/zarr slices).

        Parameters
        ----------
        timesteps : list[int]
            Strictly consecutive absolute timestep indices.

        Returns
        -------
        list[dict]
            One unified-data dict per timestep.
        """
        load_keys = self.pipeline_analysis['load_keys']
        n = len(timesteps)
        start, stop = timesteps[0], timesteps[-1] + 1
        cam_types = ('rgb', 'depth', 'segmentation', 'intrinsic_cv', 'extrinsic_cv', 'cam2world_gl')

        def _slice(root, *path):
            # Walk `path` through nested dict-like groups; return the
            # [start:stop] slice, or per-frame Nones when any level is missing.
            node = root
            for part in path:
                if part not in node:
                    return [None] * n
                node = node[part]
            return node[start:stop]

        def _maybe(key, root, *path):
            # Slice only when `key` was requested by the pipeline.
            return _slice(root, *path) if key in load_keys else [None] * n

        rgb_batch = _maybe('rgb', self.data, 'observer_rgb')
        pcd_batches = {k: _maybe(k, self.source_zarr, *path) for k, path in zarrMap.items()}

        # Per-frame vector data: key -> batch slice (or Nones).
        flat_batches = {
            'state': _maybe('state', self.data, 'state'),
            'action': _maybe('action', self.data, 'action'),
            'js_state': _maybe('js_state', self.source_zarr, 'data', 'js_state'),
            'js_action': _maybe('js_action', self.source_zarr, 'data', 'js_action'),
            'ee_state': _maybe('ee_state', self.source_zarr, 'ee', 'state'),
            'ee_action': _maybe('ee_action', self.source_zarr, 'ee', 'action'),
            'qpos_state': _maybe('qpos_state', self.source_zarr, 'qpos', 'state'),
            'qpos_action': _maybe('qpos_action', self.source_zarr, 'qpos', 'action'),
            'key_poses': _maybe('key_poses', self.source_zarr, 'data', 'key_poses'),
            'foundationPoses': _maybe('foundationPoses', self.source_zarr, 'data', 'foundationPoses'),
        }

        camera_batches = {}
        if 'cameras' in load_keys and self.has_cameras and self.cameras:
            for camera_name in self.camera_names:
                camera_data = self.cameras[camera_name]
                camera_batches[camera_name] = {
                    data_type: (camera_data[data_type][start:stop] if data_type in camera_data else [None] * n)
                    for data_type in cam_types
                }

        gaussian_keys = []
        gaussian_batches = {}
        if 'gaussian' in load_keys:
            gaussian_keys = [key.replace('gaussian_', '') for key in zarrMap if key.startswith('gaussian_')]
            gaussian_batches = {key: _slice(self.source_zarr, 'gaussian', key) for key in gaussian_keys}

        # ec/gc default to 0 (not None) per the unified-data contract.
        ec_batch = self.data['ec'][start:stop] if 'ec' in load_keys and 'ec' in self.data else [0] * n
        gc_batch = self.data['gc'][start:stop] if 'gc' in load_keys and 'gc' in self.data else [0] * n

        batch_frames = []
        for i in range(n):
            frame = {'task_name': self.task_name, 'rgb': rgb_batch[i]}
            for pcd_key in zarrMap:
                pointcloud_data = pcd_batches[pcd_key][i]
                # Auto-scale actual pointcloud fields, mirroring the
                # single-frame loader.
                if pointcloud_data is not None and 'pointcloud' in pcd_key and pointcloud_data.shape[0] > 0:
                    pointcloud_data = scalePcd(pointcloud_data, tag=f"auto_scale_{pcd_key}")
                frame[pcd_key] = pointcloud_data
            for key, batch in flat_batches.items():
                frame[key] = batch[i]
            frame['cameras'] = {
                camera_name: {data_type: camera_batches[camera_name][data_type][i] for data_type in cam_types}
                for camera_name in camera_batches
            }
            if 'gaussian' in load_keys:
                frame['gaussian'] = {key: gaussian_batches[key][i] for key in gaussian_keys}
            else:
                frame['gaussian'] = None
            for key, batch in (('ec', ec_batch), ('gc', gc_batch)):
                value = batch[i]
                frame[key] = value.item() if hasattr(value, 'item') else value
            # Assets information from scene_info.json.
            frame['assets'] = self._get_assets_for_timestep(timesteps[i])
            batch_frames.append(frame)
        return batch_frames
    def parse_range_string(self, range_str):
        """Parse range string like "0-10-2" into (start, end, step)."""
        if range_str is None:
            return (-1, -1, -1)
        
        parts = range_str.split('-')
        if len(parts) == 1:
            return (int(parts[0]), int(parts[0]) + 1, 1)
        elif len(parts) == 2:
            return (int(parts[0]), int(parts[1]), 1)
        elif len(parts) == 3:
            return (int(parts[0]), int(parts[1]), int(parts[2]))
        else:
            raise ValueError(f"Invalid range format: {range_str}")

    def parse_ef(self):
        """Resolve the `-e` (episodes) and `-f` (frames) CLI ranges **once**
        into a structured plan, the single source of truth for all later
        loading / iterating / saving logic.

        Returns
        -------
        list[dict]
            One entry per selected episode with:
              ep_idx    : int       Absolute episode index in the dataset
              timesteps : list[int] Absolute timestep indices for the episode
              fr_start  : int       First *relative* frame index (for naming)
              fr_step   : int       Stride between frames
        """

        # Episode range: parse and clamp to the number of episodes available.
        if self.episode_range:
            ep_lo, ep_hi, ep_stride = self.parse_range_string(self.episode_range)
            ep_hi = min(ep_hi, len(self.episode_ends))
        else:
            ep_lo, ep_hi, ep_stride = 0, len(self.episode_ends), 1

        # Frame range is interpreted relative to each episode; end == -1
        # means "through the end of the episode".
        if self.frame_range:
            fr_lo, fr_hi, fr_stride = self.parse_range_string(self.frame_range)
        else:
            fr_lo, fr_hi, fr_stride = 0, -1, 1

        # Episode start timesteps (absolute): each episode begins where the
        # previous one ended.
        episode_starts = np.concatenate([[0], self.episode_ends[:-1]])

        plan: list[dict] = []
        for ep in range(ep_lo, ep_hi, ep_stride):
            first_ts = int(episode_starts[ep])
            episode_len = int(self.episode_ends[ep]) - first_ts

            # Clamp the requested frame end to this episode's length.
            stop = episode_len if fr_hi == -1 else min(fr_hi, episode_len)

            plan.append({
                'ep_idx': ep,
                'timesteps': list(range(first_ts + fr_lo, first_ts + stop, fr_stride)),
                'fr_start': fr_lo,
                'fr_step': fr_stride,
            })

        return plan

    def show_status(self):
        """Print a dataset summary (path, episode/timestep counts) followed by
        the recursive zarr structure dump.  Output only; returns None."""
        banner = colored("=" * 80, 'cyan')
        print(banner)
        print(colored("ZARR DATASET STATUS", 'cyan', attrs=['bold']))
        print(banner)

        print(f"📁 Input Path: {self.input_zarr_path}")
        print(f"📊 Total Episodes: {len(self.episode_ends)}")
        print(f"⏱️  Total Timesteps: {self.total_timesteps}")

        # Recursive structure dump of the source zarr store.
        print(f"\n📦 ZARR STRUCTURE:")
        self._show_zarr_structure(self.source_zarr, "", 0)

        print(banner)

    def _show_zarr_structure(self, zarr_group, prefix, depth):
        """Recursively show zarr structure with proper indentation."""
        if depth > 10:  # Prevent infinite recursion
            return
        
        indent = "  " * depth
        
        try:
            for key in zarr_group.keys():
                item = zarr_group[key]
                
                if hasattr(item, 'keys'):  # It's a group
                    print(f"{indent}📁 {key}/")
                    self._show_zarr_structure(item, f"{prefix}{key}/", depth + 1)
                elif hasattr(item, 'shape'):  # It's a dataset/array
                    print(f"{indent}📄 {key}: {item.shape} ({item.dtype})")
                    
                    # Show attributes if any
                    if hasattr(item, 'attrs') and len(item.attrs) > 0:
                        for attr_key, attr_value in item.attrs.items():
                            print(f"{indent}    @{attr_key}: {attr_value}")
                else:
                    print(f"{indent}❓ {key}: {type(item)}")
                    
        except Exception as e:
            print(f"{indent}❌ Error reading {prefix}: {e}")
    @timeit()
    def apply_pipeline_steps(self, unified_data,local_ep_idx=0,actual_ep_idx=0,relative_frame_idx=0,frame_idx=0,bbox_min=None,bbox_max=None,urdf_path=None):
        """Apply every configured pipeline step, in order, to one frame's data.

        Each (step_type, param) pair from self.pipeline_analysis['pipeline_steps']
        dispatches to a callable in self.pipeline_modules.  The meaning of
        ``param`` depends on the step type (e.g. point count for fps/rands,
        mode for vggt, embodiment char for pose).  Steps replace/mutate
        ``unified_data``; the resulting dict is returned.  Any exception inside
        a step is printed and re-raised, aborting processing.

        Parameters
        ----------
        unified_data : dict
            Frame data (see UNIFIED DATA INTERFACE at file top).
        local_ep_idx : int
            Index of the episode within the loaded subset.
        actual_ep_idx : int
            Absolute episode index in the dataset (used in save filenames).
        relative_frame_idx : int
            Frame index relative to the user's -f range (used in filenames).
        frame_idx : int
            Frame position within the loaded episode.
        bbox_min, bbox_max : np.ndarray | None
            Trim bounding box; fall back to self.bbox_min / self.bbox_max.
        urdf_path : str | None
            Robot URDF path required by the 'arm_trim' step.
        """
        if bbox_min is None or bbox_max is None:
            bbox_min = self.bbox_min
            bbox_max = self.bbox_max
        unified_data['task_name'] = self.task_name
        for step_type, param in self.pipeline_analysis['pipeline_steps']:
            try:
                if step_type == 'fps':
                    # Downsample pointclouds to `param` points.
                    unified_data = self.pipeline_modules['fpsPcd'](unified_data, param, False)  # False = use FPS
                elif step_type == 'rands':
                    unified_data = self.pipeline_modules['fpsPcd'](unified_data, param, True)   # True = use random sampling
                elif step_type == 'trim':
                    # Crop every *pointcloud key to the bounding box.
                    # if param!=None and param in pipeMap.keys() and unified_data.get(pipeMap[param]) is not None:
                    #     unified_data = self.pipeline_modules['trimPcd'](unified_data, bbox_min, bbox_max, [pipeMap[param]] )
                    # elif param == None:
                    pcd_keys = [key for key in unified_data.keys() if key.endswith('pointcloud')]
                    unified_data = self.pipeline_modules['trimPcd'](unified_data, bbox_min, bbox_max, pcd_keys,param)
                    # else:
                    #     print(colored(f"    ❌ TRIM({param}) | Missing data: {unified_data.get(pipeMap[param])}", 'red'))
                elif step_type == 'arm_trim':
                    # Remove robot-arm points; needs a URDF and js_state.
                    if not urdf_path:
                        print(colored(f"    ❌ ARM_TRIM | No URDF path provided", 'red'))
                    elif not os.path.exists(urdf_path):
                        print(colored(f"    ❌ ARM_TRIM | URDF file not found: {urdf_path}", 'red'))
                    elif 'js_state' in unified_data and unified_data['js_state'] is not None:
                        pcd_keys = [key for key in unified_data.keys() if key.endswith('pointcloud')]
                        unified_data = self.pipeline_modules['trimArmPcd'](unified_data, param, urdf_path, pcd_keys)
                    else:
                        print(colored(f"    ❌ ARM_TRIM | No js_state data available", 'red'))
                elif step_type == 'segmentation':
                    # Build segpointcloud from segmentation-colored RGBD.
                    input_shape = unified_data.get('segpointcloud', np.array([])).shape if unified_data.get('segpointcloud') is not None else (0, 6)
                    unified_data = self.pipeline_modules['rgbPcd'](unified_data,param,surpress_print=True,color_key="segmentation",output_key="segpointcloud",seg_source=self.seg_source)
                    output_shape = unified_data.get('segpointcloud', np.array([])).shape if unified_data.get('segpointcloud') is not None else (0, 6)
                    print(f"    🎨 SEG | {input_shape} → {output_shape}")
                elif step_type == 'gaussian' :
                    # Convert pointcloud to Gaussian-splat representation.
                    if unified_data.get('pointcloud') is not None:
                        input_shape = unified_data.get('pointcloud').shape
                        unified_data = self.pipeline_modules['pcd2gs'](unified_data)
                        gaussian_data = unified_data.get('gaussian', {})
                        output_info = f"positions:{gaussian_data.get('positions', np.array([])).shape}" if gaussian_data else "None"
                        print(f"    🌟 GAUSSIAN | {input_shape} → {output_info}")
                    else:
                        print(colored(f"    ❌ GAUSSIAN | Missing pointcloud data", 'red'))
                elif step_type == 'pcd_generation':
                    # Reconstruct repointcloud (and possibly injpointcloud) from RGBD.
                    input_shape_re = unified_data.get('repointcloud', np.array([])).shape if unified_data.get('repointcloud') is not None else (0, 6)
                    input_shape_inj = unified_data.get('injpointcloud', np.array([])).shape if unified_data.get('injpointcloud') is not None else (0, 6)
                    # print("unified_data['cameras']['real_camera']['rgb']",unified_data['cameras']['real_camera']['rgb'].shape)
                    # print("unified_data['cameras']['real_camera']['depth']",unified_data['cameras']['real_camera']['depth'].shape)
                    unified_data = self.pipeline_modules['rgbPcd'](unified_data,param=param,surpress_print=True,color_key="rgb",output_key="repointcloud",seg_source=self.seg_source)
                    output_shape_re = unified_data.get('repointcloud', np.array([])).shape if unified_data.get('repointcloud') is not None else (0, 6)
                    output_shape_inj = unified_data.get('injpointcloud', np.array([])).shape if unified_data.get('injpointcloud') is not None else (0, 6)
                    print(f"    📷 PCD_GEN | repointcloud:{input_shape_re} → {output_shape_re} injpointcloud:{input_shape_inj} → {output_shape_inj} gc={unified_data.get('gc')} ec={unified_data.get('ec')}")
                elif step_type == 'vggt':
                    # VGGT-generated pointcloud; param selects the mode (2 or 3).
                    input_shape = unified_data.get(f'vggtpointcloud{param}', np.array([])).shape if unified_data.get(f'vggtpointcloud{param}') is not None else (0, 6)
                    unified_data = self.pipeline_modules['vggtPcd'](unified_data, mode=param)
                    output_shape = unified_data.get(f'vggtpointcloud{param}', np.array([])).shape if unified_data.get(f'vggtpointcloud{param}') is not None else (0, 6)
                    print(f"    🔮 VGGT({param}) | {input_shape} → {output_shape}")
                elif step_type == 'pose':
                    # Foundation-pose estimation for scene objects.
                    input_shape = unified_data.get('foundationPoses', np.array([])).shape if unified_data.get('foundationPoses') is not None else (0, 6)
                    unified_data = self.pipeline_modules['posePcd'](unified_data,embodiment_char=param,seg_source=self.seg_source)
                    output_shape = unified_data.get('foundationPoses', np.array([])).shape if unified_data.get('foundationPoses') is not None else (0, 6)
                    print(f"    🎯 FoundationPOSE | {input_shape} → {output_shape}")
                elif step_type == 'softlink':
                    # Soft link creation is handled after data saving, skip here
                    if not self.output_zarr_path:
                        # print(colored(f"    ❌ SOFTLINK({param}) | No output path set", 'red'))
                        pass
                    else:
                        print(f"    🔗 SOFTLINK({param}) | Will create after saving")
                    pass
                elif step_type == 'save':
                    # Save operations don't modify zarr data
                    if param in pipeMap.keys() and unified_data.get(pipeMap[param]) is not None:
                        if self.output_zarr_path:
                            output_dir = self.output_zarr_path / f"{param}_pcd_output"
                        else:
                            assert self.input_zarr_path is not None, "input_zarr_path is not set.Perhaps you are collecting data but with save* specified in pipeline"
                            output_dir = self.input_zarr_path / f"{param}_pcd_output"
                        filename_without_ext=f"episode_{actual_ep_idx:03d}_frame_{relative_frame_idx:05d}_{self.pipeline}"
                        unified_data = self.pipeline_modules['savePcd'](unified_data, output_dir=str(output_dir), 
                                                        filename_without_ext=filename_without_ext, typestr=pipeMap[param])
                        # Get the actual saved file path
                        maxcolor=unified_data[pipeMap[param]][:,3:6].max()
                        mincolor=unified_data[pipeMap[param]][:,3:6].min()
                        full_path = output_dir / f"{filename_without_ext}.pcd"
                        print(f"    💾 SAVED:| {pipeMap[param]} {full_path} max{maxcolor} min{mincolor}")
                    else:
                        print(colored(f"    ❌ SAVE({param}) | Missing data: {unified_data.get(pipeMap[param])}", 'red'))
                elif step_type == 'camera_filter':
                    # Camera filtering: keep only cameras matching `param`.
                    if unified_data.get('cameras') is not None:
                        input_cameras = list(unified_data['cameras'].keys())
                        unified_data = self.pipeline_modules['cameraFilter'](unified_data, param)
                        output_cameras = [cam for cam, data in unified_data['cameras'].items() 
                                            if any(data.get(dtype) is not None for dtype in ['rgb', 'depth', 'segmentation'])]
                        print(f"    📷 CAMERA_FILTER({param}) | {input_cameras} → {output_cameras}")
                    else:
                        print(colored(f"    ❌ CAMERA_FILTER({param}) | No camera data available", 'red'))
                elif step_type == 'gripper':
                    # Gripper augmentation on repointcloud; needs gc state.
                    if 'repointcloud' in unified_data and unified_data['repointcloud'] is not None and 'gc' in unified_data:
                        input_shape = unified_data['repointcloud'].shape
                        unified_data = self.pipeline_modules['gripper'](unified_data, param)
                        output_shape = unified_data['repointcloud'].shape
                        print(f"    🤏 GRIPPER({param}) | {input_shape} → {output_shape}")
                    else:
                        print(colored(f"    ❌ GRIPPER({param}) | Missing repointcloud or gc data", 'red'))
                elif step_type == 'keypcd':
                    # Keypose-derived pointcloud; param is a distance threshold.
                    if 'key_poses' in unified_data and unified_data['key_poses'] is not None:
                        unified_data = self.pipeline_modules['keyPcd'](unified_data, dist=param)
                    else:
                        print(colored(f"    ❌ KEYPCD | Missing key_poses data", 'red'))
                elif step_type == 'paint':
                    # Repaint segpointcloud colors based on segmentation source.
                    if 'segpointcloud' in unified_data and unified_data['segpointcloud'] is not None:
                        input_shape = unified_data.get('segpointcloud', np.array([])).shape 
                        unified_data = self.pipeline_modules['paintPcd'](unified_data, param, surpress_print=False, seg_source=self.seg_source)
                        output_shape = unified_data.get('segpointcloud', np.array([])).shape    
                        print(f"    🎨 PAINT | {input_shape} → {output_shape}")
                    else:
                        print(colored(f"    ❌ PAINT | Missing segpointcloud data", 'red'))
                elif step_type == 'engrip':
                    # Gripper enhancement; receives episode/timestep context.
                    unified_data = self.pipeline_modules['enGprPcd'](
                        unified_data,
                        param,
                        surpress_print=True,
                        seg_source=self.seg_source,
                        episode=local_ep_idx,
                        ts=frame_idx,
                    )
                elif step_type == 'trajpcd':
                    # Trajectory pointcloud; needs both state and action vectors.
                    if 'state' in unified_data and unified_data['state'] is not None and 'action' in unified_data and unified_data['action'] is not None:
                        unified_data = self.pipeline_modules['trajPcd'](unified_data, param, surpress_print=True)
                    else:
                        print(colored(f"    ❌ TRAJPCD({param}) | Missing state data", 'red'))

    
            except Exception as e:
                # Surface the failing step, then re-raise: a broken step should
                # abort processing rather than silently produce partial data.
                print(colored(f"    ❌ ERROR in {step_type}: {e}", 'red'))
                raise
            
            # Update the frame in episodes array
            
        return unified_data
      
    @timeit()
    def traverse(self, bbox_min: np.ndarray=None, bbox_max: np.ndarray=None, output_video_path=None, urdf_path=None):
        """Run the processing pipeline over every loaded episode/frame.

        Falls back to the instance-level bounding box when none is given,
        optionally records a video of the processed frames, and finally
        persists the modified data when a target zarr path is configured.
        """
        # Fall back to the bounding box configured on the instance.
        if bbox_min is None or bbox_max is None:
            bbox_min = self.bbox_min
            bbox_max = self.bbox_max

        # Scan the pre-computed pipeline steps for a 'video' directive.
        use_visualizer = False
        video_format = "gif"
        for step_type, param in self.pipeline_analysis['pipeline_steps']:
            if step_type != 'video':
                continue
            use_visualizer = True
            if param:  # explicit output format requested
                video_format = param
            break

        # Build the visualizer lazily, deriving a default output location
        # next to the output (or input) zarr store.
        visualizer = None
        if use_visualizer:
            if output_video_path is None:
                base = self.output_zarr_path if self.output_zarr_path else self.input_zarr_path
                output_video_path = base / f"processed_video_{self.pipeline}"
            visualizer = self.pipeline_modules['VideoVisualizer'](output_path=str(output_video_path), output_format=video_format)

        total_frames_captured = 0

        if not hasattr(self, 'episodes_info'):
            raise RuntimeError("episodes_info not initialised – _load_selective_data must be called first")

        try:
            for local_ep_idx, episode in enumerate(self.episodes):
                info = self.episodes_info[local_ep_idx]
                actual_ep_idx = info['ep_idx']

                print(f"🎬 Processing episode {local_ep_idx}/{len(self.episodes_info)} (actual episode {actual_ep_idx}) ({len(episode)} frames)")
                for frame_idx, unified_data in enumerate(episode):
                    # Map the local frame index back to the absolute frame
                    # number implied by the user's frame-range selection.
                    relative_frame_idx = info['fr_start'] + frame_idx * info['fr_step']

                    print(f"  🎯 Processing frame {frame_idx}/{len(episode)} (actual episode {actual_ep_idx})(actual frame {relative_frame_idx}) ")
                    # Run every configured pipeline step on this frame and
                    # write the result back into the in-memory episode.
                    unified_data = self.apply_pipeline_steps(unified_data, local_ep_idx, actual_ep_idx, relative_frame_idx, frame_idx, bbox_min, bbox_max, urdf_path)
                    self.episodes[local_ep_idx][frame_idx] = unified_data

                    if use_visualizer and visualizer:
                        # Encode episode+frame into one monotonically
                        # increasing timestep for the visualizer.
                        unique_timestep = actual_ep_idx * 1000 + frame_idx
                        try:
                            visualizer.capture(unified_data, timestep=unique_timestep, episode_idx=actual_ep_idx)
                            total_frames_captured += 1
                        except Exception as e:
                            print(colored(f"    ERROR in visualizer.capture: {e}", 'red'))
                            raise

        except Exception as e:
            print(f"❌ Error during traversal: {e}")
            raise
        finally:
            # Always flush the video, even when traversal aborted midway.
            if use_visualizer and visualizer:
                visualizer.finalize_video()
                print(f"📹 Total frames captured: {total_frames_captured}")

        # Persist results only when a target store was requested.
        if self.target_zarr_path:
            self._save_modified_data()
        else:
            print(colored(f"💾 Not saving modified data (no target path specified)", 'blue'))
    
    def _iterate_episodes_and_frames(self, callback):
        """
        Generic method to iterate through episodes and frames with a callback.
        
        Args:
            callback: Function that takes (ep_idx, frame_idx, unified_data, timestep, actual_ep_idx)
                     and can return a value to break early (if returns True, breaks inner loop)
        """
        if not hasattr(self, 'episodes_info'):
            raise RuntimeError("episodes_info not initialised – _load_selective_data must be called first")

        for local_ep_idx, episode in enumerate(self.episodes):
            info = self.episodes_info[local_ep_idx]
            actual_ep_idx = info['ep_idx']

            should_break = False  # defined even if episode has 0 frames

            for frame_idx, unified_data in enumerate(episode):
                timestep = info['timesteps'][frame_idx]
                
                # Call the callback function
                should_break = callback(local_ep_idx, frame_idx, unified_data, timestep, actual_ep_idx)
                if should_break:
                    break
            
            # Check if we should break outer loop too (callback can return 'break_all')
            if should_break == 'break_all':
                break
    def first_store(self):
        """First pass: check for shape mismatches and collect new shapes."""
        def first_pass_callback(ep_idx, frame_idx, unified_data, timestep, actual_ep_idx):
            # Gaussian data arrives as a nested dict; each field maps onto
            # its own dataset inside the 'gaussian' group.
            gaussian_data = unified_data.get('gaussian')
            if gaussian_data is not None and isinstance(gaussian_data, dict):
                # Derive the individual Gaussian field names from zarrMap.
                gaussian_keys = [key.replace('gaussian_', '') for key in zarrMap.keys() if key.startswith('gaussian_')]

                for field_name in gaussian_keys:
                    field = gaussian_data.get(field_name)
                    if field is None:
                        continue
                    new_data_shape = (self.total_timesteps,) + field.shape
                    group_name, dataset_name = 'gaussian', field_name

                    exists = (self.target_zarr and group_name in self.target_zarr
                              and dataset_name in self.target_zarr[group_name])
                    if exists:
                        # Compare against the already-persisted dataset shape.
                        original_shape = self.target_zarr[group_name][dataset_name].shape
                        if original_shape != new_data_shape:
                            print(f"Shape mismatch for {group_name}/{dataset_name}: original {original_shape} vs new {new_data_shape}")
                            self.datasets_to_recreate[f"{group_name}/{dataset_name}"] = new_data_shape
                    else:
                        # Missing dataset: schedule creation.
                        self.datasets_to_recreate[f"{group_name}/{dataset_name}"] = new_data_shape

            # Every other writable key is checked directly via zarrMap
            # (gaussian was already handled above).
            for unified_key, zarr_path in zarrMap.items():
                if unified_key == 'gaussian' or unified_key not in self.write_keys:
                    continue
                if unified_data.get(unified_key) is None:
                    continue
                new_data_shape = (self.total_timesteps,) + unified_data[unified_key].shape
                group_name, dataset_name = zarr_path

                exists = (self.target_zarr and group_name in self.target_zarr
                          and dataset_name in self.target_zarr[group_name])
                if exists:
                    original_shape = self.target_zarr[group_name][dataset_name].shape
                    if original_shape != new_data_shape:
                        print(f"Shape mismatch for {group_name}/{dataset_name}: original {original_shape} vs new {new_data_shape}")
                        self.datasets_to_recreate[f"{group_name}/{dataset_name}"] = new_data_shape
                else:
                    self.datasets_to_recreate[f"{group_name}/{dataset_name}"] = new_data_shape
                    print(f"Dataset {group_name}/{dataset_name} doesn't exist - needs to be created")

            # Shapes are fully determined by the very first frame.
            return 'break_all'  # Break both loops after first frame

        self._iterate_episodes_and_frames(first_pass_callback)
    
    def recreate_store(self):
        """Recreate datasets with incompatible shapes."""
        for zarr_path_str, new_shape in self.datasets_to_recreate.items():
            group_name, dataset_name = zarr_path_str.split('/')

            # Drop any stale dataset first so it can be re-created below.
            if self.target_zarr and group_name in self.target_zarr and dataset_name in self.target_zarr[group_name]:
                print(f"Recreating dataset {group_name}/{dataset_name} with new shape {new_shape}")
                del self.target_zarr[group_name][dataset_name]

            # Locate the first frame carrying data for this dataset so its
            # dtype can be copied onto the new dataset.
            sample_data = next(
                (unified_data[unified_key]
                 for episode in self.episodes
                 for unified_data in episode
                 for unified_key, zarr_path in zarrMap.items()
                 if zarr_path == [group_name, dataset_name] and unified_data.get(unified_key) is not None),
                None,
            )

            if sample_data is not None and self.target_zarr:
                # Make sure the parent group exists before creating into it.
                if group_name not in self.target_zarr:
                    self.target_zarr.create_group(group_name)

                self.target_zarr[group_name].create_dataset(
                    dataset_name, shape=new_shape, dtype=sample_data.dtype,
                    chunks=True, compression='gzip', fill_value=0
                )
                print(f"Created new dataset {group_name}/{dataset_name} with shape {new_shape}")

                # The first gaussian dataset also initialises the group-level
                # spherical-harmonics attributes.
                if group_name == 'gaussian' and dataset_name == 'positions':
                    self.target_zarr[group_name].attrs['use_sh'] = False
                    self.target_zarr[group_name].attrs['sh_degree'] = 0
    
    def second_store(self):
        """Second pass: save the data in batches for optimal performance.

        Gathers every writable array from all frames into per-dataset batches
        keyed by timestep, then writes each dataset with one slice assignment
        when its timesteps are consecutive, or one write per timestep when
        they are sparse, falling back to individual writes on batch failure.
        """
        print("Collecting all data for batch writing...")
        
        # Collect all data by dataset first: "group/name" -> {timestep: array}
        dataset_batches = {}
        def collect_callback(ep_idx, frame_idx, unified_data, timestep, actual_ep_idx):
            # Lightweight progress bookkeeping (per-frame print left disabled).
            current_time = time.time()
            time_interval = current_time - self.last_print_time
            self.frame_count += 1
            # print(f"Collecting episode {ep_idx}, frame {frame_idx} (timestep {timestep}) - Time since last print: {time_interval:.3f}s - Total frames: {self.frame_count}", end="\r")
            self.last_print_time = current_time

            # Handle gaussian data conversion from nested dict to individual fields
            gaussian_data = unified_data.get('gaussian')
            if gaussian_data is not None and isinstance(gaussian_data, dict):
                # Field names are derived from the 'gaussian_*' keys in zarrMap.
                gaussian_keys = [key.replace('gaussian_', '') for key in zarrMap if key.startswith('gaussian_')]
                for field_name in gaussian_keys:
                    if field_name in gaussian_data and gaussian_data[field_name] is not None:
                        group_name, dataset_name = 'gaussian', field_name
                        dataset_key = f"{group_name}/{dataset_name}"
                        dataset_batches.setdefault(dataset_key, {})[timestep] = gaussian_data[field_name]

            # Collect all other data (including point clouds)
            for unified_key, zarr_path in zarrMap.items():
                if unified_key in self.write_keys and unified_key != 'gaussian' and unified_data.get(unified_key) is not None:
                    pointcloud_data = unified_data[unified_key]
                    
                    # Note: FPS sampling is handled in traverse phase, not here in second_store
                    
                    # Auto-scale pointcloud data back to 0-1 range for storage (but not for savePcd visualization)
                    if 'pointcloud' in unified_key and pointcloud_data is not None and pointcloud_data.shape[0] > 0:
                        pointcloud_data = scalePcdD(pointcloud_data, tag=f"auto_scale_down_{unified_key}")

                    group_name, dataset_name = zarr_path
                    dataset_key = f"{group_name}/{dataset_name}"
                    dataset_batches.setdefault(dataset_key, {})[timestep] = pointcloud_data

            return False  # Continue iteration
        
        # Collect all data
        self._iterate_episodes_and_frames(collect_callback)
        
        print(f"\n💾 Batch writing {len(dataset_batches)} datasets...")
        
        # Now write all data in batches
        for dataset_key, timestep_data in dataset_batches.items():
            group_name, dataset_name = dataset_key.split('/')
            
            # Only datasets already present in the target zarr are written;
            # recreate_store() is responsible for creating missing ones.
            if (self.target_zarr and group_name in self.target_zarr and 
                dataset_name in self.target_zarr[group_name]):
                
                print(f"📝 Writing {dataset_key} ({len(timestep_data)} frames)...")
                
                # Sort timesteps for sequential writing
                sorted_timesteps = sorted(timestep_data.keys())
                
                try:
                    # Check if timesteps are consecutive for efficient batch writing
                    is_consecutive = len(sorted_timesteps) > 1 and all(sorted_timesteps[i+1] == sorted_timesteps[i] + 1 for i in range(len(sorted_timesteps)-1))
                    
                    if is_consecutive:
                        # Consecutive timesteps - can use slice indexing for efficiency
                        start_idx = sorted_timesteps[0]
                        end_idx = sorted_timesteps[-1] + 1  # +1 because slice is exclusive
                        
                        # Prepare batch data
                        batch_data = []
                        for ts in sorted_timesteps:
                            batch_data.append(timestep_data[ts])
                        
                        # Convert to numpy array and write using slice
                        batch_array = np.array(batch_data)
                        print(f"Writing consecutive timesteps {start_idx}-{end_idx-1} using slice indexing")
                        self.target_zarr[group_name][dataset_name][start_idx:end_idx] = batch_array
                    else:
                        # Non-consecutive timesteps - write individually
                        print(f"Writing {len(sorted_timesteps)} non-consecutive timesteps individually")
                        for ts in sorted_timesteps:
                            self.target_zarr[group_name][dataset_name][ts] = timestep_data[ts]
                    
                    print(f"  ✅ Completed {dataset_key}")
                    
                except Exception as e:
                    print(f"  ❌ Error writing {dataset_key}: {e}{traceback.format_exc()}")
                    # Fall back to individual writes for this dataset
                    print(f"  🔄 Falling back to individual writes for {dataset_key}...")
                    for ts in sorted(timestep_data.keys()):
                        try:
                            self.target_zarr[group_name][dataset_name][ts] = timestep_data[ts]
                        except Exception as e2:
                            # Skip the failing timestep but keep writing the rest.
                            print(f"    ❌ Error at timestep {ts}: {e2}")
                            continue
            else:
                print(f"⚠️  Dataset {dataset_key} not found in target zarr")
        
        print("✅ Batch writing completed!")
    
    def compute_min_pcd_sizes(self):
        """Compute the minimal point count across all frames for every pointcloud variant
        that is going to be written. This guarantees homogenous shapes when persisting
        to Zarr by scheduling recreation of the corresponding datasets with the minimal
        size and storing the value in ``self.min_pcd_sizes``.
        """
        # unified_key -> smallest point count observed anywhere in the data
        self.min_pcd_sizes: dict[str, int] = {}

        def _min_size_callback(ep_idx, frame_idx, unified_data, timestep, actual_ep_idx):
            # Only pointcloud-flavoured write keys are relevant here.
            for key in self.write_keys:
                if 'pointcloud' not in key:
                    continue
                arr = unified_data.get(key)
                if arr is None or arr.size == 0:
                    continue
                count = arr.shape[0]
                best = self.min_pcd_sizes.get(key)
                self.min_pcd_sizes[key] = count if best is None else min(best, count)
            return False  # visit every frame

        # Traverse every frame to collect statistics.
        self._iterate_episodes_and_frames(_min_size_callback)

        if not self.min_pcd_sizes:
            print("⚠️  No pointcloud data found when computing minimal sizes – skipping homogenisation step.")
            return

        # Schedule every pointcloud dataset for recreation at the uniform size.
        for key, size in self.min_pcd_sizes.items():
            if key not in zarrMap:
                continue
            group_name, dataset_name = zarrMap[key]
            uniform_shape = (self.total_timesteps, size, 6)
            self.datasets_to_recreate[f"{group_name}/{dataset_name}"] = uniform_shape
            print(f"🔧 Uniform shape scheduled for {group_name}/{dataset_name}: {uniform_shape}")

        # Keep the DP3 demo_task.yaml point_cloud shape in sync with the new
        # minimal point count.
        min_pts = self.min_pcd_sizes.get('pointcloud')
        if min_pts is None:
            return
        try:
            yaml_path = (Path(__file__).resolve().parent.parent /
                        "RoboTwin/policy/DP3/3D-Diffusion-Policy/diffusion_policy_3d/config/task/demo_task.yaml")
            if yaml_path.exists():
                content = yaml_path.read_text()
                new_content, n_sub = re.subn(r"shape:\s*\[[0-9]+,\s*6\]", f"shape: [{min_pts}, 6]", content)
                if n_sub:
                    yaml_path.write_text(new_content)
                    print(colored(f"✏️  Patched {yaml_path} with point_cloud shape [{min_pts}, 6]", 'green'))
                else:
                    print(colored(f"⚠️  No shape line matched in {yaml_path}; file left unchanged", 'yellow'))
            else:
                print(colored(f"⚠️  YAML file not found for patching: {yaml_path}", 'yellow'))
        except Exception as e:
            print(colored(f"❌ Failed to patch YAML shape automatically: {e}", 'red'))

    def _save_modified_data(self):
        """Save only the modified data back to target zarr."""
        if not self.target_zarr:
            print(colored(f"❌ Cannot save modified data (no target zarr)", 'red'))
            return

        print("💾 Saving modified data to target zarr...")
        self.write_keys = self.pipeline_analysis['write_keys']

        # Point-cloud datasets live under 'pcd'; make sure the group exists.
        if self.target_zarr and 'pcd' not in self.target_zarr:
            self.target_zarr.create_group('pcd')
            print("📁 Created pcd group in target zarr")

        # zarr path -> required shape, filled in by the passes below.
        self.datasets_to_recreate = {}

        # Store pipeline: shape check → minimal pcd size → recreate → batch write.
        for stage in (self.first_store, self.compute_min_pcd_sizes,
                      self.recreate_store, self.second_store):
            stage()

        # Honour any softlink steps declared in the pipeline.
        pipeline_steps = self.pipeline_analysis['pipeline_steps']
        print("pipeline_steps", pipeline_steps)
        for step_type, param in pipeline_steps:
            if step_type == 'softlink':
                print("softlink", param)
                self._create_softlink_for_training(param)

        print(f"✅ Saved modified data to {self.target_zarr_path}")

    def trim_traverser(self, output_video_path=None, urdf_path=None):
        """Run ``traverse`` repeatedly with incrementally shifted bounding boxes.

        Sweeps k over 0..9 and nudges the bbox along z each iteration,
        printing each candidate box — presumably to visually search for a
        good trim region.
        """
        # NOTE(review): i and j never change, so the x/y offsets (i*0.05,
        # j*0.05) are always 0.0 — possibly leftovers from a larger 3-axis
        # sweep. Confirm before removing.
        i=0
        j=0
        for k in range(0, 10, 1):
            # NOTE(review): bbox_min grows by k*0.001 per step but bbox_max
            # by k*0.01 — verify this asymmetry is intentional.
            bbox_min = np.add(self.bbox_min, [i*0.05, j*0.05, k*0.001])
            bbox_max = np.add(self.bbox_max, [i*0.05, j*0.05, k*0.01])
            print(bbox_min, bbox_max, i, j, k)
            self.traverse(bbox_min=bbox_min, bbox_max=bbox_max, output_video_path=output_video_path, urdf_path=urdf_path)

  

    def _create_softlink_for_training(self, ln_type):
        """Create soft links using zarrMap and pipeMap for training compatibility.

        Args:
            ln_type: Pipeline link-type token (e.g. 'p', 's', 'ee'); resolved
                     through pipeMap to the source dataset path(s) in zarrMap.

        Creates relative symlinks under <target_zarr>/data pointing at the
        source dataset directories so training code finds the expected keys.
        """
        assert ln_type and self.target_zarr_path and pipeMap[ln_type] in zarrMap, f"ln_type {ln_type} or not self.target_zarr_path{self.target_zarr_path} or ln_type{ln_type} not in zarrMap{zarrMap} {ln_type in zarrMap}"
        # Map the link type onto the training-facing dataset names.
        if ln_type in ['p', 'r', 'v2', 'v3']:
            target_keys = ['point_cloud']
        elif ln_type in ['s','ps']:
            target_keys = ['seg_point_cloud']
        elif ln_type in ['ee', 'qpos']:
            target_keys = ['state', 'action']
        else:
            target_keys = []
        assert target_keys, f"target_keys{target_keys} is empty"

        source_paths = zarrMap[pipeMap[ln_type]]
        data_dir = self.target_zarr_path / "data"
        data_dir.mkdir(parents=True, exist_ok=True)
        print("softlink",source_paths,target_keys)

        # A list-of-lists means one source path per target key; otherwise the
        # single source path is linked for every target key.
        per_target = isinstance(source_paths[0], list)
        for i, target_key in enumerate(target_keys):
            if per_target:
                entry = source_paths[i]
                source_path_str = "/".join(entry if isinstance(entry, list) else [entry])
            else:
                source_path_str = "/".join(source_paths)

            target_path = self.target_zarr_path / "data" / target_key
            # Remove any existing link/dir first. Check is_symlink() before
            # exists(): exists() follows symlinks, so a dangling symlink would
            # otherwise be missed and os.symlink would raise FileExistsError.
            if target_path.is_symlink():
                target_path.unlink()
            elif target_path.exists():
                shutil.rmtree(target_path)
            os.symlink(f"../{source_path_str}", target_path)
            print(f"🔗 Created soft link for {target_key} from {source_path_str} to {target_path}")

def main():
    """Main function with enhanced argument parsing system.

    Parses CLI arguments, resolves input/output zarr paths (with optional
    'auto' output naming derived from the pipeline), and either prints the
    dataset status or runs the processing pipeline via DataTraverser.
    """
    parser = argparse.ArgumentParser(description="Data Traverser with optimized processing pipeline")
    parser.add_argument("-z", "--zarr_name", type=str,default=str(Path(__file__).parent.parent / "RoboTwin/policy/DP3/data/place_shoe-demo_clean-1.zarr"), help="Name of zarr dataset")
    parser.add_argument("-o", "--output", type=str, help="Output zarr name (use 'auto' for automatic naming based on pipeline, if not specified, no saving)")
    parser.add_argument("-e", "--episodes", type=str, help="Episode range: start-end-step (e.g., 0-10-2)")
    parser.add_argument("-f", "--frames", type=str, help="Frame range: start-end-step (e.g., 0-100-5)")
    parser.add_argument("-p", "--pipeline", type=str, help="Processing pipeline (e.g., f1024-t-g-v or r512-t-seg-v)")
    parser.add_argument("-s", "--status", action="store_true", help="Show zarr dataset status and exit")
    parser.add_argument("--output_format", "-of", type=str, default="gif", help="Output format for video")
    parser.add_argument("--video_path", "-vp", type=str, help="Custom video output path")
    parser.add_argument("--multi_trim", "-mt", action="store_true", help="Trim point cloud")
    parser.add_argument("--urdf_path", "-u", type=str, help="Path to robot URDF file (required when using arm trimming)",default=str(Path(__file__).parent.parent / "RoboTwin/assets/embodiments/franka-panda/panda.urdf"))

    args = parser.parse_args()

    # Resolve the input path: absolute paths pass through, relative names
    # are looked up under the shared data directory.
    base_data_dir = "../data"
    if not args.zarr_name.startswith("/"):
        input_zarr_path = os.path.join(base_data_dir, args.zarr_name)
    else:
        input_zarr_path = args.zarr_name

    # Verify the input zarr path exists
    if not os.path.exists(input_zarr_path):
        print(f"Error: Zarr dataset not found at path: {input_zarr_path}")
        return

    # Status mode: load minimally (no pipeline analysis), print status, exit.
    if args.status:
        try:
            traverser = DataTraverser(
                seg_source="traverse",
                zarr_path=input_zarr_path,
                output_path=None,  # No output needed for status
                episode_range=args.episodes,
                frame_range=args.frames,
                pipeline=None  # No pipeline needed for status
            )

            traverser.show_status()
            # sys.exit instead of the site-injected exit() builtin, which is
            # not guaranteed to exist (e.g. python -S, frozen apps).
            sys.exit(0)

        except Exception as e:
            print(f"Error reading dataset status: {e}")
            traceback.print_exc()
            sys.exit(1)

    # Handle auto-naming for output
    if str(args.output).startswith("auto"):
        if args.pipeline:
            # Generate automatic name: input_name-pipeline.zarr
            input_name_without_ext = args.zarr_name.replace('.zarr', '')
            auto_output_name = f"{input_name_without_ext}_{args.episodes if args.episodes else 'all'}_{args.frames if args.frames else 'all'}_{args.pipeline}.zarr"
            output_zarr_path = os.path.join(base_data_dir, auto_output_name)
            print(f"Auto-generated output name: {auto_output_name}")
        else:
            print("Error: Cannot use 'auto' output naming without specifying a pipeline (-p)")
            return
    else:
        # Absolute output paths pass through; relative ones go under the data
        # directory; no output argument means no saving at all.
        if str(args.output).startswith("/"):
            output_zarr_path = args.output
        else:
            output_zarr_path = os.path.join(base_data_dir, args.output) if args.output else None

    try:
        # Initialize traverser with optimized loading
        traverser = DataTraverser(
            seg_source="traverse",
            zarr_path=input_zarr_path,
            output_path=output_zarr_path,
            episode_range=args.episodes,
            frame_range=args.frames,
            pipeline=args.pipeline,
        )

        if args.pipeline:
            if args.multi_trim:
                # Sweep bounding boxes to aid trim-region tuning.
                traverser.trim_traverser(
                    output_video_path=args.video_path,
                    urdf_path=args.urdf_path
                )
            else:
                traverser.traverse(
                    output_video_path=args.video_path,
                    urdf_path=args.urdf_path
                )
        else:
            # No pipeline given: print the -p token cheat-sheet.
            print("\nNo pipeline specified. Use -p to specify processing steps.")
            print("Example: -p f1024-t-seg-g-vgif-s")
            print("  f1024: FPS sampling to 1024 points")
            print("  r1024: Random sampling to 1024 points")
            print("  t: Trim point cloud")
            print("  seg: Apply segmentation coloring from camera views")
            print("  g: Convert to Gaussian splatting")
            print("  s: Save point cloud as .pcd files")
            print("  vgif: Generate video visualization in GIF format")
            print("  vmp4: Generate video visualization in MP4 format")
            print("  v: Generate video visualization (default: GIF format)")
            print("  pcd: Generate RGB point cloud from depth and RGB images")
            print("  vggt2: Generate point cloud using VGGT depth unprojection (Mode 2)")
            print("  vggt3: Generate point cloud using VGGT point head (Mode 3)")
            print("  pose: Generate foundation poses from camera data")
            print("  k: Convert key poses (48D) to point cloud and add to existing point clouds")
            print("  ldp: Load pointcloud data (load only, no processing)")
            print("  lds: Load segmented pointcloud data (load only, no processing)")
            print("  ldr: Load reconstructed pointcloud data (load only, no processing)")
            print("  ldv2: Load VGGT mode 2 pointcloud data (load only, no processing)")
            print("  ldv3: Load VGGT mode 3 pointcloud data (load only, no processing)")
            print("  lnp: Create softlink from data/point_cloud to pcd/pointcloud")
            print("  savev2: Save VGGT mode 2 point cloud as .pcd files")
            print("  savev3: Save VGGT mode 3 point cloud as .pcd files")
            print("  savep: Save regular point cloud as .pcd files")
            print("  saves: Save segmented point cloud as .pcd files")
            print("  saver: Save reconstructed point cloud as .pcd files")
            print("  ac160: Cylindrical arm trimming with 50mm radius")
            print("  am10: Mesh-based arm trimming with 100mm buffer")
            print("  ldkp: Load key poses data (48D) (load only, no processing)")
            print("  savekp: Save key poses as .pcd files")
            print("  Use -s to show dataset status and structure.")

    except Exception as e:
        print(f"Error processing dataset: {e}")
        traceback.print_exc()

# Run the CLI entry point only when executed as a script, not on import.
if __name__ == "__main__":
    main()