#!/usr/bin/env python3
import argparse, subprocess, yaml, sys, os, shutil, datetime, signal, json, threading, time, traceback, itertools
from pathlib import Path
from loadJson import _load_emb_name_map, _load_inv_emb_name_map
from ruamel.yaml import YAML
from termcolor import colored
import zarr
from dataTraverser import PipelineAnalyzer
from enum import Enum
from glob import glob
import time
import numpy as np
import re
from tqdm import tqdm
import matplotlib.pyplot as plt
from loadJson import _load_finger_length
from partial_copy import partial_copy
from rgbPcd import interpret_camera_name
# Module-level feature flag; semantics set by consumers elsewhere in the file.
DISABLE_HIGHZ_TOOL=False
# (key_name, index_str) tuples of commands currently executing (see execute_command).
current_runs = set()
# Last answer given at the GPU-selection prompt; reused on subsequent runs (see select_gpu).
last_gpu_input = ''
# Round-trip ruamel handler configured to preserve quoting/indentation on rewrite.
yaml_handler = YAML()
yaml_handler.preserve_quotes = True
yaml_handler.indent(mapping=2, sequence=4, offset=2)
# One-shot/override flags; their consumers are not visible in this chunk.
EXIST_NOTE_IS_CHECKED=False
NEVER_MAKE_LN=False
NEVER_MOD_TRAIN_POLICY=False
class BaseTool:
    def __init__(self, yaml_data, group_name, cli_args):
        """Store the parsed config mapping, the active group name, and the CLI arguments."""
        self.yaml_data = yaml_data
        self.cli_args = cli_args
        self.group_name = group_name
    @staticmethod
    def get_group_key_from_yaml(yaml_data,group_name,query_key):
        if yaml_data is None:
            return None
        group_config=None
        if 'groups' in yaml_data:
            if group_name not in yaml_data['groups']:
                return None
            group_config = yaml_data['groups'][group_name]
        elif 'compact_groups' in yaml_data:
            if group_name not in yaml_data['compact_groups']:
                return None
            group_config = yaml_data['compact_groups'][group_name]
        else:
            print(f"group_name={group_name} not found in yaml_data['groups'] or yaml_data['compact_groups'].{yaml_data.keys()}")
            return None
        if query_key in group_config:
            return group_config[query_key]
        group_type_list = group_config.get('group_type', 'base').split('+')
        for group_type in group_type_list:
            if group_type in yaml_data.get('group_types', {}):
                group_type_config = yaml_data['group_types'][group_type]
                if query_key in group_type_config:
                    return group_type_config[query_key]
        base_config = yaml_data.get('group_types', {}).get('base', {})
        if query_key in base_config:
            return base_config[query_key]
        if query_key in yaml_data:
            return yaml_data[query_key]
        return None
        # assert False, f"query_key={query_key} not found in group={group_name}, group_type={group_type}, base, or yaml root; group_config={group_config}; group_type_config={yaml_data.get('group_types', {}).get(group_type, {})}; base_config={base_config}; yaml_keys={list(yaml_data.keys())}"
        # if not hasattr(self, 'cmd_type'):
    def get_group_key(self, query_key,group_name=None):
        if group_name is None:
            group_name = self.group_name
        if not group_name in self.yaml_data['groups']:
            if group_name != 'real_zarr':
                print(f"group_name={group_name} not found in yaml_data['groups']; available_groups={list(self.yaml_data['groups'].keys())}")
            return ""
        return BaseTool.get_group_key_from_yaml(self.yaml_data,group_name,query_key)
        #     raise NotImplementedError("Subclasses must set self.cmd_type in their __init__ method")
    @staticmethod
    def ensure_policy_configs(yaml_data):
        paths_map = yaml_data.setdefault('paths', {})
        pc_map = paths_map.setdefault('policy_configs', {})
        config_root = Path(__file__).parent.parent/"RoboTwin/policy/DP3/3D-Diffusion-Policy/diffusion_policy_3d/config"
        found = sorted([str(p) for p in config_root.glob(f"robot_dp3-p*.yaml")])
        existing_names = {v.get('name') for v in pc_map.values() if isinstance(v, dict)}
        used_indices = sorted([int(k) for k in pc_map.keys() if str(k).isdigit()])
        next_index = (used_indices[-1] + 1) if used_indices else 1400
        for path in found:
            if path in existing_names:
                continue
            while str(next_index) in pc_map:
                next_index += 1
            pc_map[str(next_index)] = {'name': path, 'status': 'exist', 'note': None}
            next_index += 1
        for idx_str, entry in (pc_map or {}).items():
            if isinstance(entry, dict):
                entry['shortname'] = BaseTool.get_policy_config_shortname(entry.get('name',''))
            entry['status'] = BaseTool.path_to_status('exist',entry.get('name',''),yaml_data)
        if found:
            yaml_data['paths']['policy_configs'] = pc_map
        return yaml_data
    def construct_command(self):
        """Build the shell command for this tool; subclasses must override."""
        raise NotImplementedError
    def get_output_path(self):
        """Path of this tool's primary output artifact.

        Base implementation returns None; subclasses presumably override
        when they produce an output file — not visible in this chunk.
        """
        return None
    def get_file_type(self):
        """Key under yaml 'paths' for this tool: the cmd_type itself for the
        known command kinds, otherwise the group name."""
        own_key_types = ('train', 'eval', 'policy_configs', 'merge_zarr', 'copy_path', 'ln_path', 'trajlocal', 'clip_zarr', 'mask_gen')
        return self.cmd_type if self.cmd_type in own_key_types else self.group_name
    def get_source_flag(self):
        """Identifier of this run's data source; matched against entries'
        'source' field when resolving indices (see _resolve_index).
        Subclasses must implement.
        """
        raise NotImplementedError("Subclasses must implement get_source_flag()")
    def get_note_flag(self):
        """Extra note flag appended to commands; currently always empty."""
        return ""
    def create_log_path(self, index):
        """Create shell_log/<argv[1]>/<cmd_type>/ and return a timestamped .log path.

        Note: `index` is accepted for interface compatibility but unused here.
        """
        stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        log_dir = str(Path(__file__).parent.parent / "shell_log" / sys.argv[1] / self.cmd_type)
        os.makedirs(log_dir, exist_ok=True)
        return f"{log_dir}/{stamp}.log"
    def remove_existing_path(self, path, cmd,override=False):
        """Ensure `path` is free before a run, prompting the user when needed.

        Returns True when the caller may proceed (path absent, removed,
        renamed, or the user chose to continue) and False when the user
        declined. `override=True` skips every check; `cmd` is accepted but
        unused in this method.
        """
        if override:
            return True
        if not os.path.exists(path):
            return True
        path_type = "directory" if os.path.isdir(path) else "file"
        ans=""
        if "test" in self.cmd_type:
            # Test outputs are disposable: remove without prompting.
            ans = 'y'
            # print("auto remove test data")
        else:
            # Only prompt for paths older than two minutes; anything fresher is
            # removed silently.
            # NOTE(review): os.path.getctime is metadata-change time on Linux,
            # not strictly creation time — confirm this matches the intent.
            time_diff_seconds = time.time() - os.path.getctime(path)
            exist_time = str(datetime.timedelta(seconds=int(time_diff_seconds)))
            if int(time_diff_seconds)>120:
                print(colored(f"{path_type.capitalize()} already exists: {path} cmd_type={self.cmd_type} group_name={self.group_name} for ", 'yellow',attrs=['bold'])+colored(exist_time,"red",attrs=['bold']),flush=True)
                while True:
                    ans = input(f"Remove existing {path_type}? (y/n/[c]ontinue/[r]ename): ").strip().lower()
                    if ans in ['y', 'n', 'c','r']:
                        break
                    print("Invalid input, please enter y/n/c/r")
            else:
                ans='y'
        if ans == 'y':
            if os.path.isdir(path):
                if os.path.islink(path):
                    # Symlinked directory: drop the link itself, not its target.
                    os.remove(path)
                else:
                    shutil.rmtree(path)
            else:
                os.remove(path)
            print(f"Removed {path_type}:",end='\r',flush=True)
            print(f"{path}",end='\r',flush=True)
            return True
        elif ans == 'c':
            return True
        elif ans == 'r':
            # Keep the old output by renaming it out of the way ('_b' suffixes).
            self.recur_rename(path)
            return True
        else:
            return False
    def _resolve_index(self):
        """Index for this run: an explicit cli 'num' wins; otherwise look the
        entry up by this tool's source flag under its paths key."""
        if hasattr(self, 'cli_args') and self.cli_args.get('num') is not None:
            return self.cli_args['num']
        file_type = self.get_file_type()
        flag = self.get_source_flag()
        index, _ = self._find_index(file_type, 'source', flag)
        assert index is not None, f"Cannot resolve index for {self.cmd_type} under {file_type} with source={flag} {self.cmd_type} {self.get_file_type()}"
        return index

    def _find_index_everywhere(self, output_path):
        """Search every 'paths' sub-map of self.yaml_data for an entry whose
        'name' equals output_path; returns (index, paths_key) or (None, None)."""
        return BaseTool._find_index_everywhere_static(self.yaml_data, output_path)
    @staticmethod
    def _find_index_everywhere_static(yaml_data, output_path):
        if yaml_data is None:
            return None, None
        for key_under_paths in yaml_data.get('paths',{}).keys():
            resolved_index, _ = BaseTool._find_index_static(yaml_data, key_under_paths, 'name', output_path)
            if resolved_index is not None:
                return resolved_index, key_under_paths
        return None, None

    def _find_index(self, key_under_paths, keytype, content, current_tool_basenum=None,match_type="exact"):
        """Instance wrapper over _find_index_static using self.yaml_data."""
        return BaseTool._find_index_static(self.yaml_data, key_under_paths, keytype, content, current_tool_basenum, match_type)
    @staticmethod
    def _find_index_static( yaml_data, key_under_paths, keytype, content, current_tool_basenum=None,match_type="exact"):
        if yaml_data is None:
            return None, {}
        paths_map =yaml_data.setdefault('paths', {})
        target_map = paths_map.setdefault(key_under_paths, {})
        result_dict = {}
        max_curr_index = -1
        for entry_index in target_map.keys():
            if str(entry_index).isdigit():
                max_curr_index = max(max_curr_index, int(entry_index))
        
        for entry_index, entry_data in target_map.items():
            if match_type == "partial":
                if content in entry_data.get(keytype) :
                    result_dict[entry_index] = entry_data
            else:
                if entry_data.get(keytype) == content:
                    result_dict[entry_index] = entry_data
        
        if len(result_dict) == 0:
            if current_tool_basenum is not None:
                if max_curr_index == -1:
                    resolved_index = current_tool_basenum
                else:
                    resolved_index = max_curr_index + 1
                return resolved_index, target_map
            else:
                return None, target_map
        if match_type == "partial":
            assert len(result_dict) > 0, f"No indices found for {keytype}={content} under {key_under_paths}: {result_dict}"
            return list(result_dict.keys()), target_map
        else:
            assert len(result_dict) == 1, f"{len(result_dict)} indices found for {keytype}={content} under {key_under_paths}: {result_dict}"
            return list(result_dict.keys())[0], target_map
    @staticmethod
    def _find_pathdict_by_index(yaml_data, index, require_zarr=True):
        for group_name, entry_map in yaml_data.get('paths', {}).items():
            if str(index) in entry_map:
                entry_data = entry_map[str(index)]
                path_name = entry_data.get('name')
                if require_zarr:
                    assert path_name and path_name.endswith('.zarr'), f"Index {index} does not point to zarr file: {path_name}"
                return path_name, entry_data,group_name
        assert False, f"Index {index} not found in any path group {yaml_data.get('paths',{}).keys()}"
    def execute_command(self, cmd):
        """Run `cmd` end-to-end for this tool's resolved path entry.

        Resolves the yaml index, confirms/clears any pre-existing output path,
        performs interactive pre-run checks, selects a GPU (substituted for
        '%gpu_id' in the command), streams output to a timestamped log file,
        and records the run status ('generating' -> 'exist'/'failed') back
        into the yaml file.

        Returns (entry_data, result) on completion, or None when the run is
        skipped at the remove prompt or cancelled at the GPU prompt.
        """
        index = self._resolve_index()
        key_name = self.get_file_type()
        entry_map = self.yaml_data.get('paths', {}).get(key_name, {})
        index_str = str(index)
        assert index_str in entry_map, (
            f"No path entry for index={index_str} under key_name={key_name}; "
            f"available_indices={list(entry_map.keys())}"
        )
        entry_data = entry_map[index_str]
        if not self.remove_existing_path(entry_data['name'], cmd):
            print(f"Skipping {self.cmd_type} due to user choice")
            return
        log_file = self.create_log_path(index_str)
        self.update_yaml_data_refresh( ['paths', key_name, index_str, 'log_file'], log_file)
        print(colored(f"Command: ",'magenta',attrs=['bold']),cmd,colored("\nLog: ",'magenta',attrs=['bold']),log_file)
        # Interactive sanity checks before long-running jobs (skippable via CLI flags).
        if self.cmd_type in ['collect_data','collect_test','eval','mask_gen']:
            if not self.cli_args.get('no_demo_clean_check'):
                self.print_demo_task_check()
        if self.cmd_type in ['train']:
            if not self.cli_args.get('no_policy_check'):
                self.print_policy_config_check()
        gpu_stats_map = self.print_gpu_stats()
        try:
            gpu_id = self.select_gpu(gpu_stats_map)
        except KeyboardInterrupt:
            print("\nCancelled")
            self.update_yaml_data_refresh( ['paths', key_name, index_str, 'status'], 'non_exist')
            current_runs.discard((key_name, index_str))
            return
        cmd = f"{self.pre_command_hook()} && {cmd.replace('%gpu_id', str(gpu_id))}"
        self.log_command(cmd)
        print(f"Executing {self.cmd_type} for {self.group_name} original {self.cli_args['original_group_name']} with GPU {gpu_id} log file: {log_file}")
        if hasattr(self, 'print_before_exec'):
            print(self.print_before_exec)
        current_runs.add((key_name, index_str))
        self.update_yaml_data_refresh( ['paths', key_name, index_str, 'status'], 'generating')
        result = self.run_with_realtime_output(cmd, log_file)
        print()
        current_runs.discard((key_name, index_str))
        with open(log_file, 'r') as file_handle:
            log_content = file_handle.read().strip()
        # Bug fix: readlines() after read() always returned [] (file pointer at
        # EOF), so the last-lines summary below never printed. Split the
        # already-read content instead.
        log_lines = log_content.splitlines()
        status = 'exist' if result.returncode == 0 else ('failed')
        print(f"{'✅' if result.returncode==0 else '❌'} {self.cmd_type} {status} for {self.group_name}")
        print(f"Log: {log_file}")
        print(f"output {self.get_output_path()} {self.path_to_status('non_exist',self.get_output_path(), self.yaml_data)}")
        if result.returncode == 0:
            if log_lines:
                for log_line in log_lines[-5:]:
                    print(f"  {log_line.rstrip()}")
        else:
            print(f"Error: {log_content}")
            print(f"Log: {log_file}")
            print(f"status: {status}")
        self.update_yaml_data_refresh( ['paths', key_name, index_str, 'status'], status)
        return entry_data,result

    def log_command(self, command):
        """Append a timestamped '<cmd_type>: <command>' entry to the group's
        <custom_name>_log.yaml file next to this script."""
        log_path = str(Path(__file__).parent / f"{self.get_group_key('custom_name')}_log") + ".yaml"
        if os.path.exists(log_path):
            with open(log_path, 'r') as file_handle:
                current_data = yaml.safe_load(file_handle) or {}
        else:
            current_data = {}
        stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        current_data.setdefault('command_log', {})[stamp] = f"{self.cmd_type}: {command}"
        with open(log_path, 'w') as file_handle:
            yaml.dump(current_data, file_handle, default_flow_style=False, sort_keys=False, default_style=None)
    @staticmethod
    def load_yaml(yaml_path=None):
        """Parse and return a YAML file; default path is <script_dir>/<argv[1]>.yaml.

        Side effect: records the resolved path in BaseTool.yaml_full_path.
        """
        if yaml_path is None:
            BaseTool.yaml_full_path = str(Path(__file__).parent / sys.argv[1]) + ".yaml"
        else:
            BaseTool.yaml_full_path = yaml_path
        with open(BaseTool.yaml_full_path, 'r') as file_handle:
            return yaml.safe_load(file_handle)
    def update_yaml_data_refresh(self,field_path, new_value):
        """Write one field to the run yaml on disk and refresh self.yaml_data
        with the resulting full mapping, which is also returned."""
        refreshed = BaseTool.update_yaml_field(field_path, new_value)
        self.yaml_data = refreshed
        return refreshed
    @staticmethod
    def update_yaml_field(field_path, new_value,file_path=None):
        """Set one field in a YAML file and rewrite it; returns the full mapping.

        field_path may be a plain string key, a list of nested keys
        (intermediate dicts are created as needed), or empty/falsy to replace
        the whole document. Default file is <script_dir>/<argv[1]>.yaml.
        """
        if file_path is None:
            file_path = str(Path(__file__).parent / sys.argv[1]) + ".yaml"
        if os.path.exists(file_path):
            with open(file_path, 'r') as file_handle:
                current_data = yaml.safe_load(file_handle) or {}
        else:
            current_data = {}
        if isinstance(field_path, str):
            current_data[field_path] = new_value
        elif not field_path:
            current_data = new_value
        else:
            cursor = current_data
            for key in field_path[:-1]:
                if key not in cursor:
                    cursor[key] = {}
                cursor = cursor[key]
            cursor[field_path[-1]] = new_value
        with open(file_path, 'w') as file_handle:
            yaml.dump(current_data, file_handle, default_flow_style=False, sort_keys=False, default_style=None)
        return current_data
    def pre_command_hook(self):
        """Shell prefix that activates the '3dpidm' conda environment before
        the actual command runs."""
        return "bash -c 'source ~/.bashrc && eval \"$(conda shell.bash hook)\" && conda activate 3dpidm'"
    def run_with_realtime_output(self, cmd, log_file):
        """Run `cmd` in a bash shell, teeing stdout+stderr to `log_file`.

        The log begins with the command and the source flag. Console echo is
        optionally restricted to lines containing keywords from
        self.realtime_output_filter() (disabled by --no_filter_output).
        Returns a lightweight object exposing only `returncode`.
        """
        with open(log_file, 'w') as file_handle:
            file_handle.write(f"{cmd}\n")
            file_handle.write(f"{self.get_source_flag()}\n")
        filter_keywords = []
        if self.cli_args['no_filter_output']:
            filter_keywords = []
        else:
            if hasattr(self, 'realtime_output_filter'):
                filter_keywords = self.realtime_output_filter()
                if filter_keywords:
                    # Normalize a single keyword to a one-element list.
                    if not isinstance(filter_keywords, list):
                        filter_keywords = [filter_keywords]
        # stderr merged into stdout; line-buffered text mode for live streaming.
        process = subprocess.Popen(cmd,shell=True,executable='/bin/bash', stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, bufsize=1)
        with open(log_file, 'a') as log_handle:
            for line in iter(process.stdout.readline, ''):
                if line:
                    log_handle.write(line)
                    log_handle.flush()
                    line_stripped = line.strip()
                    # No filter keywords means everything is echoed.
                    should_display = any(keyword in line_stripped for keyword in filter_keywords) if filter_keywords else True
                    if should_display:
                        # Overwrite the current console line (carriage return)
                        # so progress stays on one line.
                        print(f"\r{' ' * 100}\r[{self.cmd_type}] {line_stripped}", end='', flush=True)
        
        process.wait()
        # Ad-hoc stand-in mimicking subprocess.CompletedProcess.returncode.
        return type('ProcessResult', (), {'returncode': process.returncode})()
    @staticmethod
    def path_to_status(existing_status, path, yaml_data):
        if existing_status in ['aborted', 'generating']:
            return existing_status
        if path is None:
            return 'non_exist'
        if not os.path.exists(path):
            return 'non_exist'
        path=Path(path)
        if str(path).endswith('pcd_output'):
            path=path.parent
        if str(path).endswith('.zarr'):
            if path.is_symlink():
                index,group_name=BaseTool._find_index_everywhere_static(yaml_data, str(path.parent/path.readlink()))
                return f'exist:ln {group_name} {index}'
        return 'exist'
    def print_gpu_stats(self):
        """Query nvidia-smi and print a colorized id/util/mem table.

        Returns {gpu_id: {'util': percent, 'mem': percent}} with memory
        expressed as a percentage of the card's total.
        """
        result = subprocess.run(['nvidia-smi', '--query-gpu=index,utilization.gpu,memory.used,memory.total', '--format=csv,noheader,nounits'], capture_output=True, text=True)
        gpu_data = {}
        for line in result.stdout.strip().split('\n'):
            if line.strip():
                gpu_id, util, mem_used, mem_total = map(int, line.split(', '))
                mem_pct = int((mem_used / mem_total) * 100) if mem_total > 0 else 0
                gpu_data[gpu_id] = {'util': util, 'mem': mem_pct}
        print("id util mem")
        for gpu_id in sorted(gpu_data.keys()):
            # 24-bit ANSI color: green when idle, fading to red with load.
            red = int(255 * gpu_data[gpu_id]['util'] / 100)
            green = int(255 * (100 - gpu_data[gpu_id]['util']) / 100)
            blue = 0
            util_color = f"\033[38;2;{red};{green};{blue}m{gpu_data[gpu_id]['util']:>3}\033[0m"
            red = int(255 * gpu_data[gpu_id]['mem'] / 100)
            green = int(255 * (100 - gpu_data[gpu_id]['mem']) / 100)
            mem_color = f"\033[38;2;{red};{green};{blue}m{gpu_data[gpu_id]['mem']:>3}\033[0m"
            print(f"{gpu_id} {util_color} {mem_color}")
        return gpu_data
    def select_gpu(self, gpu_stats):
        """Pick a GPU id from `gpu_stats` ({id: {'util': %, 'mem': %}}).

        Non-interactive command types force the 'a' strategy (lowest
        util+mem). Otherwise the previous answer (module-global
        last_gpu_input) is reused when set; accepted inputs are a 0-7 id,
        'm' (lowest mem), 'u' (lowest util), or 'a' (lowest util+mem).
        KeyboardInterrupt propagates to the caller.
        """
        global last_gpu_input
        if self.cmd_type not in ['eval', 'train','collect_test','collect_data']:
            last_gpu_input = 'a'
        while True:
            try:
                if last_gpu_input:
                    user_input = last_gpu_input
                    print(f"Using last GPU input: {user_input}")
                else:
                    user_input = input("GPU ID (0-7/u/m/a)").strip().lower()
                    last_gpu_input = user_input
                if user_input == 'm':
                    # NOTE(review): the 'best_util'/'best_mem' guards in these
                    # key functions look like leftovers — keys produced by
                    # print_gpu_stats are integer ids. Confirm before removing.
                    gpu_id = min(gpu_stats.keys(), key=lambda k: gpu_stats[k]['mem'] if k != 'best_util' and k != 'best_mem' else float('inf'))
                    print(f"Selected GPU {gpu_id} (lowest mem: {gpu_stats[gpu_id]['mem']}%, util: {gpu_stats[gpu_id]['util']}%)")
                    return gpu_id
                elif user_input == 'u':
                    gpu_id = min(gpu_stats.keys(), key=lambda k: gpu_stats[k]['util'] if k != 'best_util' and k != 'best_mem' else float('inf'))
                    print(f"Selected GPU {gpu_id} (lowest util: {gpu_stats[gpu_id]['util']}%, mem: {gpu_stats[gpu_id]['mem']}%)")
                    return gpu_id
                elif user_input == 'a':
                    gpu_id = min(gpu_stats.keys(), key=lambda k: gpu_stats[k]['util'] + gpu_stats[k]['mem'] if k != 'best_util' and k != 'best_mem' else float('inf'))
                    print(f"Selected GPU {gpu_id} (lowest util+mem: {gpu_stats[gpu_id]['util'] + gpu_stats[gpu_id]['mem']}%, util: {gpu_stats[gpu_id]['util']}%, mem: {gpu_stats[gpu_id]['mem']}%)")
                    return gpu_id
                else:
                    gpu_id = int(user_input)
                    if 0 <= gpu_id <= 7 and gpu_id in gpu_stats:
                        print(f"Selected GPU {gpu_id} (util: {gpu_stats[gpu_id]['util']}%, mem: {gpu_stats[gpu_id]['mem']}%)")
                        return gpu_id
                    else:
                        print("Invalid GPU ID")
            except ValueError:
                # Non-numeric, non-shortcut input: re-prompt.
                print("Enter valid integer, 'm', 'u', or 'a'")
            except KeyboardInterrupt:
                raise
    
    def get_demo_task_name(self, collect_num, is_test=False,original_group_name=None):
        """Compose the demo_clean task-config name for this (or another) group.

        When original_group_name is given, that group's own collect count is
        looked up and used instead of collect_num. (is_test is accepted for
        interface compatibility but unused here.)
        """
        group_name = self.group_name
        if original_group_name is not None:
            group_name = original_group_name
            collect_num, _ = SingleGroupTool.get_collect_process_num(self.yaml_data, original_group_name)
        ln_as_type = self.get_group_key('ln_as_type', group_name)
        custom_name = self.get_group_key('custom_name', group_name)
        #NOW NO LONGER SUPPORTING TRIM_ARM AND CAMERA_USE_FULL 0 0
        return f"demo_clean_{custom_name}-{collect_num}-{group_name}-{ln_as_type}-ta1-cam1"
    def get_want_cams(self,pipeline):
        """Cameras to record for `pipeline`: the explicit 'c' selections from
        the pipeline (or all five defaults), plus the mask camera pair derived
        from the pipeline's 'pcd' entry (default 'mb')."""
        full_cams = ['midBack_camera','midFront_camera','midLeft_camera','midRight_camera','midHead_camera']
        #note that midBack_real_camera will not be injected into pointcloud, since unproject.py's TARGET_CAMERAS exclude midBack_real_camera
        want_cams = PipelineAnalyzer.get_key_from_pipeline(pipeline,get_full_key=True,match="c",multi_single_char=True) or full_cams
        for cam in want_cams:
            assert cam in full_cams, f"camera {cam} of {want_cams} not in full_cams {full_cams}.pipeline={pipeline}"
        mask_camstr = PipelineAnalyzer.get_match_from_pipeline(pipeline,match="pcd")
        print('pipeline',pipeline,'mask_camstr',mask_camstr)
        camstr = mask_camstr.split("_")[1] if (mask_camstr and "_" in mask_camstr) else 'mb'
        for extra_cam in (interpret_camera_name(camstr) + "_camera", interpret_camera_name(camstr + "R") + "_camera"):
            if extra_cam not in want_cams:
                want_cams.append(extra_cam)
        return want_cams
    def create_demo_task_yml(self, name, collect_num, pipeline=None,embodiment=None):
        """Clone demo_clean.yml into task_config/<name>.yml and fill it in.

        Copies the template, then overrides episode/collection settings,
        action/observation types, embodiment list, cameras, pipeline, seed
        file, and the output zarr path from the group config and CLI args.
        Returns the written config dict.
        """
        if embodiment is None:
            embodiment=self.embodiment
        source_path = Path(__file__).parent.parent/"RoboTwin/task_config/demo_clean.yml"
        dest_path = Path(__file__).parent.parent/f"RoboTwin/task_config/{name}.yml"
        assert source_path.exists(), f"{source_path} not found"
        shutil.copy2(source_path, dest_path)
        with open(dest_path, 'r') as file_handle:
            content = yaml.safe_load(file_handle)
        content['save_freq'] = int(self.get_group_key('save_freq') or 15)
        content['episode_num'] = collect_num
        content['usemimic'] = bool(self.get_group_key('usemimic'))
        content['eval_rand_success'] = bool(self.get_group_key('eval_rand_success'))
        content['use_ws'] = bool(self.get_group_key('use_ws'))
        use_js_diffusion = bool(self.get_group_key('use_js_diffusion'))
        # Joint-space diffusion switches the action type to the js variant.
        content['action_type'] = self.get_group_key('ln_js_type') if use_js_diffusion else self.get_group_key('ln_as_type')
        content['use_js_diffusion'] = bool(use_js_diffusion)
        content['obs_agent_pos_type'] = self.get_group_key("ln_as_type")
        content['override_qpos'] = list(self.get_group_key('override_qpos')) if self.get_group_key('override_qpos') else []
        content['uniform_qpos_unpack'] = bool(self.get_group_key('uniform_qpos_unpack'))
        content['permissive_debug'] = bool(self.get_group_key('permissive_debug'))
        content['domain_randomization']['cluttered_table'] = bool(self.get_group_key('cluttered_table'))
        dual_arm_dist=float(self.get_group_key('dual_arm_dist')) if self.get_group_key('dual_arm_dist') else 0.8
        assert 'embodiment' in content and isinstance(content['embodiment'], list)
        # Single-embodiment list for aloha-agilex / forced single arm; otherwise
        # a dual-arm triple of [embodiment, embodiment, distance].
        content['embodiment'] = [embodiment] if (embodiment == "aloha-agilex" or self.get_group_key('force_single_arm')) else [embodiment, embodiment, dual_arm_dist]
        content['camera']['pcd_camera_names'] = self.get_want_cams(pipeline)
        content['pipeline'] = pipeline if pipeline else ''
        
        seed_file_path = self.check_seed_file(self.cli_args.get('seed_file'))
        content['seed_file'] = seed_file_path
        
        # NOTE(review): this overwrites the list-normalized override_qpos
        # assigned above with the raw group value (possibly None) — confirm
        # which of the two assignments is intended.
        content['override_qpos'] = self.get_group_key('override_qpos')
        
        zarr_filename = f"{self.get_group_key('task_name')}-{name}-{collect_num}-{self.get_group_key('ln_as_type')}.zarr"
        content['zarr_path'] = f"{Path(__file__).parent.parent}/RoboTwin/policy/DP3/data/{zarr_filename}"
        with open(dest_path, 'w') as file_handle:
            yaml.dump(content, file_handle, default_flow_style=False, sort_keys=False, default_style=None)
        print(f"Created config: {dest_path}")
        return content
    def recur_rename(path):
        path=str(path)
        pathnew=path
        while os.path.exists(pathnew):
            pathnew=pathnew+"_b"
        os.rename(path,pathnew)
        return pathnew
    def print_yaml_check(self,yaml_path,wanted_fields):
        """Interactively display selected fields of a yaml file for review.

        Prints each dotted field path with its value and waits for Enter,
        letting the operator confirm the config before a long run. Missing
        files are reported and skipped.
        """
        if not os.path.exists(yaml_path):
            print(colored(f"yaml config not found: {yaml_path}", 'red', attrs=['bold']))
            return
        print(colored(f"PLEASE FIRST CHECK {os.path.abspath(yaml_path)}", 'blue', attrs=['bold']))
        with open(yaml_path, 'r') as file_handle:
            yaml_data = yaml.safe_load(file_handle)
        for field in wanted_fields:
            value = self.get_nested_value(yaml_data, field) 
            if isinstance(value,dict):
                # Pretty-print nested mappings for readability at the prompt.
                value=json.dumps(value,indent=2)
            input(f"{field}: {value}")
    def print_demo_task_check(self):
        """Walk the operator through the key fields of this run's demo task yml."""
        episodes = 1 if self.cmd_type == 'collect_test' else self.collect_num
        task_yml_path = f"{Path(__file__).parent.parent}/RoboTwin/task_config/{self.get_demo_task_name(episodes)}.yml"
        fields_to_confirm = ['episode_num','use_ws', 'action_type','obs_agent_pos_type','override_qpos','uniform_qpos_unpack','permissive_debug', 'usemimic','camera.pcd_camera_names','ee_location','pipeline','embodiment','use_js_diffusion','domain_randomization.cluttered_table','seed_file','eval_rand_success']
        self.print_yaml_check(task_yml_path, fields_to_confirm)
    def print_policy_config_check(self): 
        """Interactive pre-train review of the resolved DP3 policy config.

        When --resume_ckpt_index is given, offers to flip training.resume to
        True (recording the checkpoint number); when absent but resume is
        still True, offers to flip it back to False. Then walks the operator
        through the critical policy/training fields.
        """
        policy_config_path = self._resolve_policy_config(get_full_path=True)
        policy_config=BaseTool.load_yaml(policy_config_path)
        if self.cli_args['resume_ckpt_index'] is not None:
            resume_source_train_dir, _,_= self._find_pathdict_by_index(self.yaml_data,  self.cli_args['resume_ckpt_index'],require_zarr=False)
            # NOTE(review): Train.path_to_status with get_ckpt_num=True is an
            # override defined elsewhere in this file — not visible here.
            resume_ckpt_num=Train.path_to_status('exist',resume_source_train_dir,self.yaml_data,get_ckpt_num=True)
            status=Train.path_to_status('exist',resume_source_train_dir,self.yaml_data)
            if policy_config['training']['resume'] is False:
                if input(colored(f"SINCE resume_ckpt_index is {self.cli_args['resume_ckpt_index']} status is {status} with checkpoint num  {resume_ckpt_num},\n{resume_source_train_dir} set yaml resume to True?(y/n)", 'cyan', attrs=['bold']))=='y':
                    policy_config['training']['resume'] = True
                    BaseTool.update_yaml_field(['training','resume'],True,policy_config_path)
                    BaseTool.update_yaml_field(['checkpoint_num'],resume_ckpt_num,policy_config_path)
        else:
            if policy_config['training']['resume'] is True:
                if input(colored(f"SINCE no resume_ckpt_index is specified and resume is True, set resume back to False?(y/n)", 'cyan', attrs=['bold']))=='y':
                    policy_config['training']['resume'] = False
                    BaseTool.update_yaml_field(['training','resume'],False,policy_config_path)
        self.print_yaml_check(policy_config_path,['policy.pointcloud_encoder_cfg.encode_time_dim','policy.enable_level2_residue','policy.ee_pred_noise_sigma','training.num_epochs','training.checkpoint_every','training.resume','checkpoint_num','shape_meta.jsaction.shape','shape_meta.obs.agent_js_pos.shape','policy.freeze_ee_diffusion','policy.freeze_ee_encoder','policy.use_pc_color','policy.use_js_diffusion','policy.reshape_batch_dim','policy.ee_pred_encoder_cfg','policy.js_encoder_cfg','policy.ik_cfg','policy.emb_spec_qpos_diffusion','policy.inject_level1_cls','policy.inject_level2_cls','policy.no_level1_loss','dataloader.batch_size'])
    @staticmethod
    def get_policy_config_shortname(policy_config_path):
        """Encode key hyperparameters of a DP3 policy yaml into a compact tag.

        Rough shape: '#' + optional '[action_dim]' + flag letters
        (C=inject_level1_cls, Nl=no_level1_loss, Fd=freeze_ee_diffusion,
        Fe=freeze_ee_encoder) + '(b<batch>[Ns][-d<ik_dim>])' with resume/epoch
        suffixes; when js diffusion is enabled, a second
        ':<level1_out>><emb_spec>$[jsaction_dim]C(...)' segment is appended.
        Returns '' when the file does not exist.
        """
        if not os.path.exists(policy_config_path):
            return ''
        policy_config=BaseTool.load_yaml(policy_config_path)
        ik_ddim = policy_config['policy'].get('ik_cfg',{}).get('down_dims',[0])[0]
        use_js_diffusion = policy_config['policy'].get('use_js_diffusion',False)
        batch_size_d = policy_config['dataloader']['batch_size']
        batch_size_v = policy_config['val_dataloader']['batch_size']
        assert batch_size_d == batch_size_v, f"batch_size in dataloader and val_dataloader must be the same but got dataloader={batch_size_d} and val_dataloader={batch_size_v}"
        emb_spec_qpos_diffusion = policy_config['policy'].get('emb_spec_qpos_diffusion',0)
        inject_level1_cls = policy_config['policy'].get('inject_level1_cls',False)
        inject_level2_cls = policy_config['policy'].get('inject_level2_cls',False)
        no_level1_loss = policy_config['policy'].get('no_level1_loss',False)
        freeze_ee_diffusion = policy_config['policy'].get('freeze_ee_diffusion',False)
        freeze_ee_encoder = policy_config['policy'].get('freeze_ee_encoder',False)
        noshuffle =not  policy_config['dataloader']['shuffle']
        resume = policy_config['training'].get('resume',False)
        resume_ckpt_num = policy_config.get('checkpoint_num',0)
        ckpt_num = policy_config['training'].get("num_epochs",0)
        level1_out_dim = policy_config['policy'].get('level1_out_dim',0)
        noshuffle_str = 'Ns' if noshuffle else ''
        # Batch/dim segment: (b<batch>[Ns][-d<ik_dim>]) plus resume/epoch info.
        if ik_ddim > 0 :
            dim_batch_str=f'(b{batch_size_d}{noshuffle_str}-d{ik_ddim})'
        else:
            dim_batch_str=f'(b{batch_size_d}{noshuffle_str})'
        if resume:
            dim_batch_str += f'R{resume_ckpt_num}-{ckpt_num}'
        elif ckpt_num != 3000:
            # Only non-default epoch counts are shown.
            dim_batch_str += f'{ckpt_num}'
        # Dims come from shape_meta; a non-mapping shape_meta falls back to the
        # 16-dim default layout.
        if isinstance(policy_config.get("shape_meta",{}),dict):
            agent_js_pos_dim=policy_config.get("shape_meta",{}).get("obs",{}).get("agent_js_pos",{}).get("shape",[0])[0]
            agent_pos_dim=policy_config.get("shape_meta",{}).get("obs",{}).get("agent_pos",{}).get("shape",[0])[0]
            action_dim =policy_config.get("shape_meta",{}).get("action",{}).get("shape",[0])[0]
            jsaction_dim =policy_config.get("shape_meta",{}).get("jsaction",{}).get("shape",[0])[0]
        else:
            action_dim = 16
            jsaction_dim = 16
            agent_pos_dim = 16
            agent_js_pos_dim = 16
        assert action_dim == agent_pos_dim, f"action_dim={action_dim} and agent_pos_dim={agent_pos_dim} must be the same"
        assert jsaction_dim == agent_js_pos_dim, f"jsaction_dim={jsaction_dim} and agent_js_pos_dim={agent_js_pos_dim} must be the same"
        shortname = '#'
        if action_dim != 16:
            shortname += f'[{action_dim}]'
        if inject_level1_cls:
            shortname += 'C'
        if no_level1_loss:
            shortname += 'Nl'
        if freeze_ee_diffusion:
            shortname += 'Fd'
        if freeze_ee_encoder:
            shortname += 'Fe'
        # Without js diffusion the tag ends after the batch/dim segment.
        if not use_js_diffusion:
            shortname += dim_batch_str
            return shortname
        shortname +=":"
        if level1_out_dim:
            shortname +=f"{level1_out_dim}>"
        if emb_spec_qpos_diffusion > 0:
            shortname += str(emb_spec_qpos_diffusion)
        shortname +='$'
        if jsaction_dim !=16:
            shortname += f'[{jsaction_dim}]'
        if inject_level2_cls:
            shortname += 'C'
        shortname += dim_batch_str
        return shortname

    def get_nested_value(self, data, field_path):
        """Return the value at dotted `field_path` inside nested dicts, or None.

        Robustness fix: a non-dict intermediate value used to hit `key in value`,
        which performed a substring test on str values and then raised TypeError
        on indexing; any non-mapping intermediate now simply yields None.
        """
        value = data
        for key in field_path.split('.'):
            # Only descend through real mappings; anything else means the path
            # does not resolve.
            if not isinstance(value, dict) or key not in value:
                return None
            value = value[key]
        return value
    
    def check_seed_file(self, seed_file_index):
        """Verify that the dataset referenced by `seed_file_index` has a seed.txt.

        Returns the seed file path (None when no index is given) and warns when
        fewer than 100 whitespace-separated integer seeds are recorded.
        """
        if seed_file_index is None:
            return None
        # (removed unused local `seed_file_index_str`)
        zarr_path, _, _ = self._find_pathdict_by_index(self.yaml_data, seed_file_index, require_zarr=False)
        seed_file_path = os.path.join(zarr_path, "seed.txt")
        # BUGFIX: the message previously named os.path.dirname(zarr_path), the
        # parent directory, not the directory actually searched.
        assert os.path.exists(seed_file_path), f"seed.txt not found in {zarr_path}"
        with open(seed_file_path, 'r') as f:
            seed_line = f.read().strip()
            seeds = [int(x) for x in seed_line.split()]
        if len(seeds) < 100:
            print(colored(f"Warning: Only {len(seeds)} seeds exist in {seed_file_path}", 'yellow', attrs=['bold']))
        else:
            print(colored(f"Found {len(seeds)} seeds in {seed_file_path}", 'green', attrs=['bold']))
        return seed_file_path
    
    
    def set_nested_value(self, data, field_path, new_value):
        """Write `new_value` at dotted `field_path`, creating nested dicts as needed."""
        *parent_keys, leaf_key = field_path.split('.')
        node = data
        for part in parent_keys:
            # Create intermediate dicts lazily on the way down.
            node = node.setdefault(part, {})
        node[leaf_key] = new_value
    def get_zarr_episode_count(self, zarr_path):
        """Number of episodes recorded in the zarr's meta/episode_ends array."""
        # Reject empty / whitespace-only paths before touching the store.
        if not zarr_path or not zarr_path.strip():
            raise ValueError(f"Invalid zarr_path: {zarr_path!r}")
        store = zarr.open(zarr_path, 'r')
        episode_ends = store['meta']['episode_ends']
        return episode_ends.shape[0]
    def _extract_group_name_recursive(self, index):
        """Resolve a path index into {group_name: {'emb': ..., 'length': episode_count}}.

        Recurses through 'merge_zarr' entries (union of all source indices) and
        'clip_zarr' entries (episode-range slice of one source); any other path
        kind is treated as a leaf zarr whose episode count is read from disk.
        Exits the process if a leaf zarr cannot be read.
        """
        paths_map = self.yaml_data.get('paths', {})
        index_str = str(index)
        for key_name, entry_map in paths_map.items():
            # These top-level keys are not data-path groups.
            if key_name in ['train', 'eval', 'policy_configs']: continue
            if index_str in entry_map:
                entry = entry_map[index_str]
                if key_name == 'merge_zarr':
                    # Merge: 'source' is a whitespace list of indices; union
                    # their group dicts (later sources win on key collisions).
                    source_str = entry.get('source', '')
                    indices = [int(x) for x in source_str.split() if x.isdigit()]
                    group_dicts = [self._extract_group_name_recursive(idx) for idx in indices]
                    result_dict = {}
                    for group_dict in group_dicts:
                        result_dict.update(group_dict)
                    return result_dict
                elif key_name == 'clip_zarr':
                    # Clip: 'source' has the format "-clip <index> <start> <end>".
                    source_str = entry.get('source', '')
                    parts = source_str.split()
                    assert len(parts) >= 4 and parts[0] == '-clip', f"Invalid clip_zarr source format: {source_str}"
                    source_index = int(parts[1])
                    start_episode = int(parts[2])
                    end_episode = int(parts[3])
                    source_dict = self._extract_group_name_recursive(source_index)
                    clipped_dict = {}
                    for group_name, group_info in source_dict.items():
                        original_length = group_info['length']
                        # Each group keeps at most (end - start) episodes;
                        # groups clipped to zero are dropped entirely.
                        clipped_length = min(original_length, end_episode - start_episode)
                        if clipped_length > 0:
                            clipped_dict[group_name] = {
                                'emb': group_info['emb'],
                                'length': clipped_length
                            }
                    return clipped_dict
                else:
                    # Leaf entry: 'name' must point at a concrete zarr path.
                    name_value = entry.get('name', '')
                    if not name_value or name_value.strip() == '':
                        raise ValueError(f"Entry {index} ({key_name}) has empty or missing 'name' field: {entry}")
                    try:
                        episode_count = self.get_zarr_episode_count(name_value)
                        group_name = key_name
                        result_dict = {
                            group_name: {
                                'emb': self.get_group_key('embodiment', group_name),
                                'length': episode_count
                            }
                        }
                        return result_dict
                    except Exception as e:
                        # Abort the whole run: downstream bookkeeping is useless
                        # without a readable leaf zarr.
                        traceback.print_exc()
                        print(f"Failed to process entry {index} ({key_name}): {e}. Entry data: {entry}")
                        exit(1)
        assert False, f"Index {index} not found in any path group"
    def remove_all_pcd_output_directory(self, zarr_path):
        """Remove every <prefix>_pcd_output directory next to `zarr_path`.

        Stops at the first directory the user declines to remove.
        """
        base = Path(zarr_path)
        # When given a *_pcd_output dir itself, operate on its parent.
        if str(zarr_path).endswith('pcd_output'):
            base = base.parent
        for prefix in "spirR":
            candidate = f"{base}/{prefix}_pcd_output"
            if not self.remove_existing_path(candidate, self.cmd_type):
                print(f"Skipping {self.cmd_type} due to user choice")
                return
    def list_all_pcd_output_directory(self, zarr_path):
        """Print the contents of each known <prefix>_pcd_output directory."""
        for prefix in "spirR":
            self.list_pcd_output_directory(f"{zarr_path}/{prefix}_pcd_output")
    def list_pcd_output_directory(self, pcd_output_dir):
        """Print every .pcd file in `pcd_output_dir`, highlighting files whose
        parent directory's first letter matches the current ln-status char.

        Fix: os.path.getsize() was called per file but its result was never
        used, costing one stat() per file for nothing.
        """
        header = colored(os.path.basename(pcd_output_dir), color='magenta', attrs=['bold'])
        ln_type = get_ln_status(pcd_output_dir)
        if not os.path.exists(pcd_output_dir):
            print(header, f"dir not exist")
            return
        pcd_files = sorted(f for f in os.listdir(pcd_output_dir) if f.endswith('.pcd'))
        if not pcd_files:
            print(header, "file not exist")
            return
        for pcd_file in pcd_files:
            full_path = os.path.join(pcd_output_dir, pcd_file)
            # Highlight when the parent dir's first letter equals ln_type[1].
            if str(Path(full_path).parent.name)[0] == ln_type[1]:
                print(header, colored(f"{full_path}", 'blue', attrs=['bold']), colored(ln_type, "cyan", attrs=['bold']))
            else:
                print(header, f"{full_path}", colored(ln_type, "cyan", attrs=['bold']))

class SingleGroupTool(BaseTool):
    """Tool bound to one named group in yaml_data['groups'].

    Resolves the group's embodiment and collect/process episode counts at
    construction time, and offers pipeline-template helpers.
    """
    def __init__(self, yaml_data, group_name, cli_args):
        assert group_name, f"Invalid group_name: {group_name!r}; available_groups={list(yaml_data.get('groups', {}).keys())}"
        assert 'groups' in yaml_data and group_name in yaml_data['groups'], f"'groups' missing or group '{group_name}' not found; yaml_keys={list(yaml_data.keys())}; available_groups={list(yaml_data.get('groups', {}).keys())}"
        super().__init__(yaml_data, group_name, cli_args)
        self.emb_config = yaml_data['groups'][group_name]
        self.embodiment = self.emb_config.get('embodiment')
        assert self.embodiment, f"Missing 'embodiment' for group='{group_name}'; group_config={self.emb_config}"
        self.target_embodiment = self.emb_config.get('target_embodiment')
        self.collect_num, self.process_num = SingleGroupTool.get_collect_process_num(self.yaml_data, self.group_name)
        assert self.collect_num > 0, f"No collect_num specified for {group_name}"
    @staticmethod
    def get_collect_process_num(yaml_data, group_name):
        """Return (collect_num, process_num) for a group.

        'process' falls back to 'collect'. For plain ur5-wsg groups (no target
        embodiment) collecting more than one episode, collect_num is bumped to
        1.5x process_num to leave headroom for downstream filtering.
        """
        collect_num = BaseTool.get_group_key_from_yaml(yaml_data, group_name, 'collect')
        process_num = BaseTool.get_group_key_from_yaml(yaml_data, group_name, 'process') or collect_num
        emb = BaseTool.get_group_key_from_yaml(yaml_data, group_name, 'embodiment')
        target_emb = BaseTool.get_group_key_from_yaml(yaml_data, group_name, 'target_embodiment')
        # BUGFIX: the assert message referenced `emb_config`, a name that does
        # not exist in this staticmethod, so a failing assert raised NameError
        # instead of the intended diagnostic.
        assert str(collect_num).isdigit() and str(process_num).isdigit(), f"collect_num={collect_num} and process_num={process_num} must be digits. emb={emb} target_emb={target_emb}"
        collect_num = int(collect_num)
        process_num = int(process_num)
        if emb == 'ur5-wsg' and target_emb is None and process_num == collect_num and collect_num > 1:
            collect_num = int(process_num * 1.5)
        return collect_num, process_num
    def replace_pipeline_emb(self, pipeline):
        """Substitute placeholders: '%' -> own embodiment char, '$' -> target embodiment char."""
        emb_config = self.emb_config
        embodiment = emb_config.get('embodiment')
        target_embodiment = emb_config.get('target_embodiment')
        inv_emb_map = _load_inv_emb_name_map()
        target_char = ''
        assert pipeline is not None
        if '$' in pipeline:
            # '$' only makes sense when a target embodiment is configured.
            assert target_embodiment, f"Pipeline {pipeline} contains '$' but no target_embodiment specified for emb_config '{self.group_name}' with emb{embodiment} targemb{target_embodiment} emb_config {emb_config}"
            target_char = inv_emb_map.get(target_embodiment)
            assert target_char, f"Target embodiment '{target_embodiment}' not found in inv_emb_map"
        emb_char = inv_emb_map.get(embodiment)
        assert emb_char, f"Embodiment '{embodiment}' not found in inv_emb_map"
        return pipeline.replace('%', emb_char).replace('$', target_char)
    def get_pipeline(self, pipe_type, is_test=False):
        """Fetch pipeline_<pipe_type> for this group and apply embodiment substitution.

        For 'process', cross-checks that pipeline_process_test reduces to the
        real pipeline (exiting otherwise); warns when a non-test pipeline
        contains 'save'.
        """
        emb_config = self.emb_config
        group_type_str = emb_config.get('group_type', 'base')
        pipeline = self.get_group_key(f'pipeline_{pipe_type}')
        assert pipeline is not None, f"pipeline_{pipe_type} not found for group={self.group_name}, group_type={group_type_str}, emb_config={emb_config}"
        if pipe_type == 'process':
            pipeline_process_test = self.get_group_key('pipeline_process_test')
            if PipelineAnalyzer.test_to_real(pipeline_process_test) != pipeline:
                print(f"after removing j* k save*, pipeline_process_test={pipeline_process_test} shall equal to pipeline_process={pipeline}")
                exit(1)
        if ("save" in pipeline) and not is_test:
            print(f"pipeline_{pipe_type} contains save: {pipeline} which is not recommended if is_test={is_test}, emb_config={emb_config}")
        return self.replace_pipeline_emb(pipeline)
class GeneralTool(BaseTool):
    """Tool not bound to a single group; BaseTool behavior is used unchanged.

    The redundant __init__ that merely delegated to BaseTool.__init__ with an
    identical signature was removed; Python inherits it automatically.
    """
class CollectTest(SingleGroupTool):
    """Collect a single test episode for one group via RoboTwin's collect_data.sh."""
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'collect_test'
    def remove_existing_path(self, path, cmd):
        """Interactively clean a previous collection directory.

        Returns True when removal happened (caller may proceed) and False when
        the user chose to keep the files; paths without episode HDF5s fall back
        to the base-class handler.
        """
        if os.path.isdir(path):
            data_dir = os.path.join(path, "data")
            hdf5_files = []
            if os.path.exists(data_dir):
                hdf5_files = [f for f in os.listdir(data_dir) if f.startswith("episode") and f.endswith(".hdf5")]
            if hdf5_files:
                print(f"Found HDF5 files in data/: {', '.join(sorted(hdf5_files))}\nOptions:\n  y - Remove only HDF5 files ({len(hdf5_files)} files)\n  all - Remove entire directory '{os.path.basename(path)}' (including all files and subdirectories)\n  n - Keep all files")
                while True:
                    user_input = input("Choose action (y/n/all): ").strip().lower()
                    if user_input == 'y':
                        for hdf5_file in hdf5_files:
                            os.remove(os.path.join(data_dir, hdf5_file))
                        print(f"✓ Removed {len(hdf5_files)} HDF5 files from {data_dir}")
                        return True
                    elif user_input == 'all':
                        shutil.rmtree(path)
                        print(f"✓ Removed entire directory: {path}")
                        return True
                    elif user_input == 'n':
                        # BUGFIX: 'n' is offered in the menu above but used to fall
                        # into the invalid-input branch, trapping the user in the loop.
                        return False
                    else:
                        print(f"Invalid input: {user_input}")
        return super().remove_existing_path(path, cmd)
    @staticmethod
    def path_to_status(existing_status, path, yaml_data):
        """Summarize what exists at `path`: hdf5 episodes, pkl trajectories, or defer to base."""
        if not os.path.exists(path):
            return "non_exist"
        data_path = os.path.join(path, "data")
        hdf5_count = len(glob(os.path.join(data_path, "*.hdf5")))
        if hdf5_count > 0:
            return f"exist:{hdf5_count} hdf5"
        traj_data_path = os.path.join(path, "_traj_data")
        pkl_count = len(glob(os.path.join(traj_data_path, "*.pkl")))
        if pkl_count > 0:
            return f"exist:{pkl_count} pkl"
        return BaseTool.path_to_status(existing_status, path, yaml_data)
    def get_source_flag(self):
        """CLI flags that reproduce this tool invocation."""
        return f"-ct -g {self.group_name}{self.get_note_flag()}"
    def construct_command(self):
        """Shell command collecting one test episode (%gpu_id is filled in later)."""
        name_value = self.get_demo_task_name(1, is_test=True)
        root_path = f"{Path(__file__).parent.parent}/RoboTwin"
        return f'cd {root_path}; bash collect_data.sh {self.get_group_key("task_name")} {name_value} %gpu_id'
    def get_output_path(self):
        """Directory where collect_data.sh writes the single test episode."""
        collect_num = 1
        name_value = self.get_demo_task_name(collect_num, is_test=True)
        return f"{Path(__file__).parent.parent}/RoboTwin/data/{self.get_group_key('task_name')}/{name_value}"
    def execute_command(self, cmd):
        """Write the demo-task yaml, run the collection, then flag permissive_debug."""
        pipeline_to_use = self.get_pipeline('collect', is_test=True)
        demo_task_yaml = self.create_demo_task_yml(self.get_demo_task_name(1, is_test=True), 1, pipeline=pipeline_to_use)
        super().execute_command(cmd)
        if demo_task_yaml.get('permissive_debug', False):
            print(colored("permissive_debug is enabled for "+self.group_name+" Don't use in production","red",attrs=["bold"]))
    def realtime_output_filter(self):
        """Only stream lines tagged [LOGPRINT] while the command runs."""
        return ["[LOGPRINT]"]
class CollectData(SingleGroupTool):
    """Collect the full episode set for one group via RoboTwin's collect_data.sh."""
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'collect_data'
    def get_source_flag(self):
        """CLI flags that reproduce this tool invocation."""
        return f"-c -g {self.group_name}{self.get_note_flag()}"
    def construct_command(self):
        """Shell command collecting collect_num episodes (%gpu_id filled in later)."""
        name_value = self.get_demo_task_name(self.collect_num, is_test=False)
        root_path = f"{Path(__file__).parent.parent}/RoboTwin"
        return f'cd {root_path}; bash collect_data.sh {self.get_group_key("task_name")} {name_value} %gpu_id'
    def get_output_path(self):
        """Directory where collect_data.sh writes the collected episodes."""
        name_value = self.get_demo_task_name(self.collect_num, is_test=False)
        return f"{Path(__file__).parent.parent}/RoboTwin/data/{self.get_group_key('task_name')}/{name_value}"
    def execute_command(self, cmd):
        """Write the demo-task yaml, run the collection, report status, then
        optionally run the high-z filter and flag permissive_debug."""
        pipeline_to_use = self.get_pipeline('collect',is_test=False)
        demo_task_yaml=self.create_demo_task_yml(self.get_demo_task_name(self.collect_num, is_test=False), self.collect_num, pipeline=pipeline_to_use)
        super().execute_command(cmd)
        output_path = self.get_output_path()
        print(f"{output_path} {self.path_to_status('non_exist',output_path, self.yaml_data)}")
        # More was collected than will be processed: run the high-z filter to
        # trim the surplus right after collection.
        if self.collect_num>self.process_num:
            print(f"Running highz filter for {self.group_name} after collect_data")
            highz_tool = HighZFilter(self.yaml_data, self.group_name, self.cli_args)
            highz_cmd = highz_tool.construct_command()
            highz_tool.execute_command(highz_cmd)
        if demo_task_yaml.get('permissive_debug',False):
            print(colored("permissive_debug is enabled for "+self.group_name+" Don't use in production","red",attrs=["bold"]))
class ProcessTest(SingleGroupTool):
    """Process a single collected test episode into a DP3 zarr."""
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'process_test'
    def get_source_flag(self):
        """CLI flags that reproduce this tool invocation."""
        return f"-pt -g {self.group_name}{self.get_note_flag()}"
    def construct_command(self):
        """Shell command running DP3's process_data.sh on one test episode."""
        root_path = f"{Path(__file__).parent.parent}/RoboTwin"
        name_value = self.get_demo_task_name(1, is_test=True)
        return f'cd {root_path}/policy/DP3; bash process_data.sh {self.get_group_key("task_name")} {name_value} 1 {self.get_group_key("ln_as_type")}'
    def get_output_path(self):
        """Zarr path process_data.sh is expected to produce for the test run."""
        process_num = 1
        name_value = self.get_demo_task_name(1, is_test=True)
        return f"{Path(__file__).parent.parent}/RoboTwin/policy/DP3/data/{self.get_group_key('task_name')}-{name_value}-{process_num}-{self.get_group_key('ln_as_type')}.zarr"
class ProcessData(ProcessTest):
    """Process the full collected dataset into a DP3 zarr."""
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'process_data'
    def get_source_flag(self):
        """CLI flags that reproduce this tool invocation."""
        return f"-p -g {self.group_name}{self.get_note_flag()}"
    def construct_command(self):
        """Shell command processing all collected episodes via process_data.sh."""
        root_path = f"{Path(__file__).parent.parent}/RoboTwin"
        name_value = self.get_demo_task_name(self.collect_num, is_test=False)
        return f'cd {root_path}/policy/DP3; bash process_data.sh {self.get_group_key("task_name")} {name_value} {self.process_num} {self.get_group_key("ln_as_type")}'
    def get_output_path(self):
        """Zarr path produced by the full processing run."""
        name_value = self.get_demo_task_name(self.collect_num, is_test=False)
        return f"{Path(__file__).parent.parent}/RoboTwin/policy/DP3/data/{self.get_group_key('task_name')}-{name_value}-{self.process_num}-{self.get_group_key('ln_as_type')}.zarr"


class DataTraverserCollectTest(SingleGroupTool):
    """Run dataTraverser.py over a single-episode test zarr so the collect
    pipeline's pcd output can be inspected frame by frame."""
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'data_traverser_collect_test'
    def get_dt_pipeline(self):
        """Pipeline string used for this traversal (collect-test variant)."""
        return self.get_pipeline('collect_test',is_test=True)
    def get_source_flag(self):
        """CLI flags that reproduce this tool invocation."""
        return f"-dct -g {self.group_name}{self.get_note_flag()}"
    def get_test_name_value(self):
        """Demo task name for the single-episode test dataset."""
        return self.get_demo_task_name(1, is_test=True)
    def get_source_zarr(self):
        """Source zarr path; an explicit CLI train_zarr_index overrides the
        derived DP3 data path."""
        if self.cli_args.get("train_zarr_index"):
            zarr_path, _,_ = self._find_pathdict_by_index(self.yaml_data, self.cli_args.get("train_zarr_index"))
            return zarr_path
        name_value = self.get_test_name_value()
        root_path = f"{Path(__file__).parent.parent}/RoboTwin"
        return f"{root_path}/policy/DP3/data/{self.get_group_key('task_name')}-{name_value}-1-{self.get_group_key('ln_as_type')}.zarr"
    def get_eparam(self):
        """'-e' episode-selection flag; test variant only looks at episode 0."""
        return "-e 0"
    def construct_command(self):
        """dataTraverser command dumping frames 0-200 (step 100) of the chosen episodes."""
        source_zarr = self.get_source_zarr()
        pipeline_replaced = self.get_dt_pipeline()
        eparam = self.get_eparam()
        return f'cd {Path(__file__).parent}; python3 dataTraverser.py -z {source_zarr} -p {pipeline_replaced} -f 0-200-100 {eparam}'
    def get_output_path(self):
        """Directory where the traversal drops its pcd previews."""
        name_value = self.get_test_name_value()
        zarr_path = f"{Path(__file__).parent.parent}/RoboTwin/policy/DP3/data/{self.get_group_key('task_name')}-{name_value}-1-{self.get_group_key('ln_as_type')}.zarr"
        pipeline_out_key = PipelineAnalyzer.get_key_from_pipeline(self.get_dt_pipeline())
        return f"{zarr_path}/{pipeline_out_key}_pcd_output"
    def execute_command(self, cmd):
        """Confirm the pipeline with the user, clear stale pcd output, run the
        traversal, and list the fresh output for test variants."""
        # Non-test subclasses (e.g. the real process run) first sanity-check
        # the source zarr with a single test.
        if 'test' not in self.cmd_type:
            source_test = DataTraverserSingleTest(self.yaml_data, self.group_name, self.cli_args, self.get_source_zarr())
            source_test.execute_command()
        pipeline=self.get_dt_pipeline()
        check_prompt = f"confirm pipeline as expected: "+colored(pipeline+" group_name="+self.group_name,"cyan",attrs=["bold"])+f" for {self.cmd_type}"
        # Interactive confirmation unless explicitly disabled via CLI.
        if not self.cli_args.get("no_pipeline_check"):
            input(check_prompt)
        else:
            print(check_prompt)
        self.remove_all_pcd_output_directory(Path(self.get_output_path()))
        entry_data,result = super().execute_command(cmd)
        if result.returncode == 0 and self.cmd_type.endswith('test'):
            self.list_all_pcd_output_directory(Path(entry_data['name']).parent)
        print(colored(pipeline+" group_name="+self.group_name,"cyan",attrs=["bold"])+f" for {self.cmd_type}")
        

class DataTraverserCollectFullTest(DataTraverserCollectTest):
    """Data-traverser test over the full processed dataset (last episodes)."""
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'data_traverser_collect_full_test'
    def get_dt_pipeline(self):
        """Pipeline string for the full collect test."""
        return self.get_pipeline('collect_full_test', is_test=True)
    def get_source_flag(self):
        """CLI flags that reproduce this tool invocation."""
        return f"-dcft -g {self.group_name}{self.get_note_flag()}"
    def get_eparam(self):
        """Build the '-e' episode-range flag around the last available episode."""
        # FIX: the episode count was previously read twice, opening the zarr
        # store twice; read it once and reuse the value.
        episode_count = self.get_zarr_episode_count(self.get_source_zarr())
        last_episode = self.process_num - 1
        if episode_count < self.process_num:
            last_episode = episode_count - 1
        eparam = "-e 0"
        if last_episode > 0:
            # Inspect a 3-episode window centered on the last episode.
            eparam = f"-e {last_episode-1}-{last_episode+1}"
        return eparam
    def get_test_name_value(self):
        """Demo task name for the full (non-test) dataset."""
        return self.get_demo_task_name(self.collect_num, is_test=False)
    def get_source_zarr(self):
        """Source zarr path; an explicit CLI train_zarr_index overrides the derived path."""
        if self.cli_args.get("train_zarr_index"):
            zarr_path, _, _ = self._find_pathdict_by_index(self.yaml_data, self.cli_args.get("train_zarr_index"))
            return zarr_path
        name_value = self.get_test_name_value()
        root_path = f"{Path(__file__).parent.parent}/RoboTwin"
        return f"{root_path}/policy/DP3/data/{self.get_group_key('task_name')}-{name_value}-{self.process_num}-{self.get_group_key('ln_as_type')}.zarr"
    def get_output_path(self):
        """Directory where the traversal drops its pcd previews."""
        name_value = self.get_test_name_value()
        zarr_path = f"{Path(__file__).parent.parent}/RoboTwin/policy/DP3/data/{self.get_group_key('task_name')}-{name_value}-{self.process_num}-{self.get_group_key('ln_as_type')}.zarr"
        pipeline_out_key = PipelineAnalyzer.get_key_from_pipeline(self.get_dt_pipeline())
        return f"{zarr_path}/{pipeline_out_key}_pcd_output"

class DataTraverserProcessTest(DataTraverserCollectFullTest):
    """Dry-run of the processing pipeline via dataTraverser on test frames."""
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'data_traverser_process_test'
    def get_source_flag(self):
        """CLI flags that reproduce this tool invocation."""
        return f"-dpt -g {self.group_name}{self.get_note_flag()}"
    def get_dt_pipeline(self):
        """Pipeline string for the process-test traversal."""
        return self.get_pipeline('process_test', is_test=True)

class DataTraverserProcess(DataTraverserProcessTest):
    """Run the real processing pipeline via dataTraverser, writing a new zarr."""
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'data_traverser_process'
    def get_dt_pipeline(self):
        """Pipeline string for the real (non-test) processing run."""
        return self.get_pipeline('process', is_test=False)
    def get_source_flag(self):
        """CLI flags that reproduce this tool invocation."""
        return f"-dp -g {self.group_name}{self.get_note_flag()}"
    def construct_command(self):
        """dataTraverser command reading the source zarr and writing the output zarr."""
        src = self.get_source_zarr()
        pipe = self.get_dt_pipeline()
        dst = self.get_output_path()
        # Fail fast if the source zarr does not hold exactly process_num episodes.
        self.validate_zarr_episode_count(src, self.process_num)
        return f'cd {Path(__file__).parent}; python3 dataTraverser.py -z {src} -p {pipe} -o {dst}'
    def get_output_path(self):
        """Output zarr path; the pipeline string is embedded in the file name."""
        name_value = self.get_test_name_value()
        return f"{Path(__file__).parent.parent}/RoboTwin/policy/DP3/data/{self.get_group_key('task_name')}-{name_value}-{self.process_num}-{self.get_group_key('ln_as_type')}-{self.get_dt_pipeline()}.zarr"
    def validate_zarr_episode_count(self, zarr_path, expected_episode_count):
        """Assert the zarr holds exactly the expected number of episodes."""
        actual_episode_count = self.get_zarr_episode_count(zarr_path)
        assert actual_episode_count == expected_episode_count, f"expected episode count ({expected_episode_count}) != actual episode count ({actual_episode_count}) in {zarr_path}"
        return True
class DataTraverserAftProcessTest(DataTraverserProcess):
    """Visual check of the aft-process pipeline on an already-processed zarr."""
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'data_traverser_aft_process_test'
    def get_dt_pipeline(self, aft_process=True):
        """Pipeline string; aft_process=False yields the upstream process pipeline."""
        key = 'aft_process_test' if aft_process else 'process'
        return self.get_pipeline(key, is_test=True)
    def get_output_zarr(self):
        """Zarr produced by the earlier processing run (the input for this test)."""
        name_value = self.get_test_name_value()
        return f"{Path(__file__).parent.parent}/RoboTwin/policy/DP3/data/{self.get_group_key('task_name')}-{name_value}-{self.process_num}-{self.get_group_key('ln_as_type')}-{self.get_dt_pipeline(aft_process=False)}.zarr"
    def get_source_flag(self):
        """CLI flags that reproduce this tool invocation."""
        return f"-dapt -g {self.group_name}{self.get_note_flag()}"
    def construct_command(self):
        """dataTraverser frame-dump command over the processed zarr."""
        src = self.get_output_zarr()
        pipe = self.get_dt_pipeline()
        eparam = self.get_eparam()
        return f'cd {Path(__file__).parent}; python3 dataTraverser.py -z {src} -p {pipe} -f 0-200-100 {eparam}'
    def get_output_path(self):
        """Directory where the traversal drops its pcd previews."""
        key = PipelineAnalyzer.get_key_from_pipeline(self.get_dt_pipeline())
        return f"{self.get_output_zarr()}/{key}_pcd_output"
class DataTraverserSingleTest(GeneralTool):
    """Interactive sanity check of a single zarr dataset.

    Replays one episode through dataTraverser.py, prints selected arrays for
    manual inspection, and asks the operator to confirm the result or record
    a note on the matching entry in the paths YAML.  Also hosts maintenance
    helpers (``emb_cls`` / ``timestep`` arrays) for merged zarrs.
    """
    def __init__(self, yaml_data, group_name, cli_args, zarr_path=None):
        # Resolve the zarr path either from the caller or from the
        # single_test index in the CLI arguments.
        if cli_args.get('non_run'):
            raise NotImplementedError("NO CONSTRUCTED PATH FOR construct_all_path")
        if zarr_path is None:
            single_index = cli_args.get('single_test')
            assert single_index is not None, "single_test index required when zarr_path not provided"
            zarr_path, _, _ = self._find_pathdict_by_index(yaml_data, single_index)
        self.zarr_path = str(zarr_path)
        assert os.path.exists(self.zarr_path), f"Zarr path does not exist: {self.zarr_path}"
        assert self.zarr_path.endswith('.zarr'), f"Path must end with .zarr: {self.zarr_path}"
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'single_test'
        self.episode_count = self.get_zarr_episode_count(self.zarr_path)
        self.last_episode_idx = self.episode_count - 1
        assert self.last_episode_idx >= 0, f"Invalid episode count: {self.episode_count}"

    def _resolve_index(self):
        return -1  # since it has its own execute_command

    def construct_command(self):
        # Commands are built per-episode inside execute_command_inner.
        return None

    def get_source_flag(self):
        """CLI flags that reproduce this invocation."""
        return f"-s {self.cli_args.get('single_test')}{self.get_note_flag()}"

    def get_output_path(self):
        # This tool only inspects an existing zarr; it produces no artifact.
        return None

    def print_zarr_fields(self, zarr_group, fields, episode_timestep):
        """Print the shape and one timestep of each field path that exists.

        ``fields`` is a list of key strings or key-path lists; paths missing
        at any level are silently skipped.  Out-of-range timesteps fall back
        to the last row, with a note in the printed output.
        """
        for field in fields:
            if isinstance(field, str):
                field_path = [field]
            else:
                field_path = field
            current_group = zarr_group
            for i, key in enumerate(field_path):
                if key not in current_group:
                    break
                if i == len(field_path) - 1:
                    timestep = episode_timestep
                    array_length = current_group[key].shape[0]
                    if timestep >= array_length:
                        array_data = f"ts:{array_length} {list(np.round(current_group[key][array_length-1], 3))} (original call {timestep} out of bounds, max: {array_length-1})"
                    else:
                        array_data = f"ts:{timestep} {list(np.round(current_group[key][timestep], 3))}"
                    print(colored(f"{'.'.join(field_path)}: {current_group[key].shape} ", 'cyan', attrs=['bold']), array_data)
                else:
                    current_group = current_group[key]

    def print_as_jas_data(self, zarr_path, episode_num):
        """Dump state/action fields of one episode for manual inspection."""
        zarr_group = zarr.open(zarr_path, mode='r')
        episode_ends = zarr_group['meta']['episode_ends']
        # NOTE(review): episode_num == 0 would index episode_ends[-1] here —
        # confirm callers always pass 1-based episode numbers.
        episode_timestep = episode_ends[episode_num-1] - 1
        print("episode_timestep", episode_num, episode_ends[episode_num-1]-1)
        # Stores may carry the eelocal-style layout (printed at timestep 0)
        # and/or the standard data layout (printed at the episode's last
        # timestep); print_zarr_fields skips whichever is absent.
        self.print_zarr_fields(zarr_group, [['eelocal', 'state'], ['ee', 'state'], ['qpos', 'state']], 0)
        self.print_zarr_fields(zarr_group, [['data', 'action'], ['data', 'state'], ['data', 'js_action'], ['data', 'js_state']], episode_timestep)

    def execute_command_inner(self, episode_num=None):
        """Run dataTraverser on one episode and interactively confirm it.

        Returns the subprocess return code, or -1 when the zarr path has no
        index entry in the YAML.
        """
        index, key_name = self._find_index_everywhere(self.zarr_path)
        if index is None:
            print(f"Could not find corresponding index {index} in YAML path {self.zarr_path}")
            return -1
        print(colored(f"original ln status {get_ln_status(self.zarr_path)}","cyan",attrs=['bold']))
        print(colored(f"ref:{self.group_name} ln status {get_ln_status(self.zarr_path,self.yaml_data,self.group_name)}","cyan",attrs=['bold']))
        original_note = self.yaml_data['paths'][key_name][str(index)].get('note', '')
        if original_note and EXIST_NOTE_IS_CHECKED:
            print(colored(f"Original note: {original_note}. Skip examine since EXIST_NOTE_IS_CHECKED is {EXIST_NOTE_IS_CHECKED}", 'green',attrs=['bold']))
            print(self.zarr_path)
            return 0
        log_file = self.create_log_path("single_test")
        if episode_num is None:
            # Fixed: was self.last_episode_id, which raised AttributeError
            # (the attribute set in __init__ is last_episode_idx).
            episode_num = self.last_episode_idx
        if self.cli_args.get('test'):
            episode_num = 0
        self.remove_all_pcd_output_directory(Path(self.zarr_path))
        cmd = f'cd {Path(__file__).parent}; python3 dataTraverser.py -z {self.zarr_path} -p ldp-ldr-k-js-ja-savep-saver -f 19 -e {episode_num}'
        with open(log_file, 'w') as log_handle:
            log_handle.write(f"[DataTraverserSingleTest] {cmd}\n")
            log_handle.write(f"[DataTraverserSingleTest] zarr: {self.zarr_path}\n")
        print(f"Log file: {os.path.abspath(log_file)}")
        process = subprocess.Popen(cmd, shell=True, executable='/bin/bash', stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, bufsize=1)
        # Stream subprocess output into the log as it is produced.
        with open(log_file, 'a') as log_handle:
            for line in iter(process.stdout.readline, ''):
                if line:
                    log_handle.write(line)
                    log_handle.flush()
        process.wait()
        if process.returncode == 0:
            self.list_all_pcd_output_directory(self.zarr_path)
            self.print_as_jas_data(self.zarr_path, episode_num)
            if original_note:
                print(colored(f"Original note: {original_note}", 'green',attrs=['bold']))
            # Interactive confirmation loop: 'y' accepts, 'n' aborts the whole
            # run, 'note' records a remark on the YAML entry.
            while True:
                user_input = input(colored(f"confirm as expected for {os.path.basename(self.zarr_path)} (y/note/n)", 'green',attrs=['bold'])).strip().lower()
                if user_input == 'n':
                    exit()
                elif user_input == 'note':
                    new_note = input("Enter new note (press Enter to keep original): ").strip()
                    if new_note:
                        self.yaml_data['paths'][key_name][str(index)]['note'] = new_note
                        self.update_yaml_data_refresh(['paths', key_name, str(index), 'note'], new_note)
                        print(f"Updated note for index {index} in {key_name} {BaseTool.yaml_full_path}")
                    else:
                        print("Note unchanged")
                    break
                elif user_input == 'y':
                    break
                else:
                    print("Invalid input")
        return process.returncode

    def zarr_decompose(self, zarr_path, return_type):
        """Decompose a merged zarr into its source-group structure.

        return_type:
          'group_name_composite' -> "name_len-name_len-..."
          'episode_lengths'      -> [len, len, ...]
          'dict'                 -> {group_name: length}
          'full_dict'            -> {group_name: group_info dict}
        """
        index, key_name = self._find_index_everywhere(zarr_path)
        group_dict = self._extract_group_name_recursive(index)
        if return_type == 'group_name_composite':
            group_parts = []
            for group_name, group_info in group_dict.items():
                group_parts.append(f"{group_name}_{group_info['length']}")
            return '-'.join(group_parts)
        elif return_type == 'episode_lengths':
            episode_lengths = [group_info['length'] for group_info in group_dict.values()]
            return episode_lengths
        elif return_type == 'dict':
            # "arepf100_100-urepa100_100-a100_100" -> {"arepf100":100, "urepa100": 100, "a100": 100}
            group_len_dict = {k: v['length'] for k, v in group_dict.items()}
            return group_len_dict
        elif return_type == 'full_dict':
            # Full per-group info dicts (length, emb, ...), needed by callers
            # that read more than the episode count (e.g. make_emb_cls).
            return group_dict
        else:
            raise ValueError(f"Invalid return_type: {return_type}")

    def get_emb_int_map(self):
        """Map embodiment name -> integer class id."""
        return {'aloha-agilex': 0, 'franka-panda': 1, 'ur5-wsg': 2}

    def get_inv_emb_int_map(self):
        """Inverse of get_emb_int_map: class id -> embodiment name."""
        return {v: k for k, v in self.get_emb_int_map().items()}

    def make_emb_cls(self):
        """Write a per-timestep embodiment-class array to data/emb_cls.

        Each merged source group contributes its embodiment class id to every
        timestep of its episodes.
        """
        # Fixed: previously used zarr_decompose(..., 'dict'), whose values are
        # plain lengths (ints) — indexing them with ['emb']/['length'] below
        # raised TypeError.  'full_dict' returns the complete group-info dicts.
        emb_dict = self.zarr_decompose(self.zarr_path, 'full_dict')
        zarr_data = zarr.open(self.zarr_path, mode='r')
        episode_ends = zarr_data['meta']['episode_ends']
        expected_length = episode_ends[-1]
        emb_cls_array = np.zeros(expected_length, dtype=np.int32)
        episode_start = 0
        emb_dict_items = list(emb_dict.items())
        current_emb_idx = 0
        episode_count = 0
        for episode_end in episode_ends:
            current_emb_type = emb_dict_items[current_emb_idx][1]['emb']
            current_emb_cls = self.get_emb_int_map()[current_emb_type]
            emb_cls_array[episode_start:episode_end] = current_emb_cls
            episode_start = episode_end
            episode_count += 1
            # Advance to the next source group once its episode quota is met.
            if episode_count >= emb_dict_items[current_emb_idx][1]['length']:
                current_emb_idx += 1
                episode_count = 0
        zarr_data = zarr.open(self.zarr_path, mode='a')
        zarr_data['data']['emb_cls'] = emb_cls_array

    def view_emb_cls(self):
        """Print per-class episode counts and verify emb_cls only changes at
        episode boundaries."""
        zarr_data = zarr.open(self.zarr_path, mode='r')
        emb_cls_array = zarr_data['data']['emb_cls']
        episode_ends = zarr_data['meta']['episode_ends']
        cls_counts = {}
        episode_start = 0
        for episode_end in episode_ends:
            # The class of an episode is read from its first timestep.
            current_cls = emb_cls_array[episode_start]
            cls_counts[current_cls] = cls_counts.get(current_cls, 0) + 1
            episode_start = episode_end
        output_parts = []
        for cls_num, episode_count in sorted(cls_counts.items()):
            output_parts.append(f"{cls_num}({self.get_inv_emb_int_map()[cls_num]}) for {episode_count} episodes")
        print(colored(", ".join(output_parts), 'magenta', attrs=['bold']))
        # A class change mid-episode indicates a corrupt emb_cls array.
        emb_cls_diff = np.diff(emb_cls_array) != 0
        change_positions = np.where(emb_cls_diff)[0]
        for pos in change_positions:
            assert (pos+1) in episode_ends, f"emb_cls changed at position {pos+1} but {pos} is not in episode_ends; episode_ends={episode_ends}"

    def make_timestep(self):
        """Write a per-timestep counter (0..len-1 within each episode) to
        data/timestep and verify it aligns with data/action."""
        zarr_data = zarr.open(self.zarr_path, mode='r')
        episode_ends = zarr_data['meta']['episode_ends']
        expected_length = episode_ends[-1]
        timestep_array = np.zeros(expected_length, dtype=np.int32)
        episode_start = 0
        for episode_end in episode_ends:
            episode_length = episode_end - episode_start
            timestep_array[episode_start:episode_end] = np.arange(episode_length)
            episode_start = episode_end
        zarr_data = zarr.open(self.zarr_path, mode='a')
        zarr_data['data']['timestep'] = timestep_array
        action_array = zarr_data['data']['action']
        assert timestep_array.shape[0] == action_array.shape[0], f"timestep array length {timestep_array.shape[0]} != action array length {action_array.shape[0]}"
        print(colored(f"Created timestep array with length {timestep_array.shape[0]}", 'green', attrs=['bold']))

    def check_timestep(self):
        """Print a summary of timestep groups (one per episode) next to the
        stored episode_ends so misalignment is easy to spot by eye."""
        zarr_data = zarr.open(self.zarr_path, mode='r')
        timestep_array = zarr_data['data']['timestep']
        episode_ends = zarr_data['meta']['episode_ends']
        # A negative diff marks the reset to 0 at an episode boundary.
        diff_array = np.diff(timestep_array)
        reset_positions = np.where(diff_array < 0)[0]
        groups = []
        start = 0
        for pos in reset_positions:
            groups.append(f"{timestep_array[start]}-{timestep_array[pos]}")
            start = pos + 1
        groups.append(f"{timestep_array[start]}-{timestep_array[-1]}")
        first_five_groups = groups[:5]
        last_group = groups[-1]
        timestep_info = f"Timestep groups: {', '.join(first_five_groups)}{'...' if len(groups)>5 else ''}, {last_group}"
        first_five_ends = episode_ends[:5]
        last_end = episode_ends[-1]
        episode_ends_info = f"Episode ends: {first_five_ends}{'...' if len(episode_ends)>5 else ''}, {last_end}"
        print(colored(timestep_info, 'cyan', attrs=['bold']))
        print(colored(episode_ends_info, 'cyan', attrs=['bold']))

    def execute_command(self):
        """Optionally (re)build emb_cls/timestep arrays, then run the
        interactive check on the last episode of every merged sub-group."""
        if self.cli_args.get('emb_cls'):
            self.make_emb_cls()
            self.view_emb_cls()
        if self.cli_args.get('timestep'):
            self.make_timestep()
            self.check_timestep()
        episode_lengths = self.zarr_decompose(self.zarr_path, 'episode_lengths')
        accumulated_length = 0
        print(f"episode_lengths: {episode_lengths}")
        for i, length in enumerate(episode_lengths):
            # Test the last episode of each source group in the merged zarr.
            episode_num = accumulated_length + length - 1
            print(f"episode_num: {episode_num}")
            result = self.execute_command_inner(episode_num)
            if result != 0:
                print(f"Warning: Part {i+1} failed with return code {result}")
            accumulated_length += length

class ReplayBufferTest(SingleGroupTool):
    """Run replay_buffer_test.py against the group's generated zarr."""

    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'replay_buffer_test'

    def construct_command(self):
        """Build the shell command that checks the replay buffer."""
        # Rebuild the same zarr path the collection stage produced.
        demo_name = self.get_demo_task_name(self.collect_num, is_test=False)
        robotwin_root = f"{Path(__file__).parent.parent}/RoboTwin"
        zarr_path = (
            f"{robotwin_root}/policy/DP3/data/"
            f"{self.get_group_key('task_name')}-{demo_name}-"
            f"{self.process_num}-{self.get_group_key('ln_as_type')}.zarr"
        )
        return f'cd {robotwin_root}/policy/DP3; python3 replay_buffer_test.py {zarr_path} 0'

    def get_output_path(self):
        # This tool only validates an existing zarr; it produces no artifact.
        return None
class MergeZarr(GeneralTool):
    """Merge several YAML-indexed zarr datasets into one combined zarr.

    Source stores come from the ``merge_zarr`` CLI indices.  The output name
    encodes each source group with its episode count, a ``-dp`` marker when a
    data_traverser product is involved, the total episode count, and the
    ln-type spec of the (possibly auto-detected) reference group.
    """
    def __init__(self, yaml_data, group_name, cli_args):
        # print(f"MergeZarr group_name: {group_name}")
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'merge_zarr'
        self.merge_indices = []
        # output_zarr / zarr_files are computed lazily by get_output_path().
        self.output_zarr = None
        merge_zarr_arg = self.cli_args.get('merge_zarr')
        self.merge_indices = [int(x) for x in (merge_zarr_arg or [])] if merge_zarr_arg is not None else []
        self.group_name = group_name
        if self.cli_args.get('non_run'):
            raise NotImplementedError("NO CONSTRUCTED PATH FOR construct_all_path")
        # if self.merge_indices and not self.output_zarr: self._compute_merge_spec()
    
    def get_output_path(self):
        """Compute and cache the merged output path and ``self.zarr_files``.

        Side effect: when no group_name was supplied, auto-assigns it from
        the first source entry's group.
        """
        if self.output_zarr is None:
            group_counts=[]
            has_dp_flag=False
            zarr_paths=[]
            for merge_index in self.merge_indices:
                zarr_path, entry_data,real_group_name = self._find_pathdict_by_index(self.yaml_data, merge_index)
                if entry_data.get('tool_name', '') == 'data_traverser':
                    has_dp_flag = True
                group_dict = self._extract_group_name_recursive(merge_index)
                group_name = list(group_dict.keys())[0]
                if self.group_name is None:
                    print(colored(f"group_name is None, auto set to {group_name} with ln_as_type={self.get_group_key('ln_as_type',group_name)} ln_js_type={self.get_group_key('ln_js_type',group_name)} ln_pcd_type={self.get_group_key('ln_pcd_type',group_name)}","yellow",attrs=['bold']))
                    self.group_name = group_name
                assert os.path.exists(zarr_path), f"Zarr path does not exist: {zarr_path}; merge_index={merge_index}; group={group_name}"
                episode_count=self.get_zarr_episode_count(zarr_path)
                group_counts.append((group_name,episode_count))
                zarr_paths.append(zarr_path)
            # Output name: <task>-demo_clean_<custom>-<grp_cnt>-...[-dp]-<total>-<ln_spec>.zarr
            prefix=f"{Path(__file__).parent.parent}/RoboTwin/policy/DP3/data/{self.get_group_key('task_name')}-demo_clean_{self.get_group_key('custom_name')}-"
            groups_part='-'.join([f"{group_name}_{episode_count}" for group_name,episode_count in group_counts])
            if has_dp_flag: groups_part=f"{groups_part}-dp"
            total_count=sum(episode_count for _,episode_count in group_counts)
            ln_spec = self.get_ln_spec_from_mg()
            output_path=f"{prefix}{groups_part}-{total_count}-{ln_spec}.zarr"
            self.output_zarr = output_path
            self.zarr_files = zarr_paths
        return self.output_zarr
    
    def get_source_flag(self):
        """CLI flags that reproduce this merge invocation."""
        assert self.group_name, f"group_name is required but not provided; cli_args={self.cli_args}; group={self.group_name}; yaml_custom_name={self.yaml_data.get('custom_name')}"
        assert self.merge_indices, f"merge_zarr requires indices; cli_args={self.cli_args}; group={self.group_name}; yaml_custom_name={self.yaml_data.get('custom_name')}"
        return '-g ' + self.group_name + self.get_note_flag() + ' -m '+' '.join(map(str,self.merge_indices))
    def get_merge_ln_types(self):
        """Return the (pcd, as, js) ln types of the reference group."""
        ln_pcd_type = self.get_group_key('ln_pcd_type',self.group_name)
        ln_as_type = self.get_group_key('ln_as_type',self.group_name)
        ln_js_type = self.get_group_key('ln_js_type',self.group_name)
        return ln_pcd_type,ln_as_type,ln_js_type
    def get_ln_spec_from_mg(self):
        """Compact ln spec: first letter of each ln type, 'N' when unset."""
        ln_pcd_type,ln_as_type,ln_js_type = self.get_merge_ln_types()
        return f"{ln_pcd_type[0] if ln_pcd_type else 'N'}-{ln_as_type[0] if ln_as_type else 'N'}-{ln_js_type[0] if ln_js_type else 'N'}"
    def _resolve_index(self):
        """Allocate/refresh the YAML index entry for the merged output zarr."""
        self.get_output_path()  # This will set self.output_zarr and self.zarr_files
        merged_index_key, merged_map = self._find_index('merge_zarr', 'name', self.output_zarr, 1200)
        # Preserve any existing log_file/note from a previous run of the same merge.
        merged_map[str(merged_index_key)] = {
            'name': self.output_zarr,
            'status': 'non_exist',
            'log_file': merged_map.get(str(merged_index_key),{}).get('log_file', None),
            'source': self.get_source_flag(),
            'tool_name': 'merge_zarr',
            'note': merged_map.get(str(merged_index_key),{}).get('note', None)
        }
        self.update_yaml_data_refresh( ['paths','merge_zarr'], merged_map)
        return merged_index_key
    def construct_command(self):
        """Build the merge_zarr.py shell command over all source stores."""
        self._resolve_index()
        assert self.zarr_files, f"No zarr files computed to merge; merge_indices={self.merge_indices}; paths_groups={list(self.yaml_data.get('paths', {}).keys())}"
        merge_script = Path(__file__).parent/"merge_zarr.py"
        cmd=f"python3 {merge_script} -z {' '.join(self.zarr_files)} -o {self.output_zarr}"
        return cmd
    def _check_qpos_shape(self):
        """Warn when any source store's qpos/state is not 16-dim.

        NOTE(review): the message ends with "[ENTER]" but there is no input()
        pause here — confirm whether an interactive pause was intended.
        """
        qpos_shapes={}
        need_check=False
        for zarr_path in self.zarr_files:
            zarr_data=zarr.open(zarr_path, mode='r')
            qpos_shape=zarr_data['qpos/state'].shape[-1]
            qpos_shapes[os.path.basename(zarr_path)]=qpos_shape
            if qpos_shape!=16:
                need_check=True
        if need_check:
            print(colored(f"qpos_shape mismatch\n{json.dumps(qpos_shapes,indent=3)}\nmergezarr may skip it.[ENTER]","yellow",attrs=['bold']))
    def _correct_qpos_shape(self):
        """Run QposConvert on every source store whose qpos/state is not 16-dim."""
        qpos_shapes={}
        need_check=False
        for i, zarr_path in enumerate(self.zarr_files):
            zarr_data=zarr.open(zarr_path, mode='r')
            qpos_shape=zarr_data['qpos/state'].shape[-1]
            qpos_shapes[os.path.basename(zarr_path)]=qpos_shape
            if qpos_shape!=16:
                self.cli_args['qpos_convert']=self.merge_indices[i]
                qpos_tool = QposConvert(self.yaml_data, self.group_name, self.cli_args)
                qpos_tool.execute_command()
    def execute_command(self, cmd):
        """Fix/verify qpos shapes, interactively test each source store, run
        the merge, then interactively test the merged output."""
        self._correct_qpos_shape()
        self._check_qpos_shape()
        for zarr_path in self.zarr_files:
            source_test = DataTraverserSingleTest(self.yaml_data, self.group_name, self.cli_args, zarr_path)
            source_test.execute_command()
        print(colored(f"merge cmd: {cmd}",'blue',attrs=['bold']))
        # The dict default for result is only read when ln_only is set, in
        # which case the .returncode access below is short-circuited away.
        entry_data, result = None, {'returncode':0}
        if not self.cli_args.get('ln_only'):
            entry_data, result = super().execute_command(cmd)
        if self.cli_args.get('ln_only') or (not self.cli_args.get('ln_only') and result.returncode==0):
            merged_test = DataTraverserSingleTest(self.yaml_data, self.group_name, self.cli_args, self.output_zarr)
            merged_test.execute_command()
        return entry_data, result 
class CopyPath(GeneralTool):
    """Copy one YAML-indexed dataset path onto another, fully or partially."""

    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'copy_path'
        copy_indices = self.cli_args.get('copy_path')
        self.partial = self.cli_args.get('partial')
        if self.cli_args.get('non_run'):
            raise NotImplementedError("NO CONSTRUCTED PATH FOR construct_all_path")
        assert copy_indices and len(copy_indices) == 2, f"copy_path requires exactly 2 indices, got {copy_indices}"
        self.source_index, self.target_index = (int(i) for i in copy_indices)
        self.source_path = self._find_path_by_index(self.source_index)
        self.target_path = self._find_path_by_index(self.target_index)

    def _find_path_by_index(self, index):
        """Resolve a YAML index to its stored 'name' path."""
        entry_data = self._find_pathdict_by_index(self.yaml_data, index)[1]
        assert entry_data and 'name' in entry_data, f"Entry at index {index} missing 'name' field"
        return entry_data['name']

    def get_source_flag(self):
        """CLI flags that reproduce this copy invocation."""
        return f"-cp {self.source_index} {self.target_index}{self.get_note_flag()}"

    def construct_command(self):
        return f"cp -r {self.source_path} {self.target_path}"

    def get_output_path(self):
        # Copying never registers a new artifact path of its own.
        return None

    def _resolve_index(self):
        # No new YAML index is allocated for a copy.
        return -1

    def execute_command(self, cmd):
        """Inspect the source, confirm with the operator, then copy."""
        DataTraverserSingleTest(self.yaml_data, self.group_name, self.cli_args, self.source_path).execute_command()
        print(f"Copy command: {cmd}")
        self.remove_existing_path(self.target_path, "copy_path")
        answer = input("Execute copy? (y/n): ").strip().lower()
        if answer != 'y':
            exit(0)
        if not self.partial:
            result = subprocess.run(cmd, shell=True, executable='/bin/bash', capture_output=True, text=True)
            assert result.returncode == 0, f"Copy failed: {result.stderr}"
        else:
            # Partial mode copies only the listed sub-trees.
            include_list = [Path("cameras")]
            try:
                partial_copy(from_dir=self.source_path, to_dir=self.target_path, include_list=include_list)
            except Exception as e:
                print(f"Partial copy failed: {e}")
        print("Copy completed successfully. Remember to run print available paths to check for ln")
class TrajLocal(GeneralTool):
    """Derive a trimmed 'trajlocal' zarr from a YAML-indexed source zarr.

    Copies a small set of trajectory fields into a new store under
    ``zarrtraj2/`` and adds an ``eelocal/state`` array computed from
    ``qpos/state`` via forward kinematics.
    """
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'trajlocal'
        traj_index = self.cli_args.get('trajlocal')
        if self.cli_args.get('non_run'):
            raise NotImplementedError("NO CONSTRUCTED PATH FOR construct_all_path")
        assert traj_index is not None, f"trajlocal requires an index, got {traj_index}"
        self.source_index = int(traj_index)
        self.source_path, _, self.group_name = self._find_pathdict_by_index(self.yaml_data, self.source_index, require_zarr=True)
        assert self.source_path.endswith('.zarr'), f"Index {self.source_index} does not point to zarr file: {self.source_path}"
        zarr_basename=os.path.basename(self.source_path)
        self.output_path = str(Path(__file__).parent.parent / 'zarrtraj2' / f'{zarr_basename}-trajlocal.zarr')
    def get_source_flag(self):
        """CLI flags that reproduce this invocation."""
        return f"-traj {self.source_index}{self.get_note_flag()}"
    def construct_command(self):
        # All work happens in-process; there is no shell command.
        return None
    def get_output_path(self):
        return self.output_path
    def _copy_group_recursive(self, source_group, target_group):
        """Deep-copy a zarr group tree, preserving chunks/dtype/compressor."""
        for key, item in source_group.items():
            if hasattr(item, 'shape'):
                target_group.create_dataset(key, data=item[:], chunks=item.chunks, dtype=item.dtype, compressor=item.compressor, fill_value=item.fill_value if hasattr(item, 'fill_value') else None)
            else:
                new_group = target_group.create_group(key)
                self._copy_group_recursive(item, new_group)
    def _print_zarr_structure(self, zarr_obj, indent=0):
        """Pretty-print the group/array tree of a zarr store."""
        for key, item in zarr_obj.items():
            print("  " * indent + f"├── {key}")
            if hasattr(item, 'shape'):
                print("  " * (indent + 1) + f"    (array: {item.shape}, type: {item.dtype})")
            else:
                self._print_zarr_structure(item, indent + 1)
    def _process_zarr_franka(self, zarr_path):
        """Compute eelocal/state from qpos/state via per-timestep FK.

        Layout per timestep (16 dims): [0:7] left EE pose (pos+quat),
        [7] left gripper, [8:15] right EE pose, [15] right gripper.
        NOTE(review): ``torch`` and ``init_ik_solver`` are not imported in
        this file's visible header — confirm they are provided elsewhere.
        """
        zarr_data = zarr.open(zarr_path, mode='a')
        assert 'qpos' in zarr_data, f"qpos not found in {zarr_path}"
        assert 'state' in zarr_data['qpos'], f"state not found in qpos"
        qpos_state = zarr_data['qpos']['state']
        assert qpos_state.shape[1] == 16, f"Expected shape[1]=16, got {qpos_state.shape[1]}"
        ik_solver = init_ik_solver(self.get_group_key('embodiment'))
        timesteps = qpos_state.shape[0]
        eelocal_state = np.zeros((timesteps, 16))
        for timestep in tqdm(range(timesteps), desc="Processing timesteps"):
            # FK on the 7 arm joints of each side; grippers pass through.
            left_joints = torch.tensor(qpos_state[timestep, 0:7], dtype=torch.float32).unsqueeze(0).cuda()
            right_joints = torch.tensor(qpos_state[timestep, 8:15], dtype=torch.float32).unsqueeze(0).cuda()
            left_kin_state = ik_solver.fk(left_joints)
            right_kin_state = ik_solver.fk(right_joints)
            left_pose = torch.cat([left_kin_state.ee_position, left_kin_state.ee_quaternion], dim=-1)
            right_pose = torch.cat([right_kin_state.ee_position, right_kin_state.ee_quaternion], dim=-1)
            eelocal_state[timestep, 0:7] = left_pose.cpu().numpy()
            eelocal_state[timestep, 7] = qpos_state[timestep, 7]
            eelocal_state[timestep, 8:15] = right_pose.cpu().numpy()
            eelocal_state[timestep, 15] = qpos_state[timestep, 15]
        if 'eelocal' not in zarr_data:
            zarr_data.create_group('eelocal')
        zarr_data['eelocal']['state'] = eelocal_state
        return zarr_data
    def _copy_zarr_fields(self):
        """Copy the whitelisted fields into the new store, then add eelocal."""
        input_zarr = zarr.open(self.source_path, mode='r')
        output_zarr = zarr.open(self.output_path, mode='w')
        fields_to_keep = ['qpos/action', 'qpos/state','ee/action','ee/state', 'meta/episode_ends']
        for field_path in tqdm(fields_to_keep, desc="Copying fields"):
            if field_path not in input_zarr:
                continue
            field_data = input_zarr[field_path]
            output_path = field_path
            # Make sure the parent group exists before creating the dataset.
            parent_path = '/'.join(output_path.split('/')[:-1])
            if parent_path:
                if parent_path not in output_zarr:
                    output_zarr.create_group(parent_path)
            if hasattr(field_data, 'shape'):
                output_zarr.create_dataset(output_path, data=field_data[:], chunks=field_data.chunks, dtype=field_data.dtype, compressor=field_data.compressor, fill_value=field_data.fill_value if hasattr(field_data, 'fill_value') else None)
            else:
                output_zarr.create_group(output_path)
                self._copy_group_recursive(field_data, output_zarr[output_path])
        if '.zgroup' in input_zarr:
            group_info = input_zarr.attrs.asdict()
            output_zarr.attrs.update(group_info)
        self._print_zarr_structure(output_zarr)
        print(f"NEW ZARR: {self.output_path}")
        self._process_zarr_franka(self.output_path)# qpos/state->eelocal/state
        # The Franka control interface offers qpos control and eelocal control
        # (the latter internally maps eelocal -> qpos, since we ultimately
        # command motor angles, i.e. qpos).
        print(f"Added eelocal/state field to {self.output_path}")
    def _resolve_index(self):
        """Register (or refresh) the YAML index entry for the output zarr."""
        if 'trajlocal' not in self.yaml_data['paths']:
            self.yaml_data['paths']['trajlocal'] = {}
        trajlocal_index,_ = self._find_index('trajlocal', 'name', self.output_path, 1600)
        assert trajlocal_index is not None, f"trajlocal index not found for {self.output_path}"
        self.yaml_data['paths']['trajlocal'][str(trajlocal_index)] = {
            'name': self.output_path,
            'status': 'exist',
            'log_file': None,
            'source': self.get_source_flag(),
            'tool_name': 'trajlocal',
            'note': None
        }
        self.update_yaml_data_refresh(['paths', 'trajlocal',str(trajlocal_index)], self.yaml_data['paths']['trajlocal'][str(trajlocal_index)])
        return trajlocal_index
    def execute_command(self):
        """Inspect source, build the trajlocal store if absent, register it,
        then inspect the output."""
        source_test = DataTraverserSingleTest(self.yaml_data, self.group_name, self.cli_args, self.source_path)
        source_test.execute_command()
        if self.remove_existing_path(self.output_path, "trajlocal", override=False):
            self._copy_zarr_fields()
        self._resolve_index()
        output_test = DataTraverserSingleTest(self.yaml_data, self.group_name, self.cli_args, self.output_path)
        output_test.execute_command()

class ClipZarr(GeneralTool):
    """Extract an episode range [start, end) from a source zarr into a new zarr.

    Timesteps are sliced out of every array and ``meta/episode_ends`` is
    rebased so the clipped store starts at timestep 0.  Inside the top-level
    ``data`` group, only the keys listed in ``include_data`` are copied.
    """
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'clip_zarr'
        clip_args = self.cli_args.get('clip_zarr')
        if self.cli_args.get('non_run'):
            raise NotImplementedError("NO CONSTRUCTED PATH FOR construct_all_path")
        assert clip_args and len(clip_args) == 3, f"clip requires exactly 3 arguments [index, start, end], got {clip_args}"
        self.source_index = int(clip_args[0])
        self.start_episode = int(clip_args[1])
        self.end_episode = int(clip_args[2])
        # Whitelist of keys to keep under the top-level "data" group.
        self.include_data = ['key_poses']
        assert self.start_episode < self.end_episode, f"start_episode ({self.start_episode}) must be less than end_episode ({self.end_episode})"
        self.source_path, _, self.group_name = self._find_pathdict_by_index(self.yaml_data, self.source_index, require_zarr=True)
        assert self.source_path.endswith('.zarr'), f"Index {self.source_index} does not point to zarr file: {self.source_path}"
        source_basename = os.path.basename(self.source_path)
        source_name = source_basename.replace('.zarr', '')
        self.output_path = f"{Path(__file__).parent.parent}/RoboTwin/policy/DP3/data/{source_name}-clip{self.start_episode}-{self.end_episode}.zarr"
    def get_source_flag(self):
        """CLI flags that reproduce this clip invocation."""
        return f"-clip {self.source_index} {self.start_episode} {self.end_episode}{self.get_note_flag()}"
    def construct_command(self):
        # All work happens in-process; there is no shell command.
        return None
    def get_output_path(self):
        return self.output_path
    def _clip_zarr_episodes(self):
        """Slice the selected episode range into ``self.output_path``."""
        source_zarr = zarr.open(self.source_path, mode='r')
        output_zarr = zarr.open(self.output_path, mode='w')
        episode_ends = source_zarr['meta']['episode_ends'][:]
        total_episodes = len(episode_ends)
        assert self.end_episode <= total_episodes, f"end_episode ({self.end_episode}) exceeds total episodes ({total_episodes})"
        # Timestep window covered by episodes [start_episode, end_episode).
        start_timestep = episode_ends[self.start_episode - 1] if self.start_episode > 0 else 0
        end_timestep = episode_ends[self.end_episode - 1]
        # Rebase episode_ends so the clipped store starts at timestep 0.
        clipped_episode_ends = episode_ends[self.start_episode:self.end_episode] - start_timestep
        include_data_set = set(self.include_data) if self.include_data else None
        for key, item in tqdm(source_zarr.items(), desc="Processing datasets"):
            if hasattr(item, 'is_link') and item.is_link:
                continue
            if hasattr(item, 'shape') and len(item.shape) > 0:
                # Top-level arrays are timestep-aligned; slice them directly.
                # (A former `key == 'meta/episode_ends'` branch here was dead
                # code: items() yields immediate child names, which never
                # contain '/'; episode_ends is handled in the recursion below.)
                clipped_data = item[start_timestep:end_timestep]
                output_zarr.create_dataset(key, data=clipped_data, chunks=item.chunks, dtype=item.dtype, compressor=item.compressor, fill_value=item.fill_value if hasattr(item, 'fill_value') else None)
            else:
                new_group = output_zarr.create_group(key)
                self._copy_group_recursive(item, new_group, start_timestep, end_timestep, key, clipped_episode_ends, include_data_set)
        if '.zgroup' in source_zarr:
            group_info = source_zarr.attrs.asdict()
            output_zarr.attrs.update(group_info)
    def _copy_group_recursive(self, source_group, target_group, start_timestep, end_timestep, group_name="", clipped_episode_ends=None, include_data_set=None):
        """Recursively copy a group, slicing arrays to the clip window.

        Special cases: meta/episode_ends is replaced with the rebased array;
        direct children of "data" not in ``include_data_set`` are skipped.
        """
        for key, item in tqdm(source_group.items(), desc=f"Processing {group_name}", leave=False):
            if hasattr(item, 'is_link') and item.is_link:
                continue
            if hasattr(item, 'shape') and len(item.shape) > 0:
                if group_name == "meta" and key == "episode_ends" and clipped_episode_ends is not None:
                    target_group.create_dataset(key, data=clipped_episode_ends, chunks=item.chunks, dtype=item.dtype, compressor=item.compressor, fill_value=item.fill_value if hasattr(item, 'fill_value') else None)
                elif group_name == "data" and include_data_set is not None and key not in include_data_set:
                    continue
                else:
                    clipped_data = item[start_timestep:end_timestep]
                    target_group.create_dataset(key, data=clipped_data, chunks=item.chunks, dtype=item.dtype, compressor=item.compressor, fill_value=item.fill_value if hasattr(item, 'fill_value') else None)
            else:
                new_group = target_group.create_group(key)
                self._copy_group_recursive(item, new_group, start_timestep, end_timestep, key, clipped_episode_ends, include_data_set)
    def _resolve_index(self):
        """Register (or refresh) the YAML index entry for the clipped zarr."""
        if 'clip_zarr' not in self.yaml_data['paths']:
            self.yaml_data['paths']['clip_zarr'] = {}
        clip_index, _ = self._find_index('clip_zarr', 'name', self.output_path, 1700)
        assert clip_index is not None, f"clip_zarr index not found for {self.output_path}"
        self.yaml_data['paths']['clip_zarr'][str(clip_index)] = {
            'name': self.output_path,
            'status': 'exist',
            'log_file': None,
            'source': self.get_source_flag(),
            'tool_name': 'clip_zarr',
            'note': None
        }
        self.update_yaml_data_refresh(['paths', 'clip_zarr', str(clip_index)], self.yaml_data['paths']['clip_zarr'][str(clip_index)])
        return clip_index
    def execute_command(self):
        """Inspect source, clip if the output is absent, register it, then
        inspect the clipped output."""
        source_test = DataTraverserSingleTest(self.yaml_data, self.group_name, self.cli_args, self.source_path)
        source_test.execute_command()
        if self.remove_existing_path(self.output_path, "clip_zarr", override=False):
            self._clip_zarr_episodes()
        self._resolve_index()
        output_test = DataTraverserSingleTest(self.yaml_data, self.group_name, self.cli_args, self.output_path)
        output_test.execute_command()

class MaskGenTool(SingleGroupTool):
    """Generate camera masks for an existing zarr dataset via collect_cammask.sh.

    The masks are written back into the source zarr in place, so the
    "output" path equals the input zarr path (see get_output_path).
    """
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'mask_gen'
        mask_gen_args = self.cli_args.get('mask_gen')
        if self.cli_args.get('non_run'):
            # Dry-run path construction is not supported by this tool.
            raise NotImplementedError("NO CONSTRUCTED PATH FOR construct_all_path")
        assert mask_gen_args and len(mask_gen_args) == 1, f"mask_gen requires exactly 1 argument [zarr_index], got {mask_gen_args}"
        self.zarr_index = int(mask_gen_args[0])
        # Resolve the yaml path index to an on-disk .zarr directory.
        self.zarr_path, _, _ = self._find_pathdict_by_index(self.yaml_data, self.zarr_index, require_zarr=True)
        assert self.zarr_path.endswith('.zarr'), f"Index {self.zarr_index} does not point to zarr file: {self.zarr_path}"
    def get_source_flag(self):
        """CLI flags that reproduce this invocation (-mg ZARR_INDEX -g GROUP plus note flag)."""
        return f"-mg {self.zarr_index} -g {self.group_name}{self.get_note_flag()}"
    def construct_command(self):
        """Build the shell command running collect_cammask.sh; %gpu_id is a placeholder substituted later — TODO confirm against the runner."""
        name_value = self.get_demo_task_name(self.collect_num, is_test=False)
        root_path = f"{Path(__file__).parent.parent}/RoboTwin"
        return f'cd {root_path}; bash collect_cammask.sh {self.get_group_key("task_name")} {name_value} {self.zarr_path} %gpu_id {"--test" if self.cli_args.get("test") else ""}'
    def get_output_path(self):
        # Mask generation writes into the input zarr in place.
        return self.zarr_path
        # zarr_basename = os.path.basename(self.zarr_path)
        # zarr_name = zarr_basename.replace('.zarr', '')
        # return f"{Path(__file__).parent.parent}/RoboTwin/policy/DP3/data/{zarr_name}-maskgen.zarr"
    def remove_existing_path(self, path, cmd, override=False):
        """Interactively clear previously generated mask/pointcloud subgroups of *path*.

        For each known subgroup that already exists, show its array shapes and
        ask to remove (y), keep (n), or rename (r) it. Removal happens without
        asking when *override* is set, or when the smallest leading-axis length
        seen is < 100 (presumably an incomplete earlier run — confirm).
        Returns False when the user keeps a subgroup (generation should be
        skipped), True otherwise.
        """
        if os.path.exists(path):
            zarr_root = zarr.open(path, mode='r')
            for subpath in [ "cameras/midBack_real_camera", "cameras/midBack_camera", "pcd/repointcloud"]:
                fullpath = Path(path)/subpath
                if subpath in zarr_root:
                    # Track the smallest leading-axis length across arrays in the subgroup.
                    shape = np.inf
                    zarr_obj = zarr.open(fullpath, mode='r')
                    if hasattr(zarr_obj, 'shape'):
                        # Single array: report and record its shape directly.
                        shapes_str = f" [shape: {zarr_obj.shape}]"
                        shape = min(shape, zarr_obj.shape[0])
                    else:
                        # Group: collect shapes of all member arrays.
                        shapes_info = []
                        for key, value in zarr_obj.items():
                            if hasattr(value, 'shape'):
                                shapes_info.append(f"{key}: {value.shape}")
                                shape = min(shape, value.shape[0])
                        shapes_str = f" [shapes: {', '.join(shapes_info)}]" if shapes_info else " [no arrays found]"
                    while True:
                        option = 'n'
                        if override or shape < 100:
                            option = 'y'
                        else:
                            option = input(f"{fullpath} exists\n{shapes_str}, remove it? (y/n/[r]ename): ")
                        if option == 'y':
                            shutil.rmtree(fullpath)
                            break
                        elif option == 'n':
                            return False
                        elif option == 'r':
                            self.recur_rename(fullpath)
                            break
                        else:
                            print(f"Invalid option: {option}")
        return True
    def _resolve_index(self):
        """Register the output zarr under paths['mask_gen'] (indices from 1800) and return the index."""
        if 'mask_gen' not in self.yaml_data['paths']:
            self.yaml_data['paths']['mask_gen'] = {}
        output_path = self.get_output_path()
        mask_index, _ = self._find_index('mask_gen', 'name', output_path, 1800)
        assert mask_index is not None, f"mask_gen index not found for {output_path}"
        self.yaml_data['paths']['mask_gen'][str(mask_index)] = {
            'name': output_path,
            'status': 'exist',
            'log_file': None,
            'source': self.get_source_flag(),
            'tool_name': 'mask_gen',
            'note': None
        }
        self.update_yaml_data_refresh(['paths', 'mask_gen', str(mask_index)], self.yaml_data['paths']['mask_gen'][str(mask_index)])
        return mask_index
    def execute_command(self, cmd):
        """Prepare the demo-task yaml, validate the input zarr, run mask generation, re-validate.

        The generation step is skipped if the user chooses to keep an existing
        mask subgroup in remove_existing_path.
        """
        pipeline_to_use = self.get_pipeline('collect',is_test=False)
        # create_demo_task_yml is presumably called for its side effect of
        # writing the task yaml; its return value is not used here — confirm.
        demo_task_yaml=self.create_demo_task_yml(self.get_demo_task_name(self.collect_num, is_test=False), self.collect_num, pipeline=pipeline_to_use)
        source_test = DataTraverserSingleTest(self.yaml_data, self.group_name, self.cli_args, self.zarr_path)
        source_test.execute_command()
        output_path = self.get_output_path()
        if self.remove_existing_path(output_path, "mask_gen", override=False):
            super().execute_command(cmd)
        output_test = DataTraverserSingleTest(self.yaml_data, self.group_name, self.cli_args, output_path)
        output_test.execute_command()
        # Print the post-run status of the (in-place) output path.
        print(f"{output_path} {self.path_to_status('non_exist',output_path, self.yaml_data)}")

class LnPath(GeneralTool):
    """Create a relative symbolic link from one indexed path to another (-ln SRC DST)."""
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'ln_path'
        ln_indices = self.cli_args.get('ln_path')
        if self.cli_args.get('non_run'):
            raise NotImplementedError("NO CONSTRUCTED PATH FOR construct_all_path")
        assert ln_indices and len(ln_indices) == 2, f"ln_path requires exactly 2 indices, got {ln_indices}"
        self.source_index, self.target_index = (int(raw) for raw in ln_indices)
        self.source_path = self._find_path_by_index(self.source_index)
        self.target_path = self._find_path_by_index(self.target_index)
    def _find_path_by_index(self, index):
        """Resolve a yaml path index to that entry's 'name' field."""
        entry_data = self._find_pathdict_by_index(self.yaml_data, index)[1]
        assert entry_data and 'name' in entry_data, f"Entry at index {index} missing 'name' field"
        return entry_data['name']
    def get_source_flag(self):
        """CLI flags that reproduce this invocation."""
        return f"-ln {self.source_index} {self.target_index}{self.get_note_flag()}"
    def construct_command(self):
        """Build `ln -s` with the source expressed relative to the link's directory."""
        link_dir = os.path.dirname(self.target_path)
        relative_source = os.path.relpath(self.source_path, link_dir)
        return f"ln -s {relative_source} {self.target_path}"
    def get_output_path(self):
        # The link itself is not tracked as an output path.
        return None
    def _resolve_index(self):
        # Nothing to register in the yaml bookkeeping for this tool.
        return -1
    def execute_command(self, cmd):
        """Validate the link source, ask for confirmation, then run the ln command."""
        DataTraverserSingleTest(self.yaml_data, self.group_name, self.cli_args, self.source_path).execute_command()
        print(f"Symbolic link command: {cmd}")
        user_input = input("Execute symbolic link? (y/n): ").strip().lower()
        assert user_input == 'y', "Symbolic link cancelled by user"
        result = subprocess.run(cmd, shell=True, executable='/bin/bash', capture_output=True, text=True)
        assert result.returncode == 0, f"Symbolic link failed: {result.stderr}"
        print("Symbolic link completed successfully")
class QposConvert(GeneralTool):
    """Convert 14-dim js_action/js_state arrays of a zarr dataset to 16-dim in place."""
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'qpos_convert'
    def _find_path_by_index(self, index):
        """Return the 'name' field of the yaml path entry stored at *index*."""
        _, entry_data,_ = self._find_pathdict_by_index(self.yaml_data, index)
        assert entry_data and 'name' in entry_data, f"Entry at index {index} missing 'name' field"
        return entry_data['name']
    def get_source_flag(self):
        """CLI flags that reproduce this invocation (-qc INDEX plus note flag)."""
        return f"-qc {self.cli_args.get('qpos_convert', 'INDEX')}{self.get_note_flag()}"
    def construct_command(self):
        # No shell command: the conversion runs in-process in execute_command.
        return None
    def get_output_path(self):
        return None
    def _resolve_index(self):
        # Nothing to register in the yaml bookkeeping for this tool.
        return -1
    @staticmethod
    def _convert_14_16(data_14, embodiment_name=None):
        """Expand a (..., 14) array to (..., 16) by inserting zero columns.

        Index mapping: 0-5 -> 0-5, new zero at 6, 6-12 -> 7-13, new zero at
        14, 13 -> 15 (presumably padding for the extra joints of the 16-dof
        layout — confirm against the embodiment definition). When
        *embodiment_name* natively uses 16 dims (franka-panda), the input is
        only validated and returned unchanged.
        """
        if embodiment_name is not None:
            orig_action_dim = {'aloha': 14, 'franka-panda': 16,'ur5-wsg':14}[embodiment_name]
            if orig_action_dim == 16:
                assert data_14.shape[-1] == 16, f"Expected last dim 16, got {data_14.shape}"
                return data_14
        assert data_14.shape[-1] == 14, f"Expected last dim 14, got {data_14.shape}"
        shape_16 = list(data_14.shape)
        shape_16[-1] = 16
        data_16 = np.zeros(shape_16, dtype=data_14.dtype)
        data_16[..., :6] = data_14[..., :6]
        data_16[..., 6] = 0
        data_16[..., 7:14] = data_14[..., 6:13]
        data_16[..., 14] = 0
        data_16[..., 15] = data_14[..., 13]
        return data_16
    def _load_finger_length(self):
        # Thin wrapper over the module-level helper.
        return _load_finger_length()
    def execute_command(self):
        """Resolve the -qc zarr index, then convert its js_action/js_state targets to 16-dim.

        For each of data/js_action and data/js_state (expected to be
        symlinks), the link target is opened; arrays already 16 wide are
        skipped. Otherwise the converted copy is previewed (row 10) and
        confirmed interactively, the original array dir is renamed with a
        '_14' suffix, and the 16-dim data is written under the plain key name.
        """
        zarr_path, entry_data,group_name = self._find_pathdict_by_index(self.yaml_data, int(self.cli_args.get('qpos_convert')))
        zarr_path = Path(zarr_path)
        self.zarr_path = zarr_path
        assert os.path.exists(zarr_path), f"ZARR path does not exist: {zarr_path}"
        zarr_group = zarr.open(zarr_path, mode='r+')
        for key in ["action", "state"]:
            js_key_path = os.path.join(zarr_path, 'data', f'js_{key}')
            assert os.path.exists(js_key_path), f"js_{key} path does not exist: {js_key_path}"
            # js_{key} must be a symlink; resolve the target relative to data/.
            js_key_source = Path(js_key_path).readlink()
            source_path = zarr_path/"data"/js_key_source
            source_group = zarr.open(source_path, mode='r+')
            js_key_data = source_group
            if js_key_data.shape[-1] == 16:
                # Already converted.
                continue
            assert js_key_data.shape[-1] == 14, f"{key} last dim should be 14, got {js_key_data.shape}"
            js_key_16 = QposConvert._convert_14_16(js_key_data)
            # Keep the original chunking except for the widened last axis.
            chunks_16 = list(js_key_data.chunks)
            chunks_16[-1] = 16
            print(colored("convert source: "+str(source_path)+" "+get_ln_status(zarr_path),"green",attrs=['bold']))
            # Preview row 10 before/after (assumes at least 11 timesteps — TODO confirm).
            print(colored(f"orig {list(np.round(js_key_data[10], 3))}","green"))
            print(colored(f"new {list(np.round(js_key_16[10], 3))}","green"))
            input(colored(f"js_key_data.shape: {js_key_data.shape}, js_key_16.shape: {js_key_16.shape} chunks_16: {chunks_16}. {group_name} continue?","green"))
            os.rename(str(source_path), str(source_path)+"_14")
            # NOTE(review): this assumes the link target's parent is a direct
            # child group of the zarr root — confirm for nested layouts.
            zarr_group[source_path.parent.name].create_dataset(f'{key}', data=js_key_16, chunks=chunks_16, compressor=js_key_data.compressor, dtype=js_key_data.dtype, fill_value=js_key_data.fill_value, order=js_key_data.order)
class Train(SingleGroupTool):
    """Launch DP3 policy training for one group.

    Resolves the training zarr (-tz) and policy config (-tp) from the yaml
    path bookkeeping, derives the expected checkpoint directory, registers the
    run under paths['train'], and builds the train.sh command. Also hosts the
    static status helpers reused by Eval (a subclass).
    """
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'train'
        if self.cli_args.get('non_run'):
            # Dry-run path construction is not supported by this tool.
            raise NotImplementedError("NO CONSTRUCTED PATH FOR construct_all_path")
        self.policy_configs = yaml_data.get('policy_configs', {})
        self.train_zarr_index=self.cli_args.get('train_zarr_index')
        self.train_policy_config_index=self.cli_args.get('train_policy_config_index')
        self.train_zarr_path=self._resolve_zarr_path()
        self.train_policy_config=self._resolve_policy_config()
    def remove_existing_path(self, path, cmd):
        # When resuming from a checkpoint (-rc given), skip the existence prompt.
        return super().remove_existing_path(path, cmd, override=self.cli_args['resume_ckpt_index'] is not None)
    def _note_override(self, existing_note):
        """Reconcile the CLI note with a previously recorded note.

        On conflict the user chooses: y = take the new note, n = keep the
        existing one, app = append the new note to the existing note.
        Returns the note to store.
        """
        new_note= self.cli_args.get('note', existing_note)
        if new_note is not None and existing_note is not None and new_note != existing_note:
            while True:
                option=input(colored(f"note override from {existing_note} to {new_note}","red",attrs=['bold'])+f" (y/n/app): ")
                if option == 'y':
                    new_note = new_note  # keep the new note unchanged
                    break
                elif option == 'n':
                    new_note = existing_note
                    break
                elif option == 'app':
                    new_note = existing_note + new_note
                    break
                else:
                    print(colored(f"Invalid option: {option}","red",attrs=['bold']))
        return new_note
    def _checkpoint_root(self):
        """Absolute path of the DP3 checkpoints root next to this repository."""
        return str(Path(__file__).parent.parent/"RoboTwin/policy/DP3/checkpoints")
    @staticmethod
    def _extract_compare_path_part(path, cmd_type):
        """Derive the path fragment used to match train and eval entries.

        For cmd_type == 'eval': use components[-4]/components[-3] and append
        '_w_rgb'. Otherwise: use components[-2]/components[-1], stripping a
        trailing '_w_rgb' if present, and append '/'. Returns None when the
        path has fewer than 4 components.
        """
        # print(f"path: {path}, cmd_type: {cmd_type}")
        path_parts = path.split('/')
        result = None
        if len(path_parts) < 4:
            print(f"path_parts is less than 4 for path: {path}")
            return None
        if cmd_type == 'eval':
            compare_part = f"{path_parts[-4]}/{path_parts[-3]}"
            if compare_part.endswith('/'):
                result = compare_part[:-1] + '_w_rgb'
            else:
                result = compare_part + '_w_rgb'
        else:
            compare_part = f"{path_parts[-2]}/{path_parts[-1]}"
            if compare_part.endswith('_w_rgb'):
                result = compare_part[:-6] + '/'
            else:
                result = compare_part + '/'
        # print(f"result: {result}")
        return result

    @staticmethod
    def get_related_status(from_key, to_key, target_dir, yaml_data):
        """List paths[to_key] entries related to the paths[from_key] entry matching *target_dir*.

        The first from_key entry whose name contains target_dir is converted
        via _extract_compare_path_part; every to_key entry whose name contains
        that fragment is collected. Returns e.g. "eval_by:[3,7] ", or "" when
        nothing matches.
        """
        if yaml_data is None:
            return ""
        target_index = next((key_name for key_name, entry_map in yaml_data.get('paths', {}).get(from_key, {}).items() if target_dir in str(entry_map.get('name', ''))), None)
        if target_index is None:
            return ""
        target_entry = yaml_data['paths'][from_key][target_index]
        target_path = target_entry.get('name', '')
        compare_path_part = Train._extract_compare_path_part(target_path, from_key)
        if not compare_path_part:
            return ""
        related_indices = []
        for key_name, entry_map in yaml_data.get('paths', {}).get(to_key, {}).items():
            related_path = entry_map.get('name', '')
            if compare_path_part in related_path:
                related_indices.append(key_name)
        if not related_indices:
            return ""
        return f"{to_key}_by:[{','.join(related_indices)}] "

    @staticmethod
    def path_to_status(existing_status, train_dir, yaml_data,get_ckpt_num=False):
        """Build a human-readable status string for a training directory.

        Combines checkpoint progress, loss-log progress, and related eval
        entries. With get_ckpt_num=True, returns the largest checkpoint epoch
        instead (see _get_checkpoint_status). *existing_status* is accepted
        for call-site compatibility but is not read here.
        """
        global NEVER_MOD_TRAIN_POLICY
        if train_dir is None:
            return "non_exist"

        def _check_policy_idx():
            # Verify that the '-tp' index recorded in the train entry's source
            # flags still points at the policy yaml implied by the directory
            # name; interactively offer to rewrite the recorded source if not.
            global NEVER_MOD_TRAIN_POLICY
            train_idx, group_name = BaseTool._find_index_everywhere_static(yaml_data, train_dir)
            status = yaml_data["paths"]["train"][train_idx]["status"] if train_idx is not None else "unknown"
            if status == "non_exist":
                return
            policy_name = train_dir.split('/')[-1]
            # if policy_name ends with "_w_rgb", remove it
            if policy_name.endswith('_w_rgb'):
                policy_name = policy_name[:-6]
            policy_path = "/home/algo/geyiheng/RoboTwin/policy/DP3/3D-Diffusion-Policy/diffusion_policy_3d/config/" + policy_name + ".yaml"
            desired_policy_idx, desired_policy_group_name = BaseTool._find_index_everywhere_static(yaml_data, policy_path)

            source = yaml_data["paths"]["train"][train_idx]["source"] if train_idx is not None else "unknown"
            # policy_idx: the number after '-tp' in source
            # NOTE(review): if source has no '-tp ', policy_idx stays None and
            # the `' ' in policy_idx` test below raises TypeError — confirm
            # that recorded sources always carry '-tp '.
            policy_idx = source.split('-tp ')[-1] if '-tp ' in source else None
            if ' ' in policy_idx:
                # remove blanks surrounding
                policy_idx = policy_idx.strip() if policy_idx else None
                policy_idx = policy_idx.split(' ')[0]

            if not desired_policy_idx == policy_idx:
                # ask user whether to modify or not
                # NOTE(review): desired_policy_idx and policy_idx may differ in
                # type (index vs. string slice) — confirm the comparison is
                # intended to be string-vs-string.
                print(colored(f"Policy index mismatch:", "red", attrs=['bold']))
                print(f"train_idx = {train_idx};\ntrain_dir = {train_dir};\nsource = {source};\npolicy_idx = {policy_idx};\ndesired_policy_idx = {desired_policy_idx}.")
                while True:
                    option = input(colored(f"Modify? (y/n/never): ", "red", attrs=['bold']))
                    if option.lower() == 'y':
                        # modify this in yaml_data
                        # NOTE(review): split(policy_idx) drops text after a
                        # second occurrence of policy_idx — confirm sources
                        # contain the index exactly once.
                        text = source.split(policy_idx)
                        new_source = text[0] + str(desired_policy_idx) + text[1]
                        BaseTool.update_yaml_field(["paths", "train", str(train_idx), "source"], new_source)
                        yaml_data["paths"]["train"][train_idx]["source"] = new_source
                        break
                    elif option.lower() == 'n':
                        break
                    elif option.lower() == 'never':
                        # Suppress this check for the rest of the process.
                        NEVER_MOD_TRAIN_POLICY = True
                        break
                    else:
                        print(colored(f"Invalid option: {option}","red",attrs=['bold']))

        def _get_training_status():
            # 'non_exist' if the dir is missing; otherwise report the highest
            # epoch logged in loss_result.json (retrying once after 1s in case
            # the file is mid-write), or 'not_started' when no epochs exist.
            if (os.path.exists(train_dir)):
                assert os.path.isdir(train_dir), f"train_dir={train_dir}"
            else:
                return "non_exist"
            status = 'not_started'
            loss_json = os.path.join(train_dir, 'loss_result.json')
            if os.path.exists(loss_json):
                try:
                    with open(loss_json) as file_handle:
                        loss_data = json.load(file_handle)
                except json.JSONDecodeError:
                    time.sleep(1)
                    try:
                        with open(loss_json) as file_handle:
                            loss_data = json.load(file_handle)
                    except:
                        loss_data = {}
                numeric_keys = [int(key_name) for key_name in loss_data.keys() if str(key_name).isdigit()]
                if numeric_keys:
                    status = f"training:{max(numeric_keys)} "
            return status
        def _get_checkpoint_status():
            # Read the newest CP_robot_dp3*.yaml to compute the final target
            # epoch, then report whether an intermediate or the final .ckpt is
            # present. With get_ckpt_num=True, returns the largest epoch
            # number (int or None) instead of a string — note the early exits
            # still return "" in that mode.
            if not os.path.exists(train_dir):
                return ""
            cp_yamls = glob(os.path.join(train_dir, 'CP_robot_dp3*.yaml'))
            if not cp_yamls:
                return ""
            if len(cp_yamls) > 1:
                # Prefer the most recently created config.
                cp_yamls.sort(key=lambda x: os.path.getctime(x), reverse=True)
            with open(cp_yamls[0]) as file_handle:
                cfg = yaml.safe_load(file_handle)
            checkpoint_every = cfg['training']['checkpoint_every']
            num_epochs = cfg['training']['num_epochs']
            # Last epoch at which a checkpoint is actually written.
            target_epoch = (num_epochs) // checkpoint_every * checkpoint_every
            ckpt_files = glob(os.path.join(train_dir, "*.ckpt"))
            largest_epoch = None
            if ckpt_files:
                epoch_numbers = [int(filename.replace('.ckpt', '')) for ckpt_file in ckpt_files if (filename := os.path.basename(ckpt_file)).endswith('.ckpt') and filename.replace('.ckpt', '').isdigit()]
                largest_epoch = max(epoch_numbers) if epoch_numbers else None
            status_suffix =  ""
            # status_suffix += f"checkpoint_every: {checkpoint_every}, num_epochs: {num_epochs}, target_epoch: {target_epoch}"
            if largest_epoch is not None and largest_epoch != target_epoch:
                status_suffix += f"training:{largest_epoch}.ckpt "
            if os.path.exists(os.path.join(train_dir, f"{target_epoch}.ckpt")):
                status_suffix += f"exist:{target_epoch}.ckpt "
            if get_ckpt_num:
                return largest_epoch
            return status_suffix

        if not NEVER_MOD_TRAIN_POLICY:
            _check_policy_idx()

        if get_ckpt_num:
            return _get_checkpoint_status()
        return _get_checkpoint_status()+_get_training_status()  + Train.get_related_status('train', 'eval', train_dir, yaml_data)

    def get_source_flag(self):
        """CLI flags that reproduce this train run (-t -tz ... -g ... -tp ... [-rc ...])."""
        assert self.train_zarr_index is not None and self.train_policy_config_index is not None, f"Training indices missing; group={self.group_name}; train_zarr_index={self.train_zarr_index}, train_policy_config_index={self.train_policy_config_index}"
        resume_index_str=f" -rc {self.cli_args['resume_ckpt_index']}" if self.cli_args['resume_ckpt_index'] is not None else ""
        return f"-t -tz {self.train_zarr_index} -g {self.group_name} -tp {self.train_policy_config_index}{resume_index_str}{self.get_note_flag()}"
    def construct_command(self):
        """Shell command running DP3 train.sh; %gpu_id is a placeholder substituted later — TODO confirm against the runner."""
        name_value = self.get_demo_task_name(self.collect_num, is_test=False)
        root_path = f"{Path(__file__).parent.parent}/RoboTwin"
        return f'cd {root_path}/policy/DP3; bash train.sh {self.get_group_key("task_name")} {name_value} {self.process_num} 1 %gpu_id {self.train_zarr_path} {self.train_policy_config}'
    def get_output_path(self):
        # Training output is the checkpoint dir tracked via _resolve_index,
        # not a single output path.
        return None
    def _resolve_policy_config(self,get_full_path=False):
        """Map the -tp index to a policy yaml; return its stem, or the full path when *get_full_path*."""
        if self.train_policy_config_index is None:
            return None
        policy_index = str(self.train_policy_config_index)
        pc_map = self.yaml_data.get('paths', {}).get('policy_configs', {})
        assert policy_index in pc_map, f"Invalid policy_config_index={policy_index}; available={list(pc_map.keys())}; group={self.group_name}; custom_name={self.yaml_data.get('custom_name')}"
        policy_path = pc_map[policy_index]['name']
        config_name = Path(policy_path).stem
        if get_full_path:
            return policy_path
        return config_name
    def _resolve_zarr_path(self):
        """Map the -tz index to the training zarr path (None when no index was given)."""
        if self.train_zarr_index is None:
            return None
        zarr_path, _ ,_= self._find_pathdict_by_index(self.yaml_data, self.train_zarr_index)
        return zarr_path
    def _policy_use_pc_color(self):
        """True if the policy config sets policy.use_pc_color (selects the *_w_rgb checkpoint dir)."""
        cfg_path = Path(__file__).parent.parent/"RoboTwin/policy/DP3/3D-Diffusion-Policy/diffusion_policy_3d/config"/f"{self.train_policy_config }.yaml"
        with open(cfg_path,'r') as file_handle:
            data=yaml.safe_load(file_handle)
        return bool(data.get('policy',{}).get('use_pc_color',False))
    def _resolve_index(self):
        """Register the expected checkpoint dir under paths['train'] (indices from 3000).

        Reloads the yaml first, preserves any recorded log_file, and merges
        notes via _note_override. Status is initialized to 'non_exist'.
        """
        self.yaml_data=BaseTool.load_yaml()
        source_flag = self.get_source_flag()
        expected_dir = self._expected_ckpt_dir()
        resolved_index, train_map = self._find_index('train', 'name', expected_dir, 3000)
        existing_note = train_map.get(str(resolved_index),{}).get('note',None)
        new_note = self._note_override(existing_note)
        train_map[str(resolved_index)] = {'name': expected_dir, 'status': 'non_exist', 'log_file': train_map.get(str(resolved_index),{}).get('log_file',None), 'source': source_flag, 'tool_name': 'train', 'note': new_note}
        self.update_yaml_data_refresh( ['paths','train'], train_map)
        return resolved_index
    def _expected_ckpt_dir(self,original_group_name=None):
        """Expected checkpoint dir: <root>/<task>-<demo>-<proc>_<seed>/<zarr_base>/<config>[_w_rgb]."""
        checkpoint_root = self._checkpoint_root()
        seed_str = str(self.yaml_data.get('seed', 1))
        zarr_base = Path(self.train_zarr_path).name.replace('.zarr','')
        demo_clean_name=f"{self.get_group_key('task_name')}-"+self.get_demo_task_name(self.collect_num, is_test=False,original_group_name=original_group_name)
        config_name = self.train_policy_config
        base_dir = os.path.join(checkpoint_root, f"{demo_clean_name}-{self.process_num}_{seed_str}",zarr_base)
        return os.path.join(base_dir, f"{config_name}_w_rgb" if self._policy_use_pc_color() else config_name)
    # def auto_eval_after_train(self):
    #     for group_name in self.cli_args.get('group',[]):
    #         eval_tool = Eval(self.yaml_data, group_name, self.cli_args)
    #         eval_tool.execute_command(eval_tool.construct_command())
    def execute_command(self, cmd):
        """Copy/link resume checkpoints if requested, validate the training zarr, then run training."""
        self.cp_resume_ckpt()
        self.ln_group_ckpt()
        train_zarr_test = DataTraverserSingleTest(self.yaml_data, self.group_name, self.cli_args, self.train_zarr_path)
        train_zarr_test.execute_command()
        return super().execute_command(cmd)
        # if result.returncode == 0:
        #     self.auto_eval_after_train()
    def cp_resume_ckpt(self):
        """Seed the expected checkpoint dir from the -rc resume checkpoint.

        Interactively guards against clobbering an existing non-symlink
        checkpoint dir; a symlinked destination is unlinked and replaced with
        a real copy of the resume checkpoint.
        """
        print(f"resume_ckpt_index: {self.cli_args['resume_ckpt_index']}")
        if self.cli_args['resume_ckpt_index'] is not None:
            resume_ckpt_index=self.cli_args['resume_ckpt_index']
            resume_ckpt_path,_,_=self._find_pathdict_by_index(self.yaml_data,resume_ckpt_index,False)
            original_ckpt=Path(resume_ckpt_path)
            want_ckpt=Path(self._expected_ckpt_dir())
            if not os.path.exists(original_ckpt):
                print(colored(f"resume_ckpt_path ckpt not found: {original_ckpt}","red",attrs=['bold']))
                exit(1)
            want_status=self.path_to_status('non_exist',str(want_ckpt),self.yaml_data)
            print(f"want_status: {want_status}")
            if os.path.exists(want_ckpt) and not want_ckpt.is_symlink() and "exist" in want_status:
                # A real (non-symlink) checkpoint dir with content: keep it.
                print(colored(f"resume ckpt already exists: {want_ckpt} status {want_status}","green",attrs=['bold']))
                return
            if os.path.exists(want_ckpt):
                option=input(colored(f"resume ckpt already exists: {want_ckpt} status {want_status}","green",attrs=['bold'])+f" remove it? (y/n/[g]uarantee its fine): "+colored('FIRST ASK GYH FOR PERMISSION, DONT PRESS y DIRECTLY','red',attrs=['bold']))
                if option == 'y':
                    # Double confirmation ('yyy') required before deleting a checkpoint.
                    if input(colored('FIRST ASK GYH FOR PERMISSION SINCE ITS VERY DANGEROUS TO REMOVE CKPT!','red',attrs=['bold'])) == 'yyy':
                        os.system(f"rm -rf {want_ckpt}")
                elif option == 'g':
                    return
            want_ckpt_ln=""
            if want_ckpt.is_symlink():
                want_ckpt_ln=colored(f"\nwant_ckpt's orig sym link source also about to remove ","cyan",attrs=['bold'])+str(want_ckpt.readlink())
            while True:
                option= input(colored(f"cp",'green',attrs=['bold'])+f" {original_ckpt}\n   {want_ckpt}{want_ckpt_ln} ? (y/n/c): ")
                if option=='y':
                    # NOTE(review): this branch does not break, so the prompt
                    # repeats after copying ('c' then continues) — confirm
                    # this re-ask is intended.
                    if want_ckpt.is_symlink():
                        want_ckpt.unlink()
                    os.system(f"cp -r {original_ckpt} {want_ckpt}")
                elif option=='n':
                    exit(0)
                elif option=='c':
                    break
                else:
                    print(f"invalid cp option {option}")


    def ln_group_ckpt(self):
        """Symlink the original group's checkpoint dir to this group's expected dir.

        Only active when -orig (original_group_name) is given; refuses to
        overwrite a destination that exists as a real (non-symlink) directory.
        """
        original_ckpt=None
        if self.cli_args['original_group_name'] is not None:
            original_ckpt=Path(self._expected_ckpt_dir(self.cli_args['original_group_name']))
        else:
            print(f"since original group name is not specified, then self.group is not overrided. using unchanged group name: {self.group_name}")
            return
        want_ckpt=Path(self._expected_ckpt_dir())
        # print(f"original_ckpt: {original_ckpt}")
        # print(f"want_ckpt:      {want_ckpt}")
        if not os.path.exists(original_ckpt):
            print(colored(f"original ckpt not found: \n[orig]{original_ckpt}\n[new ]{want_ckpt}","red",attrs=['bold']))
            exit(1)
        if not original_ckpt==want_ckpt:
            if not os.path.exists(want_ckpt) or (want_ckpt.is_symlink() and want_ckpt.readlink()!=original_ckpt):
                parent_need_create=colored('\nparent to dest also need to create: ', 'cyan', attrs=['bold']) + str(want_ckpt.parent) if not os.path.exists(want_ckpt.parent) else ''
                unlink_original_source=colored('\nunlink original source:', 'cyan', attrs=['bold']) + str(want_ckpt.readlink()) if want_ckpt.is_symlink() else ''
                if input(f"{colored('ln -s','green',attrs=['bold'])} {original_ckpt}\n      {want_ckpt} {parent_need_create} {unlink_original_source}? (y/n): ") == 'y':
                    if want_ckpt.exists():
                        want_ckpt.unlink()
                    if not os.path.exists(want_ckpt.parent):
                        os.makedirs(want_ckpt.parent)
                    os.system(f"ln -s {original_ckpt} {want_ckpt}")
            elif os.path.exists(want_ckpt) and (not want_ckpt.is_symlink()):
                print(colored(f"new ckpt dest is a hardlink: {want_ckpt}\nrefusing to overwrite it. Perhaps change the override group name of eval? current override {self.group_name} original {self.cli_args['original_group_name']}",'red',attrs=['bold']))
                exit(1)
class Eval(Train):
    """Evaluation tool: runs eval.sh for a trained DP3 policy and interprets
    the resulting eval_result directory (success rate, videos, action log)."""
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'eval'
    def realtime_output_filter(self):
        # Only subprocess output lines containing this tag are echoed live.
        return ["[LOGPRINT]"]
    def remove_existing_path(self, path, cmd):
        """Interactively move an existing eval directory out of the way.

        Returns True when execution may proceed (path absent, renamed, or a
        previous run that never finished and is safe to reuse), False when
        the user declines the rename.
        """
        if not os.path.exists(path):
            return True
        if os.path.isdir(path):
            stat = os.stat(path)
            # st_ctime is the metadata-change time, used here as an
            # approximate creation time for the rename prefix.
            birth_time = stat.st_ctime
            birth_time = datetime.datetime.fromtimestamp(birth_time)
            new_name = f"{birth_time.strftime('%Y-%m-%d %H:%M:%S')}-{os.path.basename(path)}"
            new_path = os.path.join(os.path.dirname(path), new_name)
            status=self.path_to_status('exist',path,self.yaml_data)
            # No "%" in the status means no success_rate.json was produced,
            # i.e. the previous run never finished: silently reuse the dir.
            if "%" not in status:
                return True
            while True:
                option=input(colored(f"cmd:","green",attrs=['bold'])+f"{cmd}\n{colored('Rename existing directory:','green',attrs=['bold'])} {path} -> '{new_path}' [status: {status}]? (y/n): ")
                if option == 'y':
                    os.rename(path, new_path)
                    return True
                elif option == 'n':
                    return False
                else:
                    print(f"Invalid input: {option}")
        return super().remove_existing_path(path, cmd)

    def get_source_flag(self):
        """Return the CLI flags that reproduce this eval invocation."""
        assert self.train_zarr_index is not None and self.train_policy_config_index is not None, f"train_zarr_index={self.train_zarr_index}, train_policy_config_index={self.train_policy_config_index}"
        flag=f"-v -tz {self.train_zarr_index} -g {self.group_name} -tp {self.train_policy_config_index}{self.get_note_flag()}"
        if self.cli_args['original_group_name'] is not None:
            flag+=f" -orig {self.cli_args['original_group_name']}"
        if self.cli_args['original_ckpt_index'] is not None:
            flag+=f" -ockpt {self.cli_args['original_ckpt_index']}"
        return flag
    def construct_command(self):
        """Build the eval.sh shell command; %gpu_id is substituted later."""
        name_value = self.get_demo_task_name(self.collect_num, is_test=False)
        zarr_to_use = self._resolve_zarr_path()
        root_path = f"{Path(__file__).parent.parent}/RoboTwin"
        ckpt_num = self.get_eval_num(root_path, self.train_policy_config)
        return f'cd {root_path}/policy/DP3; bash eval.sh {self.get_group_key("task_name")} {name_value} {self.process_num} 1 %gpu_id {zarr_to_use} {self.train_policy_config} {ckpt_num}'
    def get_output_path(self):
        # Eval results are tracked through _expected_eval_dir/_resolve_index,
        # not the generic output-path mechanism.
        return None
    def get_eval_num(self, root_path, policy_config_name):
        """Return the checkpoint epoch to evaluate.

        An explicit -ockpt override wins; otherwise use the last checkpoint
        the training config would have saved (largest multiple of
        checkpoint_every that is <= num_epochs, falling back to
        checkpoint_every itself when num_epochs < checkpoint_every).
        """
        if self.cli_args['original_ckpt_index'] is not None:
            return int(self.cli_args['original_ckpt_index'])
        config_path = f"{root_path}/policy/DP3/3D-Diffusion-Policy/diffusion_policy_3d/config/{policy_config_name}.yaml"
        with open(config_path, 'r') as file_handle:
            config_data = yaml.safe_load(file_handle)
        num_epochs, checkpoint_every = config_data['training']['num_epochs'], config_data['training']['checkpoint_every']
        return (num_epochs // checkpoint_every) * checkpoint_every or checkpoint_every

    def _resolve_index(self):
        """Register (or refresh) this eval run under yaml 'paths.eval' and
        return its numeric index."""
        source_flag = self.get_source_flag()
        expected_dir = self._expected_eval_dir()
        # Reload to avoid clobbering concurrent edits to the yaml file.
        self.yaml_data=BaseTool.load_yaml()
        resolved_index, eval_map = self._find_index('eval', 'name', expected_dir, 1500)
        existing_note = eval_map.get(str(resolved_index), {}).get('note', None)
        new_note = self._note_override(existing_note)
        eval_map[str(resolved_index)] = {'name': expected_dir,'status': 'non_exist','log_file': eval_map.get(str(resolved_index), {}).get('log_file', None),'source': source_flag,'tool_name': 'eval','note': new_note}
        self.update_yaml_data_refresh( ['paths', 'eval'], eval_map)
        return resolved_index
    def _expected_eval_dir(self):
        """Compute the deterministic eval_result directory for this run from
        task, zarr, policy config, mimic mode and embodiment."""
        task_name=self.get_group_key('task_name')
        policy_name="DP3"
        name_value=self.get_demo_task_name(self.collect_num, is_test=False)
        root_dir=str(Path(__file__).parent.parent/"RoboTwin")
        task_config=name_value
        zarr_base=os.path.basename(self._resolve_zarr_path()).replace('.zarr','')
        config_name=self.train_policy_config
        demo_cfg_path=os.path.join(root_dir,"task_config",f"{name_value}.yml")
        assert os.path.exists(demo_cfg_path), f"{demo_cfg_path} not found"
        with open(demo_cfg_path,'r') as f:
            demo_cfg=yaml.safe_load(f)
        assert isinstance(demo_cfg,dict) and 'usemimic' in demo_cfg and 'embodiment' in demo_cfg, f"Invalid demo config: {demo_cfg_path}"
        usemimic=bool(demo_cfg['usemimic'])
        embodiment=demo_cfg['embodiment']
        # 1 entry = single embodiment; 3 entries = dual-arm pair + extra field.
        assert isinstance(embodiment,list) and len(embodiment) in [1,3], f"Invalid embodiment: {embodiment}"
        embodiment_name=str(embodiment[0]) if len(embodiment)==1 else f"{embodiment[0]}+{embodiment[1]}"
        mode="mimic" if usemimic else "real"
        return os.path.join(root_dir,"eval_result",task_name,policy_name,task_config,zarr_base,config_name,mode,embodiment_name)

    def execute_command(self, cmd):
        """Create the demo task yml, link the ckpt dir for overridden groups,
        then delegate the actual run to Train.execute_command."""
        pipeline_to_use = self.get_pipeline('collect',is_test=False)
        self.create_demo_task_yml(self.get_demo_task_name(self.collect_num, is_test=False), self.collect_num, pipeline=pipeline_to_use,embodiment=self.embodiment)
        self.ln_group_ckpt()
        self.print_before_exec=colored(f"possible fpspcd visualization at ",'magenta',attrs=['bold'])+f"{self._expected_eval_dir()}/cam-0_0_pcd.pcd"
        super().execute_command(cmd)
    @staticmethod
    def logtime_compare(log_file_path, compare_time_str="20250827_165800"):
        """Return True when the log file's timestamped name (format
        %Y%m%d_%H%M%S.log) is strictly newer than compare_time_str."""
        compare_time = datetime.datetime.strptime(compare_time_str, "%Y%m%d_%H%M%S")
        filename = os.path.basename(log_file_path)
        assert filename.endswith('.log'), f"filename={filename}"
        time_str = filename.replace('.log', '')
        log_time = datetime.datetime.strptime(time_str, "%Y%m%d_%H%M%S")
        return log_time > compare_time

    @staticmethod
    def path_to_status(existing_status, eval_dir, yaml_data):
        """Summarize an eval dir as a status string: success rate (or mp4
        count when no success_rate.json exists), the related train status,
        and the action-log length when present."""
        if not eval_dir:
            return 'non_exist'
        log_file = None
        if yaml_data:
            # Locate the log file recorded for this path anywhere in 'paths'.
            for path_key in yaml_data.get('paths', {}):
                indices, _ = BaseTool._find_index_static(yaml_data, path_key, 'name', eval_dir)
                if indices is not None:
                    index = indices[0] if isinstance(indices, list) else indices
                    log_file = yaml_data['paths'][path_key][str(index)].get('log_file')
                    break
        train_by_status = Train.get_related_status('eval', 'train', eval_dir, yaml_data)
        # Results logged before the cutoff keep their recorded status,
        # appending the train_by info only once.
        if log_file and not Eval.logtime_compare(log_file):
            if "train_by" in existing_status:
                return existing_status
            return existing_status+train_by_status
        if not os.path.isdir(eval_dir):
            return 'non_exist'
        success_rate_path = os.path.join(eval_dir, 'success_rate.json')
        status_str = ""
        if os.path.exists(success_rate_path):
            with open(success_rate_path) as file_handle:
                success_data = json.load(file_handle)
            success_num = success_data['success_num']
            total_num = success_data['total_num']
            eval_rand_success = success_data.get('eval_rand_success', False)
            percentage = success_num / total_num * 100 if total_num > 0 else 0
            eval_rand_success_str = f"RAND_SUCCESS!" if eval_rand_success else ""
            status_str = f"exist:{success_num}/{total_num} ({percentage:.1f}%) {eval_rand_success_str} {train_by_status}"
        else:
            # Run in progress or aborted: report how many episode videos exist.
            mp4_files = glob(os.path.join(eval_dir, '*.mp4'))
            status_str = f"exist:{len(mp4_files)} {train_by_status}"
        action_log_path = os.path.join(eval_dir, 'action_log.json')
        if os.path.exists(action_log_path):
            with open(action_log_path) as file_handle:
                try:
                    action_data = json.load(file_handle)
                except Exception:
                    print(traceback.format_exc())
                    print(f"Failed to load action_log.json: {action_log_path}")
                    exit(1)
            action_num = len(action_data.keys())
            status_str +=f" action_log len:{action_num}"
        return status_str
class TrainEval(Eval):
    """Composite tool: run training once, then evaluate the resulting policy
    once per embodiment variant of the group."""
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'train_eval'
        self.eval_tools = []
        self.train_tool = Train(yaml_data, group_name, cli_args)
        # Group names encode the embodiment in their first character.
        emb_chars=['a','f','u']
        assert group_name[0] in emb_chars, f"group_name={group_name} must start with one of {emb_chars}"
        for emb_char in emb_chars:
            # count=1 so only the leading embodiment char is swapped;
            # replacing every occurrence would corrupt group names that
            # contain the same character elsewhere.
            self.eval_tools.append(Eval(yaml_data, group_name.replace(group_name[0], emb_char, 1), cli_args))
    def execute_command(self):
        """Train first, then run every per-embodiment eval sequentially."""
        self.train_tool.execute_command(self.train_tool.construct_command())
        for eval_tool in self.eval_tools:
            eval_tool.execute_command(eval_tool.construct_command())
    def construct_command(self):
        # The composite has no single shell command of its own.
        return None
        
        
class HighZFilter(SingleGroupTool):
    """Run the high-z HDF5 filter script over a group's collected demos."""
    def __init__(self, yaml_data, group_name, cli_args=None):
        self.cmd_type = 'highz_filter'
        super().__init__(yaml_data, group_name, cli_args or {})
        # Tool is only meaningful when the group actually filters episodes.
        assert self.collect_num != self.process_num, f"collect_num equals process_num for {group_name} (collect_num={self.collect_num}, process_num={self.process_num})"
    def _base_dir(self):
        # Data directory shared by the command line and the output path.
        task = self.get_group_key('task_name')
        demo = self.get_demo_task_name(self.collect_num, is_test=False)
        return f"{Path(__file__).parent.parent}/RoboTwin/data/{task}/{demo}"
    def construct_command(self):
        """Return the shell command invoking the filter script."""
        filter_script = Path(__file__).parent.parent/"RoboTwin/script/filter_hdf5_highz.py"
        assert filter_script.exists(), f"Filter script not found at {filter_script}"
        return f"python3 {filter_script} {self._base_dir()}"
    def get_output_path(self):
        """Directory where the filtered episodes are written."""
        return f"{self._base_dir()}/highz"
    def get_source_flag(self):
        """CLI flags that reproduce this invocation."""
        return f"-hz -g {self.group_name}{self.get_note_flag()}"

class NoiseMeasureTool(GeneralTool):
    """Compare actions taken during an eval run against the ground-truth
    actions stored in a zarr dataset, estimate per-dimension noise sigmas,
    and optionally plot per-episode action curves."""
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'noise_measure'
        noise_measure_arg = self.cli_args.get('noise_measure')
        if self.cli_args.get('non_run'):
            raise NotImplementedError("NO CONSTRUCTED PATH FOR construct_all_path")
        assert noise_measure_arg and len(noise_measure_arg) >= 2, f"noise_measure_arg={noise_measure_arg}, len={len(noise_measure_arg) if noise_measure_arg else 0}"
        self.eval_index = int(noise_measure_arg[0])   # yaml 'paths' index of the eval run
        self.zarr_index = int(noise_measure_arg[1])   # yaml 'paths' index of the zarr dataset
        self.seed_file_index = self.cli_args.get('seed_file')
        assert self.seed_file_index is not None, f"seed_file_index={self.seed_file_index}"
        self.visualize_args = self.cli_args.get('visualize_noise')
        self.align_actions = self.cli_args.get('align', False)  # resample instead of truncating
        self.eps = self.cli_args.get('eps', 0.05)               # stagnation flux threshold
        self.yaml_data=yaml_data
    def get_source_flag(self):
        """CLI flags that reproduce this invocation."""
        align_flag = " -align" if self.align_actions else ""
        return f"-nm {self.eval_index} {self.zarr_index} -sf {self.seed_file_index}{align_flag}"
    def construct_command(self):
        # Runs in-process; there is no shell command.
        return None
    def get_output_path(self):
        return None
    def _sparse_sample_actions(self, actions, target_length):
        """Down-sample actions to target_length via evenly spaced indices."""
        assert len(actions) >= target_length, f"actions_length={len(actions)} < target_length={target_length}"
        if len(actions) == target_length:
            return actions
        indices = np.linspace(0, len(actions) - 1, target_length, dtype=int)
        return actions[indices]
    def execute_command(self):
        """Load eval action log, zarr actions and the seed file; match eval
        episodes to zarr episodes by seed; print noise sigmas and plot
        stagnating or explicitly requested episodes."""
        eval_path, entry_data, real_group_name = self._find_pathdict_by_index(self.yaml_data, self.eval_index,False)
        assert os.path.exists(eval_path), f"eval_path={eval_path} not exist!"
        assert entry_data.get('tool_name') == 'eval', f"tool_name={entry_data.get('tool_name')}, eval_index={self.eval_index}"
        action_log_path = os.path.join(eval_path, 'action_log.json')
        assert os.path.exists(action_log_path), f"action_log_path={action_log_path} not exist!"
        zarr_path, entry_data, real_group_name = self._find_pathdict_by_index(self.yaml_data, self.zarr_index,True)
        assert os.path.exists(zarr_path), f"zarr_path={zarr_path} not exist!"
        collect_path, entry_data, real_group_name = self._find_pathdict_by_index(self.yaml_data, self.seed_file_index,False)
        assert os.path.exists(collect_path), f"collect_path={collect_path} not exist!"
        seed_txt_path = os.path.join(collect_path, 'seed.txt')
        assert os.path.exists(seed_txt_path), f"seed_txt_path={seed_txt_path} not exist!"
        with open(action_log_path, 'r') as f:
            action_log = json.load(f)
        with open(seed_txt_path, 'r') as f:
            seed_lines = f.readlines()
        # seed.txt: one whitespace-separated line; position = zarr episode idx.
        seed_txt = [int(num) for num in seed_lines[0].split()]
        zarr_data = zarr.open(zarr_path, mode='r')
        # NOTE(review): assumes entry "1" exists and all episodes share its
        # action_type — TODO confirm against the eval logger.
        zarr_actions = zarr_data[f'{action_log["1"]["action_type"]}/action'][:]
        episode_ends = zarr_data['meta/episode_ends'][:]
        episode_mapping = {}
        seeds_in_eval_log = []
        for episode_data in action_log.values():
            assert 'seed' in episode_data, f"seed not in episode_data={episode_data}"
            seeds_in_eval_log.append(episode_data['seed'])
        # Map eval seed -> zarr episode index (keys are int seed values).
        for seed_eval in seeds_in_eval_log:
            if seed_eval in seed_txt:
                zarr_episode_idx = seed_txt.index(seed_eval)
                episode_mapping[seed_eval] = zarr_episode_idx
        if not episode_mapping:
            print(f"Warning: seeds {seed_txt[0]}~{seed_txt[-1]} have no overlap with eval log {seeds_in_eval_log[0]}~{seeds_in_eval_log[-1]} Perhaps you didn't turn on eval_rand_success in group when eval")
            return
        all_deviations = []
        stagnation_episodes = []
        # NOTE(review): action_log is indexed by str(seed) in this flow, so
        # eval-log keys are presumed to be seed values when eval_rand_success
        # is on — TODO confirm.
        for episode_key, zarr_episode_idx in episode_mapping.items():
            episode_data = action_log.get(str(episode_key))
            if episode_data is None:
                print(f"Warning: episode_data={episode_data} not found in action_log")
                continue
            logged_actions = np.array(episode_data['taken_actions'])
            if zarr_episode_idx == 0:
                start_idx = 0
            else:
                start_idx = episode_ends[zarr_episode_idx - 1]
            end_idx = episode_ends[zarr_episode_idx]
            zarr_episode_actions = zarr_actions[start_idx:end_idx]
            stagnation_idx = StagnationExamineTool._find_stagnation_timestep(logged_actions, self.eps)
            # Only flag episodes that stagnated for at least 8 timesteps.
            if len(logged_actions) - (stagnation_idx + 1) < 8:
                se_str = ""
            else:
                se_str = colored(f"stag_len={stagnation_idx+1}, ", 'blue')
                stagnation_episodes.append((episode_key, zarr_episode_idx, stagnation_idx))
            print(f"[{episode_key}] length diff z_len={len(zarr_episode_actions)} eval_len={len(logged_actions)} {se_str}")
            if self.align_actions:
                target_length = min(len(logged_actions), len(zarr_episode_actions))
                logged_actions = self._sparse_sample_actions(logged_actions, target_length)
                zarr_episode_actions = self._sparse_sample_actions(zarr_episode_actions, target_length)
            else:
                min_length = min(len(logged_actions), len(zarr_episode_actions))
                logged_actions = logged_actions[:min_length]
                zarr_episode_actions = zarr_episode_actions[:min_length]
            deviations = logged_actions - zarr_episode_actions
            all_deviations.append(deviations)
        if len(all_deviations) == 0:
            print("Warning: No episodes found for deviation calculation")
            return
        deviations_array = np.concatenate(all_deviations, axis=0)
        noise_sigmas = np.std(deviations_array, axis=0)
        print(f"Calculated noise sigmas for 16 action dimensions:")
        print(list(np.round(noise_sigmas[:8], 4)))
        print(list(np.round(noise_sigmas[8:], 4)))
        for episode_key, zarr_episode_idx, stagnation_idx in stagnation_episodes:
            episode_idx = int(episode_key)
            self._create_all_dims_visualization(action_log, zarr_actions, episode_ends, episode_mapping, episode_idx)
        if self.visualize_args and len(self.visualize_args) >= 1:
            episode_idx = int(self.visualize_args[0])
            if len(self.visualize_args) >= 2:
                action_dim = int(self.visualize_args[1])
                self._create_visualization(action_log, zarr_actions, episode_ends, episode_mapping, episode_idx, action_dim)
            else:
                self._create_all_dims_visualization(action_log, zarr_actions, episode_ends, episode_mapping, episode_idx)
        print("eval path:", eval_path)
    def _create_visualization(self, action_log, zarr_actions, episode_ends, episode_mapping, episode_idx, action_dim):
        """Plot one action dimension of one episode: logged vs ground truth."""
        episode_key = str(episode_idx)
        assert episode_key in action_log, f"Episode {episode_idx} not found in action_log"
        # episode_mapping is keyed by int seeds (see execute_command and
        # _create_all_dims_visualization); the previous str-keyed lookup
        # here could never succeed.
        assert episode_idx in episode_mapping, f"Episode {episode_idx} not found in episode_mapping {episode_mapping}"
        episode_data = action_log[episode_key]
        zarr_episode_idx = episode_mapping[episode_idx]
        logged_actions = np.array(episode_data['taken_actions'])
        if zarr_episode_idx == 0:
            start_idx = 0
        else:
            start_idx = episode_ends[zarr_episode_idx - 1]
        end_idx = episode_ends[zarr_episode_idx]
        zarr_episode_actions = zarr_actions[start_idx:end_idx]
        if self.align_actions:
            target_length = min(len(logged_actions), len(zarr_episode_actions))
            logged_actions = self._sparse_sample_actions(logged_actions, target_length)
            zarr_episode_actions = self._sparse_sample_actions(zarr_episode_actions, target_length)
            timesteps = np.arange(target_length)
        else:
            min_length = min(len(logged_actions), len(zarr_episode_actions))
            logged_actions = logged_actions[:min_length]
            zarr_episode_actions = zarr_episode_actions[:min_length]
            timesteps = np.arange(min_length)
        plt.figure(figsize=(12, 6))
        plt.plot(timesteps, logged_actions[:, action_dim], 'b-', label='Logged Actions', linewidth=2)
        plt.plot(timesteps, zarr_episode_actions[:, action_dim], 'r-', label='Ground Truth (Zarr)', linewidth=2)
        plt.xlabel('Timestep')
        plt.ylabel(f'Action Value (Dimension {action_dim})')
        plt.title(f'Episode {episode_idx} - Action Dimension {action_dim} Comparison')
        plt.legend()
        plt.grid(True, alpha=0.3)
        plt.tight_layout()
        os.makedirs(Path(__file__).parent / "visnoise", exist_ok=True)
        output_filename = Path(__file__).parent / "visnoise" / f'n{self.eval_index}_z{self.zarr_index}_sf{self.seed_file_index}_e{episode_idx}_d{action_dim}_{"align" if self.align_actions else ""}.png'
        plt.savefig(output_filename, dpi=300, bbox_inches='tight')
        plt.close()
        print(f"Visualization saved as {output_filename}")
    def _create_all_dims_visualization(self, action_log, zarr_actions, episode_ends, episode_mapping, episode_idx):
        """Plot all 16 action dimensions (split left/right) of one episode
        and print the path of its corresponding eval video."""
        episode_key = str(episode_idx)
        assert episode_key in action_log, f"Episode {episode_idx} not found in action_log"
        assert episode_idx in episode_mapping, f"Episode {episode_idx} not found in episode_mapping {episode_mapping}"
        episode_data = action_log[episode_key]
        zarr_episode_idx = episode_mapping[episode_idx]
        logged_actions = np.array(episode_data['taken_actions'])
        if zarr_episode_idx == 0:
            start_idx = 0
        else:
            start_idx = episode_ends[zarr_episode_idx - 1]
        end_idx = episode_ends[zarr_episode_idx]
        zarr_episode_actions = zarr_actions[start_idx:end_idx]
        if self.align_actions:
            target_length = min(len(logged_actions), len(zarr_episode_actions))
            logged_actions = self._sparse_sample_actions(logged_actions, target_length)
            zarr_episode_actions = self._sparse_sample_actions(zarr_episode_actions, target_length)
            timesteps = np.arange(target_length)
        else:
            min_length = min(len(logged_actions), len(zarr_episode_actions))
            logged_actions = logged_actions[:min_length]
            zarr_episode_actions = zarr_episode_actions[:min_length]
            timesteps = np.arange(min_length)
        stagnation_idx = StagnationExamineTool._find_stagnation_timestep(logged_actions, self.eps)
        colors = plt.cm.tab20(np.linspace(0, 1, 16))
        os.makedirs(Path(__file__).parent / "visnoise", exist_ok=True)
        # Expand 14-dof actions to the canonical 16-dim layout for plotting.
        logged_actions = QposConvert._convert_14_16(logged_actions)
        zarr_episode_actions = QposConvert._convert_14_16(zarr_episode_actions)
        self._plot_action_dims(timesteps, logged_actions, zarr_episode_actions, colors, episode_idx, 0, 8, "left", stagnation_idx)
        self._plot_action_dims(timesteps, logged_actions, zarr_episode_actions, colors, episode_idx, 8, 16, "right", stagnation_idx)

        # corresponding video path
        dir_path = self.yaml_data["paths"]["eval"][str(self.eval_index)]["name"]
        # find all .mp4 files in dir_path with this certain episode_idx
        # note: video names in such format: episodexx_xxxxxx_xxxxx.mp4
        video_files = [
            fname for fname in os.listdir(dir_path)
            if fname.endswith('.mp4') and (f'episode{episode_idx}_' in fname or f'episode{episode_idx}.' in fname)
        ]

        # there should only be one such video
        assert len(video_files) == 1, f"Expected exactly one video file for episode {episode_idx}, found {len(video_files)}: {video_files}"
        if video_files:
            print(colored(f"Corresponding video file for episode {episode_idx}:", "green",attrs=['bold']))
            print(colored(os.path.join(dir_path, video_files[0]), "blue", attrs=['bold']))
        else:
            # Unreachable while the assert above holds; kept for -O runs
            # where asserts are stripped.
            print(f"No corresponding video files found for episode {episode_idx} in {dir_path}")

    def _plot_action_dims(self, timesteps, logged_actions, zarr_episode_actions, colors, episode_idx, start_dim, end_dim, suffix, stagnation_idx):
        """Plot dims [start_dim, end_dim) of logged vs ground-truth actions,
        marking the stagnation timestep; skipped when GT is flat."""
        def _check_flat_line(actions, start_dim, end_dim, eps=0.05):
            # True when every dim in the range stays within eps of its start.
            for dim in range(start_dim, end_dim):
                if not np.allclose(actions[:, dim], actions[0, dim], atol=eps):
                    return False
            return True
        if _check_flat_line(zarr_episode_actions, start_dim, end_dim, 0.05):
            return
        fig, ax = plt.subplots(1, 1, figsize=(10, 6))
        for dim in range(start_dim, end_dim):
            color = colors[dim]
            ax.plot(timesteps, logged_actions[:, dim], color=color, label=f'Log Dim {dim}', linewidth=2)
            ax.plot(timesteps, zarr_episode_actions[:, dim], color=color, alpha=0.6, linestyle='--', label=f'GT Dim {dim}', linewidth=2)
        if stagnation_idx < len(timesteps):
            ax.axvline(x=timesteps[stagnation_idx], color='red', linestyle='-', linewidth=2, label=f'Stagnation at t={stagnation_idx}')
        ax.set_xlabel('Timestep')
        ax.set_ylabel('Action Value')
        ax.set_title(f'Episode {episode_idx} - Action Dimensions {start_dim}-{end_dim-1}')
        ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
        ax.grid(True, alpha=0.3)
        plt.tight_layout()
        output_filename = Path(__file__).parent / "visnoise" / f'n{self.eval_index}_z{self.zarr_index}_sf{self.seed_file_index}_e{episode_idx}_{suffix}_{"align" if self.align_actions else ""}.png'
        plt.savefig(output_filename, dpi=150, bbox_inches='tight')
        plt.close()
        print(colored(f"{suffix.capitalize()} dimensions visualization saved as {output_filename}", "red", attrs=['bold']))

class StagnationExamineTool(GeneralTool):
    """Report, per eval episode, the last timestep at which the policy's
    actions still moved by more than eps (the stagnation point)."""
    def __init__(self, yaml_data, group_name, cli_args):
        super().__init__(yaml_data, group_name, cli_args)
        self.cmd_type = 'stagnation_examine'
        stagnation_arg = self.cli_args.get('stagnation_examine')
        if self.cli_args.get('non_run'):
            raise NotImplementedError("NO CONSTRUCTED PATH FOR construct_all_path")
        assert stagnation_arg and len(stagnation_arg) >= 1, f"stagnation_arg={stagnation_arg}, len={len(stagnation_arg) if stagnation_arg else 0}"
        self.target_index = int(stagnation_arg[0])
        self.eps = self.cli_args.get('eps', 0.05)

    def get_source_flag(self):
        """CLI flags that reproduce this invocation."""
        return f"-se {self.target_index} {self.eps}"
    def construct_command(self):
        # Runs in-process; there is no shell command.
        return None
    def get_output_path(self):
        return None
    def _is_eval_path(self, path):
        # An eval result dir is recognized by its success_rate.json.
        return os.path.exists(os.path.join(path, 'success_rate.json'))
    def execute_command(self):
        """Load the eval run's action_log.json and print each episode's
        stagnation timestep alongside its total length."""
        target_path, entry_data, real_group_name = self._find_pathdict_by_index(self.yaml_data, self.target_index, False)
        assert self._is_eval_path(target_path), f"eval_path={target_path} is not an eval path"
        assert os.path.exists(target_path), f"eval_path={target_path} not exist!"
        log_path = os.path.join(target_path, 'action_log.json')
        assert os.path.exists(log_path), f"action_log_path={log_path} not exist!"
        with open(log_path, 'r') as handle:
            eval_log = json.load(handle)
        analysis = {}
        for key, data in eval_log.items():
            if 'taken_actions' not in data:
                continue
            actions = np.array(data['taken_actions'])
            assert len(actions.shape) == 2, f"episode_actions.shape={actions.shape} should be 2D"
            analysis[key] = {
                'original_total_idx': len(actions) - 1,
                'stagnation_idx': self._find_stagnation_timestep(actions, self.eps),
            }
        if not analysis:
            print("No action data found in eval log")
            return
        print(f"Stagnation analysis for eval: {target_path}")
        for key, info in analysis.items():
            print(f"Episode {key}: stagnation at timestep {info['stagnation_idx']}/{info['original_total_idx']}")
    @staticmethod
    def _find_stagnation_timestep(episode_actions, eps):
        """Return the largest t such that some per-dimension step flux
        |a[s] - a[s-1]| with s >= t exceeds eps (i.e. the last timestep at
        which the trajectory still moved); 0 when it never moves."""
        if len(episode_actions) <= 1:
            return 0
        step_flux = np.abs(np.diff(episode_actions, axis=0))      # (T-1, D)
        moving = np.nonzero(np.any(step_flux > eps, axis=1))[0]   # diff idx i -> t = i + 1
        return int(moving[-1]) + 1 if moving.size else 0
# class SegJsonSingleGroupTool(GeneralTool):
#     def 
# Registry mapping CLI tool names to their implementing classes; consumed by
# get_status_for_path() and construct_all_path() below.
TOOL_CLASSES = {
    'collect_data': CollectData,
    'collect_test': CollectTest,
    'process_data': ProcessData,
    'process_test': ProcessTest,
    'data_traverser_process': DataTraverserProcess,
    'data_traverser_collect_test': DataTraverserCollectTest,
    'data_traverser_collect_full_test': DataTraverserCollectFullTest,
    'data_traverser_process_test': DataTraverserProcessTest,
    'data_traverser_aft_process_test': DataTraverserAftProcessTest,
    'single_test': DataTraverserSingleTest,
    'merge_zarr': MergeZarr,
    'train': Train,
    'eval': Eval,
    'replay_buffer_test': ReplayBufferTest,
    'copy_path': CopyPath,
    'ln_path': LnPath,
    'qpos_convert': QposConvert,
    'trajlocal': TrajLocal,
    'clip_zarr': ClipZarr,
    'mask_gen': MaskGenTool,
    'noise_measure': NoiseMeasureTool,
    'stagnation_examine': StagnationExamineTool,
    'train_and_eval': TrainEval
}
# The high-z filter is optional and can be disabled via the module-level flag.
if not DISABLE_HIGHZ_TOOL:TOOL_CLASSES['highz_filter']=HighZFilter
def get_status_for_path(existing_status, path, tool_name=None, yaml_data=None):
    """Dispatch path_to_status() to the tool class registered for tool_name,
    falling back to BaseTool for unknown names."""
    return TOOL_CLASSES.get(tool_name, BaseTool).path_to_status(existing_status, path, yaml_data)

def get_non_group_paths_status(yaml_data):
    """Refresh the 'status' field of every non-group path entry in place.

    Covers the path kinds whose entries are not derived from a group
    (train/eval/merge_zarr/trajlocal/clip_zarr). Returns the (mutated)
    yaml_data['paths'] mapping.
    """
    non_group_tools = {'train': Train, 'eval': Eval, 'merge_zarr': MergeZarr, 'trajlocal': TrajLocal, 'clip_zarr': ClipZarr}
    paths = yaml_data.get('paths', {})
    for path_key, tool_class in non_group_tools.items():
        if path_key not in paths:
            continue
        for entry_index, entry_data in paths[path_key].items():
            assert isinstance(entry_data, dict), f"Invalid entry data for {path_key}[{entry_index}]"
            path_value = entry_data.get('name')
            assert path_value, f"Missing name for {path_key}[{entry_index}]"
            existing_status = entry_data.get('status', 'non_exist')
            new_status = tool_class.path_to_status(existing_status, path_value, yaml_data)
            # Only write back on change to keep the ruamel yaml diff minimal.
            if new_status != existing_status:
                entry_data['status'] = new_status
    return paths

def create_path_info_dict(yaml_data):
    """Snapshot each path entry's log_file/note keyed by path name, so they
    can be restored after the paths section is rebuilt."""
    info_by_name = {}
    for key_name, entry_map in yaml_data.get('paths', {}).items():
        assert isinstance(entry_map, dict)
        for entry_index, entry_data in entry_map.items():
            assert isinstance(entry_data, dict)
            assert 'name' in entry_data,f"entry_data={entry_data}  index={entry_index}"
            info_by_name[entry_data['name']] = {
                'log_file': entry_data.get('log_file'),
                'note': entry_data.get('note'),
            }
    return info_by_name

def restore_path_info(paths, path_info_dict):
    """Write saved log_file/note values back onto matching entries of *paths*
    (matched by path name); existing values are only overwritten when the
    saved value is not None. Returns the mutated *paths*."""
    for key_name, entry_map in paths.items():
        assert isinstance(entry_map, dict)
        for entry_index, entry_data in entry_map.items():
            assert isinstance(entry_data, dict)
            assert 'name' in entry_data
            saved = path_info_dict.get(entry_data['name'])
            if saved is None:
                continue
            for field in ('log_file', 'note'):
                if saved.get(field) is not None:
                    entry_data[field] = saved[field]
    return paths

def construct_all_path(yaml_data,cli_args):
    """Rebuild the 'paths' index for every group by probing each tool class.

    For each group, every registered tool is instantiated in non-run mode and
    asked for its output path / file type / source flag; tools that raise
    NotImplementedError (or the 'collect_num equals process_num' case) are
    skipped.  The surviving commands are sorted into a fixed pipeline order,
    given stable entry indices (10 slots per group), and merged into the
    paths map while preserving any existing status/log_file/note.

    Side effects: may copy scene_info.json from the collect output (slot 3)
    into process outputs (slots 4 and 7), and mutates cli_args temporarily
    (adds then removes 'non_run').

    Returns the updated paths mapping (also referenced by yaml_data['paths']
    when it already existed there).
    """
    inv_emb_map=_load_inv_emb_name_map()
    paths=yaml_data.get('paths',{})
    # Pipeline display order, keyed by the flag substring found in each
    # command's source string.  Note index 4 is intentionally unused here.
    sort_order={'-ct':0,'-pt':1,'-dct':2,'-c ':3,'-p ':5,'-dcft':6,'-dpt':7,'-dp ':8,'-dapt':9}
    if not DISABLE_HIGHZ_TOOL:sort_order['-hz']=10
    for group_index,(group_name,group_config) in enumerate(yaml_data['groups'].items()):
        embodiment=group_config.get('embodiment',None)
        assert embodiment, f"Group '{group_name}' missing 'embodiment'; group_config={group_config}"
        assert inv_emb_map.get(embodiment,None), f"Embodiment '{embodiment}' not recognized; valid={list(inv_emb_map.keys())}; group={group_name}"
        # Each group owns a block of 10 consecutive entry indices.
        base_index=10*group_index
        command_index=0
        group_commands=[]
        for tool_name,tool_class in TOOL_CLASSES.items():
            try:
                # 'non_run' makes the tool only report paths, not execute.
                cli_args['non_run']=True
                tool_instance=tool_class(yaml_data,group_name,cli_args)#highz_filter will only be shown if process num not equal to collect num
                output_path=tool_instance.get_output_path()
                if output_path:
                #train/eval/replay_buffer_test/merge_zarr's path will not be shown due to output_path None
                    key_name=tool_instance.get_file_type()
                    source_flag=tool_instance.get_source_flag()
                    tool_name=tool_instance.cmd_type
                    group_commands.append((key_name,output_path,source_flag,tool_name))
            except NotImplementedError as e:
                continue
            except Exception as e:
                if 'collect_num equals process_num' in str(e):
                    continue
                print(traceback.format_exc())
                continue
        cli_args.pop('non_run')
        def sort_key(command_tuple):
            """Order a (key, path, source, tool) tuple by its source flag."""
            source_str=command_tuple[2]
            for pattern,order in sort_order.items():
                if pattern in source_str: return order
            assert False, f"Unknown source string '{source_str}' for sorting; expected one of {list(sort_order.keys())}"
        group_commands.sort(key=sort_key)
        cache_scene_info_path=None
        for key_name,path_value,source_flag,tool_name in group_commands:
            entry_index=str(base_index+command_index)
            # Slot 3 is the collect output; remember its scene_info.json so it
            # can be propagated to the process outputs below.
            if command_index == 3:
                cache_scene_info_path = Path(path_value) / "scene_info.json"
                if not os.path.exists(cache_scene_info_path):
                    cache_scene_info_path = None
            # Slots 4 and 7 are process outputs; copy scene_info.json into
            # them when they exist but lack it.
            if command_index in [4,7]:
                if cache_scene_info_path and not os .path.exists(Path(path_value) / 'scene_info.json') and os.path.exists(cache_scene_info_path) and os.path.exists(path_value):
                    print("copy scene info",cache_scene_info_path,Path(path_value) / 'scene_info.json')
                    os.system(f"cp {cache_scene_info_path} {Path(path_value) / 'scene_info.json'}")
            if key_name not in paths or not isinstance(paths[key_name],dict): paths[key_name]={}
            # Preserve status/log_file/note from any pre-existing entry.
            existing_entry=paths.get(key_name,{}).get(entry_index,{})
            status_value=get_status_for_path(existing_entry.get('status','non_exist'),path_value,tool_name,yaml_data)
            paths[key_name][entry_index]={'name':path_value,'status':status_value,'log_file':existing_entry.get('log_file',None),'source':source_flag,'tool_name':tool_name,'note':existing_entry.get('note',None)}
            command_index+=1
    # Guarantee the non-group sections exist even when empty.
    if 'merge_zarr' not in paths: paths['merge_zarr']={}
    if 'train' not in paths : paths['train']= {}
    if 'eval' not in paths : paths['eval']= {}
    if 'trajlocal' not in paths: paths['trajlocal']={}
    if 'clip_zarr' not in paths: paths['clip_zarr']={}
    get_non_group_paths_status(yaml_data)
    return paths

def ensure_tool_name_fields(yaml_data):
    """Backfill 'tool_name' (and a 'note' placeholder) on legacy path entries.

    Older YAML files stored entries without a 'tool_name' field.  For the
    fixed sections (merge_zarr/train/eval) the tool name equals the section
    key; for everything else it is inferred from the flag substring found in
    the entry's 'source' command line.  Pattern order matters: '-ct' must be
    tested before '-c ' etc., so the mapping's insertion order is load-bearing.

    Mutates entries in place and returns the paths mapping.

    Bug fix: previously returned yaml_data['paths'] unconditionally, which
    raised KeyError when 'paths' was absent (and returned None when it was
    None) even though paths_map had been defaulted safely.  Now returns
    paths_map itself.
    """
    paths_map = yaml_data.get('paths', {}) or {}
    pattern_to_tool = {
        '-ct': 'collect_test',
        '-pt': 'process_test',
        '-dct': 'data_traverser_collect_test',
        '-c ': 'collect_data',
        '-p ': 'process_data',
        '-dcft': 'data_traverser_collect_full_test',
        '-dpt': 'data_traverser_process_test',
        '-dp ': 'data_traverser_process',
        '-dapt': 'data_traverser_aft_process_test',
        '-m ': 'merge_zarr',
        '-t ': 'train',
        '-v ': 'eval',
        '-cp': 'copy_path',
        '-traj': 'trajlocal'
    }
    if not DISABLE_HIGHZ_TOOL:pattern_to_tool['-hz']='highz_filter'
    changed = False
    for key_name, entry_map in paths_map.items():
        for idx_str, entry in (entry_map or {}).items():
            if not (isinstance(entry, dict) and 'tool_name' not in entry):
                continue
            if key_name in ['merge_zarr','train','eval']:
                # Fixed sections: the section key is the tool name.
                entry['tool_name'] = key_name
                if 'note' not in entry: entry['note'] = None
                changed = True
            else:
                # Infer the tool from the flag embedded in the stored command.
                source_str = entry.get('source', '') or ''
                for pattern, tool in pattern_to_tool.items():
                    if pattern in source_str:
                        entry['tool_name'] = tool
                        if 'note' not in entry: entry['note'] = None
                        changed = True
                        break
    if changed:
        yaml_data['paths'] = paths_map
    return paths_map

def get_emb_perm(num_replacers, round_sym_embs):
    """Enumerate placeholder-replacement maps over embodiment permutations.

    For each ordered choice of `num_replacers` distinct keys from
    `round_sym_embs`, builds a dict mapping the 1-based placeholder tokens
    '<i>' -> key and '~i~' -> round_sym_embs[key].

    Returns the list of replacement dicts, one per permutation.
    """
    assert num_replacers > 0, f"num_replacers must be positive, got {num_replacers}"
    assert len(round_sym_embs) >= num_replacers, f"Not enough round_sym_embs: {len(round_sym_embs)} < {num_replacers}"
    replacement_maps = []
    for chosen in itertools.permutations(round_sym_embs.keys(), num_replacers):
        mapping = {}
        for slot, emb_key in enumerate(chosen, start=1):
            mapping[f"<{slot}>"] = emb_key
            mapping[f"~{slot}~"] = round_sym_embs[emb_key]
        replacement_maps.append(mapping)
    return replacement_maps

def compact_groups_unpack(yaml_data):
    """Expand 'compact_groups' templates into concrete 'groups'.

    A compact group may carry placeholder tokens in its name and its string
    values: '<i>' is replaced by an embodiment key and '~i~' by that key's
    value from yaml_data['round_sym_embs'].  One expanded group is emitted
    per ordered permutation of distinct embodiments over the placeholder
    slots found in the group.

    NOTE(review): placeholder tokens found in config KEYS contribute to the
    replacer count (key_matches below), but expanded_key is never
    substituted, so keys keep their literal '<i>' text — confirm whether
    keys were meant to be rewritten too.

    Writes the expanded mapping to yaml_data['groups'] and returns yaml_data.
    """
    if 'compact_groups' not in yaml_data:
        print(f"No compact_groups found in yaml_data {yaml_data.keys()}")
        return yaml_data
    compact_groups = yaml_data['compact_groups']
    round_sym_embs = yaml_data.get('round_sym_embs', {})
    assert round_sym_embs, "round_sym_embs not found in yaml_data"
    expanded_groups = {}
    for group_name, group_config in compact_groups.items():
        # Collect the distinct placeholder slot numbers used by this group.
        replacers = set()
        for key,value in group_config.items():
            if isinstance(value, str):
                key_matches = re.findall(r'<(\d+)>', key)
                value_matches = re.findall(r'~(\d+)~', value)
                replacers.update(map(int, key_matches + value_matches))
        if isinstance(group_name, str):
            name_matches = re.findall(r'<(\d+)>', group_name)
            name_tilde_matches = re.findall(r'~(\d+)~', group_name)
            replacers.update(map(int, name_matches + name_tilde_matches))
        # print(f"replacers: {replacers}")
        # print(f"group_name: {group_name}")
        # print(f"group_config: {group_config}")
        num_replacers = len(replacers)
        if num_replacers == 0:
            # No placeholders: pass the group through unchanged.
            expanded_groups[group_name] = group_config
            continue
        perms = get_emb_perm(num_replacers, round_sym_embs)
        for perm in perms:
            expanded_name = group_name
            expanded_config = {}
            for key, value in group_config.items():
                if isinstance(value, str):
                    expanded_key = key
                    expanded_value = value
                    for replacer_key, replacer_value in perm.items():
                        expanded_value = expanded_value.replace(replacer_key, replacer_value)
                    expanded_config[expanded_key] = expanded_value
                else:
                    # Non-string values cannot carry placeholders; copy as-is.
                    expanded_config[key] = value
            # The group name gets both '<i>' and '~i~' substitutions applied.
            for replacer_key, replacer_value in perm.items():
                expanded_name = expanded_name.replace(replacer_key, replacer_value)
            # print(f"expanded_name: {expanded_name}")
            # print(f"expanded_config: {expanded_config}")
            expanded_groups[expanded_name] = expanded_config
    yaml_data['groups'] = expanded_groups
    return yaml_data
def get_ln_status(zarr_path,yaml_data=None,group_name=None,force_fix=False):
    """Report (and interactively repair) the symlink wiring of a zarr directory.

    For a '*.zarr' directory (or a 'pcd_output' directory, resolved to its
    parent), checks/creates the symlinks for data/point_cloud, data/action,
    data/state, data/js_action and data/js_state according to the group's
    ln_pcd_type / ln_as_type / ln_js_type settings.  Any link creation or
    replacement asks the user on stdin first (unless NEVER_MAKE_LN is set).
    Appends a '[partial_copy:<key>:<idx>]' marker when the zarr's 'cameras'
    directory is a symlink into another indexed zarr.

    Returns the concatenated bracketed status string, or "" for paths that
    do not exist or are not zarr/pcd_output directories.
    """
    ln_as_type=BaseTool.get_group_key_from_yaml(yaml_data,group_name,"ln_as_type")
    ln_js_type=BaseTool.get_group_key_from_yaml(yaml_data,group_name,"ln_js_type")
    ln_pcd_type=BaseTool.get_group_key_from_yaml(yaml_data,group_name,"ln_pcd_type")
    # print(f"yaml_data {yaml_data.keys() if yaml_data else 'None'} group_name: {group_name} zarr {os.path.basename(zarr_path)} ln_as_type: {ln_as_type} ln_js_type: {ln_js_type} ln_pcd_type: {ln_pcd_type}")
    if not os.path.exists(zarr_path):
        return ""
    if not (str(zarr_path).endswith('.zarr') or str(zarr_path).endswith('pcd_output')):
        return ""
    if str(zarr_path).endswith('pcd_output'):
        zarr_path=Path(zarr_path).parent
    else:
        zarr_path=Path(zarr_path)
    def make_check_ln(src,dst,type_str,get_parent=False):
        """Ensure the link (possibly interactively), then return its status tag."""
        make_ln(src,dst,type_str)
        return check_ln(dst,get_parent)
    def handle_missing_src(src,dst,option):
        """If the link target is missing, let the user copy/move a candidate into place."""
        if not "point_cloud" in str(dst):
            return
        if not os.path.exists(dst.parent/src):
            if not os.path.exists(Path(dst.parent/src).parent):
                print(colored(f"error: can not find candidate: {Path(dst.parent/src).parent} not exist","red",attrs=['bold']))
                return
            candidate_srcs=os.listdir(Path(dst.parent/src).parent)
            for i,candidate_src in enumerate(candidate_srcs):
                print(f"{i}: {candidate_src}")
            if option=='ac':
                # Auto-copy only when there is exactly one non-.zgroup candidate.
                count=[can for can in candidate_srcs if not str(can).endswith("zgroup")]
                if len(count)!=1:
                    print(f"more than one candidate src found: {count}, please manual select one")
                else:
                    shutil.copytree(Path(dst.parent/src).parent/count[0],dst.parent/src)
                    print(colored(f"copied {Path(dst.parent/src).parent/count[0]} to {dst.parent/src}","cyan",attrs=['bold']))
            if not os.path.exists(dst.parent/src):
                # Still missing: loop until the user picks a candidate to cp/mv.
                while True:
                    option2=input(colored(f'error: ln new source {dst.parent/src} not exist. select candidate src by index: (cp/mv)+number:','red'))
                    num=int(option2[2:])
                    if option2.startswith('cp'):
                        shutil.copytree(Path(dst.parent/src).parent/candidate_srcs[num],dst.parent/src)
                        print(colored(f"copied {Path(dst.parent/src).parent/candidate_srcs[num]} to {dst.parent/src}","cyan",attrs=['bold']))
                        break
                    elif option2.startswith('mv'):
                        shutil.move(Path(dst.parent/src).parent/candidate_srcs[num],dst.parent/src)
                        print(colored(f"moved {Path(dst.parent/src).parent/candidate_srcs[num]} to {dst.parent/src}","cyan",attrs=['bold']))
                        break
                    else:
                        print(f"invalid option2 {option2}")
    def make_ln(src,dst,type_str):
        """Interactively create/replace the symlink dst -> src (skips if already correct)."""
        global NEVER_MAKE_LN
        if type_str is None:
            # No link type configured for this group: nothing to do.
            return
        original_src="DEST_HARDLINKED" if dst.exists() else "DEST_NON_EXIST"
        # print("src",src,"dst",dst,"type_str",type_str)
        if dst.is_symlink():
            original_src=Path(dst.readlink())
            # print(f"original_src:{original_src} src:{src} {str(src)==str(original_src)}")
            if str(src)==str(original_src):
                return
        if NEVER_MAKE_LN:
            return
        while True:
            # if not os.path.exists(dst.parent/src):
            #     print(colored(f"{dst.parent/src} not exist",'red',attrs=['bold']))
            #     break
            option=input(colored(f"ln -s from {original_src} [exist {dst.exists()}] to {src} [exist:{os.path.exists(dst.parent/src)}] -> {dst} ? ([y]renameAndLink/[ac]AutoCopyOnlyOneIfMissing/[n]eglect/[d]eleteAndLink/never)","green",attrs=['bold']))
            if option=='y' or option=='ac':
                handle_missing_src(src,dst,option)
                if dst.exists():
                    # print("debug: dst exist")
                    # Rename the existing dst out of the way (append '_b' until free).
                    dst_recur=dst
                    while os.path.exists(str(dst_recur)):
                        dst_recur=str(dst_recur)+"_b"
                    # print(dst_recur)
                    os.rename(dst,dst_recur)
                os.symlink(src,dst)
                break
            elif option=='d':
                if os.path.exists(dst):
                    os.remove(dst)
                os.symlink(src,dst)
                break
            elif option=='never':
                NEVER_MAKE_LN=True
                break
            elif option=='n':
                break
            else:
                print(f"invalid option: {option}")
    def check_ln(dst,get_parent=False):
        """Return a bracketed tag describing dst: symlink (live or dangling), hard dir, or absent."""
        if dst.is_symlink():
            original_src=Path(dst.readlink())
            full_original_src=dst.parent/original_src
            if get_parent:
                original_src=original_src.parent
            if full_original_src.exists():
                return f"[{original_src.name}]"
            else:
                return f"[NONEXIST {original_src.name}]"
        if dst.exists():
            return f"[Hard {dst.name}]"
        return f"[]"
    ln_status=""
    ln_status+=make_check_ln(f"../pcd/{ln_pcd_type}",zarr_path / "data/point_cloud",ln_pcd_type,False)
    ln_status+=make_check_ln(f"../{ln_as_type}/action",zarr_path / "data/action",ln_as_type,True)
    ln_status+=make_check_ln(f"../{ln_as_type}/state",zarr_path / "data/state",ln_as_type,True)
    ln_status+=make_check_ln(f"../{ln_js_type}/action",zarr_path / "data/js_action",ln_js_type,True)
    ln_status+=make_check_ln(f"../{ln_js_type}/state",zarr_path / "data/js_state",ln_js_type,True)

    # if this zarr is partial_copied from another zarr file, mark the idx of origin behind ln_status
    # note: if a zarr file is partial_copied, its "cameras" directory is soft_linked, use this to find the origin
    # check the link of "cameras"
    cameras_path = zarr_path / "cameras"
    if cameras_path.is_symlink():
        original_src = Path(cameras_path.readlink()).parent
        # find the idx in yaml_data corresponding to the original_src
        origin_path = cameras_path.parent / original_src
        # print(origin_path)
        if yaml_data is not None:
            idx, key_name = BaseTool._find_index_everywhere_static(yaml_data, str(origin_path))
            if idx is not None:
                ln_status += f"[partial_copy:{key_name}:{idx}]"

    return ln_status
def expand_match(yaml_data, key_name):
    """Expand a possibly-wildcarded key into the matching keys of yaml_data['paths'].

    A literal key is validated and returned as a one-element list.  A key
    containing '*' is treated as a glob pattern ('*' matches any run of
    characters) that must match the WHOLE key, so '*zarr' matches
    'merge_zarr' but not 'zarr_extra'.

    Raises AssertionError when 'paths' is missing or nothing matches.

    Bug fix: previously used re.match, which only anchors the start, so a
    pattern like '*zarr' also matched keys with trailing text ('zarr_extra');
    re.fullmatch restores glob semantics.
    """
    assert 'paths' in yaml_data, f"'paths' missing in yaml_data; keys={list(yaml_data.keys())}"
    if '*' not in key_name:
        assert key_name in yaml_data['paths'], f"key_name={key_name} not in paths; available_keys={list(yaml_data.get('paths', {}).keys())}"
        return [key_name]
    pattern = key_name.replace('*', '.*')
    # fullmatch anchors both ends (re is imported at module level).
    matched_keys = [k for k in yaml_data['paths'].keys() if re.fullmatch(pattern, k)]
    assert matched_keys, f"key_name={key_name} matches no keys; available_keys={list(yaml_data.get('paths', {}).keys())}"
    return matched_keys
def get_colliding_indices(yaml_data,query_value,location=['paths','policy_configs','*','shortname']):
    """Return the indices of entries whose `location[-1]` field equals query_value.

    `location` describes a path through nested dicts; '*' walks every index
    at that level (indices must be numeric strings).  Each hit contributes
    its list of wildcard indices; a hit reached through a single wildcard is
    flattened to a bare int.  The default location searches policy-config
    shortnames.  (`location` is never mutated, so the list default is safe.)

    Fix: removed the dead inner `traverse_path` helper, which was defined
    but never called.
    """
    assert yaml_data is not None, f"yaml_data is None"
    target_field = location[-1]

    def _collect(data, remaining, indices, results):
        # Leaf: compare this entry's target field against the query.
        if not remaining:
            if data.get(target_field, '') == query_value:
                results.append(indices)
            return
        key, rest = remaining[0], remaining[1:]
        if key == '*':
            assert isinstance(data, dict), f"data {data} is not dict for wildcard"
            for index, item in data.items():
                assert str(index).isdigit(), f"index {index} is not digit"
                _collect(item, rest, indices + [int(index)], results)
        else:
            assert key in data, f"current_key {key} not in data, keys={list(data.keys())}"
            _collect(data[key], rest, indices, results)

    results = []
    _collect(yaml_data, location[:-1], [], results)
    return [hit[0] if len(hit) == 1 else hit for hit in results]
def print_available_paths(yaml_data,key_list=None):
    """Pretty-print the indexed commands stored under yaml_data['paths'].

    For each (possibly wildcarded) key in key_list — or all path keys when
    key_list is falsy — prints every entry with its index, source command,
    symlink status, and color-coded run status:
      green = exist, magenta = generating/training, plain = not started,
      red = anything else (errors, aborted, ...).
    Policy-config entries additionally show their shortname plus any
    colliding indices that reuse the same shortname.
    """
    print("Available commands:")
    assert 'paths' in yaml_data, f"'paths' missing in yaml_data; keys={list(yaml_data.keys())}"
    key_list = key_list if key_list else list(yaml_data['paths'].keys())
    for key_name_to_match in key_list:
        matched_keys=expand_match(yaml_data,key_name_to_match)
        for key_name in matched_keys:
            # NOTE(review): key_name is a paths key here, yet it is looked up
            # in 'groups' — presumably path sections are named after their
            # group so invisible groups can be skipped; confirm.
            group_cfg = yaml_data.get('groups', {}).get(key_name, {})
            if group_cfg.get('invisible_group', False):
                continue
            print(colored(f"\n{key_name}:",'blue'))
            # Entries are keyed by numeric strings; sort numerically.
            items=sorted(yaml_data['paths'][key_name].items(),key=lambda x:int(x[0]))
            last_note=None
            for entry_index,entry_data in items:
                status_value=entry_data.get('status','')
                note_value=entry_data.get('note','')
                pc_shortname = ''
                if key_name == 'policy_configs':
                    pc_shortname = entry_data.get('shortname','')
                    if pc_shortname:
                        colliding_indices = get_colliding_indices(yaml_data,pc_shortname)
                        if colliding_indices and len(colliding_indices) > 1:
                            # Show the OTHER indices sharing this shortname.
                            colliding_indices = [int(idx) for idx in colliding_indices if str(idx) != str(entry_index)]
                            pc_shortname += f" [colliding:{colliding_indices}]"
                line=f"  {entry_index}: {entry_data.get('source','')} -> {get_ln_status(entry_data.get('name',''),yaml_data,key_name)} {pc_shortname} {entry_data.get('name','')}"
                # Print a note header only when it changes between entries.
                if note_value and note_value!=last_note:
                    print(colored(f"Note: {note_value}",'blue',attrs=['bold']))
                    last_note=note_value
                if status_value=='exist' or status_value.startswith('exist:'):
                    if "eval_by" in status_value or "train_by" in status_value:
                        print(colored(line if status_value=='exist' else f"{line}  [{status_value}]",'green',attrs=['bold']))
                    else:
                        print(colored(line if status_value=='exist' else f"{line} ",'green')+colored(f" [{status_value}]",'green',attrs=['bold']))
                elif status_value=='generating' or status_value.startswith('training:'):
                    log_info=f" (log: {entry_data.get('log_file','N/A')})" if entry_data.get('log_file') else ""
                    print(colored(f"{line}{log_info}" if status_value=='generating' else f"{line} [{status_value}]{log_info}",'magenta'))
                elif status_value in ['not_started','non_exist']:
                    print(f"{line}")
                else:
                    print(colored(f"{line} [{status_value}]",'red'))
    print(f"Available paths constructed at {os.path.abspath(sys.argv[1]+'.yaml')}")
def add_orig_group():
    """Rewrite a duplicated '-g' flag: when '-g' appears more than once on
    the command line, the FIRST occurrence becomes '-orig' (the original
    group, used when evaluating under a different setting).

    Mutates and returns sys.argv.
    """
    flag_positions = [i for i, token in enumerate(sys.argv) if token == '-g']
    if len(flag_positions) > 1:
        sys.argv[flag_positions[0]] = '-orig'
    return sys.argv
def handle_num_argument(args,parser,yaml_data):
    """Resolve a bare numeric command (args.num) into a re-executed command line.

    Looks up the stored 'source' command for that index and splices it into
    sys.argv so main() can re-parse it.  With --eval, any '-rc <ckpt>' pair
    is stripped and the '-t' (train) flag is swapped for '-v' (eval).
    `parser` is unused but kept for interface compatibility.

    Returns the selected index (int), or None when no num was given or the
    eval rewrite path was taken.

    Bug fix: the no-num branch previously returned the tuple (args, None),
    while the caller assigns the result to a single variable and every other
    branch returns a scalar; it now returns a bare None.
    """
    if args.num is None:
        return None
    _, entry_data, _ = BaseTool._find_pathdict_by_index(yaml_data, args.num, False)
    rep_cmd = entry_data.get('source')
    assert rep_cmd, f"Command number {args.num} not found"
    if getattr(args, 'eval', False):
        rep_toks = rep_cmd.split()
        if '-rc' in rep_toks:
            # Drop the '-rc' flag, then the checkpoint value that followed it
            # (after remove(), the value sits at the flag's old index).
            index_rc = rep_toks.index('-rc')
            rep_toks.remove('-rc')
            rep_toks.pop(index_rc)

        assert '-t' in rep_toks, f"rep_toks:{rep_toks}"
        rep_toks[rep_toks.index('-t')] = '-v'
        sys.argv = [sys.argv[0], args.yaml] + rep_toks + sys.argv[4:]
        return None
    sys.argv = [sys.argv[0], args.yaml] + rep_cmd.split() + sys.argv[3:]
    return int(args.num)

def graceful_exit(signum, frame):
    """SIGINT handler: mark this process's in-flight runs as 'aborted' in the
    YAML, persist that status, then exit with code 0.

    Only entries whose (key, index) pair is registered in the module-level
    `current_runs` set are touched, so concurrent invocations don't clobber
    each other's 'generating' entries.  Any failure during cleanup is
    reported but never prevents the exit.
    """
    print(f"\n\nReceived interrupt signal, gracefully exiting...")
    try:
        assert hasattr(sys, 'argv') and len(sys.argv) > 1, "Cannot determine YAML filename, exiting"
        # NOTE(review): yaml_name is never used below — load_yaml() presumably
        # reads sys.argv[1] itself; confirm and drop if so.
        yaml_name = sys.argv[1]
        yaml_data = BaseTool.load_yaml()
        updated = False
        # First pass: flip 'generating' -> 'aborted' in the in-memory copy.
        if 'paths' in yaml_data:
            for key_name,entry_map in yaml_data['paths'].items():
                for entry_index,entry_data in entry_map.items():
                    if entry_data.get('status')=='generating' and (key_name,str(entry_index)) in current_runs:
                        yaml_data['paths'][key_name][entry_index]['status']='aborted'
                        updated=True
                        print(f"Updated {key_name}[{entry_index}] status to 'aborted'")
        if updated:
            # Second pass: persist each aborted status field back to the file.
            for key_name,entry_map in yaml_data.get('paths',{}).items():
                for entry_index,entry_data in entry_map.items():
                    if entry_data.get('status')=='aborted' and (key_name,str(entry_index)) in current_runs:
                        BaseTool.update_yaml_field(['paths',key_name,str(entry_index),'status'],'aborted')
            print("Aborted status saved to YAML file")
        else:
            print("No generating commands found")
    except Exception as e:
        # Best-effort cleanup: report, but never block the exit.
        print(f"Error during graceful exit: {e}")
    print("Exit completed")
    sys.exit(0)
# Install the SIGINT handler so Ctrl-C marks this process's in-flight runs
# as 'aborted' in the YAML before exiting (see graceful_exit above).
signal.signal(signal.SIGINT, graceful_exit)
def main():
    """CLI entry point: parse arguments, rebuild the YAML 'paths' index, then
    either print the available commands or dispatch the selected tool.

    Flow:
      1. Parse args and sync a few module-level globals from flags.
      2. Unpack compact_groups into concrete groups and persist the YAML.
      3. If a bare command number was given, splice its stored command line
         into sys.argv and re-parse.
      4. Rebuild 'paths' for every group, preserving log_file/note metadata.
      5. With no execution flags, print the relevant path listings and stop.
      6. Otherwise instantiate the matching tool class and run its command.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("yaml", help="YAML file to read parameters and run commands")
    parser.add_argument("num", nargs="?", type=int, help="Execute command by number from paths")
    parser.add_argument("-clr",default=True, action="store_true", help="Clear existing paths before constructing new ones")
    # Short flag -> tool name for the data-collection/processing commands.
    cmd_map = {'ct': 'collect_test','c': 'collect_data', 'p': 'process_data', 'r': 'replay_buffer_test', 'dp': 'data_traverser_process', 'dpt': 'data_traverser_process_test', 'dct': 'data_traverser_collect_test', 'dcft': 'data_traverser_collect_full_test', 'dapt': 'data_traverser_aft_process_test', 'pt': 'process_test'}
    if not DISABLE_HIGHZ_TOOL:cmd_map['hz']='highz_filter'
    for flag, cmd in cmd_map.items():
        parser.add_argument(f"-{flag}", f"--{cmd}", action="store_true")
    parser.add_argument("-cp", "--copy_path", nargs=2, type=int, help="Copy path from source index to target index")
    parser.add_argument("--partial",default=False ,action="store_true", help="Partial copy")
    parser.add_argument("-ln", "--ln_path", nargs=2, type=int, help="Create symbolic link from source index to target index")
    parser.add_argument("-traj", "--trajlocal", type=int, help="Process zarr file to keep trajectory fields and add eelocal/state using IK solver")
    parser.add_argument("-clip", "--clip_zarr", nargs=3, type=int, help="Clip zarr episodes: index start_episode end_episode")
    parser.add_argument("-mg", "--mask_gen", nargs=1, type=int, help="Generate camera mask data: zarr_index")
    parser.add_argument("-qc", "--qpos_convert", type=int, help="Convert qpos from 14 to 16 dimensions")
    parser.add_argument("-m", "--merge_zarr", nargs="*", type=int)
    parser.add_argument("-nm", "--noise_measure", nargs=2, type=int, help="Noise measurement tool: eval_index zarr_index")
    parser.add_argument("-vn", "--visualize_noise", nargs="+", type=int, help="Visualize episode and action dimension: episode_idx [action_dim]")
    parser.add_argument("-se", "--stagnation_examine",type=int, help="Stagnation examine tool: target_index [eps]")
    parser.add_argument("-eps", "--eps", type=float, default=0.05, help="EPS value for stagnation detection")
    parser.add_argument("-v", "--eval",action="store_true", help="Eval index")
    parser.add_argument("-t", "--train", action="store_true")
    parser.add_argument("-tv", "--train_and_eval", action="store_true")
    parser.add_argument("-g", "--group", nargs="*", help="Group names from YAML configuration")
    parser.add_argument("-tz", "--train_zarr_index", type=int)
    parser.add_argument("-tp", "--train_policy_config_index", type=int)
    parser.add_argument("-orig", "--original_group_name", help="Original group name, for eval in different settings")
    parser.add_argument("-s", "--single_test", type=int, help="Execute DataTraverserSingleTest on zarr at specified index")
    parser.add_argument("-enic", "--exist_note_is_checked", action="store_true", help="if exist note, will not check using DataTraverserSingleTest")
    parser.add_argument("-gpu", "--gpu_id", default='', type=str, help="gpu id")
    parser.add_argument("-npc", "--no_policy_check", action="store_true", help="Do not print policy check")
    parser.add_argument("-ndc", "--no_demo_clean_check", action="store_true", help="Do not print demo clean check")
    parser.add_argument("-nlc", "--no_ln_check", action="store_true", help="Do not check symbolic links")
    parser.add_argument("-nplc", "--no_pipeline_check", action="store_true", help="Do not check pipeline")
    parser.add_argument("-rc", "--resume_ckpt_index", help="Resume training from specified checkpoint path")
    parser.add_argument("-note", "--note", help="Note for the command")
    parser.add_argument("-lno","--ln_only",action="store_true",help="Only create symbolic links, do not merge data")
    parser.add_argument("-ec","--emb_cls",action="store_true",help="Make embedding class array in zarr")
    parser.add_argument("-ts","--timestep",action="store_true",help="Make timestep array in zarr")
    parser.add_argument("--zip",action="store_true",help="Zip the zarr file for trajZarr")
    parser.add_argument("-nfo","--no_filter_output",action="store_true",help="Do not filter output")
    parser.add_argument("-ockpt","--original_ckpt_index",help="Original checkpoint index")
    parser.add_argument("-sf","--seed_file",type=int,help="Seed file index (last digit must be 0 or 3)")
    parser.add_argument("-align","--align",action="store_true",help="Align action sequences using sparse sampling")
    parser.add_argument("--test",action="store_true",help="Test mode for Mask Gen")
    # Dispatch table: every flag (collection tools plus the general commands)
    # mapped to the tool attribute name checked in the loop at the bottom.
    cmd_general_map = cmd_map.copy()
    cmd_general_map.update({'cp': 'copy_path', 'ln': 'ln_path', 'm': 'merge_zarr','v': 'eval','t': 'train', 's': 'single_test', 'qc': 'qpos_convert', 'traj': 'trajlocal', 'clip': 'clip_zarr', 'mg': 'mask_gen', 'nm': 'noise_measure', 'se': 'stagnation_examine', 'tv': 'train_and_eval'})
    args = parser.parse_args()
    # Sync module-level switches from the parsed flags.
    global EXIST_NOTE_IS_CHECKED
    EXIST_NOTE_IS_CHECKED = args.exist_note_is_checked
    global NEVER_MAKE_LN
    NEVER_MAKE_LN = args.no_ln_check
    global last_gpu_input
    last_gpu_input = args.gpu_id
    yaml_data = BaseTool.load_yaml()
    # When clearing, drop the expanded 'groups' so compact_groups re-expands.
    if args.clr:
        if "groups" in yaml_data and "compact_groups"  in yaml_data:
            del yaml_data["groups"]
    yaml_data=compact_groups_unpack(yaml_data)
    BaseTool.update_yaml_field(None,yaml_data)
    # A bare number replays its stored command: sys.argv is rewritten and
    # re-parsed below.  NOTE(review): selected_index appears unused afterward.
    selected_index = handle_num_argument(args, parser, yaml_data)
    add_orig_group()
    args=parser.parse_args()
    required_params = ['groups']
    missing_params = [param for param in required_params if param not in yaml_data]
    assert not missing_params, f"Missing required parameters in YAML: {missing_params}"
    # Save path information before clearing (for log_file and note preservation)
    saved_path_info = {}
    if args.clr and 'paths' in yaml_data:
        saved_path_info = create_path_info_dict(yaml_data)
        deleted_keys=[]
        total_keys=list(yaml_data['paths'].keys())
        for key in total_keys:
            # These sections are managed manually, not regenerated per group.
            if key in ['merge_zarr','train','eval','policy_configs','trajlocal','clip_zarr','real_zarr','mask_gen']:
                continue
            del yaml_data['paths'][key]
            deleted_keys.append(key)
        # print(f"Cleared existing paths {deleted_keys}")
    # print(f"yaml_data:{yaml_data['paths'].keys()}")
    paths=construct_all_path(yaml_data,vars(args))
    if saved_path_info:
        paths = restore_path_info(paths, saved_path_info)
        # print(f"Restored path information for {len(saved_path_info)} paths")
    yaml_data['paths']=paths
    # print(f"restore_path_infoyaml_data:{yaml_data['paths'].keys()}")
    yaml_data=BaseTool.ensure_policy_configs(yaml_data)
    BaseTool.update_yaml_field(None,yaml_data)
    path_types = []
    argvs = sys.argv[2:].copy()
    # True when any flag requests actual work (vs just listing paths).
    has_execution_params = (
        args.train_zarr_index is not None or
        args.train_policy_config_index is not None or
        len(args.merge_zarr or []) > 0 or
        args.single_test is not None or
        args.copy_path is not None or
        args.ln_path is not None or
        args.qpos_convert is not None or
        args.trajlocal is not None or
        args.clip_zarr is not None or
        args.mask_gen is not None or
        any(getattr(args, argkey, False) for argkey in vars(args).keys() if argkey in cmd_map.values())
    )
    if not has_execution_params:
        # Listing mode: decide which path sections to print from the flags.
        if '-g' in argvs:
            g_index = argvs.index('-g')
            for i in range(g_index + 1, len(argvs)):
                if argvs[i].startswith('-'):
                    break
                path_types.append(argvs[i])
            if len(path_types) == 0:
                path_types.extend([ key for key in yaml_data['paths'].keys() if key not in ['merge_zarr','train','eval','policy_configs']])
        if '-v' in argvs:
            path_types.append('eval')
        if '-t' in argvs:
            path_types.append('policy_configs')
            path_types.append('train')
        if '-m' in argvs:
            path_types.append('merge_zarr')
        if '-traj' in argvs:
            path_types.append('trajlocal')
        if '-clip' in argvs:
            path_types.append('clip_zarr')
        if '-mg' in argvs:
            path_types.append('mask_gen')

        if path_types or len(argvs) == 0:
            print_available_paths(yaml_data, path_types)
            print(f"printed path types:{path_types} {argvs}")
            return
    # print("args",args)
    # Execution mode: run every tool whose flag was set.
    for flag, cmd_name in cmd_general_map.items():
        if getattr(args, cmd_name, False):
            assert cmd_name in TOOL_CLASSES, f"Tool class not found for {cmd_name}"
            tool = TOOL_CLASSES[cmd_name](yaml_data, args.group[0] if args.group else None, vars(args))
            cmd = tool.construct_command()
            if cmd is not None:
                print(f"Executing command for {cmd_name}: {cmd}")
                tool.execute_command(cmd)
            else:
                # Some tools run internally and build no shell command.
                tool.execute_command()
if __name__ == "__main__":
    # trajlocal needs torch and the IK solver; import them lazily so every
    # other subcommand avoids the heavy import cost.
    if '-traj' in sys.argv:
        import torch
        from franka_fk import init_ik_solver
    main()