#!/usr/bin/env python3
import argparse, subprocess, yaml, sys, os, shutil, datetime, signal
from pathlib import Path
from loadJson import _load_emb_name_map
from ruamel.yaml import YAML
from termcolor import colored
import zarr

# Global set of (file_type, index_str) tuples for commands currently running;
# consulted by the SIGINT handler to mark only in-flight entries as 'aborted'.
current_runs = set()

# Round-trip YAML handler for editing demo_task configs in place
# (preserves quotes/formatting, unlike plain yaml.dump).
yaml_handler = YAML()
yaml_handler.preserve_quotes = True
yaml_handler.indent(mapping=2, sequence=4, offset=2)

def generate_merged_zarr_name(params):
    """Build the output filename for a merged zarr from per-embodiment episode counts.

    params keys used:
        zarr_files (list[str]): source zarr paths being merged.
        yaml_data (dict): full YAML config; its 'zarrs'/'hdf5_dirs'/'pcds'
            sections are searched for an entry whose 'name' matches each file
            to recover its 'embodiment' tag.
        output_zarr: fallback name (default "merged_output.zarr").
        custom_name / action_type / trim_arm / camera_use_full: naming inputs.

    Returns the fallback name when no embodiment info can be resolved,
    otherwise a descriptive "demo_clean_...-<emb><count>-....zarr" name.
    """
    zarr_files = params.get('zarr_files', [])
    yaml_data = params.get('yaml_data', {})
    if not zarr_files: return params.get('output_zarr', "merged_output.zarr")
    emb_counts = {}
    for zarr_file in zarr_files:
        # Look up this file's embodiment in the YAML bookkeeping sections.
        matched = False
        for file_type in ['zarrs', 'hdf5_dirs', 'pcds']:
            if file_type in yaml_data:
                for idx, data in yaml_data[file_type].items():
                    if data.get('name') == zarr_file and 'embodiment' in data and data['embodiment'] != 'merged':
                        emb_char = data['embodiment']
                        episode_count = get_zarr_episode_count(zarr_file)
                        emb_counts[emb_char] = emb_counts.get(emb_char, 0) + episode_count
                        matched = True
                        break
            if matched:
                # Bug fix: previously only the inner loop broke, so a file
                # listed under more than one section was counted twice.
                break
    if not emb_counts: return params.get('output_zarr', "merged_output.zarr")
    emb_parts = [f"{emb_char}{count}" for emb_char, count in sorted(emb_counts.items())]
    custom_name = params.get('custom_name', '')
    action_type = params.get('action_type', '')
    trim_arm = params.get('trim_arm', True)
    camera_use_full = params.get('camera_use_full', True)
    return f"demo_clean_{custom_name}-{'-'.join(emb_parts)}-{action_type}-ta{1 if trim_arm else 0}-cam{1 if camera_use_full else 0}-1-{action_type}.zarr"

def graceful_exit(signum, frame):
    """SIGINT handler: mark in-flight 'generating' YAML entries as 'aborted'.

    signum/frame are the standard signal-handler arguments (unused beyond the
    contract). Reads the YAML name from sys.argv[1], flips the status of every
    entry tracked in `current_runs`, persists the file, then exits with 0.
    """
    print(f"\n\nReceived interrupt signal, gracefully exiting...")

    try:
        # The target YAML file name is the first CLI argument (see main()).
        if hasattr(sys, 'argv') and len(sys.argv) > 1:
            yaml_name = sys.argv[1]
        else:
            print("Cannot determine YAML filename, exiting")
            sys.exit(1)

        yaml_data = load_yaml(yaml_name)
        updated = False
        print(f"Debug: current_runs={current_runs}")
        for file_type in ['hdf5_dirs', 'zarrs', 'pcds']:
            if file_type in yaml_data:
                for idx, data in yaml_data[file_type].items():
                    # Only abort entries this process actually started.
                    if data.get('status') == 'generating' and (file_type, str(idx)) in current_runs:
                        yaml_data[file_type][idx]['status'] = 'aborted'
                        updated = True
                        print(f"Updated {file_type}[{idx}] status to 'aborted'")

        if updated:
            save_yaml(yaml_name, yaml_data)
            print("Aborted status saved to YAML file")
            print(f"Debug: yaml_name={yaml_name}, updated entries={updated}")
        else:
            print("No generating commands found")

    except Exception as e:
        # Best-effort cleanup: never let the handler itself crash the exit path.
        print(f"Error during graceful exit: {e}")

    print("Exit completed")
    sys.exit(0)

# Install the handler so Ctrl-C (SIGINT) triggers the graceful shutdown above.
signal.signal(signal.SIGINT, graceful_exit)

def load_yaml(yaml_path):
    """Load <yaml_path>.yaml (relative to this script's directory) as a plain dict."""
    yaml_file = f"{Path(__file__).parent / yaml_path}.yaml"
    with open(yaml_file, 'r') as fh:
        return yaml.safe_load(fh)
def save_yaml(yaml_path, data):
    """Write *data* to <yaml_path>.yaml next to this script (plain, non-round-trip dump)."""
    yaml_file = f"{Path(__file__).parent / yaml_path}.yaml"
    with open(yaml_file, 'w') as fh:
        yaml.dump(data, fh, default_flow_style=False, sort_keys=False, default_style=None)

def update_yaml_field(yaml_path, field_path, new_value):
    """Read <yaml_path>.yaml from disk, set one field, and write it back.

    field_path is either a top-level key (str) or a list of keys describing a
    nested path, e.g. ['hdf5_dirs', '1', 'status']. Intermediate keys in a
    list path must already exist.
    """
    file_path = f"{Path(__file__).parent / yaml_path}.yaml"
    # Always re-read from disk so concurrent edits made since load are kept.
    with open(file_path, 'r') as fh:
        document = yaml.safe_load(fh)

    if isinstance(field_path, str):
        document[field_path] = new_value
    else:
        node = document
        for key in field_path[:-1]:
            node = node[key]
        node[field_path[-1]] = new_value

    with open(file_path, 'w') as fh:
        yaml.dump(document, fh, default_flow_style=False, sort_keys=False, default_style=None)
def get_timestamp():
    """Return the current local time formatted as 'YYYYMMDD_HHMMSS'."""
    now = datetime.datetime.now()
    return now.strftime("%Y%m%d_%H%M%S")

def list_pcd_output_directory(pcd_output_dir):
    """Print every .pcd file in *pcd_output_dir* (full path + byte size).

    Prints a warning when the directory is missing or contains no .pcd files.
    """
    if not os.path.exists(pcd_output_dir):
        print(f"  ⚠️  PCD output directory not found: {pcd_output_dir}")
        return
    print(f"\n📁 PCD Output Directory Contents ({pcd_output_dir}):")
    pcd_files = sorted(entry for entry in os.listdir(pcd_output_dir) if entry.endswith('.pcd'))
    if not pcd_files:
        print("  ⚠️  No PCD files found in directory")
        return
    for pcd_file in pcd_files:
        full_path = os.path.join(pcd_output_dir, pcd_file)
        print(f"  📄 {full_path} ({os.path.getsize(full_path)} bytes)")
    print(f"  Total: {len(pcd_files)} PCD files")

def create_log_path(yaml_data, cmd_type, index):
    """Return a fresh timestamped log-file path for run #index of *cmd_type*.

    Logs live under ../shell_log/<custom_name>/<hdf5_dirsN or zarrsN>/; the
    directory is created when missing.
    """
    if cmd_type in ['collect_data', 'collect_test']:
        subdir = f"hdf5_dirs{index}"
    else:
        subdir = f"zarrs{index}"
    log_dir = str(Path(__file__).parent.parent / "shell_log" / yaml_data['custom_name']) + f"/{subdir}"
    os.makedirs(log_dir, exist_ok=True)
    return f"{log_dir}/{get_timestamp()}.log"
def get_status_for_path(existing_status, path_exists):
    """Derive a status string for a path, keeping 'aborted'/'generating' sticky."""
    if existing_status in ('aborted', 'generating'):
        return existing_status
    return 'exist' if path_exists else 'non_exist'

def get_existing_log_file(yaml_data, file_type, index): return yaml_data.get(file_type, {}).get(str(index), {}).get('log_file', None)

def get_zarr_path(name_value, process_num, action_type, root_path=None, pipeline_suffix=None):
    """Return the canonical DP3 zarr path for a place_shoe dataset.

    root_path defaults to the RoboTwin checkout next to this script;
    pipeline_suffix, when given, is appended before the .zarr extension.
    """
    if root_path is None:
        root_path = f"{Path(__file__).parent.parent}/RoboTwin"
    suffix = f"-{pipeline_suffix}" if pipeline_suffix else ""
    return f"{root_path}/policy/DP3/data/place_shoe-{name_value}-{process_num}-{action_type}{suffix}.zarr"

def construct_all_path(yaml_data, full_test=None):
    """Construct the hdf5_dirs/zarrs/pcds bookkeeping tables from the YAML config.

    For each embodiment listed in yaml_data['embodiments'] this builds, in
    order: collect/process entries (when 'collect' > 0), test collect/process
    entries, then an optional pipeline-test pcd entry. Indices are assigned
    sequentially as string keys; previously recorded statuses and log files
    are preserved. Manually merged zarr entries (source starting '-m') are
    carried over unchanged.

    full_test: when truthy, the test process count equals the configured
        process count instead of 1. If None (default), falls back to a
        module-level `full_test` global when one exists, else False — the
        original code read an undefined `full_test` name here, which raised
        NameError at runtime.

    Returns (hdf5_dirs, zarrs, pcds) dicts keyed by stringified index.
    """
    if full_test is None:
        # Bug fix: `full_test` was previously an undefined name at this point.
        full_test = globals().get('full_test', False)
    emb_map = _load_emb_name_map()
    hdf5_dirs, zarrs, pcds, index = {}, {}, {}, 1
    for emb_char, emb_name in emb_map.items():
        if emb_name not in yaml_data.get('embodiments', {}): continue
        emb_config = yaml_data['embodiments'][emb_name]
        collect_num, process_num = int(emb_config.get('collect', 0)), emb_config.get('process', None)

        # Base params shared by every command for this embodiment.
        base_params = {'custom_name': yaml_data['custom_name'], 'embodiment': emb_name, 'action_type': yaml_data['action_type'], 'trim_arm': yaml_data['trim_arm'], 'camera_use_full': yaml_data['camera_use_full']}

        if collect_num > 0:
            collect_params = {**base_params, 'collect_num': collect_num}
            process_params = {**base_params, 'collect_num': collect_num, 'process_num': process_num if process_num is not None else collect_num}
            hdf5_path, zarr_path = get_output_path('collect_data', collect_params), get_output_path('process_data', process_params)
            existing_status = yaml_data.get('hdf5_dirs', {}).get(str(index), {}).get('status', 'non_exist')
            hdf5_dirs[str(index)] = {'name': hdf5_path, 'status': get_status_for_path(existing_status, os.path.exists(hdf5_path)), 'log_file': get_existing_log_file(yaml_data, 'hdf5_dirs', index), 'source': f"-c -e {emb_char}", 'embodiment': emb_char}
            index += 1
            existing_status = yaml_data.get('zarrs', {}).get(str(index), {}).get('status', 'non_exist')
            zarrs[str(index)] = {'name': zarr_path, 'status': get_status_for_path(existing_status, os.path.exists(zarr_path)), 'log_file': get_existing_log_file(yaml_data, 'zarrs', index), 'source': f"-p -e {emb_char}", 'embodiment': emb_char}
            index += 1

        # Test mode always collects a single episode.
        test_collect_num = 1
        # Fall back to the collect count when no process count is configured.
        if process_num is None:
            process_num = collect_num
        test_process_num = process_num if full_test else 1

        test_params = {**base_params, 'collect_num': test_collect_num, 'process_num': test_process_num}
        test_hdf5_path, test_zarr_path = get_output_path('collect_test', test_params), get_output_path('process_test', test_params)
        existing_status = yaml_data.get('hdf5_dirs', {}).get(str(index), {}).get('status', 'non_exist')
        hdf5_dirs[str(index)] = {'name': test_hdf5_path, 'status': get_status_for_path(existing_status, os.path.exists(test_hdf5_path)), 'log_file': get_existing_log_file(yaml_data, 'hdf5_dirs', index), 'source': f"-ct -e {emb_char}", 'embodiment': emb_char}
        index += 1
        existing_status = yaml_data.get('zarrs', {}).get(str(index), {}).get('status', 'non_exist')
        zarrs[str(index)] = {'name': test_zarr_path, 'status': get_status_for_path(existing_status, os.path.exists(test_zarr_path)), 'log_file': get_existing_log_file(yaml_data, 'zarrs', index), 'source': f"-pt -e {emb_char}", 'embodiment': emb_char}
        index += 1

        if 'pipeline_test' in emb_config:
            test_dt_params = {**base_params, 'process_num': test_process_num, 'pipeline_out_key': emb_config.get('pipeline_out_key', 'pointcloud')}
            test_pcd_path = get_output_path('data_traverser_test', test_dt_params)
            existing_status = yaml_data.get('pcds', {}).get(str(index), {}).get('status', 'non_exist')
            pcds[str(index)] = {'name': test_pcd_path, 'status': get_status_for_path(existing_status, os.path.exists(test_pcd_path)), 'log_file': get_existing_log_file(yaml_data, 'pcds', index), 'source': f"-dt -e {emb_char}", 'embodiment': emb_char}
            index += 1
    # Preserve manually merged zarr entries that this function does not rebuild.
    for idx, data in yaml_data.get('zarrs', {}).items():
        if data.get('source').startswith('-m'):
            zarrs[idx] = data
    return hdf5_dirs, zarrs, pcds
def log_command(yaml_data, cmd_type, command):
    """Append the executed command to yaml_data['command_log'] and persist it."""
    command_log = yaml_data.setdefault('command_log', {})
    command_log[get_timestamp()] = f"{cmd_type}: {command}"
    # Persist the updated log section back to the YAML file on disk.
    update_yaml_field(yaml_data['custom_name'], 'command_log', command_log)
def create_demo_task_yml(name, collect_num, embodiment, action_type, trim_arm, camera_use_full, pipeline=None, pipeline_out_key=None):
    """Copy RoboTwin's demo_clean.yml template to <name>.yml and customize it.

    Sets episode count, action type, arm trimming, the embodiment entry, the
    pcd camera list, optional pipeline keys, and the derived zarr_path.
    Exits the process when the template is missing.
    """
    task_config_dir = Path(__file__).parent.parent / "RoboTwin/task_config"
    source_path = task_config_dir / "demo_clean.yml"
    dest_path = task_config_dir / f"{name}.yml"
    if not source_path.exists():
        sys.exit(f"Error: {source_path} not found")
    shutil.copy2(source_path, dest_path)
    with open(dest_path, 'r') as fh:
        content = yaml.safe_load(fh)
    content['episode_num'] = collect_num
    content['action_type'] = action_type
    content['trim_arm'] = trim_arm
    if 'embodiment' in content and isinstance(content['embodiment'], list):
        # "aloha-agilex" gets a single entry; other embodiments a paired entry with 0.8.
        if embodiment == "aloha-agilex":
            content['embodiment'] = [embodiment]
        else:
            content['embodiment'] = [embodiment, embodiment, 0.8]
    if 'camera' in content and 'pcd_camera_names' in content['camera']:
        if camera_use_full:
            content['camera']['pcd_camera_names'] = ['midBack_camera','midFront_camera','midLeft_camera','midRight_camera','midHead_camera']
        else:
            content['camera']['pcd_camera_names'] = ['midBack_camera']
    if pipeline_out_key is not None:
        content['pipeline'] = pipeline if pipeline else ''
        content['pipeline_out_key'] = pipeline_out_key
    content['zarr_path'] = get_zarr_path(name, collect_num, action_type)
    with open(dest_path, 'w') as fh:
        yaml.dump(content, fh, default_flow_style=False, sort_keys=False, default_style=None)
    print(f"Created config: {dest_path}")
def run_filter_hdf5_highz(base_dir, episode_num):
    """Invoke the RoboTwin high-z HDF5 filter script on *base_dir*.

    Returns True on a zero exit code, False when the script is missing or
    fails. Note: episode_num is accepted but not forwarded to the script.
    """
    filter_script = Path(__file__).parent.parent / "RoboTwin/script/filter_hdf5_highz.py"
    if not filter_script.exists():
        print(f"Warning: filter script not found at {filter_script}")
        return False
    cmd = f"python3 {filter_script} {base_dir}"
    print(f"Running filter: {cmd}")
    return subprocess.run(cmd, shell=True).returncode == 0
def run_merge_zarr(zarr_files, output_zarr):
    """Run merge_zarr.py (next to this script) over *zarr_files* into *output_zarr*.

    Returns True iff the helper script exists and exits with code 0.
    """
    merge_script = Path(__file__).parent / "merge_zarr.py"
    if not merge_script.exists():
        print(f"Error: merge_zarr.py not found at {merge_script}")
        return False
    zarr_files_str = " ".join(zarr_files)
    cmd = f"python3 {merge_script} -z {zarr_files_str} -o {output_zarr}"
    print(f"Running merge_zarr: {cmd}")
    return subprocess.run(cmd, shell=True).returncode == 0
def create_test_config_and_cleanup(custom_name, embodiment, action_type, trim_arm, camera_use_full):
    """Resolve the test task name and interactively clean up stale test HDF5 data.

    When episode*.hdf5 files exist under the test data dir, the user may delete
    just those files ('y'), the whole task directory ('all'), or keep
    everything (any other input). Returns the test task name in all cases.
    """
    name_value = get_demo_task_name(custom_name, 1, embodiment, action_type, trim_arm, camera_use_full, is_test=True)
    data_dir = f"{Path(__file__).parent.parent}/RoboTwin/data/place_shoe/{name_value}/data"
    if os.path.exists(data_dir):
        hdf5_files = [f for f in os.listdir(data_dir) if f.startswith("episode") and f.endswith(".hdf5")]
        if hdf5_files:
            print(f"Found HDF5 files: {', '.join(sorted(hdf5_files))}\nOptions:\n  y - Remove only HDF5 files ({len(hdf5_files)} files)\n  all - Remove entire directory '{name_value}' (including all files and subdirectories)\n  n - Keep all files")
            user_input = input(f"Choose action (y/n/all): ")
            if user_input == 'y':
                for hdf5_file in hdf5_files: os.remove(os.path.join(data_dir, hdf5_file))
                print(f"✓ Removed {len(hdf5_files)} HDF5 files from {data_dir}")
            elif user_input == 'all':
                # Fix: dropped the redundant local `import shutil`
                # (shutil is already imported at module level).
                parent_dir = os.path.dirname(data_dir)
                shutil.rmtree(parent_dir)
                print(f"✓ Removed entire directory: {parent_dir}")
            else:
                print("✓ No files removed")
    return name_value
def replace_pipeline_percent(pipeline, embodiment):
    """Substitute every '%' in a pipeline spec with the embodiment's first character."""
    emb_char = embodiment[0]
    return pipeline.replace('%', emb_char)
def construct_data_traverser_command(custom_name, embodiment, action_type, trim_arm, camera_use_full, process_num, pipeline, is_test=False, manual_zarr_files=None):
    """Build the shell command that runs dataTraverser.py on a source zarr.

    Test mode samples frames (-f 0-200-20) up to the last episode; normal mode
    writes a pipeline-suffixed output zarr and warns when the source episode
    count does not match process_num. manual_zarr_files[0], when given,
    overrides the derived source zarr.
    """
    root_path = f"{Path(__file__).parent.parent}/RoboTwin"
    name_value = get_demo_task_name(custom_name, process_num, embodiment, action_type, trim_arm, camera_use_full, is_test=is_test)
    if manual_zarr_files:
        source_zarr = manual_zarr_files[0]
    else:
        source_zarr = get_zarr_path(name_value, process_num, action_type, root_path)
    pipeline_replaced = replace_pipeline_percent(pipeline, embodiment)
    last_episode = process_num - 1
    if is_test:
        return f'cd {Path(__file__).parent}; python3 dataTraverser.py -z {source_zarr} -p {pipeline_replaced} -f 0-200-20 -e {last_episode}'
    output_zarr = get_zarr_path(name_value, process_num, action_type, root_path, pipeline_replaced)
    validate_zarr_episode_count(source_zarr, process_num)
    return f'cd {Path(__file__).parent}; python3 dataTraverser.py -z {source_zarr} -p {pipeline_replaced} -o {output_zarr} '
def construct_process_data_command(custom_name, embodiment, action_type, trim_arm, camera_use_full, process_num, is_test=False):
    """Build the shell command that runs DP3's process_data.sh for place_shoe."""
    robotwin_root = f"{Path(__file__).parent.parent}/RoboTwin"
    task_name = get_demo_task_name(custom_name, process_num, embodiment, action_type, trim_arm, camera_use_full, is_test=is_test)
    return f'cd {robotwin_root}/policy/DP3; bash process_data.sh place_shoe {task_name} {process_num} {action_type}'
def construct_collect_data_command(gpu_id, name_value, is_test=False):
    """Build the collect_data.sh command with a literal '%gpu_id' placeholder.

    The placeholder is substituted with the chosen GPU just before execution;
    the gpu_id and is_test parameters are currently unused by this builder.
    """
    robotwin_root = f"{Path(__file__).parent.parent}/RoboTwin"
    return f'cd {robotwin_root}; bash collect_data.sh place_shoe {name_value} %gpu_id'
def get_eval_num(root_path, dp3_config_name):
    """Read a DP3 training config and return the last checkpointed epoch.

    Computed as the largest multiple of checkpoint_every not exceeding
    num_epochs; falls back to checkpoint_every when num_epochs is smaller.
    """
    config_path = f"{root_path}/3D-Diffusion-Policy/diffusion_policy_3d/config/{dp3_config_name}.yaml"
    with open(config_path, 'r') as fh:
        config_data = yaml.safe_load(fh)
    training = config_data['training']
    num_epochs = training['num_epochs']
    checkpoint_every = training['checkpoint_every']
    return (num_epochs // checkpoint_every) * checkpoint_every or checkpoint_every

def construct_train_eval_commands(root_path, name_value, process_num, gpu_id, zarr_to_use, dp3_config_name):
    """Return {'train': ..., 'eval': ...} shell commands (with '%gpu_id' placeholder)."""
    episode_num = get_eval_num(root_path, dp3_config_name)
    prefix = f'cd {root_path}/policy/DP3; bash'
    train_cmd = f'{prefix} train.sh place_shoe {name_value} {process_num} 1 %gpu_id {zarr_to_use} {dp3_config_name}'
    eval_cmd = f'{prefix} eval.sh place_shoe {name_value} {process_num} 1 %gpu_id {zarr_to_use} {dp3_config_name} {episode_num}'
    return {'train': train_cmd, 'eval': eval_cmd}
def update_yaml_commands(yaml_data, cmd_type, original_cmd, override_cmd=None):
    """Store [override_cmd] under cmd_type when it is set and differs, else [original_cmd]."""
    use_override = bool(override_cmd) and override_cmd != original_cmd
    yaml_data[cmd_type] = [override_cmd if use_override else original_cmd]

def handle_num_argument(args, parser, yaml_data):
    """Resolve the positional `num` argument to a stored command and re-parse.

    Looks up the entry's 'source' flags in hdf5_dirs, then zarrs, then pcds,
    rewrites sys.argv as if the user had typed those flags, and re-parses.
    Returns args unchanged when num is absent; exits with status 1 when the
    number matches no entry.
    """
    if args.num is None:
        return args
    num_str = str(args.num)
    cmd_found = None
    # Search the bookkeeping sections in fixed priority order.
    for file_type in ('hdf5_dirs', 'zarrs', 'pcds'):
        if file_type in yaml_data and num_str in yaml_data[file_type]:
            cmd_found = yaml_data[file_type][num_str]['source']
            break
    if not cmd_found:
        print(f"Error: Command number {args.num} not found in hdf5_dirs, zarrs, or pcds")
        exit(1)
    # Fix: dropped the redundant local `import sys` (imported at module level).
    # Re-inject the stored flags and parse again as if typed on the CLI.
    sys.argv = [sys.argv[0], args.yaml] + cmd_found.split()
    return parser.parse_args()


def pre_command_hook():
    """Return the shell prefix that activates the '3dpidm' conda environment."""
    # Source ~/.bashrc and use conda's bash hook for reliable activation.
    return "bash -c 'source ~/.bashrc && eval \"$(conda shell.bash hook)\" && conda activate 3dpidm'"

def run_with_realtime_output(cmd, log_file, cmd_type):
    """Run *cmd* through bash, sending output to *log_file* while a daemon
    thread echoes the log's latest line to the console in place.

    Returns the subprocess.CompletedProcess of the command.
    """
    import threading, time

    def monitor():
        # Poll the log and redraw the console line when the last line changes.
        last_line = ""
        while True:
            try:
                with open(log_file, 'r') as f:
                    lines = f.readlines()
                    if lines and lines[-1].strip() != last_line:
                        current_line = lines[-1].strip()
                        # Clear the whole console line, then print the new content.
                        print(f"\r{' ' * 100}\r[{cmd_type}] {current_line}", end='', flush=True)
                        last_line = current_line
            except Exception:
                # Fix: was a bare `except:` (which also swallowed
                # KeyboardInterrupt); tolerate the log not existing yet.
                pass
            time.sleep(0.5)

    threading.Thread(target=monitor, daemon=True).start()
    # Fix: the original passed open(log_file, 'w') directly to subprocess.run
    # and never closed it, leaking one file descriptor per invocation.
    with open(log_file, 'w') as out:
        return subprocess.run(cmd, shell=True, executable='/bin/bash', stdout=out, stderr=subprocess.STDOUT, text=True)

def get_zarr_episode_count(zarr_path):
    """Best-effort count of episodes stored in a zarr dataset.

    Checks 'episode_ends' at the root, then under 'data', then falls back to
    the first root key containing 'episode'. Returns 0 when the store cannot
    be opened or no episode marker is found.
    """
    try:
        z = zarr.open(zarr_path, 'r')
        if 'episode_ends' in z:
            return len(z['episode_ends'])
        elif 'data' in z and 'episode_ends' in z['data']:
            return len(z['data']['episode_ends'])
        else:
            for key in z.keys():
                if 'episode' in key.lower():
                    return len(z[key])
        return 0
    except Exception:
        # Fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; treat any read failure as 0 episodes.
        return 0

def validate_zarr_episode_count(zarr_path, expected_episode_count):
    """Warn (bold red) when the zarr's episode count differs from expected.

    Returns True iff the counts match.
    """
    actual_episode_count = get_zarr_episode_count(zarr_path)
    matches = actual_episode_count == expected_episode_count
    if not matches:
        print(colored(f"⚠️  WARNING: expected episode count ({expected_episode_count}) != actual episode count ({actual_episode_count}) in {zarr_path}", 'red', attrs=['bold']))
    return matches

def get_nested_value(data, field_path):
    """Fetch data['a']['b']... for a dot-separated *field_path*, or None.

    Returns None when any key is missing. Robustness fix: a non-dict
    intermediate value now also yields None (previously `key in value` on a
    string/list could pass and the subsequent indexing raised TypeError).
    """
    value = data
    for key in field_path.split('.'):
        if isinstance(value, dict) and key in value:
            value = value[key]
        else:
            return None
    return value

def set_nested_value(data, field_path, new_value):
    """Set data['a']['b']... = new_value for a dot-separated *field_path*,
    creating intermediate dicts as needed."""
    *parents, leaf = field_path.split('.')
    node = data
    for key in parents:
        if key not in node:
            node[key] = {}
        node = node[key]
    node[leaf] = new_value

def print_demo_task_check(demo_task_path):
    """Print and optionally modify specific fields from demo_task*.yaml file.

    For each field of interest the current value is shown and the user may
    type a replacement (blank input keeps it). A '%' suffix on a field name
    means "print the raw value even if it is a dict/list". Every accepted
    change is written back to *demo_task_path* immediately through the
    round-trip YAML handler, preserving formatting.
    """
    with open(demo_task_path, 'r') as f: demo_data = yaml_handler.load(f)
    fields_to_print = ['episode_num', 'action_type', 'usemimic', 'embodiment','ee_location','pipeline','pipeline_out_key','usemimic','rand_asset_ids%'] #'camera.pcd_camera_names', 'data_type'
    print(f"demo_data: {os.path.abspath(demo_task_path)}")
    for field in fields_to_print:
        # Dotted fields are resolved through the nested lookup helper.
        value = get_nested_value(demo_data, field) if '.' in field else demo_data.get(field)
        if value is not None:
            if isinstance(value, (dict, list)) and not field.endswith('%'):
                # Containers (without the '%' suffix) are edited element by element.
                print(f"{field}:")
                # NOTE(review): for lists this iterates enumerate(enumerate(value)),
                # so k and i are both the element index — confirm intended.
                for i, (k, v) in enumerate(value.items() if isinstance(value, dict) else enumerate(value)):
                    print(f"  {k}: {v}")
                    user_input = input("")
                    if user_input.strip():
                        try:
                            # Digits become ints; 'true'/'false' are YAML-parsed to bools; anything else stays a string.
                            new_val = int(user_input) if user_input.isdigit() else (yaml.safe_load(user_input) if user_input.lower() in ['true', 'false'] else user_input)
                        except ValueError:
                            new_val = user_input
                        if isinstance(value, dict): value[k] = new_val
                        else: value[i] = new_val
                        print(f"Updated {field}: {value} {demo_task_path}")
                        # Persist after every accepted edit so partial sessions still save.
                        with open(demo_task_path, 'w') as f: yaml_handler.dump(demo_data, f)
            else:
                # Scalar (or '%'-suffixed raw) fields: show, optionally replace.
                field_name = field.rstrip('%')
                print(f"{field_name}: {value}")
                user_input = input("")
                if user_input.strip():
                    try:
                        new_value = int(user_input) if user_input.isdigit() else (yaml.safe_load(user_input) if user_input.lower() in ['true', 'false'] else user_input)
                    except ValueError:
                        new_value = user_input
                    if '.' in field_name: set_nested_value(demo_data, field_name, new_value)
                    else: demo_data[field_name] = new_value
                    print(f"Updated {field_name}: {demo_data[field_name]} {demo_task_path}")
                    with open(demo_task_path, 'w') as f: yaml_handler.dump(demo_data, f)

def execute_command(yaml_data, cmd_type, cmd, embodiment, collect_num=0, needs_filter=False, base_dir=""):
    """Execute one pipeline command end-to-end with YAML status bookkeeping.

    Finds the bookkeeping entry whose 'source' flags match cmd_type +
    embodiment, confirms/cleans any existing output, prompts for a GPU id,
    runs the command with a live log tail, and records the
    generating -> completed/aborted transition in the YAML file.

    collect_num/needs_filter/base_dir only matter for 'collect_data', where a
    post-run high-z filter may be applied on success.
    """
    # Map the command type to the YAML section that tracks its outputs.
    if cmd_type in ['collect_data', 'collect_test']:
        file_type = 'hdf5_dirs'
    elif cmd_type in ['data_traverser', 'data_traverser_test']:
        file_type = 'pcds'
    else:
        file_type = 'zarrs'
    for idx, data in yaml_data.get(file_type, {}).items():
        # Match the entry by its recorded CLI flags, e.g. "-ct -e a".
        if data['source'] == f"-{cmd_type[0]}{'t' if cmd_type.endswith('_test') else ''} -e {embodiment[0]}":
            # Remove existing output path before execution
            if not remove_existing_path(data['name'], is_dir=(cmd_type in ['collect_data', 'collect_test']), yaml_data=yaml_data):
                print(f"Skipping {cmd_type} due to user choice")
                return

            log_file = create_log_path(yaml_data, cmd_type, idx)
            # Mark as in-flight before starting so an interrupt can flag it aborted.
            update_yaml_field(yaml_data['custom_name'], [file_type, str(idx), 'status'], 'generating')
            update_yaml_field(yaml_data['custom_name'], [file_type, str(idx), 'log_file'], log_file)

            print(f"Command: {cmd}\nLog: {log_file}")
            if cmd_type in ['collect_data', 'collect_test']:
                # Offer an interactive review of the generated task config first.
                demo_task_name = get_demo_task_name(yaml_data['custom_name'], collect_num if cmd_type == 'collect_data' else 1, embodiment, yaml_data['action_type'], yaml_data['trim_arm'], yaml_data['camera_use_full'], is_test=(cmd_type == 'collect_test'))
                demo_task_path = f"{Path(__file__).parent.parent}/RoboTwin/task_config/{demo_task_name}.yml"
                if os.path.exists(demo_task_path):
                    print_demo_task_check(demo_task_path)
            # Prompt until a valid GPU id is given; Ctrl-C here cancels cleanly.
            while True:
                try: 
                    gpu_id = int(input("GPU ID (0-7): "))
                    if 0 <= gpu_id <= 7: break
                    else: print("Invalid GPU ID")
                except ValueError: print("Enter valid integer")
                except KeyboardInterrupt: print("\nCancelled"); update_yaml_field(yaml_data['custom_name'], [file_type, str(idx), 'status'], 'non_exist'); return
            # Prefix with the conda-activation hook and substitute the GPU placeholder.
            cmd = f"{pre_command_hook()} && {cmd.replace('%gpu_id', str(gpu_id))}"; log_command(yaml_data, cmd_type, cmd)
            print(f"Executing {cmd_type} for {embodiment} with GPU {gpu_id} log file: {log_file}")

            # Track the run so the SIGINT handler can mark it aborted.
            current_runs.add((file_type, str(idx)))
            result = run_with_realtime_output(cmd, log_file, cmd_type)
            print()
            current_runs.discard((file_type, str(idx)))

            with open(log_file, 'r') as f: log_content = f.read().strip()
            status = 'completed' if result.returncode == 0 else 'aborted'
            print(f"{'✅' if result.returncode == 0 else '❌'} {cmd_type} {'completed' if result.returncode == 0 else 'failed'} for {embodiment}")
            if result.returncode != 0: print(f"Error: {log_content}"); print(f"Log: {log_file}")
            update_yaml_field(yaml_data['custom_name'], [file_type, str(idx), 'status'], status)

            # List PCD output directory contents after successful data_traverser_test
            if cmd_type == 'data_traverser_test' and result.returncode == 0:
                list_pcd_output_directory(data['name'])


            # Optional post-collection filter pass over the raw HDF5 output.
            if cmd_type == 'collect_data' and needs_filter and base_dir and result.returncode == 0: print(f"Running filter for {embodiment}"); run_filter_hdf5_highz(base_dir, collect_num)
            return
    print(f"Warning: Could not find corresponding index for {cmd_type}")

def get_output_path(command_name, params):
    """Resolve the expected output path for a pipeline command.

    Supported commands: collect_data/collect_test (HDF5 directory),
    process_data/process_test (zarr), data_traverser_test (pcd dump dir),
    merge_zarr (merged zarr name). Raises ValueError for anything else.
    """
    custom_name = params['custom_name']
    embodiment = params['embodiment']
    action_type = params['action_type']
    trim_arm = params['trim_arm']
    camera_use_full = params['camera_use_full']

    if command_name in ('collect_data', 'collect_test'):
        collect_num = params.get('collect_num', 1 if command_name == 'collect_test' else 0)
        name_value = get_demo_task_name(custom_name, collect_num, embodiment, action_type, trim_arm, camera_use_full, is_test=(command_name.endswith('test')))
        return f"{Path(__file__).parent.parent}/RoboTwin/data/place_shoe/{name_value}"
    if command_name in ('process_data', 'process_test'):
        process_num = params.get('process_num', 1 if command_name == 'process_test' else params.get('collect_num', 0))
        name_value = get_demo_task_name(custom_name, process_num, embodiment, action_type, trim_arm, camera_use_full, is_test=(command_name.endswith('test')))
        return get_zarr_path(name_value, process_num, action_type)
    if command_name == 'data_traverser_test':
        process_num = params.get('process_num', 1)
        name_value = get_demo_task_name(custom_name, process_num, embodiment, action_type, trim_arm, camera_use_full, is_test=True)
        zarr_path = get_zarr_path(name_value, process_num, action_type)
        pipeline_out_key = params.get('pipeline_out_key', 'pointcloud')
        print(f"pipeline_out_key: {pipeline_out_key}")
        # The pcd output dir is keyed by the first char of the pipeline output key.
        return f"{zarr_path}/{pipeline_out_key[0]}_pcd_output"
    if command_name == 'merge_zarr':
        return generate_merged_zarr_name(params)
    raise ValueError(f"Unsupported command: {command_name}")

def print_available_paths(yaml_data):
    """Pretty-print every known command entry per embodiment, colour-coded by status.

    green = output exists, magenta = generating (with log path), red = aborted,
    plain = not yet generated. Entries are ordered test-first (-ct, -pt, -dt)
    then full (-c, -p).
    """
    print(f"Available paths constructed at {os.path.abspath(yaml_data['custom_name']+'.yaml')}")
    print("Available commands:")

    def sort_key(entry):
        # Ranking mirrors the CLI flags: test commands first, then full runs.
        source = entry[1]['source']
        for token, rank in (('-ct', 0), ('-pt', 1), ('-dt', 2), ('-c ', 3), ('-p ', 4)):
            if token in source:
                return rank
        return 5

    emb_map = _load_emb_name_map()
    for emb_char, emb_name in emb_map.items():
        if emb_name not in yaml_data.get('embodiments', {}):
            continue
        print(f"\n{emb_name.upper()}:")
        suffix = f" -e {emb_char}"
        all_entries = []
        for section in ('hdf5_dirs', 'zarrs', 'pcds'):
            for idx, data in yaml_data.get(section, {}).items():
                if data['source'].endswith(suffix):
                    all_entries.append((idx, data))
        all_entries.sort(key=sort_key)
        for idx, data in all_entries:
            status = data['status']
            if status == 'exist':
                print(colored(f"  {idx}: {data['source']} -> {data['name']}", 'green'))
            elif status == 'generating':
                log_info = f" (log: {data.get('log_file', 'N/A')})" if data.get('log_file') else ""
                print(colored(f"  {idx}: {data['source']} -> {data['name']}{log_info}", 'magenta'))
            elif status == 'aborted':
                print(colored(f"  {idx}: {data['source']} -> {data['name']} [ABORTED]", 'red'))
            else:
                print(f"  {idx}: {data['source']} -> {data['name']}")

def remove_existing_path(path, is_dir=False, yaml_data=None):
    """Remove existing path after user confirmation.

    Returns True when execution may proceed (nothing existed, or the user
    agreed to delete), False when the user chose to keep the existing output.
    Entries marked 'aborted' in yaml_data also require confirmation, and a
    confirmed cleanup resets their status to 'non_exist'.
    """
    # Check if path exists or if it's marked as aborted in yaml_data
    path_exists = os.path.exists(path)
    is_aborted = False
    if yaml_data:
        for file_type in ['hdf5_dirs', 'zarrs', 'pcds']:
            if file_type in yaml_data:
                for idx, data in yaml_data[file_type].items():
                    if data.get('name') == path and data.get('status') == 'aborted':
                        is_aborted = True
                        break
                if is_aborted:
                    break
    
    if path_exists or is_aborted:
        # Show the full overview so the user can make an informed choice.
        if yaml_data:
            print_available_paths(yaml_data)
        path_type = "directory" if is_dir else "file"
        status_info = " (ABORTED)" if is_aborted else ""
        print(f"{path_type.capitalize()} already exists{status_info}: {path}")
        ans = input(f"Remove existing {path_type}? (y/n): ").strip().lower()
        if ans == 'y':
            if path_exists:
                if is_dir or os.path.isdir(path):
                    shutil.rmtree(path)
                    print(f"Removed directory: {path}")
                else:
                    os.remove(path)
                    print(f"Removed file: {path}")
            # Clear aborted status in yaml
            if is_aborted and yaml_data:
                # NOTE(review): unlike the detection loop above, only the inner
                # loop breaks here, so the first match in EACH section is
                # reset — confirm this asymmetry is intended.
                for file_type in ['hdf5_dirs', 'zarrs', 'pcds']:
                    if file_type in yaml_data:
                        for idx, data in yaml_data[file_type].items():
                            if data.get('name') == path and data.get('status') == 'aborted':
                                update_yaml_field(yaml_data['custom_name'], [file_type, str(idx), 'status'], 'non_exist')
                                break
            return True
        else:
            print(f"Keeping existing: {path}")
            return False
    return True

def get_demo_task_name(custom_name, collect_num, embodiment, action_type, trim_arm, camera_use_full, is_test=False):
    """Build the canonical demo-task name used for configs and data dirs.

    Encodes trim_arm / camera_use_full as 0/1 flags (ta?/cam?) and appends
    "-test1" when is_test is set.
    """
    ta_flag = 1 if trim_arm else 0
    cam_flag = 1 if camera_use_full else 0
    pieces = [
        f"demo_clean_{custom_name}",
        str(collect_num),
        str(embodiment),
        str(action_type),
        f"ta{ta_flag}-cam{cam_flag}",
    ]
    name = "-".join(pieces)
    if is_test:
        name = f"{name}-test1"
    return name


def main():
    """Entry point: parse CLI args, load the YAML config, and dispatch the
    requested pipeline stage(s).

    With no command flags: (re)build the hdf5/zarr/pcd path tables in the
    YAML, save them, print them, and exit.  With command flags: require
    -e <embodiment>, then run each selected stage (collect/process/traverse/
    replay-test/merge/train/eval) in order.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("yaml", help="YAML file to read parameters and run commands")
    parser.add_argument("num", nargs="?", type=int, help="Execute command by number from hdf5_dirs or zarrs")
    parser.add_argument("-e", "--embodiment", help="Embodiment character (a/p/u/f/x)")
    parser.add_argument("-clr", action="store_true", help="Clear existing hdf5_dirs and zarrs before constructing new ones")

    # Short flag -> long command name; each becomes a boolean store_true flag.
    cmd_map = {'c': 'collect_data', 'p': 'process_data', 'r': 'replay_buffer_test', 'd': 'data_traverser', 'dt': 'data_traverser_test', 'm': 'merge_zarr', 't': 'train', 'v': 'eval', 'ct': 'collect_test', 'pt': 'process_test', 'pft': 'process_from_full_test', 'dft': 'data_traverser_from_full_test'}
    for flag, cmd in cmd_map.items(): parser.add_argument(f"-{flag}", f"--{cmd}", action="store_true")
    parser.add_argument("-z", "--zarr-files", nargs="+", help="Manual zarr files to use")
    args = parser.parse_args()
    
    yaml_data = load_yaml(args.yaml)
    # The yaml filename doubles as the run's custom_name throughout.
    yaml_data['custom_name'] = args.yaml
    args = handle_num_argument(args, parser, yaml_data)
    
    # Fail fast if the YAML is missing any of the parameters every stage needs.
    required_params = ['gpu_id', 'action_type', 'trim_arm', 'camera_use_full', 'dp3_config_name', 'embodiments']
    missing_params = [param for param in required_params if param not in yaml_data]
    if missing_params:
        print(f"Error: Missing required parameters in YAML: {missing_params}")
        return
    
    # -clr drops the cached path tables so they are rebuilt from scratch below.
    if args.clr:
        if 'hdf5_dirs' in yaml_data:
            del yaml_data['hdf5_dirs']
        if 'zarrs' in yaml_data:
            del yaml_data['zarrs']
        if 'pcds' in yaml_data:
            del yaml_data['pcds']
        print("Cleared existing hdf5_dirs, zarrs, and pcds fields")
    
    # No command flags given: rebuild and persist the path tables...
    if not any(getattr(args, cmd, False) for cmd in cmd_map.values()):
        hdf5_dirs, zarrs, pcds = construct_all_path(yaml_data)
        yaml_data['hdf5_dirs'] = hdf5_dirs
        yaml_data['zarrs'] = zarrs
        yaml_data['pcds'] = pcds
        save_yaml(args.yaml, yaml_data)
    
    # ...then print them and stop (interactive listing mode).
    if not any(getattr(args, cmd, False) for cmd in cmd_map.values()):
        print_available_paths(yaml_data)
        print("Use <yaml_file> <number> to execute a specific command, or use specific commands with -e <embodiment> to proceed.")
        return
    
    # From here on, every command needs a concrete embodiment.
    if not args.embodiment:
        print("Error: -e <embodiment> is required for all commands")
        return
    
    # Map the one-letter embodiment code to its full name and validate it
    # against both the known map and this YAML's configured embodiments.
    emb_map = _load_emb_name_map()
    if args.embodiment not in emb_map:
        print(f"Error: Invalid embodiment '{args.embodiment}'. Valid options: {list(emb_map.keys())}")
        return
    
    embodiment = emb_map[args.embodiment]
    if embodiment not in yaml_data['embodiments']:
        print(f"Error: Embodiment '{embodiment}' not found in YAML configuration")
        return
    
    emb_config = yaml_data['embodiments'][embodiment]
    collect_num = emb_config.get('collect', 0)
    process_num = emb_config.get('process', None)
    pipeline = emb_config.get('pipeline', '')
    pipeline_test = emb_config.get('pipeline_test', '')
    
    # Coerce to int; falsy values (None, 0, '') collapse to 0.
    collect_num = int(collect_num) if collect_num else 0
    
    # Absent 'process' means process everything collected; an explicit value
    # means only a filtered subset of episodes survives processing.
    if process_num is None:
        process_num = collect_num
        needs_filter = False
    else:
        process_num = int(process_num)
        needs_filter = True
    
    # A command will run: (re)build the path tables so they reflect the
    # current YAML before any stage consults them.
    if any(getattr(args, cmd, False) for cmd in cmd_map.values()):
        hdf5_dirs, zarrs, pcds = construct_all_path(yaml_data)
        yaml_data['hdf5_dirs'] = hdf5_dirs
        yaml_data['zarrs'] = zarrs
        yaml_data['pcds'] = pcds
        save_yaml(args.yaml, yaml_data)
    
    # --- Stage: collect_data (-c) ---
    if getattr(args, 'collect_data', False):
        if collect_num > 0:
            name_value = get_demo_task_name(yaml_data['custom_name'], collect_num, embodiment, yaml_data['action_type'], yaml_data['trim_arm'], yaml_data['camera_use_full'])
            use_runtime_pipeline = emb_config.get('use_runtime_pipeline', False)
            pipeline_out_key = emb_config.get('pipeline_out_key', None)
            pipeline_to_use = replace_pipeline_percent(pipeline, embodiment) if use_runtime_pipeline else None
            create_demo_task_yml(name_value, collect_num, embodiment, yaml_data['action_type'], yaml_data['trim_arm'], yaml_data['camera_use_full'], pipeline_to_use, pipeline_out_key)
            cmd = construct_collect_data_command(yaml_data['gpu_id'], name_value)
            base_dir = f"{Path(__file__).parent.parent}/RoboTwin/data/{yaml_data['custom_name']}-{collect_num}-{embodiment}-{yaml_data['action_type']}-ta{1 if yaml_data['trim_arm'] else 0}-cam{1 if yaml_data['camera_use_full'] else 0}"
            execute_command(yaml_data, 'collect_data', cmd, embodiment, collect_num, needs_filter, base_dir)
        else:
            print(f"No collect_num specified for {embodiment}")
    # --- Stage: collect_test (-ct): single-episode collection dry run ---
    if getattr(args, 'collect_test', False):
        name_value = get_demo_task_name(yaml_data['custom_name'], 1, embodiment, yaml_data['action_type'], yaml_data['trim_arm'], yaml_data['camera_use_full'], is_test=True)
        use_runtime_pipeline = emb_config.get('use_runtime_pipeline', False)
        pipeline_out_key = emb_config.get('pipeline_out_key', None)
        pipeline_to_use = replace_pipeline_percent(pipeline, embodiment) if use_runtime_pipeline else None
        create_demo_task_yml(name_value, 1, embodiment, yaml_data['action_type'], yaml_data['trim_arm'], yaml_data['camera_use_full'], pipeline_to_use, pipeline_out_key)
        create_test_config_and_cleanup(yaml_data['custom_name'], embodiment, yaml_data['action_type'], yaml_data['trim_arm'], yaml_data['camera_use_full'])
        cmd = construct_collect_data_command(yaml_data['gpu_id'], name_value, is_test=True)
        execute_command(yaml_data, 'collect_test', cmd, embodiment)
    
    # --- Stage: process_data (-p) / process_test (-pt) ---
    if getattr(args, 'process_data', False):
        if collect_num > 0:
            cmd = construct_process_data_command(yaml_data['custom_name'], embodiment, yaml_data['action_type'], yaml_data['trim_arm'], yaml_data['camera_use_full'], process_num, is_test=False)
            execute_command(yaml_data, 'process_data', cmd, embodiment)
        else:
            print(f"No collect_num specified for {embodiment}")
    if getattr(args, 'process_test', False):
        cmd = construct_process_data_command(yaml_data['custom_name'], embodiment, yaml_data['action_type'], yaml_data['trim_arm'], yaml_data['camera_use_full'], 1, is_test=True)
        execute_command(yaml_data, 'process_test', cmd, embodiment)
    
    # --- Stage: data_traverser (-d) / data_traverser_test (-dt) ---
    if getattr(args, 'data_traverser', False):
        if pipeline:
            cmd = construct_data_traverser_command(yaml_data['custom_name'], embodiment, yaml_data['action_type'], yaml_data['trim_arm'], yaml_data['camera_use_full'], process_num, pipeline, is_test=False, manual_zarr_files=args.zarr_files)
            execute_command(yaml_data, 'data_traverser', cmd, embodiment)
        else:
            print(f"No pipeline specified for {embodiment}")
    if getattr(args, 'data_traverser_test', False):
        if pipeline_test:
            cmd = construct_data_traverser_command(yaml_data['custom_name'], embodiment, yaml_data['action_type'], yaml_data['trim_arm'], yaml_data['camera_use_full'], 1, pipeline_test, is_test=True, manual_zarr_files=args.zarr_files)
            execute_command(yaml_data, 'data_traverser_test', cmd, embodiment)
        else:
            print(f"No pipeline_test specified for {embodiment}")
    
    # --- Stage: process_from_full_test (-pft): test-mode processing over the
    # full collect_num rather than a single episode ---
    if getattr(args, 'process_from_full_test', False):
        if collect_num > 0:
            cmd = construct_process_data_command(yaml_data['custom_name'], embodiment, yaml_data['action_type'], yaml_data['trim_arm'], yaml_data['camera_use_full'], collect_num, is_test=True)
            execute_command(yaml_data, 'process_from_full_test', cmd, embodiment)
        else:
            print(f"No collect_num specified for {embodiment}")
    
    # --- Stage: data_traverser_from_full_test (-dft) ---
    if getattr(args, 'data_traverser_from_full_test', False):
        if pipeline:
            cmd = construct_data_traverser_command(yaml_data['custom_name'], embodiment, yaml_data['action_type'], yaml_data['trim_arm'], yaml_data['camera_use_full'], collect_num, pipeline, is_test=True, manual_zarr_files=args.zarr_files)
            execute_command(yaml_data, 'data_traverser_from_full_test', cmd, embodiment)
        else:
            print(f"No pipeline specified for {embodiment}")
    
    # --- Stage: replay_buffer_test (-r): sanity-check the produced zarr ---
    if getattr(args, 'replay_buffer_test', False):
        if collect_num > 0:
            name_value = get_demo_task_name(yaml_data['custom_name'], collect_num, embodiment, yaml_data['action_type'], yaml_data['trim_arm'], yaml_data['camera_use_full'])
            root_path = f"{Path(__file__).parent.parent}/RoboTwin"
            zarr_path = get_zarr_path(name_value, process_num, yaml_data['action_type'], root_path)
            cmd = f'cd {root_path}/policy/DP3; python3 replay_buffer_test.py {zarr_path} 0'
            execute_command(yaml_data, 'replay_buffer_test', cmd, embodiment)
        else:
            print(f"No collect_num specified for {embodiment}")
    
    # --- Stage: merge_zarr (-m): merge several zarr stores into one ---
    if getattr(args, 'merge_zarr', False):
        zarr_files = []
        if args.zarr_files:
            # Each -z item is either a numeric index into yaml_data['zarrs']
            # or a literal zarr path.
            # NOTE(review): index lookup assumes the 'zarrs' table is keyed by
            # str(idx) — confirm against construct_all_path's key type.
            for item in args.zarr_files:
                try:
                    idx = int(item)
                    if 'zarrs' in yaml_data and str(idx) in yaml_data['zarrs']:
                        zarr_files.append(yaml_data['zarrs'][str(idx)]['name'])
                except ValueError:
                    zarr_files.append(item)
        if zarr_files:
            # Build the parameter bundle consumed by get_output_path.
            merge_params = {
                'zarr_files': zarr_files,
                'yaml_data': yaml_data,
                'custom_name': yaml_data['custom_name'],
                'action_type': yaml_data['action_type'],
                'trim_arm': yaml_data['trim_arm'],
                'camera_use_full': yaml_data['camera_use_full']
            }
            output_zarr = get_output_path('merge_zarr', merge_params)
            if not remove_existing_path(output_zarr, yaml_data=yaml_data):
                print("Skipping merge_zarr due to user choice")
                return
            if run_merge_zarr(zarr_files, output_zarr):
                print("Successfully merged zarr files")
                # Record the merged output as a new zarrs entry.
                if 'zarrs' not in yaml_data:
                    yaml_data['zarrs'] = {}
                merged_idx = str(len(yaml_data['zarrs']) + 1)
                yaml_data['zarrs'][merged_idx] = {'name': output_zarr, 'status': 'exist', 'log_file': None, 'source': f"-m {' '.join(args.zarr_files)}", 'embodiment': 'merged'}
                save_yaml(args.yaml, yaml_data)
            else:
                print("Failed to merge zarr files")
                return
        else:
            print("Warning: merge_zarr option specified but no zarr files to merge")
            return
    
    # --- Stage: train (-t) / eval (-v): run directly via subprocess rather
    # than execute_command ---
    if getattr(args, 'train', False) or getattr(args, 'eval', False):
        if collect_num > 0:
            name_value = get_demo_task_name(yaml_data['custom_name'], collect_num, embodiment, yaml_data['action_type'], yaml_data['trim_arm'], yaml_data['camera_use_full'])
            zarr_to_use = get_zarr_path(name_value, process_num, yaml_data['action_type'])
            root_path = f"{Path(__file__).parent.parent}/RoboTwin"
            commands = construct_train_eval_commands(root_path, name_value, process_num, yaml_data['gpu_id'], zarr_to_use, yaml_data['dp3_config_name'])
            
            for flag, cmd_name in cmd_map.items():
                if getattr(args, cmd_name, False) and cmd_name in commands:
                    cmd = commands[cmd_name]
                    log_command(yaml_data, cmd_name, cmd)
                    print(f"Executing: {cmd_name} using zarr: {zarr_to_use}")
                    subprocess.run(cmd, shell=True)
        else:
            print(f"No collect_num specified for {embodiment}")
    
    # Persist any status/log updates made by the stages above.
    save_yaml(args.yaml, yaml_data)

if __name__ == "__main__": main()
