| """ |
| Prepare EVAC inference inputs from RLBench-style episode data. |
| |
| Input layout: |
| episodes_root/ |
| ├── episode_0/ |
| │ ├── actions.npy # (T_act, 8), single-hand [xyz, quat_xyzw, gripper] |
| │ ├── view1/ |
| │ │ ├── rgb/video.mp4 |
| │ │ └── camera_params.json # {"<frame_id>": {"extrinsics":..., "intrinsics":...}} |
| │ └── view2/ ... |
| ├── episode_1/ |
| │ └── ... |
| |
| Output layout: |
| <user-specified output_root>/ |
| ├── episode_0/ |
| │ ├── <view1>/ # only FIXED-camera views |
| │ │ ├── frame.png # video frame at t_start (first frame gripper visible) |
| │ │ ├── actions.npy # (T + 3, 16), dual-hand, history-padded |
| │ │ ├── extrinsics.npy # (4, 4) c2w |
| │ │ └── intrinsics.npy # (3, 3) K (abs-valued fx, fy) |
| │ └── <view2>/ ... |
| └── ... |
| |
| Pipeline: |
| 1. Discover view folders (contain camera_params.json). |
| 2. Filter: keep views whose extrinsics are identical across all recorded frames. |
| 3. Map each video frame t -> action[round(t * T_action / T_video)] (handles |
| non-exact ratios like 41:163 or 41:164 by clamping at the tail). |
| 4. Find t_start: first video frame where right-hand EEF projects inside |
| the image with positive depth (gripper enters camera view). |
| 5. Slice: frames [t_start, T_video), actions aligned to those frames. |
| 6. Convert 8D single-hand -> 16D dual-hand (real on right, placeholder on left). |
| 7. Prepend (n_previous - 1) copies of first frame to align with EVAC's history slots. |
| 8. Write frame.png from video at t_start; write actions.npy; write K and c2w. |
| |
| Usage: |
| python prepare_evac_input.py -i /path/to/episodes_root -o /path/to/out |
| python prepare_evac_input.py -i ... -o ... --hand right |
| python prepare_evac_input.py -i ... -o ... --fix_tol 1e-6 --n_previous 4 |
| python prepare_evac_input.py -i ... -o ... --episodes episode_0 episode_5 |
| """ |
|
|
| import argparse |
| import json |
| import os |
| from pathlib import Path |
|
|
| import cv2 |
| import numpy as np |
|
|
|
|
| |
| |
| |
|
|
def single_to_dual(actions_8d: np.ndarray, hand: str = "right") -> np.ndarray:
    """Expand [T, 8] single-hand actions to the [T, 16] dual-hand layout.

    Columns 0-7 are the left hand, 8-15 the right hand; each hand is
    [xyz, quat_xyzw, gripper]. The real data goes on `hand`, while the
    other hand gets a neutral placeholder (zero position, identity
    quaternion, gripper = 1.0).
    """
    assert actions_8d.ndim == 2 and actions_8d.shape[1] == 8
    n_steps = actions_8d.shape[0]
    dual = np.zeros((n_steps, 16), dtype=np.float32)
    identity_quat = np.array([0, 0, 0, 1], dtype=np.float32)

    if hand == "right":
        # Placeholder left hand: identity rotation, open gripper.
        dual[:, 3:7] = identity_quat
        dual[:, 7] = 1.0
        # Real data occupies the right-hand slots 8..15 verbatim.
        dual[:, 8:16] = actions_8d
    elif hand == "left":
        dual[:, 0:8] = actions_8d
        # Placeholder right hand: identity rotation, open gripper.
        dual[:, 11:15] = identity_quat
        dual[:, 15] = 1.0
    else:
        raise ValueError(f"hand must be 'left' or 'right', got {hand!r}")
    return dual
|
|
|
|
def prepend_history_pad(actions_16d: np.ndarray, n_previous: int) -> np.ndarray:
    """Prepend (n_previous - 1) duplicates of the first row as history slots.

    With n_previous <= 1 the input array is returned unchanged (same object).
    """
    n_pad = n_previous - 1
    if n_pad <= 0:
        return actions_16d
    pad = np.repeat(actions_16d[:1], n_pad, axis=0)
    return np.concatenate([pad, actions_16d], axis=0)
|
|
|
|
| |
| |
| |
|
|
def load_camera_params(camera_params_path: Path):
    """Parse camera_params.json -> (frame_ids, (T, 4, 4) ext, (T, 3, 3) K).

    Frame IDs are sorted numerically when every key is an all-digit string.
    A plain string sort would order "10" before "2", which breaks the
    assumption downstream that index 0 is the earliest recorded frame.
    Non-numeric keys fall back to lexicographic order.
    """
    with open(camera_params_path, "r") as f:
        data = json.load(f)
    keys = list(data.keys())
    if keys and all(k.isdigit() for k in keys):
        frame_ids = sorted(keys, key=int)
    else:
        frame_ids = sorted(keys)
    ext = np.stack([np.array(data[k]["extrinsics"], dtype=np.float64) for k in frame_ids])
    K = np.stack([np.array(data[k]["intrinsics"], dtype=np.float64) for k in frame_ids])
    return frame_ids, ext, K
|
|
|
|
| def is_fixed_camera(ext: np.ndarray, tol: float = 1e-6) -> bool: |
| """True if extrinsics are identical across all frames (within tol).""" |
| if ext.shape[0] < 2: |
| return True |
| return np.abs(ext - ext[0:1]).max() < tol |
|
|
|
|
def normalize_intrinsic(K_3x3: np.ndarray) -> np.ndarray:
    """Return a float32 copy of K with fx, fy forced positive.

    RLBench/OpenGL-style intrinsics can carry negative focal lengths;
    only the two focal terms are touched — cx, cy and skew pass through.
    """
    K_out = np.array(K_3x3, dtype=np.float32)
    diag = [0, 1]
    K_out[diag, diag] = np.abs(K_out[diag, diag])
    return K_out
|
|
|
|
| |
| |
| |
|
|
def project_points_world_to_pixel(points_world: np.ndarray,
                                  c2w: np.ndarray,
                                  K: np.ndarray):
    """Project world-frame points through a pinhole camera.

    points_world: (N, 3) world-frame 3D points
    c2w: (4, 4) camera-to-world transform (inverted here to get w2c)
    K: (3, 3) intrinsic matrix
    Returns: ((N, 2) pixel coords, (N,) camera-frame z depth).
    Points with |projective z| <= 1e-8 get pixel coords (0, 0) to avoid
    division by zero.
    """
    n_pts = points_world.shape[0]
    world_to_cam = np.linalg.inv(c2w)
    homog = np.hstack([points_world, np.ones((n_pts, 1))])
    cam_pts = (homog @ world_to_cam.T)[:, :3]
    proj = cam_pts @ K.T

    denom = proj[:, 2]
    pixels = np.zeros((n_pts, 2), dtype=np.float64)
    safe = np.abs(denom) > 1e-8
    pixels[safe] = proj[safe, :2] / denom[safe, None]
    return pixels, cam_pts[:, 2]
|
|
|
|
def find_gripper_entry_frame(eef_world_per_video_frame: np.ndarray,
                             c2w: np.ndarray, K: np.ndarray,
                             H: int, W: int,
                             margin: int = 0) -> int:
    """Return the first video frame where the EEF is visible in the image.

    Visible means: projected pixel lies inside [margin, W-margin) x
    [margin, H-margin) AND camera-frame depth is positive.

    eef_world_per_video_frame: (T_video, 3)
    Returns an index in [0, T_video), or -1 if the EEF never appears.
    """
    pixels, depth = project_points_world_to_pixel(
        eef_world_per_video_frame, c2w, K)
    inside_x = (pixels[:, 0] >= margin) & (pixels[:, 0] < W - margin)
    inside_y = (pixels[:, 1] >= margin) & (pixels[:, 1] < H - margin)
    visible = inside_x & inside_y & (depth > 0.0)
    hits = np.flatnonzero(visible)
    if hits.size == 0:
        return -1
    return int(hits[0])
|
|
|
|
| |
| |
| |
|
|
def read_video_frame(video_path: Path, frame_idx: int) -> np.ndarray:
    """Return a single frame (H, W, 3) in BGR order from an .mp4 at frame_idx.

    Raises IOError if the video cannot be opened or the frame cannot be read.
    The capture handle is always released, even on the error paths
    (the original leaked it when isOpened() was False).
    """
    cap = cv2.VideoCapture(str(video_path))
    try:
        if not cap.isOpened():
            raise IOError(f"Cannot open video: {video_path}")
        # Seek to the requested frame before decoding.
        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
        ok, frame = cap.read()
    finally:
        cap.release()
    if not ok:
        raise IOError(f"Cannot read frame {frame_idx} from {video_path}")
    return frame
|
|
|
|
def video_frame_count_and_size(video_path: Path):
    """Return (n_frames, height, width) of an .mp4 via OpenCV metadata.

    Raises IOError if the video cannot be opened. The capture handle is
    always released, even on the error path (the original leaked it when
    isOpened() was False).
    """
    cap = cv2.VideoCapture(str(video_path))
    try:
        if not cap.isOpened():
            raise IOError(f"Cannot open video: {video_path}")
        n = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    finally:
        cap.release()
    return n, h, w
|
|
|
|
| |
| |
| |
|
|
def frame_to_action_index(frame_idx: int, num_frames: int, num_actions: int) -> int:
    """Map a video frame index to the matching action index.

    Works for non-exact ratios (e.g. 41 video frames : 163 actions) by
    rounding frame_idx * (num_actions / num_frames) and clamping to
    [0, num_actions - 1]. The last video frame (and anything beyond it)
    always maps to the last action, which absorbs the ragged tail.
    """
    if num_frames <= 1:
        return 0

    # Pin the final frame to the final action regardless of rounding.
    if frame_idx >= num_frames - 1:
        return num_actions - 1

    mapped = int(round(frame_idx * (num_actions / num_frames)))
    if mapped < 0:
        return 0
    if mapped > num_actions - 1:
        return num_actions - 1
    return mapped
|
|
|
|
def build_action_for_video_frames(actions_8d_full: np.ndarray,
                                  n_video: int) -> np.ndarray:
    """Resample actions to video rate: (T_act, 8) -> (n_video, 8).

    Each video frame t picks the action at frame_to_action_index(t).
    """
    n_actions = actions_8d_full.shape[0]
    mapping = [frame_to_action_index(t, n_video, n_actions)
               for t in range(n_video)]
    return actions_8d_full[np.asarray(mapping, dtype=np.int64)]
|
|
|
|
| |
| |
| |
|
|
def process_view(episode_dir: Path, view_dir: Path, out_view_dir: Path,
                 actions_8d_full: np.ndarray,
                 n_previous: int, hand: str,
                 fix_tol: float, margin: int, observation_offset: int,
                 verbose: bool = True):
    """Process one camera view of one episode end-to-end.

    Loads camera params, keeps only fixed cameras, resamples actions to
    video rate, finds the gripper-entry frame, slices/converts/pads the
    actions, and writes frame.png / actions.npy / extrinsics.npy /
    intrinsics.npy into out_view_dir.

    Args:
        episode_dir: episode folder (currently unused beyond the signature).
        view_dir: view folder containing camera_params.json and rgb/video.mp4.
        out_view_dir: destination folder (created on success only).
        actions_8d_full: (T_act, 8) single-hand action sequence.
        n_previous: EVAC history length; (n_previous - 1) pad frames prepended.
        hand: which side of the 16D layout gets the real data.
        fix_tol: max per-element extrinsics diff to count as a fixed camera.
        margin: pixel margin for the gripper-in-view check.
        observation_offset: frames to advance past the entry frame.
        verbose: accepted for API symmetry; not used in the body.

    Returns a status string for logging ("OK (...)" or "SKIP (...)").
    Nothing is written to disk on any SKIP path.
    """
    cam_path = view_dir / "camera_params.json"
    video_path = view_dir / "rgb" / "video.mp4"

    if not cam_path.exists():
        return f"SKIP (no camera_params.json)"
    if not video_path.exists():
        return f"SKIP (no rgb/video.mp4)"

    # Only fixed cameras are usable: a single extrinsic must describe the
    # whole clip, since we export one c2w matrix per view.
    frame_ids, ext, K_stack = load_camera_params(cam_path)
    if not is_fixed_camera(ext, tol=fix_tol):
        max_diff = float(np.abs(ext - ext[0:1]).max())
        return f"SKIP (camera not fixed, max ext diff={max_diff:.4f})"

    # Camera is fixed, so frame 0's parameters stand in for all frames.
    c2w = ext[0].astype(np.float32)
    K = normalize_intrinsic(K_stack[0])

    n_video, H, W = video_frame_count_and_size(video_path)
    if n_video < 2:
        return f"SKIP (video has {n_video} frame(s))"

    # Resample actions so index t lines up with video frame t.
    T_act_full = actions_8d_full.shape[0]
    actions_video_rate = build_action_for_video_frames(actions_8d_full, n_video)

    # First frame where the right-hand EEF (xyz in cols 0:3) projects
    # inside the image with positive depth.
    eef_world_seq = actions_video_rate[:, 0:3].astype(np.float32)
    t_entry = find_gripper_entry_frame(eef_world_seq, c2w, K,
                                       H=H, W=W, margin=margin)
    if t_entry < 0:
        return f"SKIP (gripper never projects into view; video={n_video})"

    # Advance past the entry frame; must leave at least one frame of
    # trajectory after t_start.
    t_start = t_entry + observation_offset
    if t_start >= n_video - 1:
        return (f"SKIP (t_start={t_start} (entry={t_entry} + offset={observation_offset}) "
                f">= n_video={n_video})")

    actions_sliced_8d = actions_video_rate[t_start:]
    T_out = actions_sliced_8d.shape[0]

    # 8D single-hand -> 16D dual-hand, then history padding for EVAC.
    a16 = single_to_dual(actions_sliced_8d, hand=hand)
    a16 = prepend_history_pad(a16, n_previous=n_previous)

    # Observation image is the video frame at t_start (BGR, as cv2 expects).
    frame_bgr = read_video_frame(video_path, t_start)

    # All checks passed — write outputs.
    out_view_dir.mkdir(parents=True, exist_ok=True)
    cv2.imwrite(str(out_view_dir / "frame.png"), frame_bgr)
    np.save(out_view_dir / "actions.npy", a16)
    np.save(out_view_dir / "extrinsics.npy", c2w)
    np.save(out_view_dir / "intrinsics.npy", K)

    return (f"OK (entry={t_entry}, t_start={t_start}, T_out={T_out}, padded={a16.shape[0]}, "
            f"video={n_video}x{H}x{W}, actions={T_act_full}, ratio={T_act_full/n_video:.3f})")
|
|
|
|
| |
| |
| |
|
|
def process_episode(ep_dir: Path, out_ep_dir: Path,
                    n_previous: int, hand: str,
                    fix_tol: float, margin: int, observation_offset: int,
                    verbose: bool = True):
    """Process every camera view of one episode, logging one line per view.

    Skips (with a printed reason) when actions.npy is missing, has the
    wrong shape, or no view folder contains camera_params.json.
    """
    actions_path = ep_dir / "actions.npy"
    if not actions_path.exists():
        print(f"[{ep_dir.name}] SKIP: no actions.npy")
        return

    actions_8d_full = np.load(actions_path)
    wrong_shape = (actions_8d_full.ndim != 2
                   or actions_8d_full.shape[1] != 8)
    if wrong_shape:
        print(f"[{ep_dir.name}] SKIP: actions.npy shape {actions_8d_full.shape} != (T, 8)")
        return

    # A view folder is any subdirectory holding a camera_params.json.
    view_dirs = []
    for child in sorted(ep_dir.iterdir()):
        if child.is_dir() and (child / "camera_params.json").exists():
            view_dirs.append(child)
    if not view_dirs:
        print(f"[{ep_dir.name}] SKIP: no view folders with camera_params.json")
        return

    for view_dir in view_dirs:
        status = process_view(
            episode_dir=ep_dir, view_dir=view_dir,
            out_view_dir=out_ep_dir / view_dir.name,
            actions_8d_full=actions_8d_full,
            n_previous=n_previous, hand=hand,
            fix_tol=fix_tol, margin=margin,
            observation_offset=observation_offset,
            verbose=verbose,
        )
        print(f"[{ep_dir.name}/{view_dir.name}] {status}")
|
|
|
|
def main():
    """CLI entry point: parse args, discover episodes, process each one."""
    p = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__
    )
    p.add_argument("-i", "--input_root", required=True, type=Path,
                   help="Root folder containing episode_* subfolders")
    p.add_argument("-o", "--output_root", required=True, type=Path,
                   help="Output folder (will be created)")
    p.add_argument("--episodes", nargs="*", default=None,
                   help="Only process these episode subfolder names (default: all)")
    p.add_argument("--n_previous", type=int, default=4,
                   help="EVAC history length to pad (default 4)")
    p.add_argument("--hand", choices=["left", "right"], default="right",
                   help="Which side of the 16D layout gets the real data")
    p.add_argument("--fix_tol", type=float, default=1e-6,
                   help="Max per-element extrinsics diff to count as 'fixed camera'")
    p.add_argument("--margin", type=int, default=0,
                   help="Pixel margin for gripper-in-view check (default 0)")
    # NOTE: help text previously claimed "default 2" while the actual
    # default was 3 — fixed to match the real default.
    p.add_argument("--observation_offset", type=int, default=3,
                   help="Number of video frames to advance past the gripper-entry "
                        "frame before taking observation (default 3)")
    args = p.parse_args()

    if not args.input_root.exists():
        raise FileNotFoundError(args.input_root)
    args.output_root.mkdir(parents=True, exist_ok=True)

    # Every subdirectory is a candidate episode; optionally filter by name.
    ep_dirs = sorted([d for d in args.input_root.iterdir() if d.is_dir()])
    if args.episodes:
        wanted = set(args.episodes)
        ep_dirs = [d for d in ep_dirs if d.name in wanted]

    print(f"Found {len(ep_dirs)} episode(s) to process in {args.input_root}")
    print(f"Output -> {args.output_root}")
    print(f"Params: n_previous={args.n_previous}, hand={args.hand}, "
          f"fix_tol={args.fix_tol}, margin={args.margin}, "
          f"observation_offset={args.observation_offset}")
    print("-" * 70)

    for ep_dir in ep_dirs:
        out_ep_dir = args.output_root / ep_dir.name
        process_episode(
            ep_dir=ep_dir, out_ep_dir=out_ep_dir,
            n_previous=args.n_previous,
            hand=args.hand, fix_tol=args.fix_tol, margin=args.margin,
            observation_offset=args.observation_offset,
        )
|
|
|
|
| if __name__ == "__main__": |
| main() |