| """ |
| get_llm_calib_data.py |
| |
| Script to extract LLM input embeddings from OpenVLA-OFT forward pass for use as calibration data in quantization. |
| This script captures the multimodal embeddings (vision + text + proprio) that are fed to the LLM. |
| |
| The script randomly samples episodes from the dataset and captures ALL frames within each selected episode. |
| |
| Run with: |
| python vla-scripts/get_llm_calib_data.py \ |
| --vla_path <PATH/TO/CHECKPOINT> \ |
| --dataset_name libero_spatial_no_noops \ |
| --output_path ./calib_data/libero_spatial.bin \ |
| --num_episodes 10 |
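
Outputs (written alongside --output_path):
    <name>.bin              packed fp32 LLM input embeddings (not written with --targets_only)
    <name>.json             metadata sidecar for the .bin file (not written with --targets_only)
    <name>_targets.npy      ground-truth action-token IDs per frame
    <name>_oft_targets.npy  continuous OFT action targets per frame (only if the batches contain "actions")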
| """ |
|
|
import json
import os
import random
import struct
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List

import draccus
import numpy as np
import torch
import tqdm
from transformers import AutoConfig, AutoImageProcessor, AutoModelForVision2Seq, AutoProcessor

from prismatic.extern.hf.configuration_prismatic import OpenVLAConfig
from prismatic.extern.hf.modeling_prismatic import OpenVLAForActionPrediction
from prismatic.extern.hf.processing_prismatic import PrismaticImageProcessor, PrismaticProcessor
from prismatic.models.backbones.llm.prompting import PurePromptBuilder
from prismatic.util.data_utils import PaddedCollatorForActionPrediction
from prismatic.vla.action_tokenizer import ActionTokenizer
from prismatic.vla.datasets import EpisodicRLDSDataset, RLDSBatchTransform

os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
|
CALIB_MAGIC = b"OPENVLA_CALIB\0\0\0"
CALIB_VERSION = 2
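
# Binary layout written by `save_embeddings_to_binary` (all integers little-endian):
#   bytes 0-15          : CALIB_MAGIC
#   uint32              : CALIB_VERSION
#   uint32              : num_frames
#   uint32              : hidden_dim
#   5 x uint32          : reserved (written as zeros)
#   num_frames x uint32 : per-frame sequence lengths
#   num_frames x uint64 : byte offsets of each frame within the data section
#   data                : float32 embeddings, one [seq_len, hidden_dim] block per frame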
|
|
|
|
@dataclass
class CalibrationConfig:
    vla_path: str = "openvla/openvla-7b"                      # Path or HF Hub ID of the OpenVLA-OFT checkpoint
    data_root_dir: Path = Path("modified_libero_rlds_data")   # Root directory containing RLDS datasets
    dataset_name: str = "libero_spatial_no_noops"             # RLDS dataset to draw calibration episodes from
    output_path: Path = Path("calibration_data.bin")          # Output path for the calibration binary
    num_episodes: int = -1                                    # Episodes to sample (-1 = all episodes)
    num_images_in_input: int = 2                              # Camera views per observation (second view = wrist)
    use_proprio: bool = True                                  # Include proprioceptive state in the model input
    batch_size: int = 1                                       # Frames per forward pass
    seed: int = 42                                            # Seed for episode sampling
    targets_only: bool = False                                # Skip model loading; extract action targets only
|
|
|
|
class EpisodeTaskTransform:
    """Minimal transform to extract task language per episode."""

    def __call__(self, rlds_batch: Dict) -> Dict[str, str]:
        lang = rlds_batch["task"]["language_instruction"].decode().lower()
        return {"language_instruction": lang}
|
|
|
|
def select_episode_indices_stratified(
    cfg: CalibrationConfig,
    image_sizes,
) -> set:
    """Select episode indices with equal per-task sampling."""
    index_dataset = EpisodicRLDSDataset(
        cfg.data_root_dir,
        cfg.dataset_name,
        EpisodeTaskTransform(),
        resize_resolution=image_sizes,
        shuffle_buffer_size=1,
        image_aug=False,
    )

    task_to_indices: Dict[str, List[int]] = {}
    num_total = len(index_dataset)
    for ep_idx, episode_frames in enumerate(tqdm.tqdm(index_dataset, total=num_total, desc="Indexing episodes by task")):
        if len(episode_frames) == 0:
            continue
        task = episode_frames[0]["language_instruction"]
        task_to_indices.setdefault(task, []).append(ep_idx)

    if cfg.num_episodes == -1 or cfg.num_episodes >= num_total:
        selected = set(range(num_total))
        print(f"[*] Collecting all episodes: {len(selected)}")
        return selected

    num_tasks = len(task_to_indices)
    if num_tasks == 0:
        raise ValueError("No tasks found while indexing episodes.")
    if cfg.num_episodes % num_tasks != 0:
        raise ValueError(
            f"num_episodes={cfg.num_episodes} must be divisible by number of tasks={num_tasks} "
            f"for balanced per-task sampling."
        )

    per_task = cfg.num_episodes // num_tasks
    selected = set()
    print(f"[*] Stratified sampling: {per_task} episode(s) per task across {num_tasks} tasks")

    for task in sorted(task_to_indices.keys()):
        indices = task_to_indices[task]
        if per_task > len(indices):
            raise ValueError(
                f"Requested {per_task} episodes for task '{task}', but only {len(indices)} available."
            )
        chosen = random.sample(indices, per_task)
        selected.update(chosen)
        print(f" - {task}: selected {len(chosen)} / {len(indices)}")

    print(f"[*] Selected {len(selected)} episodes total (balanced)")
    return selected
|
|
|
|
def save_embeddings_to_binary(embeddings_list: List[np.ndarray], output_path: Path, config: dict) -> None:
    """Save embeddings to binary format for llama.cpp imatrix calibration."""
    num_samples = len(embeddings_list)
    if num_samples == 0:
        raise ValueError("No embeddings to save!")

    hidden_dim = embeddings_list[0].shape[1]
    print(f"\nSaving {num_samples} samples to {output_path}")
    print(f" Hidden dim: {hidden_dim}")

    # Per-frame sequence lengths and byte offsets into the data section
    sequence_lengths = [emb.shape[0] for emb in embeddings_list]
    offsets = []
    current_offset = 0
    for emb in embeddings_list:
        offsets.append(current_offset)
        current_offset += emb.shape[0] * hidden_dim * 4  # float32 = 4 bytes per value

    seq_lens_array = np.array(sequence_lengths)
    print(f" Seq lengths: min={seq_lens_array.min()}, max={seq_lens_array.max()}, mean={seq_lens_array.mean():.1f}")
    print(f" Total size: {current_offset / (1024**2):.2f} MB")

    output_path.parent.mkdir(parents=True, exist_ok=True)

    with open(output_path, 'wb') as f:
        # Header (see the layout comment next to CALIB_MAGIC)
        f.write(CALIB_MAGIC)
        f.write(struct.pack('<I', CALIB_VERSION))
        f.write(struct.pack('<I', num_samples))
        f.write(struct.pack('<I', hidden_dim))
        f.write(struct.pack('<IIII', 0, 0, 0, 0))  # reserved
        f.write(struct.pack('<I', 0))  # reserved

        # Index tables: per-frame sequence lengths, then byte offsets
        for seq_len in sequence_lengths:
            f.write(struct.pack('<I', seq_len))
        for offset in offsets:
            f.write(struct.pack('<Q', offset))
        # Raw fp32 embedding data, one frame after another
        for emb in embeddings_list:
            f.write(emb.astype(np.float32).tobytes())

    # Human-readable metadata sidecar
    metadata = {
        "format": "openvla_oft_calibration",
        "version": CALIB_VERSION,
        "num_frames": num_samples,
        "hidden_dim": hidden_dim,
        "dataset": config['dataset_name'],
        "model": config['vla_path'],
        "num_episodes": config['num_episodes'],
        "sequence_length_stats": {
            "min": int(seq_lens_array.min()),
            "max": int(seq_lens_array.max()),
            "mean": float(seq_lens_array.mean()),
        },
    }
    with open(output_path.with_suffix('.json'), 'w') as f:
        json.dump(metadata, f, indent=2)

    print(f"Saved to {output_path}")
|
|
|
|
@draccus.wrap()
def collect_calibration_data(cfg: CalibrationConfig) -> None:
    print(f"Collecting calibration data from `{cfg.vla_path}` using `{cfg.dataset_name}`")
    if cfg.targets_only:
        print("[*] targets_only mode: skipping model loading, only extracting action labels")

    random.seed(cfg.seed)

    # Register the OpenVLA model and processor with the HF Auto* classes
    AutoConfig.register("openvla", OpenVLAConfig)
    AutoImageProcessor.register(OpenVLAConfig, PrismaticImageProcessor)
    AutoProcessor.register(OpenVLAConfig, PrismaticProcessor)
    AutoModelForVision2Seq.register(OpenVLAConfig, OpenVLAForActionPrediction)

    print("[*] Loading processor...")
    processor = AutoProcessor.from_pretrained(cfg.vla_path, trust_remote_code=True)

    vla = None
    image_sizes = None
    if not cfg.targets_only:
        device_id = 0
        torch.cuda.set_device(device_id)
        print("[*] Loading model...")
        vla = AutoModelForVision2Seq.from_pretrained(
            cfg.vla_path,
            torch_dtype=torch.bfloat16,
            low_cpu_mem_usage=True,
            trust_remote_code=True,
        ).to(device_id)
        vla.vision_backbone.set_num_images_in_input(cfg.num_images_in_input)
        vla.eval()

        # Rebind forward to the base PrismaticForConditionalGeneration implementation so the
        # call below can pass `calibration_mode` and read back the multimodal embeddings
        from prismatic.extern.hf.modeling_prismatic import PrismaticForConditionalGeneration
        vla.forward = PrismaticForConditionalGeneration.forward.__get__(vla, type(vla))

        print(f" Hidden dim: {vla.llm_dim}")
        print(f" Num patches: {vla.vision_backbone.get_num_patches()}")
        image_sizes = tuple(vla.config.image_sizes)
    else:
        # No model needed; read the image sizes from the config alone
        model_config = AutoConfig.from_pretrained(cfg.vla_path, trust_remote_code=True)
        image_sizes = tuple(model_config.image_sizes)
|
|
    # Build the action tokenizer, batch transform, episodic dataset, and collator
    print(f"[*] Loading dataset: {cfg.dataset_name}")
    action_tokenizer = ActionTokenizer(processor.tokenizer)
    batch_transform = RLDSBatchTransform(
        action_tokenizer,
        processor.tokenizer,
        image_transform=processor.image_processor.apply_transform,
        prompt_builder_fn=PurePromptBuilder,
        use_wrist_image=(cfg.num_images_in_input > 1),
        use_proprio=cfg.use_proprio,
    )

    dataset = EpisodicRLDSDataset(
        cfg.data_root_dir,
        cfg.dataset_name,
        batch_transform,
        resize_resolution=image_sizes,
        shuffle_buffer_size=1,
        image_aug=False,
    )
    print(f" Total episodes: {len(dataset)}")

    collator = PaddedCollatorForActionPrediction(
        processor.tokenizer.model_max_length,
        processor.tokenizer.pad_token_id,
        padding_side="right",
    )
|
|
    # Decide which episodes to capture (balanced per task when num_episodes != -1)
    selected = select_episode_indices_stratified(cfg, image_sizes)
    num_total = len(dataset)
    print(f"[*] Collecting {len(selected)} episodes (all frames per episode)")

    embeddings_list: List[np.ndarray] = []
    labels_list: List[np.ndarray] = []
    oft_actions_list: List[np.ndarray] = []
    episodes_done = 0
    IGNORE_INDEX = -100
|
|
    with torch.no_grad():
        for ep_idx, episode_frames in enumerate(tqdm.tqdm(dataset, total=num_total)):
            if ep_idx not in selected:
                continue

            # Process the episode's frames in mini-batches
            for i in range(0, len(episode_frames), cfg.batch_size):
                batch_frames = episode_frames[i:i + cfg.batch_size]
                batch = collator(batch_frames)

                if not cfg.targets_only:
                    # Forward pass that returns the LLM input embeddings (vision + text + proprio)
                    with torch.autocast("cuda", dtype=torch.bfloat16):
                        output = vla(
                            input_ids=batch["input_ids"].to(device_id),
                            attention_mask=batch["attention_mask"].to(device_id),
                            pixel_values=batch["pixel_values"].to(torch.bfloat16).to(device_id),
                            labels=batch["labels"].to(device_id),
                            calibration_mode=True,
                        )

                    mm_embeds = output["multimodal_embeddings"]
                    for j in range(mm_embeds.shape[0]):
                        embeddings_list.append(mm_embeds[j].float().cpu().numpy())

                # Ground-truth action token IDs (drop IGNORE_INDEX prompt positions)
                frame_labels = batch["labels"]
                for j in range(frame_labels.shape[0]):
                    lbl = frame_labels[j].cpu().numpy()
                    labels_list.append(lbl[lbl != IGNORE_INDEX])

                # Continuous OFT action targets, when provided by the collator
                if "actions" in batch:
                    actions = batch["actions"].float().numpy()
                    for j in range(actions.shape[0]):
                        oft_actions_list.append(actions[j])

            episodes_done += 1
            if episodes_done >= len(selected):
                break
|
|
| print(f"\n[*] Collected {len(labels_list)} frames from {episodes_done} episodes") |
|
|
| |
| if not cfg.targets_only: |
| if embeddings_list: |
| print(f" Sample shape: {embeddings_list[0].shape}") |
| config_dict = { |
| "dataset_name": cfg.dataset_name, |
| "vla_path": cfg.vla_path, |
| "num_episodes": episodes_done, |
| } |
| save_embeddings_to_binary(embeddings_list, cfg.output_path, config_dict) |
|
|
| |
| targets_array = np.stack(labels_list, axis=0) |
| targets_path = cfg.output_path.with_name(cfg.output_path.stem + "_targets.npy") |
| np.save(targets_path, targets_array) |
| print(f"Saved token-ID targets: shape={targets_array.shape} to {targets_path}") |
|
|
| |
| if oft_actions_list: |
| oft_targets_array = np.stack(oft_actions_list, axis=0) |
| oft_targets_path = cfg.output_path.with_name(cfg.output_path.stem + "_oft_targets.npy") |
| np.save(oft_targets_path, oft_targets_array) |
| print(f"Saved OFT action targets: shape={oft_targets_array.shape} to {oft_targets_path}") |
|
|
| print("\nDone!") |
|
|
|
|
| if __name__ == "__main__": |
| collect_calibration_data() |
|
|