| """ |
| SmolLM Variance Analysis for MaxRL Policy Gradient. |
| |
| Measures the gradient variance of MaxRL's policy gradient estimator by sampling |
| different rollout subsets from pre-computed data and computing how much the |
| resulting policy gradients vary. |
| |
| This script also supports an ablation on the MaxRL baseline term. Here, |
| `BASELINE=True` means we use the standard MaxRL-style mean-centering in the |
| numerator: |
| |
| (score - mean_score) / (mean_score + epsilon) |
| |
| and `BASELINE=False` removes only that centering term while keeping the MaxRL |
| normalization in the denominator: |
| |
| score / (mean_score + epsilon) |
| |
| So the ablation isolates the effect of the baseline term inside MaxRL rather |
| than switching to vanilla REINFORCE. |
| |
| Within each round, rollout subsets for the same prompt are sampled without |
| replacement across subsets, so different subsets do not reuse the same rollout. |
| """ |

import json
import os
import random

import numpy as np
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


# Sampling configuration.
BATCH_SIZE = 16               # prompts sampled per round
ROLLOUT_NUM = 4               # rollouts per subset (per prompt)
NUMBER_BATCHES_PER_ROUND = 4  # gradient samples (subsets) per round, K
TOTAL_ROUNDS = 5
BASELINE = False              # True: keep MaxRL mean-centering; False: ablate it

# Model, data, and runtime configuration.
MODEL_PATH = "/work/nvme/bgif/gzeng/MAXRL/checkpoints/math/smollm2_0.3B_MaxRL_gsm8k_1000_steps"
DATA_PATH = "/work/nvme/bgif/gzeng/MAXRL/variance_analysis/data/SmolLM/512x512.jsonl"
MAX_SEQ_LEN = 2048
MICRO_BATCH_SIZE = 8
SEED = 42
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
DTYPE = torch.bfloat16 if torch.cuda.is_available() else torch.float32
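# With the defaults above, each round needs NUMBER_BATCHES_PER_ROUND *
# ROLLOUT_NUM = 4 * 4 = 16 distinct rollouts per sampled prompt so that
# subsets within a round never overlap. The data file name (512x512.jsonl)
# suggests 512 rollouts per prompt, which would comfortably satisfy this;
# the requirement is checked at runtime either way and violations raise a
# ValueError.
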
def load_rollout_data(data_path: str) -> dict:
    """Load pre-computed rollouts and group them by prompt.

    Returns:
        dict mapping prompt_id (int) -> {
            "input": str,
            "rollouts": [{"output": str, "score": float}, ...]
        }
    """
    prompt_to_id = {}
    prompt_data = {}

    with open(data_path, "r") as f:
        for line in f:
            item = json.loads(line)
            prompt_text = item["input"]
            if prompt_text not in prompt_to_id:
                pid = len(prompt_to_id)
                prompt_to_id[prompt_text] = pid
                prompt_data[pid] = {"input": prompt_text, "rollouts": []}
            pid = prompt_to_id[prompt_text]
            prompt_data[pid]["rollouts"].append({
                "output": item["output"],
                "score": item["score"],
            })

    print(f"Loaded {len(prompt_data)} prompts; "
          f"prompt 0 has {len(prompt_data[0]['rollouts'])} rollouts")
    return prompt_data
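# Expected JSONL schema for DATA_PATH, one rollout per line (values below are
# illustrative, not taken from the real file):
#
#   {"input": "Q: Natalia sold 48 clips ...", "output": "Step 1: ...", "score": 1.0}
#   {"input": "Q: Natalia sold 48 clips ...", "output": "Step 1: ...", "score": 0.0}
#
# Lines sharing the same "input" string are grouped under one prompt id.
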
def compute_maxrl_advantage(scores: list[float], epsilon: float = 1e-6) -> list[float]:
    """Compute MaxRL-style advantages for a single prompt's rollouts.

    This function is used to study the effect of the baseline term in MaxRL.

    If BASELINE is True:
        advantage_j = (score_j - mean) / (mean + epsilon)

    If BASELINE is False:
        advantage_j = score_j / (mean + epsilon)

    In both cases the denominator stays the same; the ablation only removes
    the baseline/mean-centering term from the numerator.
    """
    mean = sum(scores) / len(scores)
    if BASELINE:
        return [(s - mean) / (mean + epsilon) for s in scores]
    else:
        # Same normalization, baseline term removed from the numerator.
        return [s / (mean + epsilon) for s in scores]
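# Worked example (hypothetical 0/1 correctness scores for one prompt):
#   scores = [1.0, 0.0, 1.0, 0.0]  ->  mean = 0.5
#   BASELINE=True:  [(1-0.5)/0.5, (0-0.5)/0.5, ...] ~= [ 1.0, -1.0,  1.0, -1.0]
#   BASELINE=False: [ 1/0.5,       0/0.5,      ...] ~= [ 2.0,  0.0,  2.0,  0.0]
# (up to the epsilon in the denominator). The un-baselined variant keeps every
# advantage non-negative, which is exactly what the ablation probes.
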
def tokenize_and_get_response_mask(
    tokenizer,
    prompt: str,
    response: str,
    max_seq_len: int,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Tokenize prompt+response and create a response-only mask.

    Returns:
        input_ids: (seq_len,) token ids
        response_mask: (seq_len,) binary mask, 1 for response tokens
    """
    prompt_ids = tokenizer.encode(prompt, add_special_tokens=False)
    response_ids = tokenizer.encode(response, add_special_tokens=False)

    # Truncate to fit max_seq_len, preferring to cut the response.
    total_len = len(prompt_ids) + len(response_ids)
    if total_len > max_seq_len:
        max_resp = max_seq_len - len(prompt_ids)
        if max_resp <= 0:
            # The prompt alone exceeds the budget: keep its first half and
            # leave the remaining room for the response.
            prompt_ids = prompt_ids[:max_seq_len // 2]
            max_resp = max_seq_len - len(prompt_ids)
        response_ids = response_ids[:max_resp]

    input_ids = prompt_ids + response_ids
    response_mask = [0] * len(prompt_ids) + [1] * len(response_ids)

    return (
        torch.tensor(input_ids, dtype=torch.long),
        torch.tensor(response_mask, dtype=torch.float32),
    )
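# Example of the layout this produces (token ids are made up):
#   prompt  -> [101, 102, 103]          response -> [201, 202]
#   input_ids     = [101, 102, 103, 201, 202]
#   response_mask = [  0,   0,   0,   1,   1]
# Only positions with mask 1 contribute to the policy-gradient loss below.
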
def pad_batch(
    batch_input_ids: list[torch.Tensor],
    batch_response_masks: list[torch.Tensor],
    pad_token_id: int,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Pad a batch of variable-length sequences.

    Returns:
        input_ids: (B, max_len)
        response_mask: (B, max_len)
        attention_mask: (B, max_len)
    """
    max_len = max(ids.shape[0] for ids in batch_input_ids)
    B = len(batch_input_ids)

    input_ids = torch.full((B, max_len), pad_token_id, dtype=torch.long)
    response_mask = torch.zeros(B, max_len)
    attention_mask = torch.zeros(B, max_len)

    for i, (ids, rmask) in enumerate(zip(batch_input_ids, batch_response_masks)):
        seq_len = ids.shape[0]
        # Left-pad: place each sequence flush against the right edge.
        input_ids[i, max_len - seq_len:] = ids
        response_mask[i, max_len - seq_len:] = rmask
        attention_mask[i, max_len - seq_len:] = 1.0

    return input_ids, response_mask, attention_mask
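# Left-padding example for a batch of two sequences (PAD = pad_token_id,
# token ids made up):
#   seq A: [101, 102, 103, 201]   seq B: [104, 202]
#   input_ids      = [[101, 102, 103, 201],
#                     [PAD, PAD, 104, 202]]
#   attention_mask = [[1, 1, 1, 1],
#                     [0, 0, 1, 1]]
# Because padding sits on the left, the shifted labels for real tokens are
# themselves real tokens; padding never interrupts a sequence.
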
def compute_policy_gradient_loss(
    model,
    input_ids: torch.Tensor,
    attention_mask: torch.Tensor,
    response_mask: torch.Tensor,
    advantages: torch.Tensor,
) -> tuple[torch.Tensor, int]:
    """Compute the token-mean REINFORCE loss for a micro-batch.

    This matches the repo's default `loss_agg_mode=token-mean`: each response
    token gets the same per-sequence advantage, and we average over valid
    response tokens.

    Returns:
        loss: scalar loss (with grad)
        valid_token_count: number of valid response tokens in this micro-batch
    """
    outputs = model(input_ids=input_ids, attention_mask=attention_mask)
    logits = outputs.logits

    # Shift for next-token prediction: logits at position t score token t+1.
    shift_logits = logits[:, :-1, :]
    shift_labels = input_ids[:, 1:]
    shift_response_mask = response_mask[:, 1:]

    # Per-token log-probabilities of the tokens that were actually generated.
    log_probs = torch.log_softmax(shift_logits, dim=-1)
    token_log_probs = torch.gather(
        log_probs, dim=-1, index=shift_labels.unsqueeze(-1)
    ).squeeze(-1)

    # Broadcast each sequence's advantage over its response tokens; prompt
    # and pad positions are zeroed out by the mask.
    token_losses = -advantages.unsqueeze(-1) * token_log_probs * shift_response_mask
    valid_token_count = int(shift_response_mask.sum().item())
    loss = token_losses.sum() / max(valid_token_count, 1)
    return loss, valid_token_count
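# In equation form, for sequences i with advantage A_i and response tokens t,
# the function above computes
#
#   loss = -(1 / T) * sum_i A_i * sum_t log pi_theta(y_{i,t} | y_{i,<t}),
#
# where T is the number of valid response tokens in the micro-batch. Its
# gradient is the (negated) token-mean REINFORCE policy-gradient estimate.
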
def collect_flat_gradient(model) -> torch.Tensor:
    """Flatten all parameter gradients into a single vector (float32)."""
    grads = []
    for p in model.parameters():
        if p.grad is not None:
            grads.append(p.grad.detach().float().flatten())
        else:
            # Parameters that never received a gradient contribute zeros so
            # the flattened vector always has the same length and layout.
            grads.append(torch.zeros(p.numel(), dtype=torch.float32, device=p.device))
    return torch.cat(grads)
def compute_variance_metrics(
    grad_sum: torch.Tensor,
    grad_sq_sum: torch.Tensor,
    K: int,
    grad_norms: list[float],
    grad_samples: list[torch.Tensor],
) -> dict:
    """Compute variance metrics from accumulated gradient statistics.

    Args:
        grad_sum: sum of K gradient vectors
        grad_sq_sum: sum of K element-wise squared gradient vectors
        K: number of gradient samples (must be >= 2)
        grad_norms: list of gradient norms for each sample
        grad_samples: list of the K gradient vectors for cosine-to-mean stats
    """
    grad_mean = grad_sum / K
    mean_grad_norm = grad_mean.norm().item()

    # Unbiased per-coordinate variance: E[g^2] - E[g]^2 with Bessel's
    # correction K / (K - 1). Its sum is the trace of the sample covariance.
    elementwise_var = (grad_sq_sum / K - grad_mean ** 2) * (K / (K - 1))
    trace_variance = elementwise_var.sum().item()

    # Scale-free summary: trace of the covariance relative to the squared
    # norm of the mean gradient.
    relative_variance = trace_variance / (mean_grad_norm ** 2 + 1e-10)

    cosine_sims_to_mean = []
    if mean_grad_norm > 0:
        for grad in grad_samples:
            cos_sim = torch.nn.functional.cosine_similarity(
                grad.unsqueeze(0), grad_mean.unsqueeze(0),
            ).item()
            cosine_sims_to_mean.append(cos_sim)

    return {
        "mean_grad_norm": mean_grad_norm,
        "trace_variance": trace_variance,
        "relative_variance": relative_variance,
        # Cast to built-in floats so the results dict stays JSON-serializable.
        "avg_sample_grad_norm": float(np.mean(grad_norms)),
        "std_sample_grad_norm": float(np.std(grad_norms)),
        "avg_cosine_similarity_to_mean": (
            float(np.mean(cosine_sims_to_mean)) if cosine_sims_to_mean else float("nan")
        ),
    }
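# The estimators above, written out for gradient samples g_1, ..., g_K:
#
#   mean gradient        g_bar = (1/K) * sum_k g_k
#   trace of covariance  tr(S) = sum_d K/(K-1) * ( (1/K) sum_k g_{k,d}^2 - g_bar_d^2 )
#   relative variance          = tr(S) / (||g_bar||^2 + 1e-10)
#
# Only running sums (grad_sum, grad_sq_sum) are needed for the first two; the
# K full gradient vectors are kept only for the cosine-to-mean statistic.
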
def run_variance_analysis():
    random.seed(SEED)
    np.random.seed(SEED)
    torch.manual_seed(SEED)

    print(f"Loading model from {MODEL_PATH} ...")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(
        MODEL_PATH, torch_dtype=DTYPE,
    ).to(DEVICE)
    # eval() disables dropout so gradient differences come only from the
    # sampled rollout subsets; gradients still flow since requires_grad is on.
    model.eval()
    for p in model.parameters():
        p.requires_grad_(True)

    total_params = sum(p.numel() for p in model.parameters())
    print(f"Model loaded: {total_params:,} parameters on {DEVICE}")

    print(f"Loading rollout data from {DATA_PATH} ...")
    prompt_data = load_rollout_data(DATA_PATH)
    all_prompt_ids = list(prompt_data.keys())

    all_round_metrics = []

    for round_idx in range(TOTAL_ROUNDS):
        print(f"\n{'='*60}")
        print(f"Round {round_idx + 1}/{TOTAL_ROUNDS}")
        print(f"{'='*60}")

        # Sample this round's prompts, then pre-sample, for every prompt, all
        # rollouts the round will need. Drawing them in one shot without
        # replacement guarantees the subsets below never share a rollout.
        sampled_prompts = random.sample(all_prompt_ids, BATCH_SIZE)

        rollouts_needed_per_prompt = NUMBER_BATCHES_PER_ROUND * ROLLOUT_NUM
        round_rollout_subsets = {}
        for pid in sampled_prompts:
            rollouts = prompt_data[pid]["rollouts"]
            if len(rollouts) < rollouts_needed_per_prompt:
                raise ValueError(
                    "Not enough rollouts for non-overlapping sampling in one round: "
                    f"prompt {pid} has {len(rollouts)} rollouts, but "
                    f"{rollouts_needed_per_prompt} are required "
                    f"({NUMBER_BATCHES_PER_ROUND} subsets x {ROLLOUT_NUM} rollouts)."
                )

            sampled_rollouts_for_round = random.sample(
                rollouts, rollouts_needed_per_prompt,
            )
            # Partition the drawn rollouts into consecutive, disjoint subsets
            # of size ROLLOUT_NUM.
            round_rollout_subsets[pid] = [
                sampled_rollouts_for_round[
                    subset_start:subset_start + ROLLOUT_NUM
                ]
                for subset_start in range(
                    0, rollouts_needed_per_prompt, ROLLOUT_NUM,
                )
            ]

        # Accumulators for the K = NUMBER_BATCHES_PER_ROUND gradient samples.
        grad_sum = torch.zeros(total_params, dtype=torch.float32)
        grad_sq_sum = torch.zeros(total_params, dtype=torch.float32)
        grad_samples = []
        grad_norms = []

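        # Partitioning example with the defaults (16 drawn rollouts, subsets
        # of 4): subset 0 <- draws [0:4], subset 1 <- [4:8], subset 2 <- [8:12],
        # subset 3 <- [12:16]. Each subset yields one independent
        # policy-gradient sample for this round's variance estimate.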
        for subset_idx in range(NUMBER_BATCHES_PER_ROUND):
            print(f" Subset {subset_idx + 1}/{NUMBER_BATCHES_PER_ROUND} ...", end=" ")

            # Build the flat list of (sequence, advantage) pairs for this subset.
            all_input_ids = []
            all_response_masks = []
            all_advantages = []

            for pid in sampled_prompts:
                prompt_text = prompt_data[pid]["input"]

                sampled_rollouts = round_rollout_subsets[pid][subset_idx]
                scores = [r["score"] for r in sampled_rollouts]
                advantages = compute_maxrl_advantage(scores)

                for rollout, adv in zip(sampled_rollouts, advantages):
                    ids, rmask = tokenize_and_get_response_mask(
                        tokenizer, prompt_text, rollout["output"], MAX_SEQ_LEN,
                    )
                    all_input_ids.append(ids)
                    all_response_masks.append(rmask)
                    all_advantages.append(adv)

            # Accumulate one full-batch gradient over micro-batches. rmask[1:]
            # mirrors the label shift inside the loss, so total_valid_tokens
            # matches the per-micro-batch counts.
            model.zero_grad()
            num_samples = len(all_input_ids)
            total_valid_tokens = int(
                sum(rmask[1:].sum().item() for rmask in all_response_masks)
            )
            total_loss = 0.0

            for mb_start in range(0, num_samples, MICRO_BATCH_SIZE):
                mb_end = min(mb_start + MICRO_BATCH_SIZE, num_samples)

                mb_ids = all_input_ids[mb_start:mb_end]
                mb_masks = all_response_masks[mb_start:mb_end]
                mb_advs = all_advantages[mb_start:mb_end]

                input_ids, response_mask, attention_mask = pad_batch(
                    mb_ids, mb_masks, tokenizer.pad_token_id,
                )
                input_ids = input_ids.to(DEVICE)
                response_mask = response_mask.to(DEVICE)
                attention_mask = attention_mask.to(DEVICE)
                advantages_t = torch.tensor(mb_advs, dtype=DTYPE, device=DEVICE)

                mb_loss, mb_valid_tokens = compute_policy_gradient_loss(
                    model, input_ids, attention_mask, response_mask, advantages_t,
                )
                # Re-weight each micro-batch by its share of valid tokens so
                # the summed backward passes equal one global token-mean loss:
                # sum_mb (sum_loss_mb / T_mb) * (T_mb / T_total) = sum_loss / T_total.
                scaled_loss = mb_loss * (mb_valid_tokens / max(total_valid_tokens, 1))
                scaled_loss.backward()
                total_loss += mb_loss.item() * (mb_valid_tokens / max(total_valid_tokens, 1))

            # Snapshot this subset's gradient and update the running moments.
            flat_grad = collect_flat_gradient(model).cpu()
            grad_norm = flat_grad.norm().item()
            grad_samples.append(flat_grad)
            grad_norms.append(grad_norm)

            grad_sum += flat_grad
            grad_sq_sum += flat_grad ** 2

            print(f"loss={total_loss:.6f}, grad_norm={grad_norm:.6f}")

        # Per-round variance metrics over the K subset gradients.
        K = NUMBER_BATCHES_PER_ROUND
        metrics = compute_variance_metrics(grad_sum, grad_sq_sum, K, grad_norms, grad_samples)

        all_round_metrics.append(metrics)

        print(f"\n Round {round_idx + 1} Results:")
        print(f" Mean gradient norm: {metrics['mean_grad_norm']:.6e}")
        print(f" Trace of covariance: {metrics['trace_variance']:.6e}")
        print(f" Relative variance: {metrics['relative_variance']:.6e}")
        print(f" Avg sample grad norm: {metrics['avg_sample_grad_norm']:.6e}")
        print(f" Std sample grad norm: {metrics['std_sample_grad_norm']:.6e}")
        print(
            " Avg cosine sim to mean:"
            f" {metrics['avg_cosine_similarity_to_mean']:.6f}"
        )

    # Aggregate across rounds.
    print(f"\n{'='*60}")
    print(f"FINAL RESULTS (averaged over {TOTAL_ROUNDS} rounds)")
    print(f"{'='*60}")
    print(f" BATCH_SIZE={BATCH_SIZE}, ROLLOUT_NUM={ROLLOUT_NUM}, "
          f"NUMBER_BATCHES_PER_ROUND={NUMBER_BATCHES_PER_ROUND}")

    for key in all_round_metrics[0]:
        values = [m[key] for m in all_round_metrics]
        mean_val = np.mean(values)
        std_val = np.std(values)
        print(f" {key}: {mean_val:.6e} +/- {std_val:.6e}")

    # Persist config, per-round metrics, and cross-round aggregates.
    output_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        f"results_bs{BATCH_SIZE}_nr{ROLLOUT_NUM}_nb{NUMBER_BATCHES_PER_ROUND}_r{TOTAL_ROUNDS}_bl{BASELINE}.json",
    )
    results = {
        "config": {
            "batch_size": BATCH_SIZE,
            "rollout_num": ROLLOUT_NUM,
            "number_batches_per_round": NUMBER_BATCHES_PER_ROUND,
            "total_rounds": TOTAL_ROUNDS,
            "model_path": MODEL_PATH,
            "max_seq_len": MAX_SEQ_LEN,
            "seed": SEED,
            "baseline": BASELINE,
        },
        "per_round": all_round_metrics,
        "averaged": {
            key: {
                "mean": float(np.mean([m[key] for m in all_round_metrics])),
                "std": float(np.std([m[key] for m in all_round_metrics])),
            }
            for key in all_round_metrics[0]
        },
    }
    with open(output_path, "w") as f:
        json.dump(results, f, indent=2)
    print(f"\nResults saved to {output_path}")


if __name__ == "__main__":
    run_variance_analysis()