diff --git "a/unsloth_compiled_cache/UnslothGRPOTrainer.py" "b/unsloth_compiled_cache/UnslothGRPOTrainer.py" new file mode 100644--- /dev/null +++ "b/unsloth_compiled_cache/UnslothGRPOTrainer.py" @@ -0,0 +1,2924 @@ +""" +2025.8.8 +2025.8.9 +4.55.2 +0.21.0 +__UNSLOTH_VERSIONING__ +""" +from torch import Tensor +import torch +import torch.nn as nn +from torch.nn import functional as F +from typing import Any, List, Optional, Tuple, Union, Dict, Set, Callable +from trl.trainer.grpo_trainer import (Any, AutoConfig, AutoModelForSequenceClassification, AutoProcessor, AutoTokenizer, DataLoader, Dataset, FSDP, GRPOConfig, GRPOTrainer, GenerationConfig, IterableDataset, Optional, Path, PeftConfig, PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, RepeatSampler, RewardFunc, Sampler, Sequence, SyncRefModelCallback, Trainer, TrainerCallback, Union, VLLMClient, _ForwardRedirection, apply_chat_template, broadcast_object_list, copy, datasets, defaultdict, deque, disable_dropout_in_model, entropy_from_logits, gather, gather_object, generate_model_card, get_comet_experiment_url, identity, inspect, is_conversational, is_datasets_available, is_flash_attn_2_available, is_liger_kernel_available, is_peft_available, is_peft_model, is_rich_available, is_vllm_available, is_wandb_available, maybe_apply_chat_template, nanmax, nanmin, nanstd, nn, nullcontext, os, pad, partial, prepare_deepspeed, prepare_fsdp, print_prompt_completions_sample, profiling_context, profiling_decorator, re, seed_worker, selective_log_softmax, set_seed, shuffle_sequence_dict, split_pixel_values_by_grid, split_tensor_dict, textwrap, torch, transformers, unsplit_pixel_values_by_grid, unwrap_model_for_generation, wandb, warnings, Any, FSDP, Union, apply_chat_template, broadcast_object_list, copy, gather, gather_object, is_conversational, is_flash_attn_2_available, maybe_apply_chat_template, nanstd, nullcontext, os, pad, profiling_context, re, torch, transformers, unwrap_model_for_generation, entropy_from_logits, os, re, selective_log_softmax, torch, transformers, re, Any, Union, os, profiling_decorator, re, shuffle_sequence_dict, split_pixel_values_by_grid, split_tensor_dict, torch, unsplit_pixel_values_by_grid, Optional, PreTrainedModel, Trainer, is_peft_available, os, re, torch, FSDP, nn, os, re, FSDP, nn, re, torch, GRPOTrainer, Trainer, gather, os, re, torch) + + +import os +from typing import * +from dataclasses import dataclass, field +from packaging.version import Version +import torch +import numpy as np +from contextlib import nullcontext +from torch.nn import functional as F +from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling as TransformersDataCollatorForLanguageModeling + +torch_compile_options = { + "epilogue_fusion" : True, + "max_autotune" : False, + "shape_padding" : True, + "trace.enabled" : False, + "triton.cudagraphs" : False, +} + +@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,) +def chunked_selective_log_softmax(logits, index): + # Split into 4 chunks only + chunked_logits = torch.chunk(logits.reshape(-1, logits.shape[-1]), chunks = 4, dim = 0) + chunked_index = torch.chunk(index.reshape(-1), chunks = 4, dim = 0) + all_per_token_logps = [] + # Below loop does the same as selective_log_softmax(chunk_logits, chunk_index) + for chunk_logits, chunk_index in zip(chunked_logits, chunked_index): + chunk_logits = chunk_logits.to(torch.float32) + selected_logits = torch.gather(chunk_logits, dim = -1, index = chunk_index.unsqueeze(-1)).squeeze(-1) + 
logsumexp_values = torch.logsumexp(chunk_logits, dim = -1) + per_token_logps = selected_logits - logsumexp_values + all_per_token_logps.append(per_token_logps) + pass + all_per_token_logps = torch.concat(all_per_token_logps) + all_per_token_logps = all_per_token_logps.reshape((logits.shape[0], logits.shape[1])) + return all_per_token_logps + +def grpo_compute_loss( + ref_logits, + new_logits, + old_logits, + input_ids, + mask, + beta, + advantages, + **kwargs +): + # All Unsloth Zoo code licensed under LGPLv3 + # Set defaults for optional arguments + loss_type = kwargs.get("loss_type", "grpo") + epsilon_low = kwargs.get("epsilon_low", 0.2) + epsilon_high = kwargs.get("epsilon_high", 0.2) + max_completion_length = kwargs.get("max_completion_length", 8192) + delta = kwargs.get("delta", None) + temperature = kwargs.get("temperature", 1.0) + logit_scale_multiply = kwargs.get("logit_scale_multiply", 0.0) + logit_scale_divide = kwargs.get("logit_scale_divide", 0.0) + logit_softcapping = kwargs.get("logit_softcapping", 0.0) + + input_ids = input_ids.unsqueeze(-1) + + # Optional logit softcapping and logit dividing + if logit_scale_multiply != 0: new_logits = new_logits * logit_scale_multiply + if logit_scale_divide != 0: new_logits = new_logits / logit_scale_divide + if logit_softcapping != 0: new_logits = new_logits * torch.tanh(new_logits / logit_softcapping) + + new_logits = new_logits.to(torch.float32) + # See https://huggingface.co/blog/the_n_implementation_details_of_rlhf_with_ppo#policy-training-implementation-details + if temperature != 1.0: new_logits = new_logits / temperature + new_x = torch.gather(new_logits, dim = -1, index = input_ids).squeeze(-1) + new = new_x - torch.logsumexp(new_logits, dim = -1) + + # x_i - logsumexp(x_i) + with torch.no_grad(): + if beta != 0.0: + assert ref_logits is not None, "ref_logits should not be None when beta != 0.0" + + # Optional logit softcapping and logit dividing + if logit_scale_multiply != 0: ref_logits = ref_logits * logit_scale_multiply + if logit_scale_divide != 0: ref_logits = ref_logits / logit_scale_divide + if logit_softcapping != 0: ref_logits = ref_logits * torch.tanh(ref_logits / logit_softcapping) + + ref_logits = ref_logits.to(torch.float32) + # See https://huggingface.co/blog/the_n_implementation_details_of_rlhf_with_ppo#policy-training-implementation-details + if temperature != 1.0: ref_logits = ref_logits / temperature + ref_x = torch.gather(ref_logits, dim = -1, index = input_ids).squeeze(-1) + ref = ref_x - torch.logsumexp(ref_logits, dim = -1) + pass + + if old_logits is not None: + # Optional logit softcapping and logit dividing + if logit_scale_multiply != 0: old_logits = old_logits * logit_scale_multiply + if logit_scale_divide != 0: old_logits = old_logits / logit_scale_divide + if logit_softcapping != 0: old_logits = old_logits * torch.tanh(old_logits / logit_softcapping) + + old_logits = old_logits.to(torch.float32) + # See https://huggingface.co/blog/the_n_implementation_details_of_rlhf_with_ppo#policy-training-implementation-details + if temperature != 1.0: old_logits = old_logits / temperature + old_x = torch.gather(old_logits, dim = -1, index = input_ids).squeeze(-1) + old = old_x - torch.logsumexp(old_logits, dim = -1) + pass + pass + + # Reverse KL + # Note that this is a low variance low bias estimator for the KL divergence as used in GRPO paper + if beta != 0.0: + kl_i = torch.exp(ref - new) - (ref - new) - 1.0 + + else: + kl_i = 0.0 # set it to 0 to not effect the downstream computation + # Full correct reverse 
KL divergence?? Missing term maybe? + # kl_i = torch.exp(new) * kl_i + + # Below is forward KL (normal KL) + # kl_i = torch.exp(old) * (old - new) + if old_logits is not None: + coef_1 = torch.exp(new - old) + else: + coef_1 = torch.exp(new - new.detach()) + coef_2 = torch.clamp(coef_1, 1 - epsilon_low, 1 + epsilon_high) + + if delta is not None: + loss_1 = torch.clamp(coef_1, max=delta) * advantages.unsqueeze(1) + else: + loss_1 = coef_1 * advantages.unsqueeze(1) + pass + + # Must detach - otherwise gradients are not propagated correctly! + # exp(x - x) == 1 + # loss_i = torch.exp(new - new.detach()) * advantages.unsqueeze(1) + + loss_2 = coef_2 * advantages.unsqueeze(1) + loss_i = -torch.min(loss_1, loss_2) + if beta != 0.0: + loss_i = loss_i + beta * kl_i + + mask = mask.to(torch.float32) + n_mask_per_reward = mask.sum(1) + + # https://github.com/huggingface/trl/blob/main/trl/trainer/grpo_trainer.py#L1363-L1370 + if loss_type == "grpo": + loss = ((loss_i * mask).sum(-1) / mask.sum(-1).clamp(min=1.0)).mean() + elif loss_type == "bnpo": + loss = (loss_i * mask).sum() / mask.sum().clamp(min=1.0) + elif loss_type == "dr_grpo": + loss = (loss_i * mask).sum() / (loss_i.size(0) * max_completion_length) + else: + raise ValueError(f"Unknown loss type: {loss_type}") + + # loss = (loss_i * mask).sum() / mask.sum() + + # Get metrics as well which are folded + with torch.inference_mode(): + completion_length = n_mask_per_reward.mean() + mean_kl_per_reward = (kl_i * mask).sum(1) / n_mask_per_reward + mean_kl = mean_kl_per_reward.mean() + pass + + return loss, completion_length, mean_kl + +class UnslothEfficientGRPO(torch.autograd.Function): + # All Unsloth Zoo code licensed under LGPLv3 + @staticmethod + def forward(ctx, _new_hidden_states, _old_hidden_states, _ref_hidden_states, lm_head, _input_ids, _mask, _advantages, beta, scaler = None, n_chunks = 1, extra_kwargs=None): + if extra_kwargs is None: + extra_kwargs = {} + def compute_loss(new_hidden_states, old_hidden_states, ref_hidden_states, input_ids, mask, advantages, scaling): + new_logits = torch.matmul(new_hidden_states, lm_head.t()) + new_logits = new_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred + with torch.no_grad(): + if beta != 0.0: + ref_logits = torch.matmul(ref_hidden_states, lm_head.t()) + ref_logits = ref_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred + else: + ref_logits = None + if old_hidden_states is not None: + old_logits = torch.matmul(old_hidden_states, lm_head.t()) + old_logits = old_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred + else: + old_logits = None + # if old_hidden_states is not None: + # old_logits = torch.matmul(old_hidden_states, lm_head.t()) #last logit already excluded + # old_logits = old_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred + # else: + # old_logits = None + # unsloth_zoo/rl_replacements.py + loss, completion_length, mean_kl = grpo_compute_loss( + ref_logits, + new_logits, + old_logits, + input_ids, + mask, + beta, + advantages, + **extra_kwargs, + ) + + # Scale loss if needed for mixed precision training + scaled_loss = loss * scaling + # Must add .loss.detach otherwise autograd uses 2x VRAM + return scaled_loss, (loss.detach(), completion_length, mean_kl,) + pass + + device =_new_hidden_states.device + grad_inputs = torch.empty_like(_new_hidden_states) + accumulated_loss = torch.zeros(1, device = device) + accumulated_completion_length = torch.zeros(1, 
device = device) + accumulated_mean_kl = torch.zeros(1, device = device) + + def accumulate_chunk( + new_hidden_states_j, + old_hidden_states_j, + ref_hidden_states_j, + input_ids_j, + mask_j, + advantages_j, + scaling, + grad_inputs_j, + ): + (chunk_grad_input,), (chunk_loss, (unscaled_loss, chunk_completion_length, chunk_mean_kl,)) = torch.func.grad_and_value( + compute_loss, + argnums = (0,), + has_aux = True, + )(new_hidden_states_j, old_hidden_states_j, ref_hidden_states_j, input_ids_j, mask_j, advantages_j, scaling) + accumulated_loss .add_(unscaled_loss) + accumulated_completion_length.add_(chunk_completion_length) + accumulated_mean_kl .add_(chunk_mean_kl) + grad_inputs_j[:] = chunk_grad_input + pass + + accumulate_chunk = torch.compile( + accumulate_chunk, + fullgraph = True, + # [TODO] Dynamic marking causes torch.compile errors if sequence length is long + dynamic = True, + options = torch_compile_options, + ) + + grad_inputs_chunks = torch.chunk(grad_inputs, chunks = n_chunks, dim = 0) + new_hidden_states = torch.chunk(_new_hidden_states, chunks = n_chunks, dim = 0) + if _old_hidden_states is not None: + old_hidden_states = torch.chunk(_old_hidden_states, chunks = n_chunks, dim = 0) + else: + old_hidden_states = [None] * n_chunks + ref_hidden_states = torch.chunk(_ref_hidden_states, chunks = n_chunks, dim = 0) + input_ids = torch.chunk(_input_ids, chunks = n_chunks, dim = 0) + mask = torch.chunk(_mask, chunks = n_chunks, dim = 0) + advantages = torch.chunk(_advantages, chunks = n_chunks, dim = 0) + + # Get mixed precision scaling if seen + scaling = scaler.get_scale() if scaler is not None else 1.0 + + # Force torch.compile to use dynamic shapes for seqlen dim + # mark_dynamic = lambda x: torch._dynamo.mark_dynamic(x, 1) + + for (grad_inputs_j, new_hidden_states_j, old_hidden_states_j, ref_hidden_states_j, input_ids_j, mask_j, advantages_j,) in \ + zip(grad_inputs_chunks, new_hidden_states, old_hidden_states, ref_hidden_states, input_ids, mask, advantages): + + # [TODO] Dynamic marking causes torch.compile errors if sequence length is long + + # mark_dynamic(new_hidden_states_j) + # mark_dynamic(ref_hidden_states_j) + # if old_hidden_states_j is not None: + # mark_dynamic(old_hidden_states_j) + # mark_dynamic(input_ids_j) + # mark_dynamic(mask_j) + + accumulate_chunk( + new_hidden_states_j, + old_hidden_states_j, + ref_hidden_states_j, + input_ids_j, + mask_j, + advantages_j, + scaling, + grad_inputs_j, + ) + pass + + grad_inputs .div_(n_chunks) + accumulated_loss .div_(n_chunks) + accumulated_completion_length.div_(n_chunks) + accumulated_mean_kl .div_(n_chunks) + ctx.save_for_backward(grad_inputs) + return ( + accumulated_loss, + accumulated_completion_length, + accumulated_mean_kl, + ) + pass + + @staticmethod + def backward(ctx, grad_output, dcompletion_length, dmean_kl): + (grad_input,) = ctx.saved_tensors + return (grad_input, None, None, None, None, None, None, None, None, None, None) + pass + +def grpo_accumulated_loss( + trainer, + input_ids, + attention_mask, + logits_to_keep, + completion_mask, + advantages, + old_hidden_states, + n_chunks = -1, + **kwargs, +): + # All Unsloth Zoo code licensed under LGPLv3 + bsz, qlen = input_ids.shape + + # Find closest multiple + factors = [i for i in range(1, bsz + 1) if bsz % i == 0] + if n_chunks == -1: n_chunks = bsz + n_chunks = factors[min(np.searchsorted(factors, n_chunks), len(factors)-1)] + + if not hasattr(trainer, '_autocast_dtype'): + trainer._autocast_dtype = torch.float16 if 
os.environ.get('ACCELERATE_MIXED_PRECISION', 'fp16') == 'fp16' else torch.bfloat16 + if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1': trainer._autocast_dtype = torch.float16 + pass + os.environ["UNSLOTH_RETURN_HIDDEN_STATES"] = "1" + + completion_input_ids = input_ids[:, -logits_to_keep:] + lm_head = trainer.model.get_output_embeddings().weight + + with torch.amp.autocast(device_type = trainer.model.device.type, dtype = trainer._autocast_dtype): + with torch.inference_mode(), trainer.accelerator.unwrap_model(trainer.model, keep_fp32_wrapper = False).disable_adapter(): + ref_hidden_states = trainer.model( + input_ids = input_ids, + attention_mask = attention_mask, + logits_to_keep = logits_to_keep + 1, + ).logits + pass + new_hidden_states = trainer.model( + input_ids = input_ids, + attention_mask = attention_mask, + logits_to_keep = logits_to_keep + 1, + ).logits + + loss, completion_length, mean_kl = UnslothEfficientGRPO.apply( + new_hidden_states, + old_hidden_states, + ref_hidden_states, + lm_head, + completion_input_ids, + completion_mask, + advantages, + trainer.beta, + trainer.accelerator.scaler, + n_chunks, + kwargs # pass kwargs as a dict + ) + pass + # Must force not returning hidden states but logits otherwise gibberish + os.environ["UNSLOTH_RETURN_HIDDEN_STATES"] = "0" + return loss, completion_length, mean_kl + + # Old non efficient code path + new_logits = torch.matmul(new_hidden_states, lm_head.t()) + new_logits = new_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred + old_logits = torch.matmul(old_hidden_states, lm_head.t()) + old_logits = old_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred + loss, completion_length, mean_kl = grpo_compute_loss( + old_logits, + new_logits, + completion_input_ids, + completion_mask, + trainer.beta, + advantages, + ) + return loss, completion_length, mean_kl + pass + +@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options) +def grpo_compute_loss_slow( + ref_logits, + new_logits, + old_logits, + input_ids, + mask, + beta, + advantages, + **kwargs +): + # All Unsloth Zoo code licensed under LGPLv3 + # Set defaults for optional arguments + loss_type = kwargs.get("loss_type", "grpo") + epsilon_low = kwargs.get("epsilon_low", 0.2) + epsilon_high = kwargs.get("epsilon_high", 0.2) + max_completion_length = kwargs.get("max_completion_length", 8192) + delta = kwargs.get("delta", None) + temperature = kwargs.get("temperature", 1.0) + logit_scale_multiply = kwargs.get("logit_scale_multiply", 0.0) + logit_scale_divide = kwargs.get("logit_scale_divide", 0.0) + logit_softcapping = kwargs.get("logit_softcapping", 0.0) + + input_ids = input_ids.unsqueeze(-1) + + # Optional logit softcapping and logit dividing + if logit_scale_multiply != 0: new_logits = new_logits * logit_scale_multiply + if logit_scale_divide != 0: new_logits = new_logits / logit_scale_divide + if logit_softcapping != 0: new_logits = new_logits * torch.tanh(new_logits / logit_softcapping) + + new_logits = new_logits.to(torch.float32) + # See https://huggingface.co/blog/the_n_implementation_details_of_rlhf_with_ppo#policy-training-implementation-details + if temperature != 1.0: new_logits = new_logits / temperature + new_x = torch.gather(new_logits, dim = -1, index = input_ids).squeeze(-1) + new = new_x - torch.logsumexp(new_logits, dim = -1) + + # x_i - logsumexp(x_i) + with torch.no_grad(): + if beta != 0.0: + assert ref_logits is not None, "ref_logits should not be None when beta != 0.0" + + 
# Optional logit softcapping and logit dividing + if logit_scale_multiply != 0: ref_logits = ref_logits * logit_scale_multiply + if logit_scale_divide != 0: ref_logits = ref_logits / logit_scale_divide + if logit_softcapping != 0: ref_logits = ref_logits * torch.tanh(ref_logits / logit_softcapping) + + ref_logits = ref_logits.to(torch.float32) + # See https://huggingface.co/blog/the_n_implementation_details_of_rlhf_with_ppo#policy-training-implementation-details + if temperature != 1.0: ref_logits = ref_logits / temperature + ref_x = torch.gather(ref_logits, dim = -1, index = input_ids).squeeze(-1) + ref = ref_x - torch.logsumexp(ref_logits, dim = -1) + pass + + if old_logits is not None: + # Optional logit softcapping and logit dividing + if logit_scale_multiply != 0: old_logits = old_logits * logit_scale_multiply + if logit_scale_divide != 0: old_logits = old_logits / logit_scale_divide + if logit_softcapping != 0: old_logits = old_logits * torch.tanh(old_logits / logit_softcapping) + + old_logits = old_logits.to(torch.float32) + # See https://huggingface.co/blog/the_n_implementation_details_of_rlhf_with_ppo#policy-training-implementation-details + if temperature != 1.0: old_logits = old_logits / temperature + old_x = torch.gather(old_logits, dim = -1, index = input_ids).squeeze(-1) + old = old_x - torch.logsumexp(old_logits, dim = -1) + pass + pass + + # Reverse KL + # Note that this is a low variance low bias estimator for the KL divergence as used in GRPO paper + if beta != 0.0: + kl_i = torch.exp(ref - new) - (ref - new) - 1.0 + + else: + kl_i = 0.0 # set it to 0 to not effect the downstream computation + # Full correct reverse KL divergence?? Missing term maybe? + # kl_i = torch.exp(new) * kl_i + + # Below is forward KL (normal KL) + # kl_i = torch.exp(old) * (old - new) + if old_logits is not None: + coef_1 = torch.exp(new - old) + else: + coef_1 = torch.exp(new - new.detach()) + coef_2 = torch.clamp(coef_1, 1 - epsilon_low, 1 + epsilon_high) + + if delta is not None: + loss_1 = torch.clamp(coef_1, max=delta) * advantages.unsqueeze(1) + else: + loss_1 = coef_1 * advantages.unsqueeze(1) + pass + + # Must detach - otherwise gradients are not propagated correctly! + # exp(x - x) == 1 + # loss_i = torch.exp(new - new.detach()) * advantages.unsqueeze(1) + + loss_2 = coef_2 * advantages.unsqueeze(1) + loss_i = -torch.min(loss_1, loss_2) + if beta != 0.0: + loss_i = loss_i + beta * kl_i + + mask = mask.to(torch.float32) + n_mask_per_reward = mask.sum(1) + + # https://github.com/huggingface/trl/blob/main/trl/trainer/grpo_trainer.py#L1363-L1370 + if loss_type == "grpo": + loss = ((loss_i * mask).sum(-1) / mask.sum(-1).clamp(min=1.0)).mean() + elif loss_type == "bnpo": + loss = (loss_i * mask).sum() / mask.sum().clamp(min=1.0) + elif loss_type == "dr_grpo": + loss = (loss_i * mask).sum() / (loss_i.size(0) * max_completion_length) + else: + raise ValueError(f"Unknown loss type: {loss_type}") + + # loss = (loss_i * mask).sum() / mask.sum() + + # Get metrics as well which are folded + with torch.inference_mode(): + completion_length = n_mask_per_reward.mean() + mean_kl_per_reward = (kl_i * mask).sum(1) / n_mask_per_reward + mean_kl = mean_kl_per_reward.mean() + pass + + return loss, completion_length, mean_kl + +def vLLMSamplingParams(**kwargs): + from vllm import SamplingParams + sampling_params = SamplingParams(**kwargs) + sampling_params._set_kwargs = kwargs + return sampling_params +@dataclass +class UnslothGRPOConfig(GRPOConfig): + """ + + Configuration class for the [`GRPOTrainer`]. 
+ + This class includes only the parameters that are specific to GRPO training. For a full list of training arguments, + please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this class may + differ from those in [`~transformers.TrainingArguments`]. + + Using [`~transformers.HfArgumentParser`] we can turn this class into + [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the + command line. + + Parameters: + > Parameters that control the model and reference model + + model_init_kwargs (`str`, `dict[str, Any]` or `None`, *optional*, defaults to `None`): + Keyword arguments for [`~transformers.AutoModelForCausalLM.from_pretrained`], used when the `model` + argument of the [`GRPOTrainer`] is provided as a string. + disable_dropout (`bool`, *optional*, defaults to `False`): + Whether to disable dropout in the model. This is useful for training with a reference model, as it prevents + the model from generating different logprobs for the same input. + + > Parameters that control the data preprocessing + + remove_unused_columns (`bool`, *optional*, defaults to `False`): + Whether to only keep the column `"prompt"` in the dataset. If you use a custom reward function that + requires any column other than `"prompts"` and `"completions"`, you should keep this to `False`. + max_prompt_length (`int` or `None`, *optional*, defaults to `512`): + Maximum length of the prompt. If the prompt is longer than this value, it will be truncated left. + num_generations (`int` or `None`, *optional*, defaults to `8`): + Number of generations per prompt to sample. The effective batch size (num_processes * per_device_batch_size + * gradient_accumulation_steps) must be evenly divisible by this value. + max_completion_length (`int` or `None`, *optional*, defaults to `256`): + Maximum length of the generated completion. + ds3_gather_for_generation (`bool`, *optional*, defaults to `True`): + This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for generation, + improving generation speed. However, disabling this option allows training models that exceed the VRAM + capacity of a single GPU, albeit at the cost of slower generation. Disabling this option is not compatible + with vLLM generation. + shuffle_dataset (`bool`, *optional*, defaults to `True`): + Whether to shuffle the training dataset. + + > Parameters that control generation + + generation_batch_size: (`int` or `None`, *optional*, defaults to `None`): + Batch size to use for generation. If `None`, it defaults to the effective training batch size: + `per_device_train_batch_size * num_processes * steps_per_generation`. In other words, there is one + generation batch processed per optimization step. Mutually exclusive with `steps_per_generation`. + steps_per_generation: (`int` or `None`, *optional*, defaults to `None`): + Number of steps per generation. If `None`, it defaults to `gradient_accumulation_steps`. Mutually exclusive + with `generation_batch_size`. + temperature (`float`, defaults to `1.0`): + Temperature for sampling. The higher the temperature, the more random the completions. + top_p (`float`, *optional*, defaults to `1.0`): + Float that controls the cumulative probability of the top tokens to consider. Must be in (0, 1]. Set to + `1.0` to consider all tokens. + top_k (`int` or `None`, *optional*, defaults to `None`): + Number of highest probability vocabulary tokens to keep for top-k-filtering. 
If `None`, top-k-filtering is + disabled and all tokens are considered. + min_p (`float` or `None`, *optional*, defaults to `None`): + Minimum token probability, which will be scaled by the probability of the most likely token. It must be a + value between `0.0` and `1.0`. Typical values are in the `0.01-0.2` range. + repetition_penalty (`float`, *optional*, defaults to `1.0`): + Float that penalizes new tokens based on whether they appear in the prompt and the generated text so far. + Values > `1.0` encourage the model to use new tokens, while values < `1.0` encourage the model to repeat + tokens. + use_transformers_paged (`bool`, *optional*, defaults to `False`): + Whether to use the `transformers` paged implementation for generation. If set to `True`, the `transformers` + paged implementation will be used for generation instead of the default padded implementation. This + parameter is only effective when `use_vllm` is set to `False`. + cache_implementation (`str` or `None`, *optional*, defaults to `None`): + Implementation of the cache method for faster generation when `use_vllm` is set to `False`. + generation_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`): + Additional keyword arguments to pass to `GenerationConfig` (if using transformers) or `SamplingParams` (if + using vLLM) when sampling completions. This can be used to further customize the generation behavior, such + as setting `supress_tokens`, `num_beams`, etc. If it contains keys that conflict with the other generation + parameters (like `min_p`, `top_p`, etc.), they will override them. + + > Parameters that control generation acceleration powered by vLLM + + use_vllm (`bool`, *optional*, defaults to `False`): + Whether to use vLLM for generating completions. If set to `True`, the trainer will use vLLM for generation + instead of the default model.generate(). Requires `vllm` to be installed. + vllm_mode (`str`, *optional*, defaults to `"server"`): + Mode to use for vLLM integration when `use_vllm` is set to `True`. Must be one of `"server"` or + `"colocate"`. + + - `"server"`: The trainer will send generation requests to a separate vLLM server. Make sure a TRL vLLM + server is running (start with `trl vllm-serve`). + - `"colocate"`: vLLM will run in the same process and share the training GPUs. This avoids the need for a + separate server but may cause resource contention with training. + vllm_guided_decoding_regex (`str` or `None`, *optional*, defaults to `None`): + Regex for vLLM guided decoding. If `None` (default), guided decoding is disabled. + + > Parameters that control the vLLM server (only used when `vllm_mode` is `"server"`) + + vllm_server_base_url (`str` or `None`, *optional*, defaults to `None`): + Base URL for the vLLM server (e.g., `"http://localhost:8000"`). If provided, `vllm_server_host` and + `vllm_server_port` are ignored. + vllm_server_host (`str`, *optional*, defaults to `"0.0.0.0"`): + Host of the vLLM server to connect to. Ignored if `vllm_server_base_url` is provided. + vllm_server_port (`int`, *optional*, defaults to `8000`): + Port of the vLLM server to connect to. Ignored if `vllm_server_base_url` is provided. + vllm_server_timeout (`float`, *optional*, defaults to `240.0`): + Total timeout duration in seconds to wait for the vLLM server to be up. If the server is not up after the + timeout, a `ConnectionError` is raised. 
+ + > Parameters that control colocated vLLM execution (only used when `vllm_mode` is `"colocate"`) + + vllm_gpu_memory_utilization (`float`, *optional*, defaults to `0.3`): + Control the GPU memory utilization for vLLM. This setting only applies when `vllm_mode` is set to + `"colocate"`. If you are using `vllm_mode="server"`, this parameter must be passed separately when + launching the vLLM server via the `--vllm_gpu_memory_utilization` flag. + vllm_tensor_parallel_size (`int`, *optional*, defaults to `1`): + Control the tensor parallel size for vLLM. This setting only applies when `vllm_mode` is set to + `"colocate"`. If you are using `vllm_mode="server"`, this parameter must be passed separately when + launching the vLLM server via the `--vllm_tensor_parallel_size` flag. + vllm_model_impl (`str`, *optional*, defaults to `"vllm"`): + Model implementation to use for vLLM. Must be one of `"transformers"` or `"vllm"`. `"transformers"`: Use + the `transformers` backend for model implementation. `"vllm"`: Use the `vllm` library for model + implementation. + + > Parameters that control the training + + beta (`float`, *optional*, defaults to `0.0`): + KL coefficient. If `0.0` (default), the reference model is not loaded, reducing memory usage and improving + training speed. + num_iterations (`int`, *optional*, defaults to `1`): + Number of iterations per batch (denoted as μ in the algorithm). + epsilon (`float`, *optional*, defaults to `0.2`): + Epsilon value for clipping. + delta: (`float` or `None`, *optional*, defaults to `None`): + Enables the upper clipping bound in two-sided GRPO loss when set to a float. If `None` (default), standard + GRPO clipping is used. Recommended to be greater than `1 + ε` when enabled. This method is introduced in + the [INTELLECT-2 tech report](https://huggingface.co/papers/2505.07291). + epsilon_high (`float` or `None`, *optional*, defaults to `None`): + Upper-bound epsilon value for clipping. If not specified, it defaults to the same value as the lower-bound + specified in argument `epsilon`. Paper [DAPO](https://huggingface.co/papers/2503.14476) recommends `0.28`. + importance_sampling_level (`str`, *optional*, defaults to `"token"`): + Controls whether importance sampling ratios are computed at the `"token"` or `"sequence"` level. `"token"` + keeps the raw per-token log-probability ratios (one weight per token). `"sequence"` averages the + log-probability ratios across valid tokens to produce a single ratio per sequence. The + [GSPO paper](https://huggingface.co/papers/2507.18071) shows that sequence-level sampling often yields more + stable training and better alignment with sequence-level rewards. + reward_weights (`list[float]` or `None`, *optional*, defaults to `None`): + Weights for each reward function. Must match the number of reward functions. If `None`, all rewards are + weighted equally with weight `1.0`. + scale_rewards (`bool`, *optional*, defaults to `True`): + Whether to scale the rewards by dividing them by their standard deviation. If `True` (default), the rewards + are normalized by the standard deviation, ensuring they have unit variance. If `False`, no scaling is + applied. The [Dr. GRPO paper](https://huggingface.co/papers/2503.20783) recommends not scaling the rewards, + as scaling by the standard deviation introduces a question-level difficulty bias. + loss_type (`str`, *optional*, defaults to `"bnpo"`): + Specifies the loss formulation to use. 
Supported values are: + + - `"grpo"`: Aggregates token-level losses by normalizing over sequence length. Not recommended due to + length bias—this approach tends to prefer shorter completions with positive advantages and longer ones + with negative advantages. + - `"bnpo"`: Aggregates token-level losses by normalizing number of active token in the local batch. + Note that normalization is performed over the local batch only, so results may slightly vary depending + on the local batch size, despite a constant effective batch size. When using + `per_device_train_batch_size==1`, the loss is equivalent to the GRPO loss. + - `"dr_grpo"`: Aggregates token-level losses by normalizing with a global constant. This method was + introduced in the [Dr. GRPO paper](https://huggingface.co/papers/2503.20783) to eliminate length bias. + The value of the constant corresponds to `max_completion_length`. + mask_truncated_completions (`bool`, *optional*, defaults to `False`): + When enabled, truncated completions are excluded from the loss calculation, preventing them from being + incorrectly penalized and introducing noise during training. According to the + [DAPO](https://huggingface.co/papers/2503.14476) paper, this is a good practice for training stability. + sync_ref_model (`bool`, *optional*, defaults to `False`): + Whether to synchronize the reference model with the active model every `ref_model_sync_steps` steps, using + the `ref_model_mixup_alpha` parameter. This synchronization originates from the + [TR-DPO](https://huggingface.co/papers/2404.09656) paper. + ref_model_mixup_alpha (`float`, *optional*, defaults to `0.6`): + α parameter from the [TR-DPO](https://huggingface.co/papers/2404.09656) paper, which controls the mix + between the current policy and the previous reference policy during updates. The reference policy is + updated according to the equation: `π_ref = α * π_θ + (1 - α) * π_ref_prev`. To use this parameter, you + must set `sync_ref_model=True`. + ref_model_sync_steps (`int`, *optional*, defaults to `512`): + τ parameter from the [TR-DPO](https://huggingface.co/papers/2404.09656) paper, which determines how + frequently the current policy is synchronized with the reference policy. To use this parameter, you must + set `sync_ref_model=True`. + top_entropy_quantile (`float`, *optional*, defaults to `1.0`): + ρ parameter from [Beyond the 80/20 Rule](https://huggingface.co/papers/2506.01939). Keeps in the policy + loss term only the top-ρ quantile of tokens by entropy of the probability distribution at each sequence + position, improving results. Range: `[0.0-1.0]`. A value of `0.0` masks all but the highest entropy token; + `1.0` keeps all tokens. The paper recommends a value of `0.2`. + If used with `mask_truncated_completions=True`, only tokens from non-truncated completions are considered. + use_liger_loss (`bool`, *optional*, defaults to `False`): + Whether to use the Liger GRPO loss. + + > Parameters that control the logging + + log_completions (`bool`, *optional*, defaults to `False`): + Whether to log a sample of (prompt, completion) pairs every `logging_steps` steps. If `rich` is installed, + it prints the sample. If `wandb` logging is enabled, it logs it to `wandb`. + num_completions_to_print (`int` or `None`, *optional*, defaults to `None`): + Number of completions to print with `rich`. If `None`, all completions are logged. + wandb_log_unique_prompts (`bool`, *optional*, defaults to `False`): + Whether to log unique prompts in wandb. If `True`, only unique prompts are logged. 
If `False`, all prompts + are logged. + + """ + vllm_sampling_params: Optional[Any] = field( + default = None, + metadata = {'help': 'vLLM SamplingParams'}, + ) + unsloth_num_chunks : Optional[int] = field( + default = -1, + metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'}, + ) + + def __init__( + self, + output_dir = None, + overwrite_output_dir = None, + do_train = False, + do_eval = False, + do_predict = False, + eval_strategy = 'no', + prediction_loss_only = False, + per_device_train_batch_size = 4, + per_device_eval_batch_size = 4, + per_gpu_train_batch_size = None, + per_gpu_eval_batch_size = None, + gradient_accumulation_steps = 2, + eval_accumulation_steps = 2, + eval_delay = 0, + torch_empty_cache_steps = 250, + learning_rate = 5e-05, + weight_decay = 0.01, + adam_beta1 = 0.9, + adam_beta2 = 0.999, + adam_epsilon = 1e-08, + max_grad_norm = 1.0, + num_train_epochs = 3.0, + max_steps = -1, + lr_scheduler_type = 'linear', + warmup_ratio = 0.1, + warmup_steps = 0, + log_level = 'passive', + log_level_replica = 'warning', + log_on_each_node = True, + logging_dir = None, + logging_strategy = 'steps', + logging_first_step = False, + logging_steps = 1, + logging_nan_inf_filter = False, + save_strategy = 'steps', + save_steps = 500, + save_total_limit = None, + save_safetensors = True, + save_on_each_node = False, + save_only_model = False, + restore_callback_states_from_checkpoint = False, + no_cuda = False, + use_cpu = False, + use_mps_device = False, + seed = 3407, + data_seed = 3407, + jit_mode_eval = False, + use_ipex = False, + bf16 = False, + fp16 = False, + fp16_opt_level = 'O1', + half_precision_backend = 'auto', + bf16_full_eval = False, + fp16_full_eval = False, + tf32 = None, + local_rank = -1, + ddp_backend = None, + tpu_num_cores = None, + tpu_metrics_debug = False, + debug = '', + dataloader_drop_last = False, + eval_steps = None, + dataloader_num_workers = 0, + dataloader_prefetch_factor = None, + past_index = -1, + run_name = None, + disable_tqdm = None, + remove_unused_columns = False, + label_names = None, + load_best_model_at_end = False, + metric_for_best_model = None, + greater_is_better = None, + ignore_data_skip = False, + fsdp = '', + fsdp_min_num_params = 0, + fsdp_config = None, + fsdp_transformer_layer_cls_to_wrap = None, + accelerator_config = None, + deepspeed = None, + label_smoothing_factor = 0.0, + optim = 'adamw_8bit', + optim_args = None, + adafactor = False, + group_by_length = False, + length_column_name = 'length', + report_to = None, + ddp_find_unused_parameters = None, + ddp_bucket_cap_mb = None, + ddp_broadcast_buffers = None, + dataloader_pin_memory = True, + dataloader_persistent_workers = False, + skip_memory_metrics = True, + use_legacy_prediction_loop = False, + push_to_hub = False, + resume_from_checkpoint = None, + hub_model_id = None, + hub_strategy = 'every_save', + hub_token = None, + hub_private_repo = None, + hub_always_push = False, + hub_revision = None, + gradient_checkpointing = False, + gradient_checkpointing_kwargs = None, + include_inputs_for_metrics = False, + eval_do_concat_batches = True, + fp16_backend = 'auto', + push_to_hub_model_id = None, + push_to_hub_organization = None, + push_to_hub_token = None, + mp_parameters = '', + auto_find_batch_size = True, + full_determinism = False, + torchdynamo = None, + ray_scope = 'last', + ddp_timeout = 1800, + torch_compile = False, + torch_compile_backend = None, + torch_compile_mode = None, + include_tokens_per_second = False, + 
include_num_input_tokens_seen = False, + neftune_noise_alpha = None, + optim_target_modules = None, + batch_eval_metrics = False, + eval_on_start = False, + use_liger_kernel = False, + liger_kernel_config = None, + eval_use_gather_object = False, + average_tokens_across_devices = True, + model_init_kwargs = None, + disable_dropout = False, + max_prompt_length = 512, + num_generations = 8, + max_completion_length = 256, + ds3_gather_for_generation = True, + shuffle_dataset = True, + generation_batch_size = None, + steps_per_generation = None, + temperature = 1.0, + top_p = 1.0, + top_k = None, + min_p = None, + generation_kwargs = {}, + repetition_penalty = 1.0, + use_transformers_paged = False, + cache_implementation = None, + use_vllm = False, + vllm_server_base_url = None, + vllm_mode = 'colocate', + vllm_model_impl = 'vllm', + vllm_guided_decoding_regex = None, + vllm_server_host = '0.0.0.0', + vllm_server_port = 8000, + vllm_server_timeout = 240.0, + vllm_gpu_memory_utilization = 0.3, + vllm_tensor_parallel_size = 1, + beta = 0.001, + num_iterations = 1, + epsilon = 0.2, + delta = None, + epsilon_high = None, + importance_sampling_level = 'token', + reward_weights = None, + scale_rewards = True, + loss_type = 'bnpo', + mask_truncated_completions = False, + sync_ref_model = False, + ref_model_mixup_alpha = 0.6, + ref_model_sync_steps = 512, + top_entropy_quantile = 1.0, + use_liger_loss = False, + log_completions = False, + num_completions_to_print = None, + wandb_log_unique_prompts = False, + vllm_sampling_params = None, + unsloth_num_chunks = -1, + + **kwargs, + ): + if learning_rate < 1e-7: raise FloatingPointError(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!') + if learning_rate > 1: raise OverflowError(f'Unsloth: Your learning rate of `{learning_rate}` is way too larger > 1! Consider decreasing it to 1e-1, otherwise gradient updates will explode!') + if output_dir is None and save_strategy == 'steps' and save_steps == 500: + output_dir = 'unsloth_training_checkpoints' + save_strategy = 'no' + if loss_type.lower() == 'dr_grpo': + loss_type = 'dr_grpo' + elif loss_type.lower() == 'dapo': + loss_type = 'dapo' + if loss_type.lower() == 'dr_grpo': + if scale_rewards == None: + scale_rewards = True + elif scale_rewards == True: + print('Unsloth: The Dr GRPO paper recommends setting `scale_rewards` to False! Will override. 
Set it to `None` to force False.') + scale_rewards = False + elif loss_type.lower() == 'dapo': + print('Unsloth: The DAPO paper recommends `mask_truncated_completions = True`') + print('Unsloth: The DAPO paper recommends `epsilon_high = 0.28`') + print('Unsloth: The DAPO paper recommends setting `beta = 0.0` to remove the KL term') + mask_truncated_completions = True + epsilon_high = 0.28 + beta = 0.0 + loss_type = 'bnpo' + + if (per_device_train_batch_size // num_generations) * num_generations != per_device_train_batch_size: + print('Unsloth: We now expect `per_device_train_batch_size` to be a multiple of `num_generations`.\nWe will change the batch size of ' + str(per_device_train_batch_size) + ' to the `num_generations` of ' + str(num_generations)) + per_device_train_batch_size = num_generations + + if temperature <= 0: + raise MathError('Unsloth: Please set a positive non-zero temperature since your results will be wrong.') + elif temperature >= 10: + raise MathError('Unsloth: Please set a positive non-zero temperature less than 10, since sampling will be quite erratic.') + + + super().__init__( + output_dir = output_dir, + overwrite_output_dir = overwrite_output_dir, + do_train = do_train, + do_eval = do_eval, + do_predict = do_predict, + eval_strategy = eval_strategy, + prediction_loss_only = prediction_loss_only, + per_device_train_batch_size = per_device_train_batch_size, + per_device_eval_batch_size = per_device_eval_batch_size, + per_gpu_train_batch_size = per_gpu_train_batch_size, + per_gpu_eval_batch_size = per_gpu_eval_batch_size, + gradient_accumulation_steps = gradient_accumulation_steps, + eval_accumulation_steps = eval_accumulation_steps, + eval_delay = eval_delay, + torch_empty_cache_steps = torch_empty_cache_steps, + learning_rate = learning_rate, + weight_decay = weight_decay, + adam_beta1 = adam_beta1, + adam_beta2 = adam_beta2, + adam_epsilon = adam_epsilon, + max_grad_norm = max_grad_norm, + num_train_epochs = num_train_epochs, + max_steps = max_steps, + lr_scheduler_type = lr_scheduler_type, + warmup_ratio = warmup_ratio, + warmup_steps = warmup_steps, + log_level = log_level, + log_level_replica = log_level_replica, + log_on_each_node = log_on_each_node, + logging_dir = logging_dir, + logging_strategy = logging_strategy, + logging_first_step = logging_first_step, + logging_steps = logging_steps, + logging_nan_inf_filter = logging_nan_inf_filter, + save_strategy = save_strategy, + save_steps = save_steps, + save_total_limit = save_total_limit, + save_safetensors = save_safetensors, + save_on_each_node = save_on_each_node, + save_only_model = save_only_model, + restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint, + no_cuda = no_cuda, + use_cpu = use_cpu, + use_mps_device = use_mps_device, + seed = seed, + data_seed = data_seed, + jit_mode_eval = jit_mode_eval, + use_ipex = use_ipex, + bf16 = bf16, + fp16 = fp16, + fp16_opt_level = fp16_opt_level, + half_precision_backend = half_precision_backend, + bf16_full_eval = bf16_full_eval, + fp16_full_eval = fp16_full_eval, + tf32 = tf32, + local_rank = local_rank, + ddp_backend = ddp_backend, + tpu_num_cores = tpu_num_cores, + tpu_metrics_debug = tpu_metrics_debug, + debug = debug, + dataloader_drop_last = dataloader_drop_last, + eval_steps = eval_steps, + dataloader_num_workers = dataloader_num_workers, + dataloader_prefetch_factor = dataloader_prefetch_factor, + past_index = past_index, + run_name = run_name, + disable_tqdm = disable_tqdm, + remove_unused_columns = remove_unused_columns, + 
label_names = label_names, + load_best_model_at_end = load_best_model_at_end, + metric_for_best_model = metric_for_best_model, + greater_is_better = greater_is_better, + ignore_data_skip = ignore_data_skip, + fsdp = fsdp, + fsdp_min_num_params = fsdp_min_num_params, + fsdp_config = fsdp_config, + fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap, + accelerator_config = accelerator_config, + deepspeed = deepspeed, + label_smoothing_factor = label_smoothing_factor, + optim = optim, + optim_args = optim_args, + adafactor = adafactor, + group_by_length = group_by_length, + length_column_name = length_column_name, + report_to = report_to, + ddp_find_unused_parameters = ddp_find_unused_parameters, + ddp_bucket_cap_mb = ddp_bucket_cap_mb, + ddp_broadcast_buffers = ddp_broadcast_buffers, + dataloader_pin_memory = dataloader_pin_memory, + dataloader_persistent_workers = dataloader_persistent_workers, + skip_memory_metrics = skip_memory_metrics, + use_legacy_prediction_loop = use_legacy_prediction_loop, + push_to_hub = push_to_hub, + resume_from_checkpoint = resume_from_checkpoint, + hub_model_id = hub_model_id, + hub_strategy = hub_strategy, + hub_token = hub_token, + hub_private_repo = hub_private_repo, + hub_always_push = hub_always_push, + hub_revision = hub_revision, + gradient_checkpointing = gradient_checkpointing, + gradient_checkpointing_kwargs = gradient_checkpointing_kwargs, + include_inputs_for_metrics = include_inputs_for_metrics, + eval_do_concat_batches = eval_do_concat_batches, + fp16_backend = fp16_backend, + push_to_hub_model_id = push_to_hub_model_id, + push_to_hub_organization = push_to_hub_organization, + push_to_hub_token = push_to_hub_token, + mp_parameters = mp_parameters, + auto_find_batch_size = auto_find_batch_size, + full_determinism = full_determinism, + torchdynamo = torchdynamo, + ray_scope = ray_scope, + ddp_timeout = ddp_timeout, + torch_compile = torch_compile, + torch_compile_backend = torch_compile_backend, + torch_compile_mode = torch_compile_mode, + include_tokens_per_second = include_tokens_per_second, + include_num_input_tokens_seen = include_num_input_tokens_seen, + neftune_noise_alpha = neftune_noise_alpha, + optim_target_modules = optim_target_modules, + batch_eval_metrics = batch_eval_metrics, + eval_on_start = eval_on_start, + use_liger_kernel = use_liger_kernel, + liger_kernel_config = liger_kernel_config, + eval_use_gather_object = eval_use_gather_object, + average_tokens_across_devices = average_tokens_across_devices, + model_init_kwargs = model_init_kwargs, + disable_dropout = disable_dropout, + max_prompt_length = max_prompt_length, + num_generations = num_generations, + max_completion_length = max_completion_length, + ds3_gather_for_generation = ds3_gather_for_generation, + shuffle_dataset = shuffle_dataset, + generation_batch_size = generation_batch_size, + steps_per_generation = steps_per_generation, + temperature = temperature, + top_p = top_p, + top_k = top_k, + min_p = min_p, + generation_kwargs = generation_kwargs, + repetition_penalty = repetition_penalty, + use_transformers_paged = use_transformers_paged, + cache_implementation = cache_implementation, + use_vllm = use_vllm, + vllm_server_base_url = vllm_server_base_url, + vllm_mode = vllm_mode, + vllm_model_impl = vllm_model_impl, + vllm_guided_decoding_regex = vllm_guided_decoding_regex, + vllm_server_host = vllm_server_host, + vllm_server_port = vllm_server_port, + vllm_server_timeout = vllm_server_timeout, + vllm_gpu_memory_utilization = vllm_gpu_memory_utilization, 
+ vllm_tensor_parallel_size = vllm_tensor_parallel_size, + beta = beta, + num_iterations = num_iterations, + epsilon = epsilon, + delta = delta, + epsilon_high = epsilon_high, + importance_sampling_level = importance_sampling_level, + reward_weights = reward_weights, + scale_rewards = scale_rewards, + loss_type = loss_type, + mask_truncated_completions = mask_truncated_completions, + sync_ref_model = sync_ref_model, + ref_model_mixup_alpha = ref_model_mixup_alpha, + ref_model_sync_steps = ref_model_sync_steps, + top_entropy_quantile = top_entropy_quantile, + use_liger_loss = use_liger_loss, + log_completions = log_completions, + num_completions_to_print = num_completions_to_print, + wandb_log_unique_prompts = wandb_log_unique_prompts,**kwargs) + self.vllm_sampling_params = vllm_sampling_params + self.unsloth_num_chunks = unsloth_num_chunks + +pass + +class _UnslothGRPOTrainer(Trainer): + """""" + + _tag_names = ["trl", "grpo"] + + def __init__( + self, + model: Union[str, PreTrainedModel], + reward_funcs: Union[RewardFunc, list[RewardFunc]], + args: Optional[GRPOConfig] = None, + train_dataset: Optional[Union[Dataset, IterableDataset]] = None, + eval_dataset: Optional[Union[Dataset, IterableDataset, dict[str, Union[Dataset, IterableDataset]]]] = None, + processing_class: Optional[Union[PreTrainedTokenizerBase, ProcessorMixin]] = None, + reward_processing_classes: Optional[Union[PreTrainedTokenizerBase, list[PreTrainedTokenizerBase]]] = None, + callbacks: Optional[list[TrainerCallback]] = None, + optimizers: tuple[Optional[torch.optim.Optimizer], Optional[torch.optim.lr_scheduler.LambdaLR]] = (None, None), + peft_config: Optional["PeftConfig"] = None, + ): + + if hasattr(model, 'vllm_engine') and hasattr(args, 'use_vllm'): + if (getattr(args, 'use_vllm', False) == False): + args.use_vllm = True + args.vllm_mode='colocate' + # Args + if args is None: + model_name = model if isinstance(model, str) else model.config._name_or_path + model_name = model_name.split("/")[-1] + args = GRPOConfig(f"{model_name}-GRPO") + + # Models + # Trained model + model_init_kwargs = args.model_init_kwargs or {} + if isinstance(model, str): + model_id = model + torch_dtype = model_init_kwargs.get("torch_dtype") + if isinstance(torch_dtype, torch.dtype) or torch_dtype == "auto" or torch_dtype is None: + pass # torch_dtype is already a torch.dtype or "auto" or None + elif isinstance(torch_dtype, str): # it's a str, but not "auto" + torch_dtype = getattr(torch, torch_dtype) + model_init_kwargs["torch_dtype"] = torch_dtype + else: + raise ValueError( + "Invalid `torch_dtype` passed to `GRPOConfig`. Expected either 'auto' or a string representing " + f"a `torch.dtype` (e.g., 'float32'), but got {torch_dtype}." + ) + # Disable caching if gradient checkpointing is enabled [not supported] + config = AutoConfig.from_pretrained(model_id) + architecture = getattr(transformers, config.architectures[0]) + model = architecture.from_pretrained(model_id, **model_init_kwargs) + else: + model_id = model.config._name_or_path + if args.model_init_kwargs is not None: + raise ValueError( + "You passed `model_init_kwargs` to the `GRPOConfig`, but your model is already instantiated. " + "This argument can only be used when the `model` argument is a string." 
+ ) + + # Some models [SmolVLM/Idefics3] don't support `logits_to_keep` argument and error out if we pass it + # Inspect the forward method before we wrap the model with PEFT + self.model_kwarg_keys = ( + inspect.signature(model.forward).parameters.keys() + if not hasattr(model, "get_base_model") + else inspect.signature(model.get_base_model().forward).parameters.keys() + ) + + if False: + if not is_peft_available(): + raise ImportError("PEFT is required to use `peft_config`. Run `pip install peft`.") + model = model + + # Enable gradient checkpointing if requested + if args.gradient_checkpointing: + model = self._enable_gradient_checkpointing(model, args) + + # Processing class + if processing_class is None: + processing_class = AutoProcessor.from_pretrained(model.config._name_or_path) + + # Handle pad token for processors or tokenizers + if isinstance(processing_class, ProcessorMixin): + tokenizer = processing_class.tokenizer + elif isinstance(processing_class, PreTrainedTokenizerBase): + tokenizer = processing_class + else: + raise TypeError("The `processing_class` must be either a `PreTrainedTokenizerBase` or a `ProcessorMixin`") + + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + + self.pad_token = tokenizer.pad_token + self.pad_token_id = tokenizer.pad_token_id + self.eos_token_id = tokenizer.eos_token_id + self.image_token = getattr(processing_class, "image_token", None) + self.image_token_id = getattr(processing_class, "image_token_id", None) + self.vision_start_token_id = getattr(model.config, "vision_start_token_id", None) + self.vision_end_token_id = getattr(model.config, "vision_end_token_id", None) + + # Reward functions + if not isinstance(reward_funcs, list): + reward_funcs = [reward_funcs] + self.reward_func_names = [] + for i, reward_func in enumerate(reward_funcs): + if isinstance(reward_func, str): + reward_funcs[i] = AutoModelForSequenceClassification.from_pretrained( + reward_func, num_labels=1, **model_init_kwargs + ) + if isinstance(reward_funcs[i], nn.Module): # Use Module over PretrainedModel for compat w/ compiled models + self.reward_func_names.append(reward_funcs[i].config._name_or_path.split("/")[-1]) + else: + self.reward_func_names.append(reward_funcs[i].__name__) + self.reward_funcs = reward_funcs + + # Reward weights + if args.reward_weights is not None: + if len(args.reward_weights) != len(reward_funcs): + raise ValueError( + f"Number of reward weights ({len(args.reward_weights)}) must match number of reward " + f"functions ({len(reward_funcs)})" + ) + self.reward_weights = torch.tensor(args.reward_weights, dtype=torch.float32) + else: + self.reward_weights = torch.ones(len(reward_funcs), dtype=torch.float32) + + # Reward processing class + if reward_processing_classes is None: + reward_processing_classes = [None] * len(reward_funcs) + elif not isinstance(reward_processing_classes, list): + reward_processing_classes = [reward_processing_classes] + else: + if len(reward_processing_classes) != len(reward_funcs): + raise ValueError("The number of reward processing classes must match the number of reward functions.") + + for i, (reward_processing_class, reward_func) in enumerate(zip(reward_processing_classes, reward_funcs)): + if isinstance(reward_func, PreTrainedModel): + if reward_processing_class is None: + reward_processing_class = AutoTokenizer.from_pretrained(reward_func.config._name_or_path) + if reward_processing_class.pad_token_id is None: + reward_processing_class.pad_token = reward_processing_class.eos_token + # The 
reward model computes the reward for the latest non-padded token in the input sequence. + # So it's important to set the pad token ID to the padding token ID of the processing class. + reward_func.config.pad_token_id = reward_processing_class.pad_token_id + reward_processing_classes[i] = reward_processing_class + self.reward_processing_classes = reward_processing_classes + + # Training arguments + self.max_prompt_length = args.max_prompt_length + self.max_completion_length = args.max_completion_length # = |o_i| in the GRPO paper + self.num_generations = args.num_generations # = G in the GRPO paper + self.temperature = args.temperature + self.top_p = args.top_p + self.top_k = args.top_k + self.min_p = args.min_p + self.repetition_penalty = args.repetition_penalty + self.use_transformers_paged = args.use_transformers_paged + self.use_vllm = args.use_vllm + self.vllm_mode = args.vllm_mode + self.vllm_gpu_memory_utilization = args.vllm_gpu_memory_utilization # only applies to colocation mode + self.vllm_tensor_parallel_size = args.vllm_tensor_parallel_size # only applies to colocation mode + self.use_liger_loss = args.use_liger_loss + self.loss_type = args.loss_type + self.scale_rewards = args.scale_rewards + self.importance_sampling_level = args.importance_sampling_level + self.mask_truncated_completions = args.mask_truncated_completions + self.top_entropy_quantile = args.top_entropy_quantile + if self.use_liger_loss and self.top_entropy_quantile < 1.0: + raise NotImplementedError( + "Liger Kernels don't currently support masking token positions based on entropy." + ) + if self.use_liger_loss and not self.importance_sampling_level == "token": + raise NotImplementedError( + "Liger Kernels currently only support token-level importance sampling. Please set" + "`importance_sampling_level` to 'token'." + ) + + # Datasets + self.shuffle_dataset = args.shuffle_dataset + + if ( + isinstance(train_dataset, IterableDataset) + or isinstance(eval_dataset, IterableDataset) + or ( + isinstance(eval_dataset, dict) and any(isinstance(ds, IterableDataset) for ds in eval_dataset.values()) + ) + ): + # See https://github.com/huggingface/trl/issues/3213 + raise NotImplementedError( + "Iterable datasets are not yet supported in GRPOTrainer. Please use a standard dataset instead." + ) + + # Multi-step + self.num_iterations = args.num_iterations # = 𝜇 in the GRPO paper + self.epsilon_low = args.epsilon + self.epsilon_high = args.epsilon_high if args.epsilon_high is not None else args.epsilon + # Tracks the number of iterations [forward + backward passes], including those within a grad accum cycle + self._step = 0 + # Buffer the batch to reuse generated outputs across multiple updates. For more details, see + # `_get_train_sampler` and `_prepare_inputs`. + self._buffered_inputs = None + + # The trainer estimates the number of FLOPs [floating-point operations] using the number of elements in the + # input tensor associated with the key "input_ids". However, in GRPO, the sampled data does not include the + # "input_ids" key. Instead, the available keys is "prompt". As a result, the trainer issues the warning: + # "Could not estimate the number of tokens of the input, floating-point operations will not be computed." To + # suppress this warning, we set the "estimate_tokens" key in the model's "warnings_issued" dictionary to True. + # This acts as a flag to indicate that the warning has already been issued. 
+ model.warnings_issued["estimate_tokens"] = True + + super().__init__( + model=model, + args=args, + data_collator=identity, # No data collation is needed in GRPO + train_dataset=train_dataset, + eval_dataset=eval_dataset, + processing_class=processing_class, + callbacks=callbacks, + optimizers=optimizers, + ) + + # Reference model + self.beta = args.beta + if self.beta == 0.0: + # If beta is 0.0, the reference model is not needed + self.ref_model = None + elif is_peft_model(model): + # If PEFT is used, the reference model is not needed since the adapter can be disabled + # to revert to the initial model. + self.ref_model = None + else: + # For deepspeed, fsdp or non-distributed models, create a reference model from scratch + config = AutoConfig.from_pretrained(model_id) + architecture = getattr(transformers, config.architectures[0]) + self.ref_model = architecture.from_pretrained(model_id, **model_init_kwargs) + + # Disable dropout in the models + if args.disable_dropout: + disable_dropout_in_model(model) + if self.ref_model is not None: + disable_dropout_in_model(self.ref_model) + + # Liger loss + if self.use_liger_loss: + if not is_liger_kernel_available(): + raise ImportError( + "Liger is required to use `liger_loss` as the GRPO loss. Run `pip install liger-kernel`." + ) + # redirect the model.module forward to the model forward to ensure pre-forward hooks are called + self._forward_redirection = _ForwardRedirection() + + self.liger_grpo_loss = LigerFusedLinearGRPOLoss( + beta=self.beta, + epsilon_low=self.epsilon_low, + epsilon_high=self.epsilon_high, + temperature=self.temperature, + use_ref_model=self.beta != 0.0, + loss_type=self.loss_type, + max_completion_length=self.max_completion_length, + ) + + # Initialize the metrics + self._metrics = {"train": defaultdict(list), "eval": defaultdict(list)} + self._total_train_tokens = 0 + self.log_completions = args.log_completions + self.wandb_log_unique_prompts = args.wandb_log_unique_prompts + self.num_completions_to_print = args.num_completions_to_print + # Keep logs sized to the generation batch to record only outputs from the latest model update. + self._logs = { + "image": deque(maxlen=args.generation_batch_size), + "prompt": deque(maxlen=args.generation_batch_size), + "completion": deque(maxlen=args.generation_batch_size), + "rewards": defaultdict(lambda: deque(maxlen=args.generation_batch_size)), + "advantages": deque(maxlen=args.generation_batch_size), + } + + # Ensure each process receives a unique seed to prevent duplicate completions when generating with + # transformers if num_generations exceeds per_device_train_batch_size. We could skip it if we use vLLM, but + # it's safer to set it in all cases. + set_seed(args.seed, device_specific=True) + + if self.use_vllm: + if not is_vllm_available(): + raise ImportError( + "vLLM is not available and `use_vllm` is set to True. Please install vLLM with " + "`pip install vllm` to use it." 
+ ) + + if self.vllm_mode == "server" and self.accelerator.is_main_process: + if args.vllm_server_base_url is not None: + base_url = args.vllm_server_base_url + else: + base_url = f"http://{args.vllm_server_host}:{args.vllm_server_port}" + self.vllm_client = VLLMClient(base_url=base_url, connection_timeout=args.vllm_server_timeout) + self.vllm_client.init_communicator(device=torch.cuda.current_device()) + + elif self.vllm_mode == "colocate": + if not self.accelerator.num_processes % self.vllm_tensor_parallel_size == 0: + raise ValueError( + f"vllm_tensor_parallel_size ({self.vllm_tensor_parallel_size}) must divide world size " + f"({self.accelerator.num_processes}) evenly." + ) + + if self.vllm_tensor_parallel_size > 1: + self.tp_group, _ = torch.distributed.new_subgroups_by_enumeration( + [ + list(range(i * self.vllm_tensor_parallel_size, (i + 1) * self.vllm_tensor_parallel_size)) + for i in range(self.accelerator.num_processes // self.vllm_tensor_parallel_size) + ] + ) + os.environ["RANK"] = str(self.accelerator.process_index) + os.environ["LOCAL_RANK"] = str(self.accelerator.local_process_index) + os.environ["WORLD_SIZE"] = str(self.accelerator.num_processes) + os.environ["MASTER_ADDR"] = os.environ.get("MASTER_ADDR", "localhost") + os.environ["MASTER_PORT"] = os.environ.get("MASTER_PORT", "12345") + + if self.max_prompt_length is not None and self.max_completion_length is not None: + max_model_len = self.max_prompt_length + self.max_completion_length + else: + max_model_len = None + self.llm = model.vllm_engine + self.guided_decoding_regex = args.vllm_guided_decoding_regex + + self._last_loaded_step = -1 + self.accelerator.wait_for_everyone() + else: + generation_kwargs = { + "max_new_tokens": self.max_completion_length, + "do_sample": True, + "pad_token_id": tokenizer.pad_token_id, + "bos_token_id": tokenizer.bos_token_id, + "eos_token_id": tokenizer.eos_token_id, + "temperature": self.temperature, + "top_p": self.top_p, + "top_k": self.top_k, + "min_p": self.min_p, + "repetition_penalty": self.repetition_penalty, + "cache_implementation": args.cache_implementation, + } + if args.use_transformers_paged: + generation_kwargs["max_batch_tokens"] = 512 + generation_kwargs["num_blocks"] = 1024 + generation_kwargs["block_size"] = 128 + if args.generation_kwargs is not None: + generation_kwargs.update(args.generation_kwargs) + self.generation_config = GenerationConfig(**generation_kwargs) + + # Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether the + # model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set + # self.model_accepts_loss_kwargs to False to enable scaling. 
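+        # Illustrative note (behaviour assumed from the parent `Trainer`): with gradient_accumulation_steps=4,
+        # the returned loss is then divided by 4 before `backward()`, so the accumulated micro-batches
+        # average out to a single full-batch update.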
+ self.model_accepts_loss_kwargs = False + + # Add tags to the model + self.model.add_model_tags(self._tag_names) + + if self.ref_model is not None: + if self.is_deepspeed_enabled: + self.ref_model = prepare_deepspeed(self.ref_model, self.accelerator) + elif self.is_fsdp_enabled: + self.ref_model = prepare_fsdp(self.ref_model, self.accelerator) + else: + self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True) + + if args.sync_ref_model: + self.add_callback(SyncRefModelCallback(ref_model=self.ref_model, accelerator=self.accelerator)) + + for i, reward_func in enumerate(self.reward_funcs): + if isinstance(reward_func, PreTrainedModel): + if self.is_deepspeed_enabled: + self.reward_funcs[i] = prepare_deepspeed(reward_func, self.accelerator) + else: + # set device placement to True to make `prepare_model` move `reward_func` to device when using fsdp + self.reward_funcs[i] = self.accelerator.prepare_model( + reward_func, evaluation_mode=True, device_placement=True + ) + + def _set_signature_columns_if_needed(self): + # If `self.args.remove_unused_columns` is True, non-signature columns are removed. + # By default, this method sets `self._signature_columns` to the model's expected inputs. + # In GRPOTrainer, we preprocess data, so using the model's signature columns doesn't work. + # Instead, we set them to the columns expected by the `training_step` method, hence the override. + if self._signature_columns is None: + self._signature_columns = ["prompt", "image"] + + # This method overrides `Trainer.get_train_dataloader` to support our custom batching strategy. + # Instead of returning a standard per-step batch (i.e., `per_device_batch_size), our dataloader loads an + # *generation* batch (i.e., `per_device_batch_size × steps_per_generation`). This allows us to generate completions + # once every steps_per_generation step—rather than once per accumulation step—which is significantly more + # efficient. The only change from the original implementation is multiplying the batch size by + # `steps_per_generation`. Thus, `_prepare_inputs` is called with this *generation* batch, and it handles the + # splitting internally. + # Maintenance note: This method is a copy-paste of the original `Trainer.get_train_dataloader` with only one line + # modification. As a result, some parts of the method aren't relevant to GRPO, but we keep them to stay one line + # apart from the super method, ensuring easier maintenance in the future. 
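+    # Illustrative sizing example (hypothetical numbers): with per_device_train_batch_size=4 and
+    # steps_per_generation=2, each dataloader batch below holds 8 prompts per device; completions are
+    # generated once for those 8 prompts and then consumed over the next 2 training steps.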
+ def get_train_dataloader(self): + if self.train_dataset is None: + raise ValueError("Trainer: training requires a train_dataset.") + + train_dataset = self.train_dataset + data_collator = self.data_collator + if is_datasets_available() and isinstance(train_dataset, datasets.Dataset): + train_dataset = self._remove_unused_columns(train_dataset, description="training") + else: + data_collator = self._get_collator_with_removed_columns(data_collator, description="training") + + dataloader_params = { + "batch_size": self._train_batch_size * self.args.steps_per_generation, # < this is the change + "collate_fn": data_collator, + "num_workers": self.args.dataloader_num_workers, + "pin_memory": self.args.dataloader_pin_memory, + "persistent_workers": self.args.dataloader_persistent_workers, + } + + if not isinstance(train_dataset, torch.utils.data.IterableDataset): + dataloader_params["sampler"] = self._get_train_sampler() + dataloader_params["drop_last"] = self.args.dataloader_drop_last + dataloader_params["worker_init_fn"] = partial( + seed_worker, num_workers=self.args.dataloader_num_workers, rank=self.args.process_index + ) + + dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor + + return self.accelerator.prepare(DataLoader(train_dataset, **dataloader_params)) + + def _get_train_sampler(self, dataset: Optional[Dataset] = None) -> Sampler: + # Returns a sampler that + # 1. ensures each prompt is repeated across multiple processes. This guarantees that identical prompts are + # distributed to different GPUs, allowing rewards to be computed and normalized correctly within each prompt + # group. Using the same seed across processes ensures consistent prompt assignment, preventing discrepancies + # in group formation. + # 2. repeats the batch multiple times to allow reusing generations across multiple updates. Refer to + # _prepare_inputs to see how the generations are stored and reused. + + # In the following figure, the values are the prompt indices. The first row shows the first sampled batch, the + # second row shows the second sampled batch, and so on. + # + # | GPU 0 | GPU 1 | + # + # global_step step <-───> num_generations=2 + # <-───────> per_device_train_batch_size=3 + # grad_accum ▲ ▲ 0 0 0 0 1 1 2 2 <- Generate for the first `steps_per_generation` (prompts 0 to 11); store the completions; use the first slice to compute the loss + # =2 ▼ | 0 1 3 3 4 4 5 5 <- Take the stored generations and use the second slice to compute the loss + # | + # | 1 2 6 6 7 7 8 8 <- Take the stored generations and use the third slice to compute the loss + # steps_per_gen=4 ▼ 1 3 9 9 10 10 11 11 <- Take the stored generations and use the fourth slice to compute the loss + # + # 2 4 12 12 13 13 14 14 <- Generate for the second `steps_per_generation` (prompts 12 to 23); store the completions; use the first slice to compute the loss + # 2 5 15 15 16 16 17 17 <- Take the stored generations and use the second slice to compute the loss + # ... + if dataset is None: + dataset = self.train_dataset + return RepeatSampler( + data_source=dataset, + mini_repeat_count=self.num_generations, + batch_size=self.args.generation_batch_size // self.num_generations, + repeat_count=self.num_iterations * self.args.steps_per_generation, + shuffle=self.shuffle_dataset, + seed=self.args.seed, + ) + + def _get_eval_sampler(self, eval_dataset) -> Sampler: + # See _get_train_sampler for an explanation of the sampler. 
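+        # Rough illustration (hypothetical numbers): with num_generations=3, a RepeatSampler over [p0, p1]
+        # yields indices [0, 0, 0, 1, 1, 1], so the 3 completions of each prompt land in the same group
+        # when rewards are normalized.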
+ return RepeatSampler( + data_source=eval_dataset, + mini_repeat_count=self.num_generations, + seed=self.args.seed, + ) + + def _enable_gradient_checkpointing(self, model: PreTrainedModel, args: GRPOConfig) -> PreTrainedModel: + """Enables gradient checkpointing for the model.""" + # Ensure use_cache is disabled + model.config.use_cache = False + + # Enable gradient checkpointing on the base model for PEFT + if is_peft_model(model): + model.base_model.gradient_checkpointing_enable() + # Enable gradient checkpointing for non-PEFT models + else: + model.gradient_checkpointing_enable() + + gradient_checkpointing_kwargs = args.gradient_checkpointing_kwargs or {} + use_reentrant = ( + "use_reentrant" not in gradient_checkpointing_kwargs or gradient_checkpointing_kwargs["use_reentrant"] + ) + + if use_reentrant: + model.enable_input_require_grads() + + return model + + @profiling_decorator + def _get_last_hidden_state( + self, + unwrapped_model, + input_ids, + attention_mask, + logits_to_keep, + pixel_values=None, + image_grid_thw=None, + pixel_attention_mask=None, + image_sizes=None, + ): + if is_peft_model(unwrapped_model): + unwrapped_model = unwrapped_model.base_model.model + + # Build model inputs - check if the model supports logits_to_keep (some models and VLMs don't) + model_inputs = {"input_ids": input_ids, "attention_mask": attention_mask} + + # For Qwen models: + if image_grid_thw is not None and pixel_values is not None: + model_inputs["image_grid_thw"] = image_grid_thw + # For Gemma, SmolVLM2, LLaVa-Next etc.: + if pixel_values is not None: + model_inputs["pixel_values"] = pixel_values + # For SmolVLM2 + if pixel_attention_mask is not None: + model_inputs["pixel_attention_mask"] = pixel_attention_mask + # For LLaVa-Next + if image_sizes is not None: + model_inputs["image_sizes"] = image_sizes + + # Only add logits_to_keep if the model supports it + if "logits_to_keep" in self.model_kwarg_keys: + # We add 1 to `logits_to_keep` because the last logits of the sequence is later excluded + model_inputs["logits_to_keep"] = logits_to_keep + 1 + + last_hidden_state = unwrapped_model.model(**model_inputs).last_hidden_state + # Exclude the last value: it corresponds to the next token pred + last_hidden_state = last_hidden_state[:, :-1, :] # (B, L-1, H) + # Only keep the last logits_to_keep. For model that support logits_to_keep, this is a no-op. + last_hidden_state = last_hidden_state[:, -logits_to_keep:, :] # (B, logits_to_keep, H) + return last_hidden_state + + def get_high_entropy_mask( + self, entropies: torch.Tensor, mask: torch.Tensor, threshold: float, accelerator=None + ) -> torch.Tensor: + """ + Returns a binary mask identifying tokens whose entropy exceeds a given quantile threshold. + + Args: + entropies (`torch.Tensor`): + Tensor of shape (batch_size, seq_len) with per-token entropy values. + mask (`torch.Tensor`): + Binary mask of the same shape as `entropies`, where `1` indicates valid tokens and `0` padding. + threshold (`float`): + Quantile threshold between `0.0` and `1.0` to select high-entropy tokens. + + Returns: + `torch.Tensor`: + Boolean mask of shape (batch_size, seq_len), where `True` indicates tokens with entropy >= threshold and + `False` otherwise. 
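+
+        Example (illustrative):
+            With `threshold=0.8`, the 0.8 quantile of all non-padding entropies (gathered across processes)
+            becomes the cutoff, so roughly the 20% highest-entropy completion tokens are kept and all other
+            tokens, including padding, are masked out.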
+ """ + non_pad_entropies = entropies[mask.bool()].float() + if non_pad_entropies.numel() == 0: + return torch.zeros_like(entropies, dtype=torch.bool) + all_non_pad_entropies = self.accelerator.gather(non_pad_entropies) + # Filter out any empty tensors that might result from processes with no valid tokens + entropy_threshold = torch.quantile(all_non_pad_entropies, threshold) + masked_entropies = entropies * mask.float() + entropy_mask = masked_entropies >= entropy_threshold + return entropy_mask & mask.bool() # ensure padding tokens are always masked out + + def _get_per_token_logps_and_entropies(self, model, input_ids, attention_mask, logits_to_keep, batch_size = None, compute_entropy = False, *args, **kwargs): + if True: # os.environ.get('UNSLOTH_USE_NEW_MODEL', '0') == '0': + return None, None # logps, entropies Unsloth efficient GRPO + # Otherwise, calculate normally: + if not hasattr(self, '_autocast_dtype'): + self._autocast_dtype = torch.float16 if os.environ.get('ACCELERATE_MIXED_PRECISION', 'fp16') == 'fp16' else torch.bfloat16 + if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1': self._autocast_dtype = torch.float16 + + os.environ["UNSLOTH_RETURN_HIDDEN_STATES"] = "1" + with torch.amp.autocast(device_type = 'cuda', dtype = self._autocast_dtype): + # We add 1 to `logits_to_keep` because the last logits of the sequence is later excluded + logits = model( + input_ids = input_ids, + attention_mask = attention_mask, + logits_to_keep = logits_to_keep + 1, + ).logits + + entropies = None + if compute_entropy: + from trl.trainer.utils import entropy_from_logits + entropies = entropy_from_logits(logits) + + # logits = logits[:, :-1, :] # (B, L-1, V), exclude the last logit: it corresponds to the next token pred + return logits, entropies # logps, entropies + # input_ids = input_ids[:, -logits_to_keep:] + # For transformers<=4.48, logits_to_keep argument isn't supported, so here we drop logits ourselves. 
+ # See https://github.com/huggingface/trl/issues/2770 + # logits = logits[:, -logits_to_keep:] + # return logits + # See https://huggingface.co/blog/the_n_implementation_details_of_rlhf_with_ppo#policy-training-implementation-details + # logits = logits / self.temperature + # logps = selective_log_softmax(logits, input_ids) + + # row_indices, col_indices = torch.where(logps < -20) + + # # Method 1: Check if tensors have elements + # if len(row_indices) > 0 and len(col_indices) > 0: + # breakpoint() # Breakpoint triggered here + # print("Found high values!") + # return logps # compute logprobs for the input tokens + pass + + def _fix_param_name_to_vllm(self, name, extra_prefixes: Optional[list[str]] = None): + extra_prefixes = extra_prefixes or [] + prefixes = ["_checkpoint_wrapped_module."] + extra_prefixes + for prefix in prefixes: + name = name.replace(prefix, "") + return name + + def _sync_fsdp1_params_to_vllm(self, module: nn.Module, prefix: str = "", visited=None): + """Memory-efficient post-order traversal of FSDP modules to extract full parameters and sync with vLLM.""" + # For FSDP1, we need to recurse into children and also use summon_full_params + if visited is None: + visited = set() + for child_name, child_module in module.named_children(): + child_prefix = f"{prefix}.{child_name}" if prefix else child_name + self._sync_fsdp1_params_to_vllm( + child_module, prefix=child_prefix, visited=visited + ) # recurse into the child + + if isinstance(module, FSDP): + with FSDP.summon_full_params(module, recurse=False, writeback=False): + for param_name, param in module.named_parameters(): + full_name = f"{prefix}.{param_name}" if prefix else param_name + full_name = self._fix_param_name_to_vllm(full_name, extra_prefixes=["_fsdp_wrapped_module."]) + + if full_name in visited: + continue # skip FSDP subtrees already traversed + visited.add(full_name) + + if self.vllm_mode == "server" and self.accelerator.is_main_process: + self.vllm_client.update_named_param(full_name, param.data) + elif self.vllm_mode == "colocate": + + pass + + pass + + def _sync_fsdp2_params_to_vllm(self, module: nn.Module): + # For FSDP2, module already covers all parameters, so no need for recursion + for name, param in module.items(): + if param.is_cpu: + param = param.to(torch.device("cuda")) + param = param.full_tensor() + + if self.vllm_mode == "server" and self.accelerator.is_main_process: + self.vllm_client.update_named_param(name, param) + elif self.vllm_mode == "colocate": + + pass + + pass + + def _move_model_to_vllm(self, *args, **kwargs): return None + + @profiling_decorator + def _prepare_inputs( + self, generation_batch: dict[str, Union[torch.Tensor, Any]] + ) -> dict[str, Union[torch.Tensor, Any]]: + # Prepares inputs for model training/evaluation by managing completion generation and batch handling. 
+ # During training: + # - Receives the local generation batch (Per-GPU batch size × steps per generation) + # from the modified training dataloader instead of the standard local batch + # - Generates completions once for the entire generation batch and splits it into batches of size + # `per_device_train_batch_size` + # - Buffers these completions and returns the appropriate slice for the current accumulation step + # - Optimizes by regenerating completions only periodically (every steps_per_generation * num_iterations) + # During evaluation: + # - The input is treated as a standard local batch (no accumulation, no multiple iterations) + # - Completions are generated for each batch without buffering or reuse + # Returns a single local batch in both cases. + if hasattr(self, 'llm'): + if getattr(self.llm.llm_engine.vllm_config.model_config, 'enable_sleep_mode', False): + self.llm.wake_up() + + mode = "train" if self.model.training else "eval" + if mode == "train": + generate_every = self.args.steps_per_generation * self.num_iterations + if self._step % generate_every == 0 or self._buffered_inputs is None: + # self._buffered_inputs=None can occur when resuming from a checkpoint + generation_batch = self._generate_and_score_completions(generation_batch) + generation_batch = split_pixel_values_by_grid(generation_batch) + generation_batch = shuffle_sequence_dict(generation_batch) + generation_batches = split_tensor_dict(generation_batch, self.args.steps_per_generation) + self._buffered_inputs = [unsplit_pixel_values_by_grid(batch) for batch in generation_batches] + inputs = self._buffered_inputs[self._step % self.args.steps_per_generation] + self._step += 1 + else: + # In evaluation, there is neither batch grouping for generation, nor multiple iterations, hence + # local generation batch == local eval batch + inputs = self._generate_and_score_completions(generation_batch) + if hasattr(self, 'llm'): + if getattr(self.llm.llm_engine.vllm_config.model_config, 'enable_sleep_mode', False): + self.llm.sleep(os.environ.get('VLLM_SLEEP_MODE', 1)) + return inputs + + @profiling_decorator + def _calculate_rewards(self, inputs, prompts, completions, completion_ids_list): + device = self.accelerator.device + rewards_per_func = torch.zeros(len(prompts), len(self.reward_funcs), device=device) + + # Repeat all input columns (but "prompt", "completion", and "completion_ids") to match the num of generations + keys = [key for key in inputs[0] if key not in ["prompt", "completion", "completion_ids"]] + reward_kwargs = {key: [example[key] for example in inputs] for key in keys} + + # This allows for dynamic reward shaping based on training progress. 
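+        # Hypothetical sketch of a custom reward function that uses it for reward shaping (not part of the
+        # trainer, names are illustrative):
+        #     def length_penalty(completions, trainer_state=None, **kwargs):
+        #         frac = trainer_state.global_step / max(trainer_state.max_steps or 1, 1)
+        #         return [-frac * len(str(c)) for c in completions]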
+ reward_kwargs["trainer_state"] = self.state + + for i, (reward_func, reward_processing_class, reward_func_name) in enumerate( + zip(self.reward_funcs, self.reward_processing_classes, self.reward_func_names) + ): + with profiling_context(self, reward_func_name): + if isinstance(reward_func, nn.Module): # Module (no PretrainedModel) for compat with compiled models + if is_conversational(inputs[0]): + messages = [{"messages": p + c} for p, c in zip(prompts, completions)] + texts = [apply_chat_template(x, reward_processing_class)["text"] for x in messages] + else: + texts = [p + c for p, c in zip(prompts, completions)] + reward_inputs = reward_processing_class( + text=texts, return_tensors="pt", padding=True, padding_side="right", add_special_tokens=False + ) + reward_inputs = super()._prepare_inputs(reward_inputs) + with torch.inference_mode(): + rewards_per_func[:, i] = reward_func(**reward_inputs).logits[:, 0] # Shape (B*G,) + else: + output_reward_func = reward_func( + prompts=prompts, completions=completions, completion_ids=completion_ids_list, **reward_kwargs + ) + # Convert None values to NaN + output_reward_func = [reward if reward is not None else torch.nan for reward in output_reward_func] + + rewards_per_func[:, i] = torch.tensor(output_reward_func, dtype=torch.float32, device=device) + + # If all reward functions return None for a given row, issue a detailed warning + if torch.isnan(rewards_per_func).all(dim=1).any(): + nan_row_idx = torch.isnan(rewards_per_func).all(dim=1).nonzero(as_tuple=True)[0][0] + row_reward_kwargs = {key: value[nan_row_idx] for key, value in reward_kwargs.items()} + row_reward_kwargs["prompt"] = prompts[nan_row_idx] + row_reward_kwargs["completion"] = completions[nan_row_idx] + warnings.warn( + f"All reward functions returned None for the following kwargs: {row_reward_kwargs}. " + "Please ensure that at least one reward function returns a valid reward." + ) + + # Gather the reward per function: this part is crucial, because the rewards are normalized per group and the + # completions may be distributed across processes + rewards_per_func = gather(rewards_per_func) + return rewards_per_func + + def _generate_and_score_completions( + self, inputs: list[dict[str, Union[torch.Tensor, Any]]] + ) -> dict[str, Union[torch.Tensor, Any]]: + device = self.accelerator.device + mode = "train" if self.model.training else "eval" + + prompts = [x["prompt"] for x in inputs] + + # We don't yet support visual reward models/function, so we keep a copy of the original text-only prompts for + # later use in the reward computation. If images are present, we insert {"type": "image"} as required by the + # VLM chat template. 
+ original_prompts = copy.deepcopy(prompts) + + # If the prompts are conversational and the inputs contain images, we need to convert the prompts from + # [{"role": "user", "content": "What color is the sky?"}] to + # [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "What color is the sky?"}]}] + kwargs = {} + has_images = "image" in inputs[0] + if has_images: + images = [example.get("image") for example in inputs] + kwargs = {"images": [[img] for img in images]} + for prompt in prompts: + if isinstance(prompt, list): + for message in prompt: + if not isinstance(message, dict): + continue + content = message.get("content") + role = message.get("role") + if isinstance(content, str): + if role == "user": + message["content"] = [{"type": "image"}, {"type": "text", "text": content}] + elif role == "system": + message["content"] = [{"type": "text", "text": content}] + + prompts_text = [maybe_apply_chat_template(example, self.processing_class)["prompt"] for example in inputs] + + prompt_inputs = self.processing_class( + text=prompts_text, + return_tensors="pt", + padding=True, + padding_side="left", + add_special_tokens=False, + **kwargs, + ) + prompt_inputs = super()._prepare_inputs(prompt_inputs) + prompt_ids, prompt_mask = prompt_inputs["input_ids"], prompt_inputs["attention_mask"] + if self.max_prompt_length is not None: + # If max_prompt_length is set, we trim the prompt to keep only the last `max_prompt_length` tokens. + # Then we decode those tokens back into text. We manually remove leading pad tokens from the decoded text, + # because we can't use `skip_special_tokens=True` (some special tokens are still needed for generation). + prompt_ids = prompt_ids[:, -self.max_prompt_length :] + prompt_mask = prompt_mask[:, -self.max_prompt_length :] + prompts_text = self.processing_class.batch_decode( + prompt_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False + ) + pad_token = self.processing_class.pad_token + def strip_leading_tokens(text): + while text.startswith(pad_token): + text = text.removeprefix(pad_token) + return text + + if pad_token is not None: + prompts_text = [ + strip_leading_tokens(text) for text in prompts_text + ] + + # Generate completions using either vLLM or regular generation + if self.use_vllm: + # First, update the vLLM weights if needed + if self.state.global_step != self._last_loaded_step: + self._move_model_to_vllm() + self._last_loaded_step = self.state.global_step + + # Generate completions using vLLM: gather all prompts and use them in a single call in the main process + if self.vllm_mode == "server": + all_prompts_text = gather_object(prompts_text) + if has_images: + all_images = gather_object(images) + + if self.accelerator.is_main_process: + # Since 'prompts' contains 'num_generations' duplicates, we first take unique prompts, and generate + # num_generations outputs for each one. This is faster than generating outputs for each duplicate + # prompt individually. 
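+                # e.g. (illustrative) with num_generations=2 and all_prompts_text == [p0, p0, p1, p1],
+                # the [::2] slice below keeps only the unique prompts [p0, p1].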
+ ordered_set_of_prompts = all_prompts_text[:: self.num_generations] + + if has_images: + ordered_set_of_images = all_images[:: self.num_generations] + else: + ordered_set_of_images = None + + with profiling_context(self, "vLLM.generate"): + completion_ids = self.vllm_client.generate( + prompts=ordered_set_of_prompts, + images=ordered_set_of_images, + n=self.num_generations, + repetition_penalty=self.repetition_penalty, + temperature=self.temperature, + top_p=self.top_p, + top_k=-1 if self.top_k is None else self.top_k, + min_p=0.0 if self.min_p is None else self.min_p, + max_tokens=self.max_completion_length, + guided_decoding_regex=self.guided_decoding_regex, + generation_kwargs=self.args.generation_kwargs, + ) + else: + completion_ids = [None] * len(all_prompts_text) + # Broadcast the completions from the main process to all processes, ensuring each process receives its + # corresponding slice. + completion_ids = broadcast_object_list(completion_ids, from_process=0) + process_slice = slice( + self.accelerator.process_index * len(prompts), + (self.accelerator.process_index + 1) * len(prompts), + ) + completion_ids = completion_ids[process_slice] + + # Generate completions using colocated vLLM instances: each device holds vLLM copy and work on their own batch of prompts + elif self.vllm_mode == "colocate": + if self.guided_decoding_regex: + guided_decoding = GuidedDecodingParams(regex=self.guided_decoding_regex) + else: + guided_decoding = None + + generation_kwargs = { + "n": 1, # vLLM on each GPU generates only 1 in colocate mode + "repetition_penalty": self.repetition_penalty, + "temperature": self.temperature, + "top_p": self.top_p, + "top_k": -1 if self.top_k is None else self.top_k, + "min_p": 0.0 if self.min_p is None else self.min_p, + "max_tokens": self.max_completion_length, + "guided_decoding": guided_decoding, + } + if self.args.generation_kwargs is not None: + generation_kwargs.update(self.args.generation_kwargs) + sampling_params = SamplingParams(**generation_kwargs) + + if self.vllm_tensor_parallel_size > 1: + # Gather prompts from all ranks in the TP group and flatten. + # Each rank starts with its own prompts; after gathering, all ranks see the full group set. 
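+                # Illustrative: with vllm_tensor_parallel_size=2 and 4 prompts per rank, each rank sees all
+                # 8 prompts after the all_gather below and later keeps only its own slice of 4 completions.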
+ orig_size = len(prompts_text) + gathered_prompts = [None for _ in range(self.vllm_tensor_parallel_size)] + torch.distributed.all_gather_object(gathered_prompts, prompts_text, group=self.tp_group) + all_prompts_text = [p for sublist in gathered_prompts for p in sublist] + + if has_images: + gathered_images = [None for _ in range(self.vllm_tensor_parallel_size)] + torch.distributed.all_gather_object(gathered_images, images, group=self.tp_group) + all_images = [img for sublist in gathered_images for img in sublist] + else: + all_images = None + else: + all_prompts_text = prompts_text + all_images = images if has_images else None + + if has_images and all_images: + vllm_inputs = [] + for prompt, image in zip(all_prompts_text, all_images): + if image is not None: + vllm_inputs.append({"prompt": prompt, "multi_modal_data": {"image": image}}) + else: + vllm_inputs.append(prompt) + else: + vllm_inputs = all_prompts_text + + with profiling_context(self, "vLLM.generate"): + all_outputs = self.llm.generate(vllm_inputs, sampling_params=sampling_params, use_tqdm=False, lora_request = self.model.load_lora('grpo_trainer_lora_model', load_tensors = True)) + + completion_ids = [output.token_ids for outputs in all_outputs for output in outputs.outputs] + + if self.vllm_tensor_parallel_size > 1: + # Slice completions for this rank within its TP group. + # Each rank generates all outputs — we keep only our share. + local_rank_in_group = torch.distributed.get_rank(group=self.tp_group) + tp_slice = slice(local_rank_in_group * orig_size, (local_rank_in_group + 1) * orig_size) + completion_ids = completion_ids[tp_slice] + + # Pad the completions, and concatenate them with the prompts + completion_ids = [torch.tensor(ids, device=device) for ids in completion_ids] + completion_ids = pad(completion_ids, padding_value=self.pad_token_id) + prompt_completion_ids = torch.cat([prompt_ids, completion_ids], dim=1) + + elif self.use_transformers_paged: + # Re-process inputs for paged generation if needed + # Note: images are already validated and preprocessed above + paged_prompt_inputs = self.processing_class(text=prompts_text, **kwargs) + previous_attn = self.model_wrapped.config._attn_implementation + + if is_flash_attn_2_available(): + self.model_wrapped.config._attn_implementation = "paged_attention" + else: + self.model_wrapped.config._attn_implementation = "sdpa_paged" + with ( + profiling_context(self, "transformers.generate_batch"), + unwrap_model_for_generation( + self.model_wrapped, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation + ) as unwrapped_model, + torch.no_grad(), + FSDP.summon_full_params(self.model_wrapped, recurse=False) if self.is_fsdp_enabled else nullcontext(), + ): + # Cast to the appropriate dtype based on training configuration + if self.args.bf16: + unwrapped_model.to(torch.bfloat16) + elif self.args.fp16: + unwrapped_model.to(torch.float16) + with torch.inference_mode(): + all_outputs = unwrapped_model.generate_batch( + paged_prompt_inputs.input_ids, generation_config=self.generation_config, progress_bar=False + ) + completion_ids = [output.generated_tokens for output in all_outputs.values()] + completion_ids = [torch.tensor(ids, device=device) for ids in completion_ids] + completion_ids = pad(completion_ids, padding_value=self.pad_token_id, padding_side="right") + prompt_ids = [torch.tensor(ids, device=device) for ids in paged_prompt_inputs.input_ids] + prompt_ids = pad(prompt_ids, padding_value=self.pad_token_id, padding_side="left") + prompt_completion_ids = 
torch.cat([prompt_ids, completion_ids], dim=1) + # Restore the original attention implementation, training mode + self.model_wrapped.config._attn_implementation = previous_attn + else: + # Regular generation path + with ( + profiling_context(self, "transformers.generate"), + unwrap_model_for_generation( + self.model_wrapped, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation + ) as unwrapped_model, + torch.no_grad(), + FSDP.summon_full_params(self.model_wrapped, recurse=False) if self.is_fsdp_enabled else nullcontext(), + ): + prompt_inputs["input_ids"], prompt_inputs["attention_mask"] = prompt_ids, prompt_mask + prompt_completion_ids = unwrapped_model.generate( + **prompt_inputs, generation_config=self.generation_config, disable_compile=True + ) + # Compute prompt length and extract completion ids + prompt_length = prompt_ids.size(1) + prompt_ids = prompt_completion_ids[:, :prompt_length] + completion_ids = prompt_completion_ids[:, prompt_length:] + + # Mask everything after the first EOS token + is_eos = completion_ids == self.eos_token_id + eos_idx = torch.full((is_eos.size(0),), is_eos.size(1), dtype=torch.long, device=device) + eos_idx[is_eos.any(dim=1)] = is_eos.int().argmax(dim=1)[is_eos.any(dim=1)] + sequence_indices = torch.arange(is_eos.size(1), device=device).expand(is_eos.size(0), -1) + completion_mask = (sequence_indices <= eos_idx.unsqueeze(1)).int() + + # Convert tensor to a list of lists of token IDs. This will be passed to the reward function, avoiding the need + # to re-tokenize completions if the reward is computed from tokens. + completion_ids_list = [ + [id.item() for id, m in zip(row, mask_row) if m] for row, mask_row in zip(completion_ids, completion_mask) + ] + + # Sum along sequence dimension (dim=1) to get completion length per sequence, used for logging + completion_lengths = completion_mask.sum(1) + + # If mask_truncated_completions is enabled, zero out truncated completions in completion_mask + if self.mask_truncated_completions: + truncated_completions = ~is_eos.any(dim=1) + completion_mask = completion_mask * (~truncated_completions).unsqueeze(1).int() + + # Concatenate prompt_mask with completion_mask for logit computation + attention_mask = torch.cat([prompt_mask, completion_mask], dim=1) # (B, P+C) + + logits_to_keep = completion_ids.size(1) # we only need to compute the logits for the completion tokens + batch_size = self.args.per_device_train_batch_size if mode == "train" else self.args.per_device_eval_batch_size + + with torch.no_grad(): + # If the generation and optimization steps are misaligned—i.e., if generation does not occur at the end of + # a full optimizer step (when gradient_accumulation_steps is not a multiple of generate_every)—then the + # samples may come from an earlier version of the model. In that case, we need to track old_per_token_logps + # for importance sampling. If the steps are aligned, importance sampling isn't necessary and we set + # old_per_token_logps to None. 
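+            # Worked example (hypothetical numbers): steps_per_generation=4 and num_iterations=1 give
+            # generate_every=4; with gradient_accumulation_steps=2 we have 2 % 4 != 0, so completions can
+            # straddle an optimizer step and old_per_token_logps must be tracked. With
+            # gradient_accumulation_steps=4 (or 8) the steps align and old_per_token_logps stays None.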
+ generate_every = self.args.steps_per_generation * self.num_iterations # generation frequency + if self.args.gradient_accumulation_steps % generate_every != 0: + old_per_token_logps, _ = self._get_per_token_logps_and_entropies( + self.model, + prompt_completion_ids, + attention_mask, + logits_to_keep, + batch_size, + pixel_values=prompt_inputs.get("pixel_values"), + image_grid_thw=prompt_inputs.get("image_grid_thw"), + pixel_attention_mask=prompt_inputs.get("pixel_attention_mask"), + image_sizes=prompt_inputs.get("image_sizes"), + ) + else: + old_per_token_logps = None + + # Compute the per-token log probabilities for the reference model + if self.beta != 0.0: + if self.ref_model is not None: + ref_per_token_logps, _ = self._get_per_token_logps_and_entropies( + self.ref_model, + prompt_completion_ids, + attention_mask, + logits_to_keep, + batch_size=batch_size, + pixel_values=prompt_inputs.get("pixel_values"), + image_grid_thw=prompt_inputs.get("image_grid_thw"), + pixel_attention_mask=prompt_inputs.get("pixel_attention_mask"), + image_sizes=prompt_inputs.get("image_sizes"), + ) + else: + with self.accelerator.unwrap_model(self.model).disable_adapter(): + ref_per_token_logps, _ = self._get_per_token_logps_and_entropies( + self.model, + prompt_completion_ids, + attention_mask, + logits_to_keep, + batch_size=batch_size, + pixel_values=prompt_inputs.get("pixel_values"), + image_grid_thw=prompt_inputs.get("image_grid_thw"), + pixel_attention_mask=prompt_inputs.get("pixel_attention_mask"), + image_sizes=prompt_inputs.get("image_sizes"), + ) + else: + ref_per_token_logps = None + + # Decode the generated completions + completions_text = self.processing_class.batch_decode(completion_ids, skip_special_tokens=True) + if is_conversational(inputs[0]): + completions = [] + for prompt, completion in zip(prompts, completions_text): + bootstrap = prompt.pop()["content"] if prompt[-1]["role"] == "assistant" else "" + completions.append([{"role": "assistant", "content": bootstrap + completion}]) + else: + completions = completions_text + + # Calculate rewards for each reward function. rewards_per_func aggregates rewards across all processes. This is + # important because rewards will be normalized per group, and completions are distributed. We will later slice + # rewards_per_func to extract each process's subset. 
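+        # Worked example (hypothetical numbers): with num_generations=4 and one group's weighted rewards
+        # [1.0, 0.0, 0.5, 0.5], the group mean is 0.5 and the advantages become [0.5, -0.5, 0.0, 0.0];
+        # when scale_rewards is enabled they are additionally divided by (group std + 1e-4).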
+ rewards_per_func = self._calculate_rewards(inputs, original_prompts, completions, completion_ids_list) + + # Apply weights to each reward function's output and sum + rewards = (rewards_per_func * self.reward_weights.to(device).unsqueeze(0)).nansum(dim=1) + + # Compute grouped-wise rewards + mean_grouped_rewards = rewards.view(-1, self.num_generations).mean(dim=1) + std_grouped_rewards = rewards.view(-1, self.num_generations).std(dim=1) + is_std_zero = torch.isclose(std_grouped_rewards, torch.zeros_like(std_grouped_rewards)) + + # Normalize the rewards to compute the advantages + mean_grouped_rewards = mean_grouped_rewards.repeat_interleave(self.num_generations, dim=0) + std_grouped_rewards = std_grouped_rewards.repeat_interleave(self.num_generations, dim=0) + advantages = rewards - mean_grouped_rewards + if self.scale_rewards: + advantages = advantages / (std_grouped_rewards + 1e-4) + + # Slice to keep only the local part of the data + process_slice = slice( + self.accelerator.process_index * len(prompts), + (self.accelerator.process_index + 1) * len(prompts), + ) + all_process_advantages = advantages.clone() # keep the aggregated advantages for logging + advantages = advantages[process_slice] + + # Log the metrics + if mode == "train": + self.state.num_input_tokens_seen += self.accelerator.gather(attention_mask.sum()).sum().item() + self._metrics[mode]["num_tokens"] = [self.state.num_input_tokens_seen] + + # Log completion lengths, mean, min, max + agg_completion_lengths = self.accelerator.gather(completion_lengths) + self._metrics[mode]["completions/mean_length"].append(agg_completion_lengths.float().mean().item()) + self._metrics[mode]["completions/min_length"].append(agg_completion_lengths.float().min().item()) + self._metrics[mode]["completions/max_length"].append(agg_completion_lengths.float().max().item()) + + # Identify sequences that terminated with EOS and log their lengths + agg_terminated_with_eos = self.accelerator.gather(is_eos.any(dim=1)) + term_completion_lengths = agg_completion_lengths[agg_terminated_with_eos] + clipped_completions_ratio = 1 - len(term_completion_lengths) / len(agg_completion_lengths) + self._metrics[mode]["completions/clipped_ratio"].append(clipped_completions_ratio) + if len(term_completion_lengths) == 0: # edge case where no terminated sequences are found + term_completion_lengths = torch.zeros(1, device=device) + self._metrics[mode]["completions/mean_terminated_length"].append(term_completion_lengths.float().mean().item()) + self._metrics[mode]["completions/min_terminated_length"].append(term_completion_lengths.float().min().item()) + self._metrics[mode]["completions/max_terminated_length"].append(term_completion_lengths.float().max().item()) + + # Calculate mean reward per function, but only for samples where the function was applied (non-NaN values) + for i, reward_func_name in enumerate(self.reward_func_names): + mean_rewards = torch.nanmean(rewards_per_func[:, i]).item() + self._metrics[mode][f"rewards/{reward_func_name}/mean"].append(mean_rewards) + std_rewards = nanstd(rewards_per_func[:, i]).item() + self._metrics[mode][f"rewards/{reward_func_name}/std"].append(std_rewards) + self._metrics[mode]["reward"].append(mean_grouped_rewards.mean().item()) + self._metrics[mode]["reward_std"].append(std_grouped_rewards.mean().item()) + self._metrics[mode]["frac_reward_zero_std"].append(is_std_zero.float().mean().item()) + + # Log prompt and completion texts + self._logs["prompt"].extend(gather_object(prompts_text)) + 
self._logs["completion"].extend(gather_object(completions_text)) + for i, name in enumerate(self.reward_func_names): + self._logs["rewards"][name].extend(rewards_per_func[:, i].tolist()) + self._logs["advantages"].extend(all_process_advantages.tolist()) + + if has_images: + self._logs["image"].extend(gather_object(images)) + + output = { + "prompt_ids": prompt_ids, + "prompt_mask": prompt_mask, + "completion_ids": completion_ids, + "completion_mask": completion_mask, + "advantages": advantages, + } + if old_per_token_logps is not None: + output["old_per_token_logps"] = old_per_token_logps + if ref_per_token_logps is not None: + output["ref_per_token_logps"] = ref_per_token_logps + if "pixel_values" in prompt_inputs: + output["pixel_values"] = prompt_inputs["pixel_values"] + if "image_grid_thw" in prompt_inputs: + output["image_grid_thw"] = prompt_inputs["image_grid_thw"] + if "pixel_attention_mask" in prompt_inputs: + output["pixel_attention_mask"] = prompt_inputs["pixel_attention_mask"] + if "image_sizes" in prompt_inputs: + output["image_sizes"] = prompt_inputs["image_sizes"] + return output + + def compute_liger_loss(self, unwrapped_model, inputs): + # Compute the per-token log probabilities for the model + prompt_ids, prompt_mask = inputs["prompt_ids"], inputs["prompt_mask"] + completion_ids, completion_mask = inputs["completion_ids"], inputs["completion_mask"] + input_ids = torch.cat([prompt_ids, completion_ids], dim=1) + attention_mask = torch.cat([prompt_mask, completion_mask], dim=1) + logits_to_keep = completion_ids.size(1) # we only need to compute the logits for the completion tokens + + # Get the last hidden state of the model + last_hidden_state = self._get_last_hidden_state( + unwrapped_model, + input_ids, + attention_mask, + logits_to_keep, + inputs.get("pixel_values"), + inputs.get("image_grid_thw"), + inputs.get("pixel_attention_mask"), + inputs.get("image_sizes"), + ) + + # compute loss and metrics using liger grpo loss + loss, metrics = self.liger_grpo_loss( + _input=last_hidden_state, + lin_weight=unwrapped_model.lm_head.weight, + selected_token_ids=completion_ids, + attention_mask=completion_mask, + advantages=inputs["advantages"], + bias=unwrapped_model.lm_head.bias, + old_per_token_logps=inputs.get("old_per_token_logps"), + ref_per_token_logps=inputs.get("ref_per_token_logps"), + ) + # Extract metrics from the liger_grpo_loss output + # KL divergence is the first metric when beta is non-zero + mean_kl = metrics[0] if self.beta != 0.0 else None + clip_ratio = metrics[-1] + + mode = "train" if self.model.training else "eval" + if self.beta != 0.0: + self._metrics[mode]["kl"].append(self.accelerator.gather(mean_kl).mean().item()) + self._metrics[mode]["clip_ratio"].append(self.accelerator.gather(clip_ratio).mean().item()) + return loss + + def compute_loss(self, model, inputs, return_outputs = False, num_items_in_batch = None): + if return_outputs: + raise ValueError("The GRPOTrainer does not support returning outputs") + # Compute the per-token log probabilities for the model + + prompt_ids, prompt_mask = inputs["prompt_ids"], inputs["prompt_mask"] + completion_ids, completion_mask = inputs["completion_ids"], inputs["completion_mask"] + input_ids = torch.cat([prompt_ids, completion_ids], dim=1) + bsz, qlen = input_ids.shape + attention_mask = torch.cat([prompt_mask, completion_mask], dim=1) + # attention_mask = None + logits_to_keep = completion_ids.size(1) # we only need to compute the logits for the completion tokens + _input_ids = input_ids + _logits_to_keep = 
logits_to_keep + + get_logps_func = \ + lambda model, input_ids, attention_mask, logits_to_keep, batch_size=None, compute_entropy=False: \ + self._get_per_token_logps(model, input_ids, attention_mask, logits_to_keep) \ + if hasattr(self, "_get_per_token_logps") else \ + self._get_per_token_logps_and_entropies(model, input_ids, attention_mask, logits_to_keep, batch_size, compute_entropy)[0] # logps + + per_token_logps = get_logps_func(model, input_ids, attention_mask, logits_to_keep) + + # Compute the KL divergence between the model and the reference model + # _prepare_inputs doesn't return reference log probs anymore. We need to calculate it ourselves. + # https://github.com/huggingface/trl/blob/05bc43e960396581e458195b8388efe6b82cae1f/trl/trainer/grpo_trainer.py#L1328 + if self.beta != 0.0: + with torch.inference_mode(), model.disable_adapter(): + ref_per_token_logps = per_token_logps = get_logps_func(model, input_ids, attention_mask, logits_to_keep) + else: + ref_per_token_logps = None + # per_token_kl = torch.exp(ref_per_token_logps - per_token_logps) - (ref_per_token_logps - per_token_logps) - 1 + # x - x.detach() allows for preserving gradients from x + advantages = inputs["advantages"] + # per_token_loss = torch.exp(per_token_logps - per_token_logps.detach()) * advantages.unsqueeze(1) + # per_token_loss = -(per_token_loss - self.beta * per_token_kl) + # loss = ((per_token_loss * completion_mask).sum(dim=1) / completion_mask.sum(dim=1)).mean() + old_hidden_states = inputs.get("old_per_token_logps", None) + input_ids = input_ids[:, -logits_to_keep:] + + # Get logit softcapping and logit scale + logit_softcapping = getattr(model.config, "final_logit_softcapping", 0) # Gemma + if logit_softcapping is None: logit_softcapping = 0 + logit_scale_multiply = getattr(model.config, "logit_scale", 0) # Cohere + if logit_scale_multiply is None: logit_scale_multiply = 0 + logit_scale_divide = getattr(model.config, "logits_scaling", 0) # Granite + if logit_scale_divide is None: logit_scale_divide = 0 + if per_token_logps is not None: + + if ref_per_token_logps is not None: + ref_per_token_logps = ref_per_token_logps[:, :-1, :] # (B, L-1, V), exclude the last logit: it corresponds to the next token pred + per_token_logps = per_token_logps[:, :-1, :] # (B, L-1, V), exclude the last logit: it corresponds to the next token pred + + loss, completion_length, mean_kl = grpo_compute_loss_slow( + ref_per_token_logps, + per_token_logps, + old_hidden_states, + input_ids, + completion_mask, + self.beta, + advantages, + loss_type = self.args.loss_type, + epsilon_low = self.epsilon_low, + epsilon_high = self.epsilon_high, + max_completion_length = self.args.max_completion_length, + delta = self.args.delta, + temperature = self.args.temperature, + logit_softcapping = logit_softcapping, + logit_scale_multiply = logit_scale_multiply, + logit_scale_divide = logit_scale_divide, + ) + else: + if hasattr(self.args, "loss_type"): + loss, completion_length, mean_kl = grpo_accumulated_loss( + trainer = self, + input_ids = _input_ids, + logits_to_keep = logits_to_keep, + completion_mask = completion_mask, + advantages = advantages, + old_hidden_states = old_hidden_states, + n_chunks = self.args.unsloth_num_chunks, + loss_type = self.args.loss_type, + epsilon_low = self.epsilon_low, + epsilon_high = self.epsilon_high, + max_completion_length = self.args.max_completion_length, + delta = self.args.delta, + temperature = self.args.temperature, + logit_softcapping = logit_softcapping, + logit_scale_multiply = 
logit_scale_multiply, + logit_scale_divide = logit_scale_divide, + attention_mask = attention_mask, + ) + else: + # to ensure backwards compatibility with trl 0.15.2 and maybe even 0.17 + loss, completion_length, mean_kl = grpo_accumulated_loss( + trainer = self, + input_ids = _input_ids, + logits_to_keep = logits_to_keep, + completion_mask = completion_mask, + advantages = advantages, + old_hidden_states = old_hidden_states, + n_chunks = self.args.unsloth_num_chunks, + temperature = self.args.temperature, + logit_softcapping = logit_softcapping, + logit_scale_multiply = logit_scale_multiply, + logit_scale_divide = logit_scale_divide, + attention_mask = attention_mask, + ) + pass + pass + # Log the metrics + # completion_length = self.accelerator.gather_for_metrics(completion_mask.sum(1)).float().mean().item() + # mean_kl = ((per_token_kl * completion_mask).sum(dim=1) / completion_mask.sum(dim=1)).mean() + # self._metrics["kl"].append(self.accelerator.gather_for_metrics(mean_kl).mean().item()) + if "train" in self._metrics: + mode = "eval" if self.control.should_evaluate else "train" + self._metrics[mode]["completion_length"].append(completion_length.item()) + self._metrics[mode]["kl"].append(mean_kl.item()) + else: + self._metrics["completion_length"].append(completion_length.item()) + self._metrics["kl"].append(mean_kl.item()) + return loss + + def _compute_loss(self, model, inputs): + # Compute the per-token log probabilities for the model + prompt_ids, prompt_mask = inputs["prompt_ids"], inputs["prompt_mask"] + completion_ids, completion_mask = inputs["completion_ids"], inputs["completion_mask"] + input_ids = torch.cat([prompt_ids, completion_ids], dim=1) + attention_mask = torch.cat([prompt_mask, completion_mask], dim=1) + logits_to_keep = completion_ids.size(1) # we only need to compute the logits for the completion tokens + + # Compute the per_token_logps and the entropy at each position in the completion + per_token_logps, entropies = self._get_per_token_logps_and_entropies( + model, + input_ids, + attention_mask, + logits_to_keep, + compute_entropy=True, + pixel_values=inputs.get("pixel_values"), + image_grid_thw=inputs.get("image_grid_thw"), + pixel_attention_mask=inputs.get("pixel_attention_mask"), + image_sizes=inputs.get("image_sizes"), + ) + + if self.top_entropy_quantile < 1.0: + entropy_mask = self.get_high_entropy_mask(entropies, completion_mask, 1 - self.top_entropy_quantile) + else: + entropy_mask = None + + # Compute the KL divergence between the model and the reference model + if self.beta != 0.0: + ref_per_token_logps = inputs["ref_per_token_logps"] + per_token_kl = ( + torch.exp(ref_per_token_logps - per_token_logps) - (ref_per_token_logps - per_token_logps) - 1 + ) + + # Compute the loss + advantages = inputs["advantages"] + # When using num_iterations == 1 and steps_per_generation <= gradient_accumulation_steps + # old_per_token_logps == per_token_logps, so we can skip it's computation + # (see _generate_and_score_completions) and use per_token_logps.detach() instead. 
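+        # In that case exp(per_token_logps - per_token_logps.detach()) is exactly 1 in the forward pass but
+        # still propagates gradients through per_token_logps, so the clipped ratio below reduces to a plain
+        # policy-gradient term weighted by the advantages.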
+ old_per_token_logps = inputs.get("old_per_token_logps") + old_per_token_logps = per_token_logps.detach() if old_per_token_logps is None else old_per_token_logps + + log_ratio = per_token_logps - old_per_token_logps + if self.importance_sampling_level == "token": + log_importance_weights = log_ratio + elif self.importance_sampling_level == "sequence": + log_importance_weights = (log_ratio * completion_mask).sum(-1) / completion_mask.sum(-1).clamp(min=1.0) + log_importance_weights = log_importance_weights.unsqueeze(-1) + else: + raise ValueError( + f"Unknown importance sampling level: {self.importance_sampling_level}. Possible values are 'token' " + "and 'sequence'." + ) + # From here, log_importance_weights (and all subsequent tensors, coef_1, coef_2, etc.) shape depends on + # importance_sampling_level: "token" level: (B, T); "sequence" level: (B, 1) + + coef_1 = torch.exp(log_importance_weights) + coef_2 = torch.clamp(coef_1, 1 - self.epsilon_low, 1 + self.epsilon_high) + + # Two-sided clipping + if self.args.delta is not None: + coef_1 = torch.clamp(coef_1, max=self.args.delta) + + per_token_loss1 = coef_1 * advantages.unsqueeze(1) + per_token_loss2 = coef_2 * advantages.unsqueeze(1) + per_token_loss = -torch.min(per_token_loss1, per_token_loss2) + if entropy_mask is not None: + per_token_loss = per_token_loss * entropy_mask + if self.beta != 0.0: + per_token_loss = per_token_loss + self.beta * per_token_kl + + if self.loss_type == "grpo": + loss = ((per_token_loss * completion_mask).sum(-1) / completion_mask.sum(-1).clamp(min=1.0)).mean() + elif self.loss_type == "bnpo": + loss = (per_token_loss * completion_mask).sum() / completion_mask.sum().clamp(min=1.0) + elif self.loss_type == "dr_grpo": + loss = (per_token_loss * completion_mask).sum() / (per_token_loss.size(0) * self.max_completion_length) + else: + raise ValueError(f"Unknown loss type: {self.loss_type}") + + # Log the metrics + mode = "train" if self.model.training else "eval" + + completion_token_count = completion_mask.sum().clamp(min=1.0) + + def masked_batch_mean(x): + if x.shape[1] == 1: # when importance_sampling_level == "sequence" + return x.mean() + else: + return (x * completion_mask).sum() / completion_token_count + + if self.beta != 0.0: + mean_kl = masked_batch_mean(per_token_kl) + self._metrics[mode]["kl"].append(self.accelerator.gather(mean_kl).nanmean().item()) + + mean_entropy = masked_batch_mean(entropies) + self._metrics[mode]["entropy"].append(self.accelerator.gather(mean_entropy).nanmean().item()) + + # Compute the clipped probability ratios + is_low_clipped = (coef_1 < 1 - self.epsilon_low) & (advantages.unsqueeze(1) < 0) + is_high_clipped = (coef_1 > 1 + self.epsilon_high) & (advantages.unsqueeze(1) > 0) + is_region_clipped = is_low_clipped | is_high_clipped + + low_clip = masked_batch_mean(is_low_clipped.float()) + high_clip = masked_batch_mean(is_high_clipped.float()) + clip_ratio = masked_batch_mean(is_region_clipped.float()) + + gathered_low_clip = self.accelerator.gather(low_clip) + self._metrics[mode]["clip_ratio/low_mean"].append(gathered_low_clip.nanmean().item()) + self._metrics[mode]["clip_ratio/low_min"].append(nanmin(gathered_low_clip).item()) + gathered_high_clip = self.accelerator.gather(high_clip) + self._metrics[mode]["clip_ratio/high_mean"].append(gathered_high_clip.nanmean().item()) + self._metrics[mode]["clip_ratio/high_max"].append(nanmax(gathered_high_clip).item()) + gathered_clip_ratio = self.accelerator.gather(clip_ratio) + 
+
+    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys: Optional[list[str]] = None):
+        inputs = self._prepare_inputs(inputs)
+        with torch.no_grad():
+            with self.compute_loss_context_manager():
+                loss = self.compute_loss(model, inputs)
+            loss = loss.mean().detach()
+        return loss, None, None
+
+    def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
+        mode = "train" if self.model.training else "eval"
+        metrics = {key: sum(val) / len(val) for key, val in self._metrics[mode].items()}  # average the metrics
+
+        # This method can be called both in training and evaluation. When called in evaluation, the keys in `logs`
+        # start with "eval_". We need to add the prefix "eval_" to the keys in `metrics` to match the format.
+        if mode == "eval":
+            metrics = {f"eval_{key}": val for key, val in metrics.items()}
+
+        logs = {**logs, **metrics}
+        super().log(logs, start_time)
+        self._metrics[mode].clear()
+
+        if self.accelerator.is_main_process and self.log_completions:
+            if is_rich_available():
+                print_prompt_completions_sample(
+                    self._logs["prompt"],
+                    self._logs["completion"],
+                    self._logs["rewards"],
+                    self._logs["advantages"],
+                    self.state.global_step,
+                    self.num_completions_to_print,
+                )
+
+            if self.args.report_to and "wandb" in self.args.report_to and wandb.run is not None:
+                import pandas as pd
+
+                table = {
+                    "step": [str(self.state.global_step)] * len(self._logs["prompt"]),
+                    "prompt": self._logs["prompt"],
+                    "completion": self._logs["completion"],
+                    **self._logs["rewards"],
+                    "advantage": self._logs["advantages"],
+                }
+
+                if self._logs["image"]:
+                    table["image"] = []
+                    for img in self._logs["image"]:
+                        if img is not None:
+                            # Convert images to wandb Image objects for proper visualization
+                            table["image"].append(wandb.Image(img))
+                        else:
+                            table["image"].append(None)
+
+                df = pd.DataFrame(table)
+                if self.wandb_log_unique_prompts:
+                    df = df.drop_duplicates(subset=["prompt"])
+                wandb.log({"completions": wandb.Table(dataframe=df)})
+
+    # Ensure the model card is saved along with the checkpoint
+    def _save_checkpoint(self, model, trial):
+        if self.args.hub_model_id is None:
+            model_name = Path(self.args.output_dir).name
+        else:
+            model_name = self.args.hub_model_id.split("/")[-1]
+        self.create_model_card(model_name=model_name)
+        super()._save_checkpoint(model, trial)
+
+    def create_model_card(
+        self,
+        model_name: Optional[str] = None,
+        dataset_name: Optional[str] = None,
+        tags: Union[str, list[str], None] = None,
+    ):
+        """
+        Creates a draft of a model card using the information available to the `Trainer`.
+
+        Args:
+            model_name (`str` or `None`, *optional*, defaults to `None`):
+                Name of the model.
+            dataset_name (`str` or `None`, *optional*, defaults to `None`):
+                Name of the dataset used for training.
+            tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
+                Tags to be associated with the model card.
+        """
+        if not self.is_world_process_zero():
+            return
+
+        if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
+            base_model = self.model.config._name_or_path
+        else:
+            base_model = None
+
+        # normalize `tags` to a mutable set
+        if tags is None:
+            tags = set()
+        elif isinstance(tags, str):
+            tags = {tags}
+        else:
+            tags = set(tags)
+
+        if hasattr(self.model.config, "unsloth_version"):
+            tags.add("unsloth")
+
+        tags.update(self._tag_names)
+
+        citation = textwrap.dedent(
+            """\
+            @article{zhihong2024deepseekmath,
+                title  = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
+                author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
+                year   = 2024,
+                eprint = {arXiv:2402.03300},
+            }
+            """
+        )
+
+        model_card = generate_model_card(
+            base_model=base_model,
+            model_name=model_name,
+            hub_model_id=self.hub_model_id,
+            dataset_name=dataset_name,
+            tags=tags,
+            wandb_url=wandb.run.url if is_wandb_available() and wandb.run is not None else None,
+            comet_url=get_comet_experiment_url(),
+            trainer_name="GRPO",
+            trainer_citation=citation,
+            paper_title="DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models",
+            paper_id="2402.03300",
+        )
+
+        model_card.save(os.path.join(self.args.output_dir, "README.md"))
+
+class UnslothGRPOTrainer(_UnslothGRPOTrainer):
+    """
+
+    Trainer for the Group Relative Policy Optimization (GRPO) method. This algorithm was initially proposed in the
+    paper [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language
+    Models](https://huggingface.co/papers/2402.03300).
+
+    Example:
+
+    ```python
+    from datasets import load_dataset
+    from trl import GRPOTrainer
+
+    dataset = load_dataset("trl-lib/tldr", split="train")
+
+    def reward_func(completions, **kwargs):
+        # Dummy reward function that rewards completions with more unique letters.
+        return [float(len(set(completion))) for completion in completions]
+
+    trainer = GRPOTrainer(
+        model="Qwen/Qwen2-0.5B-Instruct",
+        reward_funcs=reward_func,
+        train_dataset=dataset,
+    )
+
+    trainer.train()
+    ```
+
+    Args:
+        model (`Union[str, PreTrainedModel]`):
+            Model to be trained. Can be either:
+
+            - A string, being the *model id* of a pretrained model hosted inside a model repo on huggingface.co, or
+              a path to a *directory* containing model weights saved using
+              [`~transformers.PreTrainedModel.save_pretrained`], e.g., `'./my_model_directory/'`. The model is
+              loaded using [`~transformers.AutoModelForCausalLM.from_pretrained`] with the keyword arguments in
+              `args.model_init_kwargs`.
+            - A [`~transformers.PreTrainedModel`] object. Only causal language models are supported.
+        reward_funcs (`Union[RewardFunc, list[RewardFunc]]`):
+            Reward functions to be used for computing the rewards. To compute the rewards, we call all the reward
+            functions with the prompts and completions and sum the rewards. Can be either:
+
+            - A single reward function, such as:
+                - A string: The *model ID* of a pretrained model hosted inside a model repo on huggingface.co, or a
+                  path to a *directory* containing model weights saved using
+                  [`~transformers.PreTrainedModel.save_pretrained`], e.g., `'./my_model_directory/'`. The model is
+                  loaded using [`~transformers.AutoModelForSequenceClassification.from_pretrained`] with
+                  `num_labels=1` and the keyword arguments in `args.model_init_kwargs`.
+                - A [`~transformers.PreTrainedModel`] object: Only sequence classification models are supported.
+                - A custom reward function: The function is provided with the prompts and the generated completions,
+                  plus any additional columns in the dataset. It should return a list of rewards. Custom reward
+                  functions can also return `None` when the reward is not applicable to those samples. This is
+                  useful for multi-task training where different reward functions apply to different types of
+                  samples. When a reward function returns `None` for a sample, that reward function is excluded from
+                  the reward calculation for that sample. For more details, see [Using a custom reward
+                  function](#using-a-custom-reward-function).
+
+                  The trainer's state is also passed to the reward function. The trainer's state is an instance of
+                  [`~transformers.TrainerState`] and can be accessed via the `trainer_state` argument in the reward
+                  function's signature.
+            - A list of reward functions, where each item can independently be any of the above types. Mixing
+              different types within the list (e.g., a string model ID and a custom reward function) is allowed.
+        args ([`GRPOConfig`], *optional*, defaults to `None`):
+            Configuration for this trainer. If `None`, a default configuration is used.
+        train_dataset ([`~datasets.Dataset`] or [`~datasets.IterableDataset`]):
+            Dataset to use for training. It must include a column `"prompt"`. Any additional columns in the dataset
+            are ignored. The format of the samples can be either:
+
+            - [Standard](dataset_formats#standard): Each sample contains plain text.
+            - [Conversational](dataset_formats#conversational): Each sample contains structured messages (e.g., role
+              and content).
+        eval_dataset ([`~datasets.Dataset`], [`~datasets.IterableDataset`] or `dict[str, Union[Dataset, IterableDataset]]`):
+            Dataset to use for evaluation. It must meet the same requirements as `train_dataset`.
+        processing_class ([`~transformers.PreTrainedTokenizerBase`] or [`~transformers.ProcessorMixin`], *optional*, defaults to `None`):
+            Processing class used to process the data. The padding side must be set to "left". If `None`, the
+            processing class is loaded from the model's name with [`~transformers.AutoProcessor.from_pretrained`]. A
+            padding token, `tokenizer.pad_token`, must be set. If the processing class has not set a padding token,
+            `tokenizer.eos_token` will be used as the default.
+        reward_processing_classes (`Union[PreTrainedTokenizerBase, list[PreTrainedTokenizerBase]]`, *optional*, defaults to `None`):
+            Processing classes corresponding to the reward functions specified in `reward_funcs`. Can be either:
+
+            - A single processing class: Used when `reward_funcs` contains only one reward function.
+            - A list of processing classes: Must match the order and length of the reward functions in `reward_funcs`.
+
+            If set to `None`, or if an element of the list corresponding to a [`~transformers.PreTrainedModel`] is
+            `None`, the tokenizer for the model is automatically loaded using
+            [`~transformers.AutoTokenizer.from_pretrained`]. For elements in `reward_funcs` that are custom reward
+            functions (not [`~transformers.PreTrainedModel`]), the corresponding entries in
+            `reward_processing_classes` are ignored.
+        callbacks (list of [`~transformers.TrainerCallback`], *optional*, defaults to `None`):
+            List of callbacks to customize the training loop. Will add those to the list of default callbacks
+            detailed [here](https://huggingface.co/docs/transformers/main_classes/callback).
+
+            If you want to remove one of the default callbacks used, use the [`~transformers.Trainer.remove_callback`]
+            method.
+        optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*, defaults to `(None, None)`):
+            A tuple containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on
+            your model and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.
+        peft_config ([`~peft.PeftConfig`], *optional*, defaults to `None`):
+            PEFT configuration used to wrap the model. If `None`, the model is not wrapped.
+
+    """
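+    # __init__ below normalises Unsloth-specific settings (mixed-precision flags, eval batch size and
+    # accumulation, max_seq_length, tokenizer padding side) and registers per-reward metric names with
+    # unsloth_zoo's logging patch before handing construction over to the TRL GRPOTrainer.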
+    def __init__(
+        self,
+        model,
+        reward_funcs,
+        args = None,
+        train_dataset = None,
+        eval_dataset = None,
+        processing_class = None,
+        reward_processing_classes = None,
+        callbacks = None,
+        peft_config = None,
+        **kwargs
+    ):
+        if args is None: args = UnslothGRPOConfig()
+        use_bf16 = getattr(args, 'bf16', False)
+        if type(use_bf16) is not bool: use_bf16 = False
+        use_fp16 = getattr(args, 'fp16', False)
+        if type(use_fp16) is not bool: use_fp16 = False
+        force_float32 = False
+        if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':
+            print('Unsloth: Switching to float32 training since model cannot work with float16')
+            force_float32 = True
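+        # Resolve the training dtype: read the model's own dtype (from its config, falling back to the
+        # input embeddings), reject fp16/bf16 flags that contradict it, and tell Accelerate which mixed
+        # precision mode to use (or 'no' when float32 is forced).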
+        mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
+        dtype = getattr(model.config, 'torch_dtype', None)
+        if dtype is None: dtype = model.get_input_embeddings().dtype
+        from unsloth_zoo.utils import _get_dtype
+        dtype = _get_dtype(dtype)
+        float16 = dtype == torch.float16
+        if not force_float32 and (float16 and use_bf16):
+            raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
+        if not force_float32 and (not float16 and use_fp16):
+            raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
+        if force_float32:
+            args.fp16 = False
+            args.bf16 = False
+            os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
+        elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
+            args.fp16 = float16
+            args.bf16 = not float16
+            os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
+        if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
+            args.eval_strategy = 'steps'
+            if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
+        ga_steps = getattr(args, 'gradient_accumulation_steps', None)
+        if ga_steps is not None and ga_steps > 1:
+            from transformers import __version__ as transformers_version
+            if Version(transformers_version) <= Version('4.45.2'):
+                print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
+                      '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
+        if getattr(args, 'eval_strategy', 'no') != 'no':
+            eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
+            if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
+            if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
+        fp16_full_eval = getattr(args, 'fp16_full_eval', False)
+        if type(fp16_full_eval) is not bool: fp16_full_eval = False
+        bf16_full_eval = getattr(args, 'bf16_full_eval', False)
+        if type(bf16_full_eval) is not bool: bf16_full_eval = False
+        if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
+        if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
+        if force_float32:
+            args.bf16_full_eval = False
+            args.fp16_full_eval = False
+        elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
+            args.bf16_full_eval = True
+            args.fp16_full_eval = False
+        elif not bf16_full_eval and not fp16_full_eval:
+            args.bf16_full_eval = args.bf16
+            args.fp16_full_eval = args.fp16
+        _output_logits = False
+        if locals().get('compute_metrics', None) is not None: _output_logits = True
+        if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
+        if _output_logits:
+            os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
+        if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
+            pass
+        else:
+            model_max_seq_length = getattr(model, 'max_seq_length', None)
+            args_max_seq_length = getattr(args, 'max_seq_length', None)
+            if args_max_seq_length is None and model_max_seq_length is not None:
+                max_seq_length = model.max_seq_length
+                if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
+        if model is not None and hasattr(model, 'for_training'):
+            model.for_training()
+        if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
+        if 'processing_class' in locals():
+            if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
+            if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
+        other_metrics = []
+        if not isinstance(reward_funcs, list): _reward_funcs = [reward_funcs]
+        else: _reward_funcs = reward_funcs
+        for reward_func in _reward_funcs:
+            try:
+                reward_func_name = reward_func.__name__
+                other_metrics.append(f'rewards/{reward_func_name}/mean')
+                other_metrics.append(f'rewards/{reward_func_name}/std')
+            except: pass
+
+        from unsloth_zoo.logging_utils import PatchRLStatistics
+        PatchRLStatistics('grpo_trainer', other_metrics)
+
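+        # Hand construction over to the underlying TRL GRPOTrainer. The code after the call removes a
+        # stale NEFTune hook handle, re-applies `neftune_noise_alpha` directly on the input embeddings
+        # when requested, and exposes the Accelerate gradient scaler on every nested `.model` wrapper
+        # as `accelerator_scaler`.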
+        super().__init__(
+            model = model,
+            reward_funcs = reward_funcs,
+            args = args,
+            train_dataset = train_dataset,
+            eval_dataset = eval_dataset,
+            processing_class = processing_class,
+            reward_processing_classes = reward_processing_classes,
+            callbacks = callbacks,
+            peft_config = peft_config,
+            **kwargs)
+        if hasattr(self, 'neftune_hook_handle'):
+            self.neftune_hook_handle.remove()
+            if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
+        if getattr(args, 'neftune_noise_alpha', None) is not None:
+            model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
+        pass
+        if hasattr(self, 'accelerator'):
+            scaler = self.accelerator.scaler
+            current_model = model
+            while hasattr(current_model, 'model'):
+                current_model.accelerator_scaler = scaler
+                current_model = current_model.model
+            current_model.accelerator_scaler = scaler
+        pass
+
+pass
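+
+# Illustrative usage sketch. This block is not exercised by Unsloth itself; it only mirrors the example
+# in the UnslothGRPOTrainer docstring above. It assumes the model is first loaded through unsloth's
+# FastLanguageModel API, and the config values below are placeholder choices, not requirements.
+if __name__ == "__main__":
+    from datasets import load_dataset
+    from unsloth import FastLanguageModel
+
+    # Load a small instruct model and its tokenizer (model id taken from the docstring example).
+    model, tokenizer = FastLanguageModel.from_pretrained("Qwen/Qwen2-0.5B-Instruct", max_seq_length = 1024)
+    dataset = load_dataset("trl-lib/tldr", split = "train")
+
+    def reward_unique_letters(completions, **kwargs):
+        # Toy reward from the class docstring: completions with more unique characters score higher.
+        return [float(len(set(completion))) for completion in completions]
+
+    trainer = UnslothGRPOTrainer(
+        model = model,
+        reward_funcs = reward_unique_letters,
+        args = UnslothGRPOConfig(output_dir = "grpo-tldr", max_completion_length = 64),
+        train_dataset = dataset,
+        processing_class = tokenizer,
+    )
+    trainer.train()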