text stringlengths 1.03k 82.6k | file_name stringlengths 8 85 |
|---|---|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SFT dataset
- We assume user pass a single parquet file.
- We load all the data into the memory.
Each parquet file contains
"""
import numpy as np
import pandas as pd
import torch
from omegaconf.listconfig import ListConfig
from torch.utils.data import Dataset
from transformers import PreTrainedTokenizer
from verl.utils import hf_tokenizer
from verl.utils.fs import copy_to_local
from verl.utils.model import compute_position_id_with_mask
class SFTDataset(Dataset):
    """
    This is an in-memory SFTDataset

    Loads one or more parquet files fully into memory, extracts prompt and
    response columns, and tokenizes lazily per item in ``__getitem__`` into
    fixed-length tensors of ``max_length``.

    Arguments:
        parquet_files: a single path or a ListConfig of paths to parquet files
        tokenizer: a PreTrainedTokenizer instance, or a model name/path string
        config (OmegaConf): the data config
        max_samples: if > 0, restrict the dataset to at most this many rows
    """

    def __init__(self, parquet_files: str | ListConfig, tokenizer, config, max_samples: int = -1):
        # Column names for prompts/responses; *_dict_keys allow drilling into
        # nested dict-valued columns one key at a time.
        prompt_key = config.get("prompt_key", "prompt")
        prompt_dict_keys = config.get("prompt_dict_keys", None)
        response_key = config.get("response_key", "response")
        response_dict_keys = config.get("response_dict_keys", None)
        max_length = config.get("max_length", 1024)
        # What to do when prompt+response exceeds max_length: raise, or cut one side.
        truncation = config.get("truncation", "error")
        use_shm = config.get("use_shm", False)
        self.shuffle = config.get("shuffle", False)
        self.seed = config.get("seed")
        self.apply_chat_template_kwargs = config.get("apply_chat_template_kwargs", {})
        assert truncation in ["error", "left", "right"]
        self.truncation = truncation
        self.use_shm = use_shm
        # Normalize to a list so the download loop can iterate uniformly.
        if not isinstance(parquet_files, ListConfig):
            parquet_files = [parquet_files]
        self.parquet_files = parquet_files
        # max_samples <= 0 means "use the full dataset".
        self.max_samples = max_samples
        # Accept either a ready tokenizer or a model name/path string.
        if isinstance(tokenizer, str):
            tokenizer = hf_tokenizer(tokenizer)
        self.tokenizer: PreTrainedTokenizer = tokenizer
        # Keys are normalized to lists for uniform DataFrame column selection.
        self.prompt_key = prompt_key if isinstance(prompt_key, tuple | list) else [prompt_key]
        self.response_key = response_key if isinstance(response_key, tuple | list) else [response_key]
        self.prompt_dict_keys = prompt_dict_keys if prompt_dict_keys else []
        self.response_dict_keys = response_dict_keys if response_dict_keys else []
        self.max_length = max_length
        self._download()
        self._read_files_and_tokenize()

    def _download(self):
        """Copy each parquet file to local storage (optionally shared memory)."""
        for i, parquet_file in enumerate(self.parquet_files):
            self.parquet_files[i] = copy_to_local(parquet_file, verbose=True, use_shm=self.use_shm)

    def _read_files_and_tokenize(self):
        """Load all parquet files into one DataFrame and extract prompt/response lists."""

        def series_to_item(ls):
            # Unwrap singleton Series/ndarray containers until a plain item remains.
            import numpy
            import pandas

            while isinstance(ls, pandas.core.series.Series | numpy.ndarray) and len(ls) == 1:
                ls = ls[0]
            return ls

        dataframes = []
        for parquet_file in self.parquet_files:
            # read parquet files and cache
            dataframe = pd.read_parquet(parquet_file)
            dataframes.append(dataframe)
        self.dataframe = pd.concat(dataframes)
        total = len(self.dataframe)
        print(f"dataset len: {len(self.dataframe)}")
        # Optionally subsample the dataset (randomly when shuffle is enabled,
        # otherwise just the first max_samples rows).
        if self.max_samples > 0 and self.max_samples < total:
            if self.shuffle:
                rngs_args = (self.seed,) if self.seed is not None else ()
                rng = np.random.default_rng(*rngs_args)
                indices = rng.choice(total, size=self.max_samples, replace=False)
            else:
                indices = np.arange(self.max_samples)
            self.dataframe = self.dataframe.iloc[indices.tolist()]
            print(f"selected {self.max_samples} random samples out of {total}")
        self.prompts = self.dataframe[self.prompt_key]
        # Drill into nested dict-valued columns, one key per iteration.
        for key in self.prompt_dict_keys:
            # type(x): pandas.core.series.Series
            # type(x[0]): numpy.ndarray
            # type(x[0][0]): dict
            try:
                self.prompts = self.prompts.apply(lambda x: series_to_item(x)[key], axis=1)  # noqa: B023
            except Exception:
                print(f"self.prompts={self.prompts}")
                raise
        if isinstance(self.prompts, pd.DataFrame):
            self.prompts = self.prompts.squeeze()
        self.prompts = self.prompts.tolist()
        self.responses = self.dataframe[self.response_key]
        for key in self.response_dict_keys:
            try:
                self.responses = self.responses.apply(lambda x: series_to_item(x)[key], axis=1)  # noqa: B023
            except Exception:
                print(f"self.responses={self.responses}")
                raise
        if isinstance(self.responses, pd.DataFrame):
            self.responses = self.responses.squeeze()
        self.responses = self.responses.tolist()

    def __len__(self):
        return len(self.prompts)

    def __getitem__(self, item):
        """Tokenize one (prompt, response) pair into fixed-length training tensors.

        Returns a dict with ``input_ids``, ``attention_mask``, ``position_ids``
        and ``loss_mask``, all of length ``self.max_length``. The loss mask
        covers only the response tokens (excluding the final one).
        """
        tokenizer = self.tokenizer
        prompt = self.prompts[item]
        response = self.responses[item]
        # apply chat template
        prompt_chat = [{"role": "user", "content": prompt}]
        # string
        prompt_chat_str = tokenizer.apply_chat_template(
            prompt_chat, add_generation_prompt=True, tokenize=False, **self.apply_chat_template_kwargs
        )
        response_chat_str = response + tokenizer.eos_token
        # tokenize
        prompt_ids_output = tokenizer(prompt_chat_str, return_tensors="pt", add_special_tokens=False)
        prompt_ids = prompt_ids_output["input_ids"][0]
        prompt_attention_mask = prompt_ids_output["attention_mask"][0]
        response_ids_output = tokenizer(response_chat_str, return_tensors="pt", add_special_tokens=False)
        response_ids = response_ids_output["input_ids"][0]
        response_attention_mask = response_ids_output["attention_mask"][0]
        prompt_length = prompt_ids.shape[0]
        response_length = response_ids.shape[0]
        input_ids = torch.cat((prompt_ids, response_ids), dim=-1)
        attention_mask = torch.cat((prompt_attention_mask, response_attention_mask), dim=-1)
        # padding to max length
        sequence_length = input_ids.shape[0]
        if sequence_length < self.max_length:
            # NOTE: assumes the tokenizer has a pad_token_id set — TODO confirm upstream.
            padded_input_ids = (
                torch.ones(size=(self.max_length - sequence_length,), dtype=input_ids.dtype)
                * self.tokenizer.pad_token_id
            )
            padded_attention_mask = torch.zeros(size=(self.max_length - sequence_length,), dtype=attention_mask.dtype)
            input_ids = torch.cat((input_ids, padded_input_ids))
            attention_mask = torch.cat((attention_mask, padded_attention_mask))
        elif sequence_length > self.max_length:
            if self.truncation == "left":
                # actually, left truncation may not be reasonable
                input_ids = input_ids[-self.max_length :]
                attention_mask = attention_mask[-self.max_length :]
            elif self.truncation == "right":
                input_ids = input_ids[: self.max_length]
                attention_mask = attention_mask[: self.max_length]
            elif self.truncation == "error":
                raise NotImplementedError(f"{sequence_length=} is larger than {self.max_length=}")
            else:
                raise NotImplementedError(f"Unknown truncation method {self.truncation}")
        position_ids = compute_position_id_with_mask(attention_mask)
        loss_mask = attention_mask.clone()
        if prompt_length > 1:
            # mask out prompt for SFT.
            loss_mask[: min(prompt_length, loss_mask.size(0)) - 1] = 0
        # mask out the last token in response
        loss_mask[min(prompt_length + response_length, loss_mask.size(0)) - 1] = 0
        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "position_ids": position_ids,
            "loss_mask": loss_mask,
        }
| verl__utils__dataset__sft_dataset.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import BytesIO
from typing import Optional
import torch
from PIL import Image
def process_image(image: dict | Image.Image, image_patch_size: int = 14) -> Image.Image:
    """Normalize an image spec (PIL image or qwen-vl style dict) into an RGB PIL image."""
    from qwen_vl_utils import fetch_image

    # Already a PIL image: only ensure the RGB mode.
    if isinstance(image, Image.Image):
        return image.convert("RGB")
    # Decode raw bytes into the `image` key that fetch_image expects.
    if "bytes" in image:
        assert "image" not in image, "Cannot have both `bytes` and `image`"
        image["image"] = Image.open(BytesIO(image["bytes"]))
    # Older qwen_vl_utils versions do not accept image_patch_size; retry without it.
    try:
        return fetch_image(image, image_patch_size=image_patch_size)
    except Exception:
        return fetch_image(image)
VIDEO_FORMAT_HELP = """Currently, we only support the video formats introduced in qwen2-vl.
Refer to https://github.com/QwenLM/Qwen2.5-VL?tab=readme-ov-file#using---transformers-to-chat.
eg.
{
"type": "video",
"video": [
"file:///path/to/frame1.jpg",
"file:///path/to/frame2.jpg"
]
}
{
"type": "video",
"video": "file:///path/to/video.mp4"
}
# Defaults to fps=2, min_frames=4, max_frames=768
{
"type": "video",
"video": "file:///path/to/video.mp4",
"fps": 2,
"min_frames": 1,
"max_frames": 32
}
"""
def process_video(
    video: dict,
    image_patch_size: int = 14,
    nframes: Optional[int] = None,
    fps: Optional[float] = None,
    fps_min_frames: Optional[int] = None,
    fps_max_frames: Optional[int] = None,
    return_video_sample_fps: bool = False,
    return_video_metadata: bool = False,
) -> torch.Tensor:
    """Converts a video dict into a [n_frames, 3, H, W] tensor
    Add video sample FPS in a future MR
    """
    from qwen_vl_utils import fetch_video

    if not isinstance(video, dict) or "video" not in video:
        raise NotImplementedError(VIDEO_FORMAT_HELP)
    assert nframes is None or fps is None, "Can't use both `nframes` or `fps`"

    # Shallow-copy the spec so the caller's dict is never mutated.
    video = dict(video)

    # Only inject sampling parameters when the dict does not already carry its own.
    if "nframes" not in video and "fps" not in video:
        if nframes is not None:
            video["nframes"] = nframes
        elif fps is not None:
            video["fps"] = fps
            # Frame-count clamps only make sense together with an fps rule.
            if fps_min_frames is not None:
                video["min_frames"] = fps_min_frames
            if fps_max_frames is not None:
                video["max_frames"] = fps_max_frames

    return fetch_video(
        video,
        image_patch_size=image_patch_size,
        return_video_sample_fps=return_video_sample_fps,
        return_video_metadata=return_video_metadata,
    )
def process_multi_modal_inputs_for_minicpmo(input_ids, attention_mask, position_ids, cu_seqlens, multi_modal_inputs):
    """Repack per-sample MiniCPM-o vision inputs into the flattened layout the model expects.

    Image bounds are shifted to compensate for left padding and offset by the
    cumulative sequence lengths (needed for MiniCPM-o's vision-language
    alignment); pixel values are flattened into one list. Mutates
    ``multi_modal_inputs`` in place and returns it wrapped as ``{"data": ...}``.
    """
    # Index of the first attended token per row == amount of left padding.
    pad_lens = torch.argmax(attention_mask, dim=1)
    shifted_bounds = [
        bound.to(pad_lens.device) - pad_lens[i] + cu_seqlens[i]
        for i, bound in enumerate(multi_modal_inputs["image_bound"])
    ]
    # Flatten the nested per-sample pixel value lists into one flat list.
    flat_pixels = [p for sample in multi_modal_inputs["pixel_values"] for p in sample]
    multi_modal_inputs["pixel_values"] = [flat_pixels]
    multi_modal_inputs["image_bound"] = [torch.vstack(shifted_bounds)]
    multi_modal_inputs["tgt_sizes"] = [torch.vstack(multi_modal_inputs["tgt_sizes"])]
    multi_modal_inputs["input_ids"] = input_ids
    multi_modal_inputs["attention_mask"] = attention_mask
    multi_modal_inputs["position_ids"] = position_ids
    return {"data": multi_modal_inputs}
| verl__utils__dataset__vision_utils.py |
# Copyright 2025 Individual Contributor: TomQunChaoA
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import torch
from verl.protocol import DataProto
logger = logging.getLogger(__file__)
def calculate_token_list_diff(tensor1: torch.Tensor, tensor2: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
# verify inputs
if tensor1.numel() == 0 or tensor2.numel() == 0:
return torch.zeros(tensor1.shape[0], dtype=torch.long, device=tensor1.device)
if tensor1.shape != tensor2.shape or mask.shape != tensor1.shape or mask.shape != tensor2.shape:
print(
f"<WARN> dim of tensor1, tensor2, mask is not equal, {(tensor1.shape)=},{(tensor2.shape)=}, {(mask.shape)=}"
)
return torch.ones_like(tensor1)
# transfer to same device
if tensor2.device != tensor1.device:
tensor2 = tensor2.to(tensor1.device)
if mask.device != tensor1.device:
mask = mask.to(tensor1.device)
# calculate diff
diff_mask = tensor1 != tensor2
valid_diff_mask = diff_mask & (mask == 1)
diff_counts = valid_diff_mask.sum(dim=1)
return diff_counts
def pearson_correlation_coefficient(tensor1: torch.Tensor, tensor2: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    """Pearson correlation between masked elements of two tensors (0 on shape mismatch)."""
    # implemention of https://arxiv.org/pdf/2506.13585
    if tensor1.shape != tensor2.shape or mask.shape != tensor1.shape or mask.shape != tensor2.shape:
        return 0
    selected = torch.stack([torch.masked_select(tensor1, mask), torch.masked_select(tensor2, mask)], dim=0)
    corr_matrix = torch.corrcoef(selected)
    return corr_matrix[0][1].detach().item()
def calculate_log_prob_diff(log_probs1: torch.Tensor, log_probs2: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    """Return |log_probs1 - log_probs2| at the positions selected by the boolean mask."""
    return torch.masked_select((log_probs1 - log_probs2).abs(), mask)
def calculate_debug_metrics(data: DataProto) -> dict:
    """Compare rollout-time vs. actor-forward log-probs for debugging.

    Expects the batch to contain ``rollout_log_probs``, ``old_log_probs`` and
    ``responses``; uses ``response_mask`` (preferred) or ``attention_mask`` to
    restrict the comparison to response tokens.

    Returns:
        dict: metrics
            "training/rollout_probs_diff_valid": 1 -> input is valid, 0 -> invalid
            "training/rollout_probs_diff_max": max |prob diff| of rollout vs. actor
            "training/rollout_probs_diff_mean": mean |prob diff| of rollout vs. actor
            "training/rollout_probs_diff_std": std of |prob diff| of rollout vs. actor
            "training/rollout_actor_probs_pearson_corr": pearson corrcoef of probs,
                reference https://arxiv.org/pdf/2506.13585
    """
    rollout_log_probs = data.batch["rollout_log_probs"]
    actor_log_probs = data.batch["old_log_probs"]
    # Pick the most specific mask available.
    if "response_mask" in data.batch:
        logger.debug("response mask found, use it to mask log probs")
        log_prob_mask = data.batch["response_mask"]
    elif "attention_mask" in data.batch:
        log_prob_mask = data.batch["attention_mask"]
    else:
        logger.warning(f"no mask info found, use all log probs, {(data.batch.keys())=}")
        log_prob_mask = torch.ones_like(rollout_log_probs)
    # Only the trailing `response_length` positions belong to the response.
    response_length = data.batch["responses"].size(1)
    response_mask_bool = log_prob_mask[:, -response_length:].bool()
    # Work in probability space (exp of log-probs), as in the referenced paper.
    actor_probs = torch.exp(actor_log_probs)
    rollout_probs = torch.exp(rollout_log_probs)
    pearson = pearson_correlation_coefficient(actor_probs, rollout_probs, response_mask_bool)
    probs_diff = calculate_log_prob_diff(actor_probs, rollout_probs, response_mask_bool)
    return {
        "training/rollout_probs_diff_valid": 1,
        "training/rollout_probs_diff_max": torch.max(probs_diff).detach().item(),
        "training/rollout_probs_diff_mean": torch.mean(probs_diff).detach().item(),
        "training/rollout_probs_diff_std": torch.std(probs_diff).detach().item(),
        "training/rollout_actor_probs_pearson_corr": pearson,
    }
| verl__utils__debug__metrics.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Trajectory tracker can be inserted into code to save the intermediate results.
The results will be dump to hdfs for offline comparison.
Each process will have a client that first move all the tensors to CPU
"""
import io
import os
import tempfile
from collections import deque
import ray
import torch
from verl.utils.hdfs_io import copy, makedirs
remote_copy = ray.remote(copy)
@ray.remote
def save_to_hdfs(data: io.BytesIO, name, hdfs_dir, verbose):
    """Persist an in-memory buffer as `<name>.pth` locally, then upload it to hdfs_dir."""
    filename = name + ".pth"
    with tempfile.TemporaryDirectory() as tmp_dir:
        local_filepath = os.path.join(tmp_dir, filename)
        # Spill the buffer to a local temp file first; the hdfs copy works on paths.
        with open(local_filepath, "wb") as fout:
            fout.write(data.getbuffer())
        # upload to hdfs
        if verbose:
            print(f"Saving {local_filepath} to {hdfs_dir}")
        # Best-effort upload: report and continue on failure.
        try:
            copy(local_filepath, hdfs_dir)
        except Exception as e:
            print(e)
@ray.remote
class TrajectoryTracker:
    """Detached actor that fans out buffer uploads to HDFS via save_to_hdfs tasks."""

    def __init__(self, hdfs_dir, verbose) -> None:
        self.hdfs_dir = hdfs_dir
        makedirs(hdfs_dir)
        self.verbose = verbose
        # Pending upload task handles, drained by wait_for_hdfs().
        self.handle = deque()

    def dump(self, data: io.BytesIO, name):
        """Schedule an asynchronous upload of `data` under `name`."""
        # get a temp file and write to it
        self.handle.append(save_to_hdfs.remote(data, name, self.hdfs_dir, self.verbose))

    def wait_for_hdfs(self):
        """Block until every scheduled upload has completed."""
        while self.handle:
            ray.get(self.handle.popleft())
def dump_data(data, name):
    """Serialize `data` with torch and ship it to the global tracker.

    No-op unless the VERL_ENABLE_TRACKER environment variable is set to "1".
    """
    if os.getenv("VERL_ENABLE_TRACKER", "0") != "1":
        return
    buffer = io.BytesIO()
    torch.save(data, buffer)
    tracker = get_trajectory_tracker()
    ray.get(tracker.dump.remote(buffer, name))
def get_trajectory_tracker():
    """Fetch (or lazily create) the named, detached global tracker actor."""
    hdfs_dir = os.getenv("VERL_TRACKER_HDFS_DIR", default=None)
    verbose = os.getenv("VERL_TRACKER_VERBOSE", default="0") == "1"
    # The destination directory must be configured before any dump happens.
    assert hdfs_dir is not None
    return TrajectoryTracker.options(name="global_tracker", get_if_exists=True, lifetime="detached").remote(
        hdfs_dir, verbose
    )
if __name__ == "__main__":
    # testing
    # Smoke test: spawn 10 ray tasks that each dump a random tensor, then wait
    # for the detached tracker actor to finish all uploads.
    os.environ["VERL_ENABLE_TRACKER"] = "1"
    os.environ["VERL_TRACKER_HDFS_DIR"] = "~/debug/test"

    @ray.remote
    def process(iter):
        # Each task dumps a small random tensor under a unique name.
        data = {"obs": torch.randn(10, 20)}
        dump_data(data, f"process_{iter}_obs")

    ray.init()
    output_lst = []
    for i in range(10):
        output_lst.append(process.remote(i))
    out = ray.get(output_lst)
    # Ensure every asynchronous upload completes before the driver exits.
    tracker = get_trajectory_tracker()
    ray.get(tracker.wait_for_hdfs.remote())
| verl__utils__debug__trajectory_tracker.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# This code is inspired by the torchtune.
# https://github.com/pytorch/torchtune/blob/main/torchtune/utils/_device.py
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license in https://github.com/pytorch/torchtune/blob/main/LICENSE
import logging
import os
import platform
import subprocess
import torch
from packaging import version
logger = logging.getLogger(__name__)
def is_torch_npu_available(check_device=True) -> bool:
    """Check if Ascend NPU is available for PyTorch operations.

    Detects NPU availability by looking for the torch.npu namespace and,
    optionally, probing it with is_available().

    Args:
        check_device: only check the torch_npu package, or strictly check
            whether an NPU device is actually available.

    Returns:
        bool: True if NPU is available, False otherwise.
    """
    try:
        if not hasattr(torch, "npu"):
            return False
        # Either strictly probe the device, or settle for the namespace existing.
        return torch.npu.is_available() if check_device else True
    except ImportError:
        return False
# Accelerator availability is probed once at import time and cached in these
# module-level flags, which the helpers below consult.
is_cuda_available = torch.cuda.is_available()
is_npu_available = is_torch_npu_available()
def get_resource_name() -> str:
    """Function that return ray resource name based on the device type.

    Returns:
        ray resource name string, either "GPU" or "NPU".
    """
    # Anything that is not CUDA is scheduled under the "NPU" resource label.
    if is_cuda_available:
        return "GPU"
    return "NPU"
def get_visible_devices_keyword() -> str:
    """Get the environment variable name for visible device selection.

    Returns:
        str: 'CUDA_VISIBLE_DEVICES' if CUDA is available,
            'ASCEND_RT_VISIBLE_DEVICES' otherwise.
    """
    # Only the package presence matters here, not an actual attached device.
    if is_torch_npu_available(check_device=False):
        return "ASCEND_RT_VISIBLE_DEVICES"
    return "CUDA_VISIBLE_DEVICES"
def get_device_name() -> str:
    """Get the device type string based on available accelerators.

    Returns:
        str: Device type string ('cuda', 'npu', or 'cpu').
    """
    # CUDA wins over NPU; CPU is the last resort.
    if is_cuda_available:
        return "cuda"
    if is_npu_available:
        return "npu"
    return "cpu"
def get_torch_device():
    """Get the PyTorch device module for the current accelerator.

    Returns the torch device namespace (e.g. torch.cuda, torch.npu) for the
    detected accelerator, falling back to torch.cuda if it does not exist.

    Returns:
        module: The PyTorch device module (torch.cuda, torch.npu, etc.).
    """
    name = get_device_name()
    device_module = getattr(torch, name, None)
    if device_module is None:
        # Unknown namespace — fall back to CUDA and leave a trace in the logs.
        logger.warning(f"Device namespace '{name}' not found in torch, try to load torch.cuda.")
        return torch.cuda
    return device_module
def get_device_id() -> int:
    """Get the index of the current accelerator device.

    Returns:
        int: The current device index (e.g., 0 for 'cuda:0').
    """
    device_module = get_torch_device()
    return device_module.current_device()
def get_nccl_backend() -> str:
    """Get the distributed communication backend based on device type.

    Returns:
        str: Backend name ('hccl' for NPU, 'nccl' for CUDA/default).
    """
    # Ascend NPUs speak HCCL; everything else defaults to NCCL.
    return "hccl" if is_npu_available else "nccl"
def set_expandable_segments(enable: bool) -> None:
    """Configure CUDA memory allocator expandable segments setting.

    Expandable segments let the allocator grow existing segments instead of
    allocating new ones, which can help avoid OOM errors.

    Args:
        enable: If True, enable expandable segments. If False, disable them.

    Note:
        No-op unless CUDA is available; relies on a private torch allocator API.
    """
    if not is_cuda_available:
        return
    torch.cuda.memory._set_allocator_settings(f"expandable_segments:{enable}")
def auto_set_device(config) -> None:
    """Automatically configure device name for different accelerators.

    On Ascend NPU, defaults the trainer device to "npu" unless the user
    explicitly chose "cpu" (or already set "npu").

    Args:
        config: Configuration object with trainer.device attribute.
    """
    if not (config and hasattr(config, "trainer") and hasattr(config.trainer, "device")):
        return
    # Other cases: set device to "cuda" via config file, no need to change.
    if not is_torch_npu_available():
        return
    if config.trainer.device in ["cpu", "npu"]:
        return
    logger.warning(
        f"Detect setting config.trainer.device to {config.trainer.device} for Ascend NPU, maybe"
        f"from default value in config file, automatically set to `npu` instead."
    )
    config.trainer.device = "npu"
def get_device_capability(device_id: int = 0) -> tuple[int | None, int | None]:
    """Get the compute capability of a CUDA device.

    Args:
        device_id: The CUDA device index to query. Defaults to 0.

    Returns:
        tuple: (major, minor) compute capability, or (None, None) when CUDA
        is not available.
    """
    if not is_cuda_available:
        return None, None
    return torch.cuda.get_device_capability(device_id)
def get_npu_versions() -> tuple[str, str]:
    """Get the software version and CANN toolkit version for NPU devices.

    Returns:
        tuple[str, str]: A tuple of (software_version, cann_version)

    Raises:
        RuntimeError: If unable to retrieve version information
    """
    # Check npu-smi software version
    result = subprocess.run(["npu-smi", "info", "-t", "board", "-i", "1"], capture_output=True, text=True, check=True)
    # Parse software version from output
    software_version = None
    for line in result.stdout.split("\n"):
        if "Software Version" in line:
            # Extract version from line like: "Software Version : 25.3.rc1.2"
            parts = line.split(":")
            if len(parts) > 1:
                software_version = parts[1].strip().lower()
            break
    if not software_version:
        raise RuntimeError("Could not find Software Version in npu-smi output")
    # Check CANN toolkit version
    arch = platform.machine()
    if arch not in ["arm64", "aarch64", "x86_64"]:
        raise RuntimeError(f"Unsupported architecture: {arch}")
    # Install prefix can be overridden via ASCEND_HOME_PATH.
    ascend_home = os.environ.get("ASCEND_HOME_PATH", "/usr/local/Ascend/ascend-toolkit/latest")
    cann_path = os.path.join(ascend_home, f"{arch}-linux")
    if not os.path.exists(cann_path):
        raise RuntimeError(f"CANN toolkit path does not exist: {cann_path}")
    info_file = os.path.join(cann_path, "ascend_toolkit_install.info")
    if not os.path.exists(info_file):
        raise RuntimeError(f"CANN toolkit info file does not exist: {info_file}")
    # Parse version from info file (first "version=..." line wins)
    cann_version = None
    with open(info_file) as f:
        for line in f:
            if line.startswith("version="):
                cann_version = line.split("=", 1)[1].strip().lower()
                break
    if not cann_version:
        raise RuntimeError("Could not find version in CANN toolkit info file")
    return software_version, cann_version
def check_ipc_version_support(software_version: str, cann_version: str) -> bool:
    """Check if the given software and CANN versions support IPC.

    Compares the software version and CANN toolkit version against minimum
    required versions for IPC support:
    - Software Version should be >= 25.3.rc1
    - CANN version should be >= 8.3.rc1

    Args:
        software_version: The software version string (e.g., "25.5.0", "25.3.rc1.2", "25.5.t3.b001")
        cann_version: The CANN toolkit version string (e.g., "8.3.0", "8.3.rc1")

    Returns:
        bool: True if IPC is supported, False otherwise.

    Raises:
        RuntimeError: If version format is invalid
    """
    # For software_version like "25.3.rc1.2", "25.5.0", or "25.5.t3.b001",
    # we need to extract the base version
    # Use regex to extract version with the following rules:
    # - Standard version: 25.5.0 -> 25.5.0
    # - RC version: 25.3.rc1.2 -> 25.3.rc1
    # - t suffix version: 25.5.t3.b001 -> 25.5 (only first 2 parts if third part is lowercase t)
    # - RC version: 25.3.rc1 -> 25.3.rc1
    # For versions with more than 3 parts (e.g., 25.3.rc1.2), only match the first 3 parts
    import re

    # Match version with optional rc part or lowercase t suffix:
    # - If version has lowercase t (e.g., 25.5.t3.b001), only match first 2 parts
    # - Otherwise, match up to 3 parts (e.g., 25.5.0, 25.3.rc1.2)
    ascend_version_pattern = r"(\d+\.\d+(?=\.t))|(\d+\.\d+(?:\.(?:rc\d+|\d+))?)"
    software_match = re.match(ascend_version_pattern, software_version)
    if not software_match:
        raise RuntimeError(f"Invalid software version format: {software_version}")
    # Select the matched group (either first 2 parts or up to 3 parts)
    software_base = software_match.group(1) if software_match.group(1) else software_match.group(2)
    cann_match = re.match(ascend_version_pattern, cann_version)
    if not cann_match:
        raise RuntimeError(f"Invalid CANN version format: {cann_version}")
    else:
        # Select the matched group (either first 2 parts or up to 3 parts)
        cann_base = cann_match.group(1) if cann_match.group(1) else cann_match.group(2)
    # Both thresholds must be met; packaging's version.parse handles rc ordering.
    if version.parse(software_base) >= version.parse("25.3.rc1"):
        if version.parse(cann_base) >= version.parse("8.3.rc1"):
            return True
        else:
            logger.info(f"CANN version {cann_version} is below 8.3.RC1")
    else:
        logger.info(f"Software version {software_version} is below 25.3.rc1")
    return False
def is_support_ipc() -> bool:
    """Check if the device supports IPC (Inter-Process Communication).

    GPUs always support IPC; NPUs are gated on the driver and CANN toolkit
    versions; other devices (CPU) do not support IPC.

    Returns:
        bool: True if IPC is supported, False otherwise.
    """
    # CUDA devices: IPC is always available.
    if is_cuda_available:
        return True
    # Plain CPU (neither GPU nor NPU): no IPC.
    if not is_npu_available:
        return False
    # NPU: support depends on the installed driver/CANN versions.
    try:
        software_version, cann_version = get_npu_versions()
        return check_ipc_version_support(software_version, cann_version)
    except subprocess.CalledProcessError as e:
        raise RuntimeError(f"Failed to execute npu-smi command: {e}") from e
    except Exception as e:
        raise RuntimeError(f"Error checking IPC support: {e}") from e
| verl__utils__device.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for distributed training."""
import ctypes
import os
import socket
from datetime import timedelta
import ray
import torch.distributed
from verl.utils.device import get_device_name, get_nccl_backend, get_torch_device, is_npu_available
from verl.utils.net_utils import is_ipv6
def set_numa_affinity():
    """Best-effort: pin this process's CPU affinity to its accelerator's NUMA node."""
    if is_npu_available:
        # TODO (FightingZhen) libnuma.so is not available in e2e_ascend CI image, remove this code after image update.
        return
    nvml_initialized = False
    try:
        # Bail out early when NUMA is unusable on this host.
        libnuma = ctypes.CDLL("libnuma.so")
        if libnuma.numa_available() < 0:
            return
        import pynvml

        pynvml.nvmlInit()
        nvml_initialized = True
        device_name = "NPU" if is_npu_available else "GPU"
        # Ray hands each worker its accelerator id; use it as the NVML index.
        local_rank = int(ray.get_runtime_context().get_accelerator_ids()[device_name][0])
        device_handle = pynvml.nvmlDeviceGetHandleByIndex(local_rank)
        pynvml.nvmlDeviceSetCpuAffinity(device_handle)
    except ImportError:
        print("Warning: pynvml not available, skipping NUMA affinity setup")
    except Exception as e:
        print(f"Warning: Failed to set NUMA affinity: {e}")
    finally:
        if nvml_initialized:
            pynvml.nvmlShutdown()
def initialize_global_process_group(timeout_second=36000):
    """Initialize torch.distributed from env vars and bind the local device.

    Returns:
        tuple: (local_rank, rank, world_size) as ints read from the environment.
    """
    torch.distributed.init_process_group(
        get_nccl_backend(),
        timeout=timedelta(seconds=timeout_second),
        init_method=os.environ.get("DIST_INIT_METHOD", None),
    )
    local_rank, rank, world_size = (int(os.environ[k]) for k in ("LOCAL_RANK", "RANK", "WORLD_SIZE"))
    if torch.distributed.is_initialized():
        get_torch_device().set_device(local_rank)
    return local_rank, rank, world_size
def destroy_global_process_group():
    """Tear down the global process group if one exists (safe no-op otherwise)."""
    if not torch.distributed.is_initialized():
        return
    torch.distributed.destroy_process_group()
def initialize_global_process_group_ray(timeout_second=None, backend=None):
    """Initialize torch.distributed inside a ray worker (idempotent)."""
    # in current ray environment, LOCAL_RANK is always zero.
    import torch.distributed

    if torch.distributed.is_initialized():
        return
    timeout = None if timeout_second is None else timedelta(seconds=timeout_second)
    # Default backend pairs gloo for CPU tensors with the accelerator's collective.
    comm_backend = backend or f"cpu:gloo,{get_device_name()}:{get_nccl_backend()}"
    torch.distributed.init_process_group(
        backend=comm_backend,
        rank=int(os.environ.get("RANK", 0)),
        world_size=int(os.environ.get("WORLD_SIZE", 1)),
        timeout=timeout,
        init_method=os.environ.get("DIST_INIT_METHOD", None),
    )
def stateless_init_process_group(master_address, master_port, rank, world_size, device):
    """
    vLLM provides `StatelessProcessGroup` to create a process group
    without considering the global process group in torch.distributed.
    It is recommended to create `StatelessProcessGroup`, and then initialize
    the data-plane communication (NCCL) between external (train processes)
    and vLLM workers.
    """
    # NOTE: If it is necessary to support weight synchronization with the sglang backend in the future,
    # the following can be used:
    # from sglang.srt.distributed.device_communicators.pynccl import PyNcclCommunicator
    # from sglang.srt.distributed.utils import statelessprocessgroup
    from torch.distributed import TCPStore
    from vllm.distributed.utils import StatelessProcessGroup

    from verl.utils.device import is_npu_available

    # On Ascend NPU the HCCL communicator is used as a drop-in PyNccl replacement.
    if is_npu_available:
        from vllm_ascend.distributed.device_communicators.pyhccl import PyHcclCommunicator as PyNcclCommunicator
    else:
        from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator

    def create_process_group(
        host: str,
        port: int,
        rank: int,
        world_size: int,
        data_expiration_seconds: int = 3600,
        store_timeout: int = 300,
    ) -> "StatelessProcessGroup":
        """
        This is copied from vllm/distributed/utils.py:StatelessProcessGroup.create
        Modified to support ipv6 stateless communication groups."""
        # Rank 0 hosts the TCPStore server for the whole group.
        launch_server = rank == 0
        if launch_server:
            # listen on the specified interface (instead of 0.0.0.0)
            if is_ipv6(master_address):
                listen_socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            else:
                listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            listen_socket.bind((host, port))
            listen_socket.listen()
            # Hand the already-bound fd to TCPStore so the chosen interface is used.
            listen_fd = listen_socket.fileno()
        else:
            listen_socket = None
            listen_fd = None
        store = TCPStore(
            host_name=host,
            port=port,
            world_size=world_size,
            is_master=launch_server,
            timeout=timedelta(seconds=store_timeout),
            use_libuv=False,  # for now: github.com/pytorch/pytorch/pull/150215
            master_listen_fd=listen_fd,
        )
        return StatelessProcessGroup(
            rank=rank,
            world_size=world_size,
            store=store,
            socket=listen_socket,
            data_expiration_seconds=data_expiration_seconds,
        )

    pg = create_process_group(host=master_address, port=master_port, rank=rank, world_size=world_size)
    # Build the NCCL/HCCL data-plane communicator on top of the stateless group.
    pynccl = PyNcclCommunicator(pg, device=device)
    return pynccl
| verl__utils__distributed.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
def _fused_linear_for_ppo_fwd(
hidden_states: torch.FloatTensor,
vocab_weights: torch.FloatTensor,
input_ids: torch.LongTensor,
temperature: float = 1.0,
) -> tuple[torch.FloatTensor, torch.FloatTensor]:
logits = (hidden_states @ vocab_weights.t()) / temperature
orig_dtype = logits.dtype
logits = logits.to(torch.float32)
# Slower but more numerically stable to do log_softmax than probs.log()
probs = logits.softmax(dim=-1)
log_probs = logits.log_softmax(dim=-1)
token_log_probs = log_probs.gather(-1, input_ids.unsqueeze(-1)).squeeze(-1)
entropy = torch.logsumexp(logits, dim=-1) - torch.sum(probs * logits, dim=-1)
return token_log_probs.to(orig_dtype), entropy.to(orig_dtype)
def _fused_linear_for_ppo_bwd(
dlog_probs: Optional[torch.FloatTensor],
dentropy: Optional[torch.FloatTensor],
hidden_states: torch.FloatTensor,
vocab_weights: torch.FloatTensor,
input_ids: torch.LongTensor,
temperature: float = 1.0,
) -> tuple[torch.FloatTensor, torch.FloatTensor]:
logits = (hidden_states @ vocab_weights.t()) / temperature
orig_dtype = logits.dtype
logits = logits.to(torch.float32)
probs = logits.softmax(dim=-1)
dlogits = 0
# Gradient from log_probs
if dlog_probs is not None:
one_hot_input = torch.zeros_like(logits).scatter_(-1, input_ids.unsqueeze(-1), 1)
dlogits += dlog_probs.to(torch.float32).unsqueeze(-1) * (one_hot_input - probs)
# Gradient from entropy
if dentropy is not None:
log_probs = logits.log_softmax(dim=-1)
entropy = torch.logsumexp(logits, dim=-1) - torch.sum(probs * logits, dim=-1)
dlogits += probs * (log_probs + entropy.unsqueeze(-1)) * (-dentropy.unsqueeze(-1))
dlogits = dlogits.to(orig_dtype) / temperature
dhidden_states = dlogits @ vocab_weights
dvocab_weights = dlogits.t() @ hidden_states
return dhidden_states, dvocab_weights
class FusedLinearForPPOFunction(torch.autograd.Function):
    """Chunked fused LM-head projection + log-prob/entropy for PPO.

    Processes the sequence ``chunk_size`` token positions at a time so the
    full [T, vocab] logits tensor is never materialized; the backward pass
    recomputes logits per chunk instead of storing them.
    """

    @staticmethod
    def forward(
        ctx,
        hidden_states: torch.FloatTensor,
        vocab_weights: torch.FloatTensor,
        input_ids: torch.LongTensor,
        temperature: float = 1.0,
        chunk_size: int = 512,
    ) -> tuple[torch.FloatTensor, torch.FloatTensor]:
        """Return ``(log_probs, entropy)``, each shaped like ``input_ids``."""
        # Undefined output grads stay None in backward() rather than being
        # materialized as zero tensors.
        ctx.set_materialize_grads(False)

        # Cast to a 2D tensor of the shape [T, D] for ease of working
        orig_ndim = hidden_states.ndim
        assert orig_ndim in (2, 3), f"Invalid hidden_states shape, received {hidden_states.shape}"

        orig_batch_size = -1  # sentinel: only meaningful when orig_ndim == 3
        if orig_ndim == 3:
            assert input_ids.ndim == 2, f"input_ids shape doesn't match, {hidden_states.shape} {input_ids.shape}"
            orig_batch_size = hidden_states.shape[0]
            hidden_states = hidden_states.flatten(0, 1)
            input_ids = input_ids.flatten(0, 1)

        T = hidden_states.shape[0]

        # Allocate memory for outputs
        output_requires_grad = hidden_states.requires_grad or vocab_weights.requires_grad
        log_probs = hidden_states.new_zeros(T, requires_grad=output_requires_grad)
        entropy = hidden_states.new_zeros(T, requires_grad=output_requires_grad)

        # Perform forward one chunk at a time
        for chunk_start in range(0, T, chunk_size):
            chunk_end = min(chunk_start + chunk_size, T)
            chunk_log_probs, chunk_entropy = _fused_linear_for_ppo_fwd(
                hidden_states=hidden_states[chunk_start:chunk_end],
                vocab_weights=vocab_weights,
                input_ids=input_ids[chunk_start:chunk_end],
                temperature=temperature,
            )
            log_probs[chunk_start:chunk_end] = chunk_log_probs
            entropy[chunk_start:chunk_end] = chunk_entropy

        # Cast the output back to the original input dimension
        if orig_ndim == 3:
            log_probs = log_probs.view(orig_batch_size, -1)
            entropy = entropy.view(orig_batch_size, -1)

        # Save the flattened [T, D] views; backward recomputes logits from them.
        ctx.save_for_backward(hidden_states, vocab_weights, input_ids)
        ctx.orig_batch_size = orig_batch_size
        ctx.orig_ndim = orig_ndim
        ctx.temperature = temperature
        ctx.chunk_size = chunk_size

        return log_probs, entropy

    @staticmethod
    def backward(ctx, dlog_probs: Optional[torch.FloatTensor], dentropy: Optional[torch.FloatTensor]):
        """Chunked backward; either output grad may be None (unused output)."""
        assert dlog_probs is not None or dentropy is not None

        hidden_states, vocab_weights, input_ids = ctx.saved_tensors
        orig_batch_size = ctx.orig_batch_size
        orig_ndim = ctx.orig_ndim
        temperature = ctx.temperature
        chunk_size = ctx.chunk_size

        # Here orig_ndim refers to the orig_ndim of hidden_states
        if orig_ndim == 3:
            if dlog_probs is not None:
                dlog_probs = dlog_probs.flatten()
            if dentropy is not None:
                dentropy = dentropy.flatten()

        T = hidden_states.shape[0]

        # Allocate memory for outputs
        dhidden_states = None
        if hidden_states.requires_grad:
            dhidden_states = torch.zeros_like(hidden_states)
        dvocab_weights = None
        if vocab_weights.requires_grad:
            dvocab_weights = torch.zeros_like(vocab_weights)

        # Perform backward one chunk at a time
        for chunk_start in range(0, T, chunk_size):
            chunk_end = min(chunk_start + chunk_size, T)
            chunk_dlog_probs = None
            if dlog_probs is not None:
                chunk_dlog_probs = dlog_probs[chunk_start:chunk_end]
            chunk_dentropy = None
            if dentropy is not None:
                chunk_dentropy = dentropy[chunk_start:chunk_end]
            h, v = _fused_linear_for_ppo_bwd(
                dlog_probs=chunk_dlog_probs,
                dentropy=chunk_dentropy,
                hidden_states=hidden_states[chunk_start:chunk_end],
                vocab_weights=vocab_weights,
                input_ids=input_ids[chunk_start:chunk_end],
                temperature=temperature,
            )
            # Hidden-state grads are per-position; weight grads accumulate
            # across all chunks.
            if hidden_states.requires_grad:
                dhidden_states[chunk_start:chunk_end] += h
            if vocab_weights.requires_grad:
                dvocab_weights += v

        # Cast the output back to the original input dimension
        if orig_ndim == 3 and hidden_states.requires_grad:
            hidden_size = hidden_states.shape[-1]
            dhidden_states = dhidden_states.view(orig_batch_size, -1, hidden_size)

        return (
            dhidden_states,  # hidden_states
            dvocab_weights,  # vocab_weights
            None,  # input_ids
            None,  # temperature
            None,  # chunk_size
        )
class FusedLinearForPPO(torch.nn.Module):
    """Module wrapper around :class:`FusedLinearForPPOFunction`.

    Computes per-token log-probs and entropy from hidden states and the LM
    head weight, processing positions in memory-friendly chunks.

    Args:
        chunk_size: number of token positions processed per chunk.
    """

    def __init__(self, chunk_size: int = 512):
        super().__init__()
        self.chunk_size = chunk_size

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        vocab_weights: torch.FloatTensor,
        input_ids: torch.LongTensor,
        temperature: float = 1.0,
    ) -> tuple[torch.FloatTensor, torch.FloatTensor]:
        # gather() inside the autograd function requires int64 indices.
        ids = input_ids.to(torch.int64)
        return FusedLinearForPPOFunction.apply(hidden_states, vocab_weights, ids, temperature, self.chunk_size)
| verl__utils__experimental__torch_functional.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import torch
from transformers import PretrainedConfig
from verl.utils.device import get_torch_device
# Theoretical peak throughput per device, in FLOPS (floating-point ops / sec).
# Used as the "promised" denominator when computing MFU; lookup matches by
# substring of the reported device name.
# NOTE(review): the datatype basis for each number (bf16 vs fp16, dense vs
# sparse) is not recorded here — confirm against vendor datasheets before
# relying on absolute MFU values.
_DEVICE_FLOPS = {
    "CPU": 448e9,
    "GB200": 2.5e15,
    "B200": 2.25e15,
    "MI300X": 1336e12,
    "H100": 989e12,
    "H800": 989e12,
    "H200": 989e12,
    "A100": 312e12,
    "A800": 312e12,
    "L40S": 362.05e12,
    "L40": 181.05e12,
    "A40": 149.7e12,
    "L20": 119.5e12,
    "H20": 148e12,
    "910B": 354e12,
    "Ascend910": 354e12,
    "RTX 3070 Ti": 21.75e12,
}
def get_device_flops(unit="T", device_name=None):
    """Get the theoretical FLOPS (Floating Point Operations Per Second) capacity of the current device.

    Args:
        unit (str): The unit to return the FLOPS in. Supported values are:
            "B" - base unit: raw FLOPS, no scaling applied
            "K" - Kilo (1e3)
            "M" - Mega (1e6)
            "G" - Giga (1e9)
            "T" - Tera (1e12, default)
            "P" - Peta (1e15)
        device_name (str, optional): Override the detected device name.
            Intended for testing purposes only.

    Returns:
        float: The theoretical FLOPS capacity of the current device in the specified unit.
        Returns float('inf') for unknown device types.
    """

    def unit_convert(number, level):
        # Walk down the scale list, dividing by 1000 per step, until the
        # requested level is reached ("B" therefore means no scaling at all).
        units = ["B", "K", "M", "G", "T", "P"]
        if number <= 0:
            return number
        ptr = 0
        while ptr < len(units) and units[ptr] != level:
            number /= 1000
            ptr += 1
        return number

    # pass device_name is for testing purpose only
    if device_name is None:
        device = get_torch_device()
        # NOTE(review): assumes get_torch_device() returns the torch.cpu
        # module object on CPU-only hosts — confirm against verl.utils.device.
        if device == torch.cpu:
            device_name = "CPU"
        else:
            device_name = get_torch_device().get_device_name()

    flops = float("inf")  # INF flops for unknown device type
    # Iterate keys in reverse-sorted order so substring matching against the
    # reported device name is deterministic.
    for key, value in sorted(_DEVICE_FLOPS.items(), reverse=True):
        if key in device_name:
            flops = value
            break
    flops_unit = unit_convert(flops, unit)
    return flops_unit
def _estimate_qwen2_flops(config, tokens_sum, batch_seqlens, delta_time):
hidden_size = config.hidden_size
vocab_size = config.vocab_size
num_hidden_layers = config.num_hidden_layers
num_key_value_heads = config.num_key_value_heads
num_attention_heads = config.num_attention_heads
intermediate_size = config.intermediate_size
head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
q_size = num_attention_heads * head_dim
k_size = num_key_value_heads * head_dim
v_size = num_key_value_heads * head_dim
# non-attn per layer parm
# Qwen2/LLama use SwiGelu, gate, having up and down linear layer in mlp
mlp_N = hidden_size * intermediate_size * 3
attn_linear_N = hidden_size * (q_size + k_size + v_size + num_attention_heads * head_dim)
emd_and_lm_head_N = vocab_size * hidden_size * 2
# non-attn all_layer parm
dense_N = (mlp_N + attn_linear_N) * num_hidden_layers + emd_and_lm_head_N
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * dense_N * tokens_sum
# attn all_layer & all_token fwd & bwd flops
seqlen_square_sum = 0
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen
attn_qkv_flops = 6 * seqlen_square_sum * head_dim * num_attention_heads * num_hidden_layers
# all_layer & all_token fwd & bwd flops
flops_all_token = dense_N_flops + attn_qkv_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def _estimate_qwen3_vl_flops(config, tokens_sum, batch_seqlens, delta_time, **kargs):
# qwen3_vl uses text_config and vision_config to distinguish configs of different parts.
hidden_size = config.text_config.hidden_size
vocab_size = config.text_config.vocab_size
num_hidden_layers = config.text_config.num_hidden_layers
num_key_value_heads = config.text_config.num_key_value_heads
num_attention_heads = config.text_config.num_attention_heads
intermediate_size = config.text_config.intermediate_size
head_dim = hidden_size // num_attention_heads
q_size = num_attention_heads * head_dim
k_size = num_key_value_heads * head_dim
v_size = num_key_value_heads * head_dim
# non-attn per layer parm
mlp_N = hidden_size * intermediate_size * 3
attn_linear_N = hidden_size * (q_size + k_size + v_size + num_attention_heads * head_dim)
emd_and_lm_head_N = vocab_size * hidden_size * 2
# non-attn all_layer parm
dense_N = (mlp_N + attn_linear_N) * num_hidden_layers + emd_and_lm_head_N
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * dense_N * tokens_sum
# qwen3_vl uses deepstack to merge visual embeds and text embeds, but it has no tensor operation.
# attn all_layer & all_token fwd & bwd flops
seqlen_square_sum = 0
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen
attn_qkv_flops = 6 * seqlen_square_sum * head_dim * num_attention_heads * num_hidden_layers
# vit flops
images_seqlens = kargs.get("images_seqlens", None)
if images_seqlens is not None:
vit_flops = _estimate_qwen3_vit_flop(images_seqlens, config.vision_config)
else:
vit_flops = 0
# all_layer & all_token fwd & bwd flops
flops_all_token = dense_N_flops + attn_qkv_flops + vit_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def _estimate_qwen3_vl_moe_flops(config, tokens_sum, batch_seqlens, delta_time, **kargs):
# qwen3_vl uses text_config and vision_config to distinguish configs of different parts.
hidden_size = config.text_config.hidden_size
vocab_size = config.text_config.vocab_size
num_hidden_layers = config.text_config.num_hidden_layers
num_key_value_heads = config.text_config.num_key_value_heads
num_attention_heads = config.text_config.num_attention_heads
moe_intermediate_size = config.text_config.moe_intermediate_size
moe_num_expert = config.text_config.num_experts
moe_topk = config.text_config.num_experts_per_tok
head_dim = getattr(
config.text_config, "head_dim", config.text_config.hidden_size // config.text_config.num_attention_heads
)
q_size = num_attention_heads * head_dim
k_size = num_key_value_heads * head_dim
v_size = num_key_value_heads * head_dim
# non-attn per layer parm
moe_gata_N = hidden_size * moe_num_expert
# moe has gate_proj, up_proj and down_proj using SwiGLU in ExpertMlp layer & shared experts
moe_expertmlp_N = hidden_size * moe_intermediate_size * (moe_topk) * 3
attn_linear_N = hidden_size * (q_size + k_size + v_size + num_attention_heads * head_dim)
emd_and_lm_head_N = vocab_size * hidden_size * 2
# non-attn all_layer parm
moe_N = (moe_gata_N + moe_expertmlp_N + attn_linear_N) * (num_hidden_layers) + emd_and_lm_head_N
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * moe_N * tokens_sum
# attn all_layer & all_token fwd & bwd flops
seqlen_square_sum = 0
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen
attn_qkv_flops = 6 * seqlen_square_sum * head_dim * num_attention_heads * num_hidden_layers
# vit flops
images_seqlens = kargs.get("images_seqlens", None)
if images_seqlens is not None:
vit_flops = _estimate_qwen3_vit_flop(images_seqlens, config.vision_config)
else:
vit_flops = 0
# all_layer & all_token fwd & bwd flops
flops_all_token = dense_N_flops + attn_qkv_flops + vit_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def _estimate_qwen3_vit_flop(images_seqlens, config):
"""
Estimate the FLOPS of the vision encoder for Qwen3-VL
"""
if config is None:
return 0
tokens_sum = sum(images_seqlens)
num_heads = config.num_heads
depth = config.depth
dim = config.hidden_size
mlp_hidden_dim = config.intermediate_size
out_hidden_size = config.out_hidden_size
spatial_merge_size = config.spatial_merge_size
head_dim = dim // num_heads
# every vision token's patch_embed comes from a conv of (C, T, H, W) -> (dim,)
patch_embed_N = dim * config.in_channels * config.temporal_patch_size * config.patch_size * config.patch_size
# Qwen3 VL vision mlp does not use GLU, thus 2.
mlp_N = dim * mlp_hidden_dim * 2
attn_linear_N = dim * (4 * dim) # qkv and output proj
merger_N = (out_hidden_size + (dim * (spatial_merge_size**2))) * (dim * (spatial_merge_size**2))
# Qwen3 VL uses deep stack, one merger for every deepstack layer
deepstack_merger_N = merger_N * len(config.deepstack_visual_indexes)
# non-attn all_layer parm
dense_N = patch_embed_N + (mlp_N + attn_linear_N) * depth + deepstack_merger_N + merger_N
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * dense_N * tokens_sum
# In Qwen3 VL, full attention is used in all vision layers.
full_attn_layer_num = depth
# full attn layer & all_token fwd & bwd flops
seqlen_square_sum = 0
for seqlen in images_seqlens:
seqlen_square_sum += seqlen * seqlen
attn_qkv_flops = 12 * seqlen_square_sum * head_dim * num_heads * full_attn_layer_num
vit_flops = dense_N_flops + attn_qkv_flops
return vit_flops
def _estimate_deepseek_v3_flops(config, tokens_sum, batch_seqlens, delta_time):
hidden_size = config.hidden_size
vocab_size = config.vocab_size
moe_intermediate_size = config.moe_intermediate_size
num_hidden_layers = config.num_hidden_layers
first_k_dense_replace = config.first_k_dense_replace
num_query_heads = config.num_attention_heads
moe_num_expert = config.n_routed_experts
moe_topk = config.num_experts_per_tok
share_expert_num = config.n_shared_experts
# non-attn per layer parm
moe_gata_N = hidden_size * moe_num_expert
# moe has fc1_1, fc1_2 and fc2 using SwiGLU in ExpertMlp layer & shared experts
moe_expertmlp_N = hidden_size * moe_intermediate_size * (moe_topk + share_expert_num) * 3
# MLA attn
attn_linear_N = 0
q_head_dim = config.qk_nope_head_dim + config.qk_rope_head_dim
if config.q_lora_rank is None:
attn_linear_N += hidden_size * num_query_heads * q_head_dim
else:
attn_linear_N += hidden_size * config.q_lora_rank
attn_linear_N += num_query_heads * q_head_dim * config.q_lora_rank
attn_linear_N += hidden_size * (config.kv_lora_rank + config.qk_rope_head_dim)
attn_linear_N += num_query_heads * (q_head_dim - config.qk_rope_head_dim + config.v_head_dim) * config.kv_lora_rank
attn_linear_N += num_query_heads * config.v_head_dim * hidden_size
emd_and_lm_head_N = vocab_size * hidden_size * 2
# non-attn all_layer parm
moe_N = (
(moe_gata_N + moe_expertmlp_N + attn_linear_N) * (num_hidden_layers - first_k_dense_replace)
+ (hidden_size * config.intermediate_size * 3 + attn_linear_N) * first_k_dense_replace
+ emd_and_lm_head_N
)
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * moe_N * tokens_sum
# attn all_layer & all_token fwd & bwd flops
seqlen_square_sum = 0
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen * num_hidden_layers
# Core attention FLOPS for MLA with causal mask:
# Q @ K^T: 3 * 2 * seq^2 * q_head_dim * num_heads / 2 (causal)
# attn @ V: 3 * 2 * seq^2 * v_head_dim * num_heads / 2 (causal)
attn_qkv_flops = 3 * seqlen_square_sum * (q_head_dim + config.v_head_dim) * num_query_heads
# all_layer & all_token fwd & bwk flops
flops_all_token = dense_N_flops + attn_qkv_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def _estimate_qwen2_moe_flops(config, tokens_sum, batch_seqlens, delta_time):
hidden_size = config.hidden_size
vocab_size = config.vocab_size
num_hidden_layers = config.num_hidden_layers
num_key_value_heads = config.num_key_value_heads
num_attention_heads = config.num_attention_heads
moe_intermediate_size = config.moe_intermediate_size
moe_topk = config.num_experts_per_tok
num_experts = config.num_experts
head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
q_size = num_attention_heads * head_dim
k_size = num_key_value_heads * head_dim
v_size = num_key_value_heads * head_dim
# non-attn per layer parm
# gate + moe export
moe_mlp_N = hidden_size * moe_topk * moe_intermediate_size * 3 + hidden_size * num_experts
attn_linear_N = hidden_size * (q_size + k_size + v_size + num_attention_heads * head_dim)
emd_and_lm_head_N = vocab_size * hidden_size * 2
# non-attn all_layer parm
dense_N = (moe_mlp_N + attn_linear_N) * num_hidden_layers + emd_and_lm_head_N
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * dense_N * tokens_sum
# attn all_layer & all_token fwd & bwd flops
seqlen_square_sum = 0
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen
attn_qkv_flops = 6 * seqlen_square_sum * head_dim * num_attention_heads * num_hidden_layers
# all_layer & all_token fwd & bwd flops
flops_all_token = dense_N_flops + attn_qkv_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def _estimate_gemma3_flops(config, tokens_sum, batch_seqlens, delta_time):
hidden_size = config.hidden_size
vocab_size = config.vocab_size
num_hidden_layers = config.num_hidden_layers
num_key_value_heads = config.num_key_value_heads
num_attention_heads = config.num_attention_heads
intermediate_size = config.intermediate_size
head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
q_size = num_attention_heads * head_dim
k_size = num_key_value_heads * head_dim
v_size = num_key_value_heads * head_dim
# non-attn per layer parm
# Gemma3 uses GeGLU (gelu_pytorch_tanh), having 3 matrices in MLP (inherited from Gemma2MLP)
mlp_N = hidden_size * intermediate_size * 3
attn_linear_N = hidden_size * (q_size + k_size + v_size + num_attention_heads * head_dim)
emd_and_lm_head_N = vocab_size * hidden_size * 2
# non-attn all_layer parm
dense_N = (mlp_N + attn_linear_N) * num_hidden_layers + emd_and_lm_head_N
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * dense_N * tokens_sum
# attn all_layer & all_token fwd & bwd flops
# Gemma3 alternates between full and sliding window attention based on layer_types
seqlen_square_sum = 0
layer_types = getattr(config, "layer_types", None)
sliding_window = getattr(config, "sliding_window", 1024) # default 1024
# default pattern: every 6th layer is full
sliding_window_pattern = getattr(config, "sliding_window_pattern", 6)
# If layer_types is not provided, generate it based on sliding_window_pattern
if layer_types is None and sliding_window is not None and sliding_window_pattern is not None:
layer_types = [
"sliding_attention" if bool((i + 1) % sliding_window_pattern) else "full_attention"
for i in range(num_hidden_layers)
]
if layer_types:
# Calculate attention flops per layer based on attention type
for layer_idx in range(num_hidden_layers):
is_sliding = False
if layer_types and layer_idx < len(layer_types):
is_sliding = layer_types[layer_idx] == "sliding_attention"
for seqlen in batch_seqlens:
if is_sliding and sliding_window:
# Sliding window limits each token to attend to at most window_size tokens
effective_seqlen = min(seqlen, sliding_window)
seqlen_square_sum += seqlen * effective_seqlen
else:
# Full attention
seqlen_square_sum += seqlen * seqlen
else:
# If no layer_types config, assume all layers use full attention
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen
seqlen_square_sum *= num_hidden_layers
attn_qkv_flops = 6 * seqlen_square_sum * head_dim * num_attention_heads
# all_layer & all_token fwd & bwd flops
flops_all_token = dense_N_flops + attn_qkv_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def _estimate_apertus_flops(config, tokens_sum, batch_seqlens, delta_time):
hidden_size = config.hidden_size
vocab_size = config.vocab_size
num_hidden_layers = config.num_hidden_layers
num_key_value_heads = config.num_key_value_heads
num_attention_heads = config.num_attention_heads
intermediate_size = config.intermediate_size
head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
q_size = num_attention_heads * head_dim
k_size = num_key_value_heads * head_dim
v_size = num_key_value_heads * head_dim
# Apertus MLP with XIELU activation uses only 2 linear layers (up_proj, down_proj)
# No gate_proj for XIELU, unlike SwiGLU which has 3 layers
mlp_N = hidden_size * intermediate_size * 2
attn_linear_N = hidden_size * (q_size + k_size + v_size + num_attention_heads * head_dim)
# ApertusConfig has qk_norm defaulting to True.
# This adds params for q_norm (on H) and k_norm (on num_kv_heads * head_dim)
qk_norm_params_per_layer = hidden_size + num_key_value_heads * head_dim # q_norm + k_norm
emd_and_lm_head_N = vocab_size * hidden_size * 2
# non-attn all_layer params
dense_N = (mlp_N + attn_linear_N + qk_norm_params_per_layer) * num_hidden_layers + emd_and_lm_head_N
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * dense_N * tokens_sum
# attn all_layer & all_token fwd & bwd flops
seqlen_square_sum = 0
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen
attn_qkv_flops = 6 * seqlen_square_sum * head_dim * num_attention_heads * num_hidden_layers
# all_layer & all_token fwd & bwd flops
flops_all_token = dense_N_flops + attn_qkv_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def _estimate_gpt_oss_flops(config, tokens_sum, batch_seqlens, delta_time):
hidden_size = config.hidden_size
vocab_size = config.vocab_size
num_hidden_layers = config.num_hidden_layers
num_key_value_heads = config.num_key_value_heads
num_attention_heads = config.num_attention_heads
# MoE params
moe_intermediate_size = config.intermediate_size
num_experts = config.num_local_experts
num_experts_per_tok = config.num_experts_per_tok
mlp_matrices = 3
# Head dim
head_dim = getattr(config, "head_dim", hidden_size // num_attention_heads)
q_size = num_attention_heads * head_dim
k_size = num_key_value_heads * head_dim
v_size = num_key_value_heads * head_dim
# 1. Attention Block (GQA)
attn_linear_N = hidden_size * (q_size + k_size + v_size + num_attention_heads * head_dim)
# 2. MLP / MoE Block
# Gate network
moe_gate_N = hidden_size * num_experts
# Expert forward calculation, Active parameters: mlp_matrices * H * I * num_experts_per_tok
moe_expert_N = hidden_size * moe_intermediate_size * mlp_matrices * num_experts_per_tok
moe_mlp_N = moe_gate_N + moe_expert_N
emd_and_lm_head_N = vocab_size * hidden_size * 2
# Total non-attn params per layer * layers + embeddings
# (moe_mlp_N + attn_linear_N) * layers
dense_N = (moe_mlp_N + attn_linear_N) * num_hidden_layers + emd_and_lm_head_N
# FLOPs for dense part (fwd + bwd = 6 * N)
dense_N_flops = 6 * dense_N * tokens_sum
# 3. Attention Matrix FLOPs
seqlen_square_sum = 0
# Handle sliding window attention
layer_types = getattr(config, "layer_types", None)
sliding_window = getattr(config, "sliding_window", 128)
if layer_types:
for layer_type in layer_types:
is_sliding = layer_type == "sliding_attention"
for seqlen in batch_seqlens:
if is_sliding and sliding_window:
# Sliding window limits each token to attend to at most window_size tokens
effective_seqlen = min(seqlen, sliding_window)
seqlen_square_sum += seqlen * effective_seqlen
else:
# Full attention
seqlen_square_sum += seqlen * seqlen
else:
# Default to full attention for all layers
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen
seqlen_square_sum *= num_hidden_layers
attn_qkv_flops = 6 * seqlen_square_sum * head_dim * num_attention_heads
# Total FLOPs
flops_all_token = dense_N_flops + attn_qkv_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def _estimate_unknown_flops(config, tokens_sum, batch_seqlens, delta_time):
return 0
# Maps HF `config.model_type` -> FLOPs estimator. Architectures sharing the
# dense GQA transformer layout reuse the qwen2 estimator; MoE and VL variants
# get dedicated estimators.
ESTIMATE_FUNC = {
    "qwen2": _estimate_qwen2_flops,
    "llama": _estimate_qwen2_flops,
    "qwen2_moe": _estimate_qwen2_moe_flops,
    "qwen2_vl": _estimate_qwen2_flops,
    "qwen2_5_vl": _estimate_qwen2_flops,
    "qwen3": _estimate_qwen2_flops,
    "qwen3_moe": _estimate_qwen2_moe_flops,
    "qwen3_vl": _estimate_qwen3_vl_flops,
    "qwen3_vl_moe": _estimate_qwen3_vl_moe_flops,
    "deepseek_v3": _estimate_deepseek_v3_flops,
    "minicpmv": _estimate_qwen2_flops,
    "minicpmo": _estimate_qwen2_flops,
    "mistral": _estimate_qwen2_flops,
    "gemma3_text": _estimate_gemma3_flops,
    "seed_oss": _estimate_qwen2_flops,
    "apertus": _estimate_apertus_flops,
    "glm4v": _estimate_qwen2_flops,
    "gpt_oss": _estimate_gpt_oss_flops,
    "mimo": _estimate_qwen2_flops,
}
class FlopsCounter:
    """
    Used to count mfu during training loop

    Example:
        flops_counter = FlopsCounter(config)
        flops_achieved, flops_promised = flops_counter.estimate_flops(tokens_list, delta_time)
    """

    def __init__(self, config: PretrainedConfig):
        # Unknown model types fall back to _estimate_unknown_flops, which
        # reports zero achieved FLOPS; warn the user up front.
        VALID_CONFIG_TYPE = ESTIMATE_FUNC.keys()
        if config.model_type not in VALID_CONFIG_TYPE:
            print(
                f"Only support config type of {VALID_CONFIG_TYPE}, but got {config.model_type}. MFU will always be "
                f"zero."
            )
        self.config = config

    # TODO: actually we can make this a static method
    def estimate_flops(self, batch_seqlens, delta_time, **kargs):
        """
        Estimate the FLOPS based on the number of valid tokens in the current batch and the time taken.

        Args:
            batch_seqlens (List[int]): A list where each element represents the number of valid tokens in the
                current batch.
            delta_time (float): The time taken to process the batch, in seconds.

        Returns:
            estimated_flops (float): The estimated FLOPS based on the input tokens and time.
            promised_flops (float): The expected FLOPS of the current device.
        """
        tokens_sum = sum(batch_seqlens)
        estimator = ESTIMATE_FUNC.get(self.config.model_type, _estimate_unknown_flops)
        # Only forward **kargs to estimators that accept them (e.g. VL models
        # taking images_seqlens).
        accepts_kwargs = any(
            p.kind == inspect.Parameter.VAR_KEYWORD for p in inspect.signature(estimator).parameters.values()
        )
        if accepts_kwargs:
            estimated_flops = estimator(self.config, tokens_sum, batch_seqlens, delta_time, **kargs)
        else:
            estimated_flops = estimator(self.config, tokens_sum, batch_seqlens, delta_time)
        return estimated_flops, get_device_flops()
| verl__utils__flops_counter.py |
#!/usr/bin/env python
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""File-system agnostic IO APIs"""
import hashlib
import os
import shutil
import tempfile
try:
from hdfs_io import copy, exists, makedirs # for internal use only
except ImportError:
from .hdfs_io import copy, exists, makedirs
__all__ = ["copy", "exists", "makedirs"]
# URI scheme prefix that marks a path as living on HDFS.
_HDFS_PREFIX = "hdfs://"


def is_non_local(path):
    """Return True when *path* refers to a non-local (HDFS) resource.

    Args:
        path (str): The path to check.

    Returns:
        bool: True if the path carries the ``hdfs://`` prefix, False otherwise.
    """
    return path.startswith(_HDFS_PREFIX)
def md5_encode(path: str) -> str:
    """Return the hexadecimal MD5 digest of *path*.

    Used to derive unique, filesystem-safe identifiers for cache
    directories and lock files.

    Args:
        path (str): The path to encode.

    Returns:
        str: The hex MD5 hash of the path string.
    """
    digest = hashlib.md5(path.encode())
    return digest.hexdigest()
def get_local_temp_path(hdfs_path: str, cache_dir: str) -> str:
    """Generate a unique local cache path for an HDFS resource.

    The source path is hashed (MD5) into a subdirectory name under
    ``cache_dir`` so distinct sources never collide; the returned path keeps
    the original basename inside that subdirectory. The subdirectory is
    created if it does not exist.

    Args:
        hdfs_path (str): Source HDFS path to be cached.
        cache_dir (str): Local directory for storing cached files.

    Returns:
        str: ``{cache_dir}/{md5(hdfs_path)}/{basename(hdfs_path)}``.
    """
    hashed_dir = os.path.join(cache_dir, md5_encode(hdfs_path))
    os.makedirs(hashed_dir, exist_ok=True)
    return os.path.join(hashed_dir, os.path.basename(hdfs_path))
def verify_copy(src: str, dest: str) -> bool:
    """
    Verify that *dest* is a faithful copy of *src* by comparing file sizes and,
    for directories, the complete relative file/directory structure.

    Returns:
        bool: True if the copy is verified, False otherwise.
    """
    if not (os.path.exists(src) and os.path.exists(dest)):
        return False
    if os.path.isfile(src) != os.path.isfile(dest):
        return False

    # Single file: size comparison is the whole check.
    if os.path.isfile(src):
        return os.path.getsize(src) == os.path.getsize(dest)

    # Directory: walk the source, ensure each directory is mirrored on the
    # destination, and collect both relative entry sets for comparison.
    src_entries = set()
    dest_entries = set()
    for root, _unused_dirs, _unused_files in os.walk(src):
        rel = os.path.relpath(root, src)
        mirrored_root = dest if rel == "." else os.path.join(dest, rel)
        if not os.path.exists(mirrored_root):
            return False
        for child in os.listdir(root):
            src_entries.add(os.path.relpath(os.path.join(root, child), src))
        for child in os.listdir(mirrored_root):
            dest_entries.add(os.path.relpath(os.path.join(mirrored_root, child), dest))

    if src_entries != dest_entries:
        return False

    # Same structure on both sides: compare entry types and file sizes.
    for rel in src_entries:
        src_child = os.path.join(src, rel)
        dest_child = os.path.join(dest, rel)
        if os.path.isdir(src_child) != os.path.isdir(dest_child):
            return False
        if os.path.isfile(src_child) and os.path.getsize(src_child) != os.path.getsize(dest_child):
            return False
    return True
def copy_to_shm(src: str):
    """
    Load the model into /dev/shm to make the process of loading the model multiple times more efficient.

    Args:
        src (str): Local source path (file or directory) to mirror into shared memory.

    Returns:
        str: Destination path ``/dev/shm/verl-cache/{md5(abspath(src))}/{basename(src)}``.
    """
    shm_model_root = "/dev/shm/verl-cache/"
    # Hash the normalized absolute path so different sources never collide.
    src_abs = os.path.abspath(os.path.normpath(src))
    dest = os.path.join(shm_model_root, hashlib.md5(src_abs.encode("utf-8")).hexdigest())
    os.makedirs(dest, exist_ok=True)
    dest = os.path.join(dest, os.path.basename(src_abs))
    # Reuse an existing shm copy only when verify_copy confirms it is complete.
    if os.path.exists(dest) and verify_copy(src, dest):
        # inform user and depends on him
        print(
            f"[WARNING]: The memory model path {dest} already exists. If it is not you want, please clear it and "
            f"restart the task."
        )
    else:
        if os.path.isdir(src):
            # dirs_exist_ok allows re-populating a partially copied cache entry.
            shutil.copytree(src, dest, symlinks=False, dirs_exist_ok=True)
        else:
            shutil.copy2(src, dest)
    return dest
def _record_directory_structure(folder_path):
record_file = os.path.join(folder_path, ".directory_record.txt")
with open(record_file, "w") as f:
for root, dirs, files in os.walk(folder_path):
for dir_name in dirs:
relative_dir = os.path.relpath(os.path.join(root, dir_name), folder_path)
f.write(f"dir:{relative_dir}\n")
for file_name in files:
if file_name != ".directory_record.txt":
relative_file = os.path.relpath(os.path.join(root, file_name), folder_path)
f.write(f"file:{relative_file}\n")
return record_file
def _check_directory_structure(folder_path, record_file):
if not os.path.exists(record_file):
return False
existing_entries = set()
for root, dirs, files in os.walk(folder_path):
for dir_name in dirs:
relative_dir = os.path.relpath(os.path.join(root, dir_name), folder_path)
existing_entries.add(f"dir:{relative_dir}")
for file_name in files:
if file_name != ".directory_record.txt":
relative_file = os.path.relpath(os.path.join(root, file_name), folder_path)
existing_entries.add(f"file:{relative_file}")
with open(record_file) as f:
recorded_entries = set(f.read().splitlines())
return existing_entries == recorded_entries
def copy_to_local(
    src: str, cache_dir=None, filelock=".file.lock", verbose=False, always_recopy=False, use_shm: bool = False
) -> str:
    """Copy files/directories from HDFS to local cache with validation.

    Args:
        src (str): Source path - HDFS path (hdfs://...), local filesystem path, or Hugging Face model ID
        cache_dir (str, optional): Local directory for cached files. Uses system tempdir if None
        filelock (str): Base name for file lock. Defaults to ".file.lock"
        verbose (bool): Enable copy operation logging. Defaults to False
        always_recopy (bool): Force fresh copy ignoring cache. Defaults to False
        use_shm (bool): Enable shared memory copy. Defaults to False

    Returns:
        str: Local filesystem path to copied resource
    """
    # Resolve HDFS sources into the local cache (local paths pass through).
    local_path = copy_local_path_from_hdfs(src, cache_dir, filelock, verbose, always_recopy)

    # A resolved path that does not exist on disk may be a Hugging Face model
    # ID; only attempt the download when we are about to stage into shm.
    if use_shm and isinstance(local_path, str) and not os.path.exists(local_path):
        try:
            from huggingface_hub import snapshot_download

            resolved = snapshot_download(local_path)
            if isinstance(resolved, str) and os.path.exists(resolved):
                local_path = resolved
        except ImportError:
            pass
        except Exception as e:
            print(f"WARNING: Failed to download model from Hugging Face: {e}")

    if not use_shm:
        return local_path
    # Stage into /dev/shm so repeated loads are served from memory.
    return copy_to_shm(local_path)
def copy_local_path_from_hdfs(
    src: str, cache_dir=None, filelock=".file.lock", verbose=False, always_recopy=False
) -> str:
    """Deprecated. Please use copy_to_local instead.

    Copies an HDFS path into a local cache directory; a local *src* is returned
    unchanged.

    Args:
        src (str): HDFS path (``hdfs://...``) or local path. Must not end with "/".
        cache_dir (str, optional): Local cache root; system temp dir when None.
        filelock (str): Base lock-file name. NOTE: for HDFS sources this value is
            replaced by a per-source lock name derived from ``md5(src)``.
        verbose (bool): Log copy operations when True.
        always_recopy (bool): Delete any cached copy and re-download when True.

    Returns:
        str: Local path of the cached copy, or ``src`` itself if already local.
    """
    from filelock import FileLock

    assert src[-1] != "/", f"Make sure the last char in src is not / because it will cause error. Got {src}"
    if is_non_local(src):
        # download from hdfs to local
        if cache_dir is None:
            # get a temp folder
            cache_dir = tempfile.gettempdir()
        os.makedirs(cache_dir, exist_ok=True)
        assert os.path.exists(cache_dir)
        local_path = get_local_temp_path(src, cache_dir)
        # get a specific lock per source, so concurrent downloads of different
        # files do not serialize on one shared lock
        filelock = md5_encode(src) + ".lock"
        lock_file = os.path.join(cache_dir, filelock)
        with FileLock(lock_file=lock_file):
            if always_recopy and os.path.exists(local_path):
                if os.path.isdir(local_path):
                    shutil.rmtree(local_path, ignore_errors=True)
                else:
                    os.remove(local_path)
            if not os.path.exists(local_path):
                if verbose:
                    print(f"Copy from {src} to {local_path}")
                copy(src, local_path)
                if os.path.isdir(local_path):
                    # remember the layout so later calls can detect partial copies
                    _record_directory_structure(local_path)
            elif os.path.isdir(local_path):
                # always_recopy=False, local path exists, and it is a folder: check whether there is anything missed
                record_file = os.path.join(local_path, ".directory_record.txt")
                if not _check_directory_structure(local_path, record_file):
                    if verbose:
                        print(f"Recopy from {src} to {local_path} due to missing files or directories.")
                    shutil.rmtree(local_path, ignore_errors=True)
                    copy(src, local_path)
                    _record_directory_structure(local_path)
        return local_path
    else:
        return src
def local_mkdir_safe(path):
    """Create *path* (and parents) in a multiprocess-safe way.

    A file lock in the system temp directory serializes concurrent creators.
    If the lock cannot be acquired the directory is still created best-effort,
    since ``os.makedirs(..., exist_ok=True)`` is idempotent.

    Args:
        path (str): Directory to create; relative paths are resolved against
            the current working directory.

    Returns:
        str: The absolute path that was (or already had been) created.
    """
    from filelock import FileLock

    if not os.path.isabs(path):
        path = os.path.join(os.getcwd(), path)

    # Hash the path into a short lock-file name to avoid over-long file names.
    lock_name = f"ckpt_{hash(path) & 0xFFFFFFFF:08x}.lock"
    lock_path = os.path.join(tempfile.gettempdir(), lock_name)

    try:
        with FileLock(lock_path, timeout=60):  # Add timeout
            os.makedirs(path, exist_ok=True)
    except Exception as e:
        print(f"Warning: Failed to acquire lock for {path}: {e}")
        # Even if the lock is not acquired, try to create the directory
        os.makedirs(path, exist_ok=True)

    return path
| verl__utils__fs.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import itertools
import json
import math
import os
from abc import ABC
from collections import OrderedDict
from contextlib import contextmanager, nullcontext
from typing import cast
import torch
import torch.distributed as dist
import torch.nn as nn
from packaging import version
from torch.distributed import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._runtime_utils import _lazy_init
from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
from transformers.trainer_pt_utils import get_module_class_from_name
from verl.utils.device import get_device_id, get_device_name, get_torch_device
from verl.utils.model import check_exclude_modules, check_target_modules
if version.parse(torch.__version__) >= version.parse("2.6"):
from torch.distributed.fsdp import CPUOffloadPolicy, FSDPModule, MixedPrecisionPolicy, fully_shard
from torch.distributed.fsdp._fully_shard._fsdp_init import _get_post_forward_mesh_info
from torch.distributed.tensor import Shard
fully_shard_module = torch.distributed.fsdp._fully_shard._fully_shard
elif version.parse(torch.__version__) >= version.parse("2.4"):
from torch.distributed._composable.fsdp import CPUOffloadPolicy, FSDPModule, MixedPrecisionPolicy, fully_shard
fully_shard_module = torch.distributed._composable.fsdp
else:
fully_shard, MixedPrecisionPolicy, FSDPModule, CPUOffloadPolicy, fully_shard_module = None, None, None, None, None
def init_fn(x: torch.nn.Module):
    """Materialize module storage on non-zero ranks.

    Rank 0 keeps its existing weights; every other rank allocates empty storage
    on the current device (``recurse=False`` leaves child modules untouched).
    NOTE(review): presumably the real values arrive later via a rank-0 sync —
    confirm with the FSDP wrapping call site.

    Args:
        x (torch.nn.Module): Module to (possibly) materialize.

    Returns:
        torch.nn.Module: The same module object.
    """
    if torch.distributed.get_rank() != 0:
        x = x.to_empty(device=get_device_id(), recurse=False)
        get_torch_device().empty_cache()
    return x
def get_init_weight_context_manager(use_meta_tensor=True, mesh: DeviceMesh = None):
    """Select the context manager used for model weight initialization.

    With ``use_meta_tensor=True``, only the "first" rank (global rank 0, or the
    rank whose last mesh coordinate is 0) initializes real weights on CPU; all
    other ranks create empty meta tensors via ``accelerate.init_empty_weights``.
    Otherwise every rank initializes on CPU.
    """
    from accelerate import init_empty_weights

    def cpu_init_weights():
        return torch.device("cpu")

    if not use_meta_tensor:
        return cpu_init_weights

    if mesh is None:
        is_init_rank = torch.distributed.get_rank() == 0
    else:
        is_init_rank = mesh.get_coordinate()[-1] == 0
    return cpu_init_weights if is_init_rank else init_empty_weights
# Copyright 2020-present the HuggingFace Inc. team.
# Adapted from https://github.com/huggingface/transformers/src/transformers/trainer.py
def get_fsdp_wrap_policy(module, config=None, is_lora=False):
    """Get FSDP wrap policy for the module.

    Args:
        module: The module to get wrap policy for
        config: Configuration for wrap policy
        is_lora: Whether to enable lambda policy for LoRA modules

    Returns:
        A callable auto-wrap policy for FSDP, or None when wrapping is
        disabled or no constituent policy applies.
    """
    if config is None:
        config = {}

    # NOTE: This is a temporary workaround to be compatible with the OmegaConf & dataclass. We will remove this
    # once we have make all config in verl from OmegaConf to data class.
    def _get_attr(attr_name, default_value=None):
        # OmegaConf/dict configs expose .get(); dataclass configs need attribute access.
        if hasattr(config, "get"):
            return config.get(attr_name, default_value)
        else:
            return config.__getattribute__(attr_name)

    if _get_attr("disable", False):
        return None

    # Fall back to the model's own _no_split_modules when the config does not
    # name transformer layer classes explicitly.
    default_transformer_cls_names_to_wrap = getattr(module, "_no_split_modules", None)
    fsdp_transformer_layer_cls_to_wrap = _get_attr(
        "transformer_layer_cls_to_wrap", default_transformer_cls_names_to_wrap
    )
    min_num_params = _get_attr("min_num_params", 0)

    auto_wrap_policy = None

    policies = []

    from torch.distributed.fsdp.wrap import _or_policy, lambda_auto_wrap_policy

    # Add lambda policy for LoRA modules if is_lora is True
    if is_lora:

        def lambda_policy_fn(module):
            # Wrap leaf modules that own a trainable weight (e.g. LoRA adapter layers).
            return bool(
                len(list(module.named_children())) == 0
                and getattr(module, "weight", None) is not None
                and module.weight.requires_grad
            )

        lambda_policy = functools.partial(lambda_auto_wrap_policy, lambda_fn=lambda_policy_fn)
        policies.append(lambda_policy)

    # The size-based policy takes precedence over the transformer-class policy.
    if min_num_params > 0:
        size_policy = functools.partial(size_based_auto_wrap_policy, min_num_params=min_num_params)
        policies.append(size_policy)
    elif fsdp_transformer_layer_cls_to_wrap is not None:
        transformer_cls_to_wrap = set()
        for layer_class in fsdp_transformer_layer_cls_to_wrap:
            transformer_cls = get_module_class_from_name(module, layer_class)
            if transformer_cls is None:
                raise Exception("Could not find the transformer layer class to wrap in the model.")
            else:
                transformer_cls_to_wrap.add(transformer_cls)

        transformer_policy = functools.partial(
            transformer_auto_wrap_policy,
            transformer_layer_cls=transformer_cls_to_wrap,
        )
        policies.append(transformer_policy)

    if len(policies) > 0:
        # A module is wrapped when ANY of the collected policies matches.
        auto_wrap_policy = functools.partial(_or_policy, policies=policies)

    return auto_wrap_policy
@torch.no_grad()
def offload_fsdp_model_to_cpu(model: FSDP, empty_cache: bool = True):
    """Move all FSDP flat-parameters to CPU to free accelerator memory.

    Dispatches to the FSDP2 path when applicable; otherwise operates in place
    on the FSDP1 flat-param handles of the root module.

    Args:
        model (FSDP): Root FSDP-wrapped module.
        empty_cache (bool): Also empty the device cache afterwards.
    """
    if fsdp_version(model) == 2:
        offload_fsdp2_model_to_cpu(model, empty_cache)
        return
    assert isinstance(model, FSDP)
    # lazy init FSDP model
    _lazy_init(model, model)
    assert model._is_root, "Only support root model offloading to CPU"
    for handle in model._all_handles:
        if handle._offload_params:
            # This handle's CPU offload is already managed by FSDP itself.
            continue
        flat_param = handle.flat_param
        # Sanity check: .data and ._local_shard share storage but are distinct
        # tensor objects (accessing .data yields a fresh view each time).
        assert (
            flat_param.data.data_ptr() == flat_param._local_shard.data_ptr()
            and id(flat_param.data) != id(flat_param._local_shard)
            and flat_param.data.size() == flat_param._local_shard.size()
        )
        handle.flat_param_to(torch.device("cpu"), non_blocking=True)
        # the following still keeps id(._local_shard) != id(.data)
        flat_param._local_shard = flat_param.data
        assert id(flat_param._local_shard) != id(flat_param.data)
    if empty_cache:
        get_torch_device().empty_cache()
@torch.no_grad()
def offload_fsdp2_model_to_cpu(model, empty_cache: bool = True):
    """Move an FSDP2 model to CPU, optionally emptying the device cache."""
    model.to("cpu")
    if empty_cache:
        get_torch_device().empty_cache()
@torch.no_grad()
def load_fsdp_model_to_gpu(model: FSDP):
    """Move all FSDP flat-parameters back onto the current accelerator.

    Counterpart of ``offload_fsdp_model_to_cpu``; dispatches to the FSDP2 path
    when applicable.

    Args:
        model (FSDP): Root FSDP-wrapped module.
    """
    if fsdp_version(model) == 2:
        load_fsdp2_model_to_gpu(model)
        return
    assert isinstance(model, FSDP)
    # lazy init FSDP model
    _lazy_init(model, model)
    assert model._is_root, "Only support root model loading to GPU"
    device_id = get_device_id()
    for handle in model._all_handles:
        if handle._offload_params:
            # This handle's placement is already managed by FSDP's own offload.
            continue
        flat_param = handle.flat_param
        handle.flat_param_to(torch.device(f"{get_device_name()}:{device_id}"), non_blocking=True)
        # the following still keeps id(._local_shard) != id(.data)
        flat_param._local_shard = flat_param.data
@torch.no_grad()
def load_fsdp2_model_to_gpu(model):
    """Move an FSDP2 model onto the current accelerator device."""
    model.to(get_device_id())
@torch.no_grad()
def offload_fsdp_optimizer(optimizer):
    """Move every tensor-valued optimizer state to CPU (non-blocking).

    No-op when the optimizer has no state yet (i.e. before the first step).
    """
    if not optimizer.state:
        return
    for group in optimizer.param_groups:
        for p in group["params"]:
            param_state = optimizer.state[p]
            for state_name, item in param_state.items():
                if torch.is_tensor(item):
                    param_state[state_name] = item.to("cpu", non_blocking=True)
@torch.no_grad()
def load_fsdp_optimizer(optimizer, device_id):
    """Move every tensor-valued optimizer state onto *device_id* (non-blocking).

    No-op when the optimizer has no state yet (i.e. before the first step).
    """
    if not optimizer.state:
        return
    for group in optimizer.param_groups:
        for p in group["params"]:
            param_state = optimizer.state[p]
            for state_name, item in param_state.items():
                if torch.is_tensor(item):
                    param_state[state_name] = item.to(device_id, non_blocking=True)
@contextmanager
def meta_device_init():
    """
    Create model parameters with meta device.

    Note buffers in model will still be initialized in default device (e.g., CPU),
    since the buffers can be non-persistent and filled with expected values that can
    NOT be captured in meta device.
    """
    device = torch.device("meta")
    old_register_parameter = nn.Module.register_parameter
    # Track parameters already converted so shared (tied) parameters are only
    # converted once.
    registered = set()

    def register_empty_parameter(module, name, param):
        old_register_parameter(module, name, param)
        # we will skip register shared parameters as it
        # is already registered previously
        if param is not None and param not in registered:
            param_cls = type(module._parameters[name])
            # carry over the parameter subclass's instance attributes when
            # re-creating it on the meta device
            kwargs = module._parameters[name].__dict__
            kwargs["requires_grad"] = param.requires_grad
            module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
            registered.add(module._parameters[name])

    try:
        # Patch register_parameter globally for the duration of the context.
        nn.Module.register_parameter = register_empty_parameter
        yield
    finally:
        registered.clear()
        nn.Module.register_parameter = old_register_parameter
def parallel_load_safetensors(filepath):
    """
    Parallel load safetensors from huggingface checkpoint

    Huggingface checkpoint contains:

    - config.json: a json file for model configuration
    - model.safetensor.index.json: a json file for safetensors (parameters & buffers) index
    - model-000x-of-ooxx.safetensors: a binary file for safetensors (parameters & buffers) chunks

    Or (when model is small),

    - model.safetensors: a binary file for all parameters and buffers

    Each rank will own a part of model chunks and load them directly into GPU memory.

    Returns:
        dict: param name -> loaded tensor for names owned by this rank, or the
        int rank that holds the tensor for every other name.
    """
    from safetensors.torch import load_file

    safetensors2param = {}  # checkpoint filename -> list of param names stored in it

    index_file = os.path.join(filepath, "model.safetensors.index.json")
    if os.path.exists(index_file):
        index = json.load(open(index_file, "rb"))
        for param_name, filename in index["weight_map"].items():
            safetensors2param.setdefault(filename, []).append(param_name)
    else:
        # in this case, the model is small and we can load it all at once
        param_file = os.path.join(filepath, "model.safetensors")
        assert os.path.exists(param_file), f"Cannot find {param_file}"
        states = load_file(param_file)
        for param_name in states:
            safetensors2param.setdefault("model.safetensors", []).append(param_name)
        # only the names are needed here; release the tensors immediately
        del states

    total_files = len(safetensors2param)
    ckpt_chunks = sorted(safetensors2param.keys())
    world_size = dist.get_world_size()
    # static contiguous partition: rank r owns files [r*size, (r+1)*size)
    size = int(math.ceil(total_files / world_size))
    ckpt_chunks = [ckpt_chunks[rank * size : rank * size + size] for rank in range(world_size)]

    shard_states = {}
    device = get_device_id()
    for rank, files in enumerate(ckpt_chunks):
        if rank == dist.get_rank():
            for file in files:
                file = os.path.join(filepath, file)
                states = load_file(file, device=device)
                # print(f"rank {rank} loading {file}...")
                shard_states.update(states)
        else:
            # record the owning rank so we later know whom to broadcast from
            for file in files:
                for param_name in safetensors2param[file]:
                    shard_states[param_name] = rank
    return shard_states
def parallel_init_module_fn(module: torch.nn.Module, shard_states: dict[str, torch.nn.Parameter]):
    """
    Generate a function to initialize sub-modules in the `module` with `shard_states`
    from huggingface checkpoint.

    Args:
        module (torch.nn.Module): the global module to be initialized
        shard_states (Dict[str, torch.nn.Parameter]): the shard states from huggingface checkpoint
            (tensor for locally-owned names, owning rank as int otherwise)

    Returns:
        init_fn (Callable): a function to initialize sub-modules in the `module` with `shard_states`
    """
    # Map each parameter/buffer object to all fully-qualified names it appears
    # under; shared (tied) tensors appear more than once.
    state2fqn = {}
    for name, state in itertools.chain(
        module.named_parameters(remove_duplicate=False), module.named_buffers(remove_duplicate=False)
    ):
        state2fqn.setdefault(state, []).append(name)
    # remove standalone parameters and buffers
    shared = {s for s, names in state2fqn.items() if len(names) > 1}
    materialized_states = {}

    @torch.no_grad()
    def create_and_sync_state(param_name, state, is_param):
        # Materialize one state on-device; the rank recorded in shard_states
        # broadcasts the loaded tensor while all other ranks receive into
        # freshly allocated storage.
        assert param_name in shard_states, f"{param_name} not loaded"
        device = get_device_id()
        if is_param:
            param = torch.nn.Parameter(torch.empty_like(state.data, device=device), requires_grad=state.requires_grad)
        else:  # buffer
            param = torch.empty_like(state.data, device=device)
        loaded = shard_states[param_name]
        if isinstance(loaded, torch.nn.Parameter | torch.Tensor):
            # NOTE: loaded.dtype can be different with param.dtype
            param.data.copy_(loaded.data)
            dist.broadcast(param.data, src=dist.get_rank())
        else:
            assert isinstance(loaded, int)  # the rank that holds the state
            dist.broadcast(param.data, src=loaded)
        # drop the checkpoint copy once the state is synced everywhere
        shard_states.pop(param_name)
        del loaded
        return param

    def init_fn(sub_mod: torch.nn.Module, recurse: bool = True):
        param_and_buffers = tuple(sub_mod.named_parameters(recurse=False)) + tuple(sub_mod.named_buffers(recurse=False))
        # param_and_buffers = sorted(sub_mod.named_parameters(recurse=False), key=lambda x: x[0])
        for name, state in param_and_buffers:
            if not state.is_meta:
                # already materialized (e.g. a CPU-initialized buffer)
                continue
            is_param = name in sub_mod._parameters
            fqn = state2fqn[state].pop(0)
            # non-persistent buffers will not be saved in state dict, we can safely skip it
            if (not is_param) and fqn not in shard_states:
                if state.is_meta:
                    raise RuntimeError(
                        f"find a non-persistent buffer ({fqn}) initiated with device meta. Such buffer is not saved "
                        f"in checkpoint and user should guarantee to init in CPU / GPU device."
                    )
                continue
            # for shared parameter, we get it from the first time it is created
            if state in shared:
                if state not in materialized_states:
                    materialized_states[state] = create_and_sync_state(fqn, state, is_param)
                else:
                    # later aliases reuse the already-materialized tensor
                    if fqn in shard_states:
                        shard_states.pop(fqn)
                materialize_state = materialized_states[state]
            # for not shared parameter, we create it directly
            else:
                materialize_state = create_and_sync_state(fqn, state, is_param)
            if is_param:
                sub_mod._parameters[name] = materialize_state
            else:
                sub_mod._buffers[name] = materialize_state
        if recurse:
            for module in sub_mod.children():
                init_fn(module, recurse=True)

        # for debug
        # if len(shard_states) == 0: print("clear")
        return sub_mod

    return init_fn
def fsdp_version(model):
    """Return the FSDP flavor wrapping *model*.

    Returns:
        int: 1 for FSDP1 (``FullyShardedDataParallel``), 2 for FSDP2
        (``FSDPModule``), 0 when the module is not FSDP-wrapped.
    """
    if isinstance(model, FSDP):
        return 1
    # FSDPModule is None on torch < 2.4 (see the conditional imports at the
    # top of this file); guard so isinstance() does not raise TypeError there.
    elif FSDPModule is not None and isinstance(model, FSDPModule):
        return 2
    else:
        return 0
def get_fsdp_state_ctx(model, state_type, state_cfg, optim_cfg):
    """Return the state-dict context manager appropriate for *model*.

    FSDP1 requires ``FSDP.state_dict_type`` to select the state-dict flavor;
    any other module (FSDP2 or unwrapped) needs no special context.
    """
    if fsdp_version(model) != 1:
        return nullcontext()
    return FSDP.state_dict_type(model, state_type, state_cfg, optim_cfg)
def get_fsdp_full_state_dict(model: torch.nn.Module, offload_to_cpu: bool = True, rank0_only: bool = True):
    """
    Get the full state dict from an FSDP model.

    Args:
        model (torch.nn.Module): The FSDP model to get state dict from
        offload_to_cpu (bool, optional): Whether to offload the state dict to CPU. Defaults to True.
        rank0_only (bool, optional): Whether to only get state dict on rank 0. Defaults to True.

    Returns:
        dict: The full state dict of the model

    Raises:
        NotImplementedError: If the FSDP version is unknown
    """
    fsdp_ver = fsdp_version(model)
    if fsdp_ver == 1:
        from torch.distributed.fsdp import FullStateDictConfig, StateDictType

        state_dict_config = FullStateDictConfig(offload_to_cpu=offload_to_cpu, rank0_only=rank0_only)
        with get_fsdp_state_ctx(
            model, state_type=StateDictType.FULL_STATE_DICT, state_cfg=state_dict_config, optim_cfg=None
        ):
            state_dict = model.state_dict()
        return state_dict
    elif fsdp_ver == 2:
        from torch.distributed.checkpoint.state_dict import StateDictOptions, get_model_state_dict

        # broadcast_from_rank0 is the FSDP2 analogue of "not rank0_only"
        state_dict_config = StateDictOptions(
            full_state_dict=True, cpu_offload=offload_to_cpu, broadcast_from_rank0=not rank0_only
        )
        state_dict = get_model_state_dict(model, options=state_dict_config)
        return state_dict
    else:
        # BUG FIX: the message previously interpolated the function object
        # `fsdp_version` instead of the computed version number.
        raise NotImplementedError(f"Unknown FSDP version {fsdp_ver}")
def fsdp2_load_full_state_dict(model: torch.nn.Module, full_state: dict, device_mesh=None, cpu_offload=None):
    """
    Loads the full state dict (could be only on rank 0) into the sharded model. This is done by broadcasting the
    parameters from rank 0 to all other ranks. This function modifies the model in-place.

    Args:
        model (`torch.nn.Module`): The model to load the state dict into
        full_state (`dict`): The full state dict to load, can only be on rank 0
        device_mesh: unused in this implementation; kept for call-site compatibility
        cpu_offload: any non-None value means "offload the loaded model back to CPU"
    """
    if version.parse(torch.__version__) >= version.parse("2.7.0"):
        from torch.distributed.checkpoint.state_dict import StateDictOptions, set_model_state_dict
    else:
        # official torch 2.6.0 set_model_state_dict API leads to OOM
        # use torch 2.7.0 copy from verl/third_party/torch/distributed/checkpoint
        from verl.third_party.torch.distributed.checkpoint.state_dict import StateDictOptions, set_model_state_dict

    # To broadcast, it needs to be instantiated in the GPU.
    if dist.get_rank() == 0:
        model = model.to(device=get_device_id(), non_blocking=True)
    else:
        model = model.to_empty(device=get_device_id())

    # collapse cpu_offload to a bool flag
    cpu_offload = cpu_offload is not None
    options = StateDictOptions(full_state_dict=True, cpu_offload=cpu_offload, broadcast_from_rank0=True)
    set_model_state_dict(model, full_state, options=options)

    # rotary_emb is not in state_dict, so we need to broadcast it manually
    for name, buf in model.named_buffers():
        dist.broadcast(buf, src=0)

    if cpu_offload:
        model.to("cpu", non_blocking=True)
        # NOTE(review): buffers are moved back to the device even under CPU
        # offload — presumably required by downstream compute; confirm.
        for buf in model.buffers():
            buf.data = buf.data.to(get_device_id())
@contextmanager
def maybe_patch_fsdp_module(model):
    """Temporarily swap ``FSDPModule`` for an ABC-compatible subclass.

    When *model* is an ABC subclass, the patched class is installed for the
    duration of the context and always restored afterwards. NOTE(review):
    presumably needed because ``fully_shard`` derives a class from
    ``FSDPModule`` — confirm against the torch version in use.
    """
    if fully_shard_module is None:
        # torch < 2.4: nothing to patch.
        yield
        return

    original_cls = fully_shard_module.FSDPModule

    class FSDPModuleABC(ABC, original_cls):
        pass

    try:
        if isinstance(model, ABC):
            fully_shard_module.FSDPModule = FSDPModuleABC
        yield
    finally:
        fully_shard_module.FSDPModule = original_cls
def apply_fsdp2(model, fsdp_kwargs, config):
    """model: AutoModelForCausalLM

    Applies FSDP2 (``fully_shard``) in place: first to every matching
    transformer layer (and, when embeddings are untied, every ``nn.Embedding``),
    then to the root module.

    Args:
        model: HF causal-LM module to shard in place.
        fsdp_kwargs (dict): keyword arguments forwarded to ``fully_shard``.
        config: wrap-policy config; ``wrap_policy.transformer_layer_cls_to_wrap``
            overrides the model's ``_no_split_modules``.
    """
    assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)"

    default_transformer_cls_names_to_wrap = getattr(model, "_no_split_modules", None)
    fsdp_transformer_layer_cls_to_wrap = config.get("wrap_policy", {}).get(
        "transformer_layer_cls_to_wrap", default_transformer_cls_names_to_wrap
    )

    if isinstance(fsdp_transformer_layer_cls_to_wrap, str):
        fsdp_transformer_layer_cls_to_wrap = [fsdp_transformer_layer_cls_to_wrap]

    assert len(fsdp_transformer_layer_cls_to_wrap) > 0 and fsdp_transformer_layer_cls_to_wrap[0] is not None

    # Collect the submodules to shard individually (layers and, when untied,
    # embeddings) before touching the root.
    modules = []
    for name, module in model.named_modules():
        if module.__class__.__name__ in fsdp_transformer_layer_cls_to_wrap or (
            isinstance(module, nn.Embedding) and not model.config.tie_word_embeddings
        ):
            modules.append(module)

    for idx, module in enumerate(modules):
        # if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
        #     print(f"wrap module {module.__class__.__name__}")
        with maybe_patch_fsdp_module(module):
            fully_shard(module, **fsdp_kwargs)

    # if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
    #     print(f"wrap module {model.__class__.__name__}")
    with maybe_patch_fsdp_module(model):
        fully_shard(model, **fsdp_kwargs)  # fsdp2 will not reshard_after_forward for root module
def get_shard_placement_fn(fsdp_size):
"""Choose the dimension that can divide fsdp_size to avoid padding"""
def shard_placement_fn(param):
shape = list(param.shape)
for i in range(len(shape)):
if shape[i] % fsdp_size == 0:
return Shard(i)
return Shard(0)
return shard_placement_fn
def fsdp2_clip_grad_norm_(parameters, max_norm, norm_type=2.0, error_if_nonfinite=False, foreach=None):
    """Clip gradients by total norm, tolerating CPU-resident DTensor params.

    Behaves like ``torch.nn.utils.clip_grad_norm_`` but moves the computed
    total norm to the accelerator first, because clip_grad_norm_ can't run on
    CPU parameter DTensors.
    """
    from torch.nn.utils.clip_grad import _clip_grads_with_norm_, _get_total_norm

    if isinstance(parameters, torch.Tensor):
        params = [parameters]
    else:
        # prevent generators from being exhausted
        params = list(parameters)
    grads = [p.grad for p in params if p.grad is not None]
    total_norm = _get_total_norm(grads, norm_type, error_if_nonfinite, foreach)
    total_norm = total_norm.to(get_device_id(), non_blocking=True)
    _clip_grads_with_norm_(params, max_norm, total_norm, foreach)
    return total_norm
def layered_summon_lora_params(fsdp_module) -> OrderedDict:
    """Gather LoRA parameters from an FSDP-wrapped PEFT model layer by layer.

    Summons full parameters one submodule at a time (instead of the whole
    model at once) to keep peak memory low; returns a CPU OrderedDict of LoRA
    weights keyed by ``base_model.model.``-prefixed parameter names.
    """
    from peft.utils.save_and_load import get_peft_model_state_dict

    def __prefix_submodules(module, prefix):
        # Yield only direct children of `prefix` (no further dots after it).
        for name, submodule in module.named_modules():
            if name.startswith(prefix) and "." not in name[len(prefix) :]:
                yield name, submodule

    lora_params = OrderedDict()
    prefix_list = [
        # fsdp
        "_fsdp_wrapped_module.base_model.model.",
        "_fsdp_wrapped_module.base_model.model.model.",
        "_fsdp_wrapped_module.base_model.model.model.layers.",
        "_fsdp_wrapped_module.base_model.model.model.language_model.layers.",
        # fsdp2
        "base_model.model.",
        "base_model.model.model.",
        "base_model.model.model.layers.",
        "base_model.model.model.language_model.layers.",
    ]
    peft_model = getattr(fsdp_module, "_fsdp_wrapped_module", fsdp_module)
    for prefix in prefix_list:
        for name, submodule in __prefix_submodules(fsdp_module, prefix):
            # NOTE(review): this rebinding shadows the outer loop variable
            # `prefix`; harmless today (the generator captured its own copy at
            # call time) but worth renaming.
            prefix = name.replace("_fsdp_wrapped_module.base_model.model.", "base_model.model.")
            if name.endswith(".model") or name.endswith(".layers"):
                # containers are handled via their children; skip them here
                continue
            if fsdp_version(submodule) > 0:
                with FSDP.summon_full_params(submodule, writeback=False):
                    sub_lora_params = get_peft_model_state_dict(peft_model, state_dict=submodule.state_dict())
                    # DTensors expose full_tensor(); plain tensors are detached directly
                    sub_lora_params = {
                        f"{prefix}.{name}": param.full_tensor().detach().cpu()
                        if hasattr(param, "full_tensor")
                        else param.detach().cpu()
                        for name, param in sub_lora_params.items()
                    }
                    lora_params.update(sub_lora_params)
                submodule._is_root = False
    get_torch_device().empty_cache()
    return lora_params
def collect_lora_params(module: FSDP, layered_summon: bool, base_sync_done: bool) -> OrderedDict:
    """
    collect lora params or full params if base model is not ready in vllm

    work with if isinstance(self.module._fsdp_wrapped_module, PeftModel)

    Args:
        module (FSDP): FSDP-wrapped PEFT model.
        layered_summon (bool): gather parameters layer-by-layer (lower peak
            memory); requires ``base_sync_done``.
        base_sync_done (bool): whether the base model weights are already
            loaded in vllm; when False, base weights (not LoRA) are collected.

    Returns:
        OrderedDict: parameter name -> CPU tensor.
    """
    from peft.utils.save_and_load import get_peft_model_state_dict

    lora_params = OrderedDict()
    peft_model = getattr(module, "_fsdp_wrapped_module", module)
    if fsdp_version(module) > 0:
        if layered_summon:
            if not base_sync_done:
                raise ValueError(
                    "To use layered_summon, you must make sure base-model is preloaded in vllm, e.g. let "
                    "rollout.load_format=safetensors"
                )
            lora_params = layered_summon_lora_params(module)
        else:
            with FSDP.summon_full_params(module, writeback=False):
                if base_sync_done:
                    # only the LoRA adapter weights are needed
                    lora_params = get_peft_model_state_dict(peft_model)
                    # DTensors expose full_tensor(); plain tensors are detached directly
                    lora_params = {
                        name: param.full_tensor().detach().cpu()
                        if hasattr(param, "full_tensor")
                        else param.detach().cpu()
                        for name, param in lora_params.items()
                    }
                else:
                    # base weights must be collected; move to CPU first to
                    # limit device memory while iterating the state dict
                    model = peft_model.base_model.model
                    orig_dev = "cpu" if "cpu" in str(next(model.parameters()).device) else get_device_name()
                    model = model.to("cpu")
                    for name, param in model.state_dict().items():
                        # skip FSDP flat params and LoRA adapter tensors
                        if any(x in name for x in ["_flat_param", "lora_"]):
                            continue
                        name = name.replace("_fsdp_wrapped_module.", "").replace(".base_layer", "")
                        lora_params[name] = (
                            param.full_tensor().detach().cpu()
                            if hasattr(param, "full_tensor")
                            else param.detach().cpu()
                        )
                    model = model.to(orig_dev)
                    get_torch_device().empty_cache()
    else:
        # not FSDP-wrapped: plain PEFT model
        if base_sync_done:
            lora_params = get_peft_model_state_dict(peft_model)
        else:
            model = peft_model.base_model.model
            orig_dev = "cpu" if "cpu" in str(next(model.parameters()).device) else get_device_name()
            model = model.to("cpu")
            for name, param in model.state_dict().items():
                if any(x in name for x in ["_flat_param", "lora_"]):
                    continue
                name = name.replace("_fsdp_wrapped_module.", "").replace(".base_layer", "")
                lora_params[name] = param.detach().cpu()
            model = model.to(orig_dev)
    return lora_params
def replace_lora_wrapper(k, peft_config):
    """Replace LoRA parameter keys with base layer equivalents.

    Transforms LoRA parameter names to their corresponding base layer
    names for proper weight loading in vLLM when base model sync is not done.

    Args:
        k (str): Original parameter key name.
        peft_config: PEFT configuration consulted via ``check_exclude_modules``
            and ``check_target_modules`` to decide whether a module is adapted.

    Returns:
        str: Transformed parameter key for base layer (or *k* unchanged).
    """
    # Module names that PEFT commonly targets even without an explicit match.
    stacked_params = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
    for suffix in (".weight", ".bias"):
        if not k.endswith(suffix):
            continue
        module_k = k[: -len(suffix)]
        if check_exclude_modules(peft_config, module_k):
            return k
        # generator instead of a materialized list inside any()
        if any(module_k.endswith(s) for s in stacked_params) or check_target_modules(peft_config, module_k):
            return f"{module_k}.base_layer{suffix}"
        break  # matched a suffix but the module is neither excluded nor targeted
    return k
def set_reshard_after_forward(module: FSDPModule, reshard_after_forward: bool, recurse: bool = True) -> None:
    """
    Sets if the module should reshard parameters after forward. This can be
    used to change the ``reshard_after_forward`` FSDP arg at runtime. For
    example, this can be used to set the FSDP root module's value to
    ``True`` (since it is otherwise specially set to ``False``), or it can
    set an FSDP module's value to ``False`` for running evals and set back
    to ``True`` for training.

    Args:
        reshard_after_forward (bool): Whether to reshard parameters after
            forward.
        recurse (bool): Whether to set for all FSDP submodules or just the
            passed-in module.

    Raises:
        ValueError: If ``reshard_after_forward`` is not a bool.

    ---
    Copied from https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/_fully_shard/_fully_shard.py to
    address the absence of the set_reshard_after_forward function in torch versions earlier than 2.8.0.
    """
    if not isinstance(reshard_after_forward, bool):
        raise ValueError(f"reshard_after_forward should be a bool, got {type(reshard_after_forward)}")
    self_module = cast(nn.Module, module)
    modules = list(self_module.modules()) if recurse else [self_module]
    for module in modules:
        if isinstance(module, FSDPModule):
            state = module._get_fsdp_state()
            # disable FSDP's automatic decision; we control resharding below
            state._auto_reshard_after_forward = False
            if fsdp_param_group := state._fsdp_param_group:
                fsdp_param_group.post_forward_mesh_info = _get_post_forward_mesh_info(
                    reshard_after_forward, fsdp_param_group.mesh_info
                )
def normalize_peft_param_name(params: dict) -> dict:
    """
    Converts peft model parameter names to base-model parameter names.

    For example::

        base_model.model.model.embed_tokens.weight -> model.embed_tokens.weight
        base_model.model.model.layers.0.self_attn.q_proj.base_layer.weight
            -> model.layers.0.self_attn.q_proj.weight

    LoRA adapter tensors (keys containing ``lora_`` or ``.adapter_``) are dropped.
    """

    def _strip_peft_markers(name: str) -> str:
        return name.replace("base_model.model.", "").replace("base_model.", "").replace(".base_layer", "")

    def _is_adapter_key(name: str) -> bool:
        # catch typical PEFT keys
        return "lora_" in name or ".adapter_" in name

    return {
        _strip_peft_markers(key): value
        for key, value in params.items()
        if not _is_adapter_key(_strip_peft_markers(key))
    }
def _merge_or_unmerge_lora_(module, merge: bool):
    """Merge or unmerge LoRA adapters in a module (in place).

    Args:
        module: The module tree possibly containing LoRA layers.
        merge: If True, fold LoRA deltas into the base weights; if False, undo a
            previous merge.
    """
    from peft.tuners.lora import LoraLayer

    with torch.no_grad():
        for layer in module.modules():
            if not isinstance(layer, LoraLayer):
                continue
            already_merged = getattr(layer, "merged", False)
            # Only transition when the layer is not already in the target state.
            if merge and not already_merged:
                layer.merge()
            elif already_merged and not merge:
                layer.unmerge()
def _clean_merged_lora_(module):
    """Reset the ``merged_adapters`` bookkeeping list on every LoRA layer."""
    from peft.tuners.lora import LoraLayer

    with torch.no_grad():
        for layer in module.modules():
            if isinstance(layer, LoraLayer) and getattr(layer, "merged_adapters", False):
                layer.merged_adapters = []
def fsdp_merge_unmerge(module: nn.Module, do_merge: bool):
    """Merge or unmerge LoRA adapters in FSDP module.

    For FSDP (v1), it gathers all model parameters to each device, which may cause OOM.
    For FSDP2, it gathers model parameters layer-by-layer to reduce memory footprint.

    Args:
        module: The FSDP module to merge/unmerge LoRA adapters
        do_merge: If True, merge LoRA into base model; if False, unmerge LoRA
    """
    version = fsdp_version(module)
    assert version in [1, 2], f"fsdp_merge_unmerge requires FSDP module, got version {version}"
    if version == 1:
        # Unshard → merge → Reshard (writeback=True persists the merged weights to the shards)
        with FSDP.summon_full_params(module, writeback=True, with_grads=False):
            _merge_or_unmerge_lora_(module, merge=do_merge)
    else:
        # FSDP2: Unshard → merge → Reshard layer-by-layer
        # NOTE(review): this calls FSDP(v1).summon_full_params on FSDP2 (fully_shard)
        # submodules — confirm this actually gathers FSDPModule shards as intended.
        for name, submodule in module.named_modules():
            if isinstance(submodule, FSDPModule) and name != "":  # skip root model
                with FSDP.summon_full_params(submodule, writeback=True, with_grads=False):
                    _merge_or_unmerge_lora_(submodule, merge=do_merge)
def backup_base_model_weights(module):
    """Backup base model weights to CPU with LoRA temporarily disabled.

    Temporarily disables LoRA adapters (for PEFT models), snapshots the clean
    base-model weights to CPU, then re-enables the adapters. Non-PEFT models are
    backed up wholesale.

    Args:
        module: The PEFT model with LoRA adapters (or any nn.Module).

    Returns:
        dict: Mapping of parameter name -> CPU tensor backup of base model weights.
    """
    from peft import PeftModel

    snapshot = {}

    def _snapshot(skip_lora: bool):
        # Clone to CPU so later in-place updates on device do not mutate the backup.
        for name, param in module.named_parameters():
            if skip_lora and "lora" in name.lower():
                continue
            snapshot[name] = param.data.clone().cpu()

    with torch.no_grad():
        if isinstance(module, PeftModel):
            # disable_adapter() exposes the clean base weights while we copy them.
            with module.disable_adapter():
                _snapshot(skip_lora=True)
        else:
            _snapshot(skip_lora=False)
    return snapshot
def restore_base_model_weights(module, backup):
    """Restore base model weights from a CPU backup, in place.

    Effectively undoes any LoRA merge by copying the backed-up tensors into the
    matching parameters; parameters absent from the backup are left untouched.

    Args:
        module: The model whose parameters should be restored.
        backup: Mapping of parameter name -> CPU tensor backup.
    """
    with torch.no_grad():
        for name, param in module.named_parameters():
            saved = backup.get(name)
            if saved is not None:
                param.data.copy_(saved.to(param.device))
@contextmanager
def merged_lora_context(actor, backup_adapters=False):
    """Context manager that temporarily merges LoRA adapters into the base model.

    On entry, merges LoRA adapters (optionally after backing up the clean base
    weights); on exit, restores the base weights from the backup, or falls back
    to an explicit unmerge when no backup was taken.

    Args:
        actor: The actor module with LoRA adapters to merge.
        backup_adapters: If True, snapshot base weights (LoRA disabled) before
            merging and restore them afterwards — more numerically stable than
            unmerging.

    Yields:
        None
    """
    saved_base = backup_base_model_weights(actor) if backup_adapters else None
    fsdp_merge_unmerge(actor, do_merge=True)
    try:
        # Do work while merged (sync_to_vllm / generate / etc.)
        yield
    finally:
        if saved_base is not None:
            # Overwrite merged weights with the pristine backup, then clear
            # the per-layer merge bookkeeping.
            restore_base_model_weights(actor, saved_base)
            _clean_merged_lora_(actor)
        else:
            fsdp_merge_unmerge(actor, do_merge=False)
| verl__utils__fsdp_utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Group-wise helpers for RL training utilities.
Public API:
- as_torch_index(index, device=None) -> torch.LongTensor
- group_mean_std(scores, gidx, eps=1e-6, device=None) -> (mean_g, std_g, count_g)
Default device policy:
- If `device` is None:
* In pytest (detected by env "PYTEST_CURRENT_TEST"): use CPU.
* Else if CUDA is available: use CUDA.
* Else: use CPU.
- You can override via env "VERL_FORCE_DEVICE" (e.g., "cuda:0" / "cpu").
Notes:
- as_torch_index: canonicalizes arbitrary group labels to a contiguous 1-D torch.long
tensor in range [0..G-1]. Robust to torch/numpy/list/tuple, ints/floats/bools,
numeric strings, UUIDs, mixed object arrays. Near-integer floats (|x-round(x)|<=1e-6)
are rounded; otherwise factorization is applied.
- group_mean_std: pure-PyTorch per-group mean/std with Bessel correction for variance
(denominator max(count-1, 1)). Singleton groups fallback to mean=0, std=1 for
compatibility with common “native” conventions.
"""
from __future__ import annotations
import os
from typing import Any, Optional
import numpy as np
import torch
from verl.utils.device import get_device_name
__all__ = ["as_torch_index", "group_mean_std"]
def _resolve_device(explicit: Optional[torch.device | str]) -> torch.device:
"""
Resolve device according to policy described in the module docstring.
Priority:
1) explicit argument
2) VERL_FORCE_DEVICE env
3) pytest detection -> cpu
4) cuda if available, else cpu
"""
if explicit is not None:
return torch.device(explicit)
forced = os.getenv("VERL_FORCE_DEVICE")
if forced:
return torch.device(forced)
# Heuristic: pytest sets PYTEST_CURRENT_TEST
if "PYTEST_CURRENT_TEST" in os.environ:
return torch.device("cpu")
return torch.device(get_device_name())
def _to_1d_numpy_object_array(x: Any) -> np.ndarray:
    """Best-effort conversion of arbitrary input to a 1-D numpy array.

    Falls back to an object-dtype array (element-wise, then single-element)
    when direct conversion fails; multi-dimensional results are flattened.
    """
    try:
        arr = np.asarray(x)
    except Exception:
        try:
            arr = np.array(list(x), dtype=object)
        except Exception:
            # Not iterable / not convertible: wrap as a singleton.
            arr = np.array([x], dtype=object)
    return arr if arr.ndim == 1 else arr.reshape(-1)
def as_torch_index(index: Any, device: torch.device | str | None = None) -> torch.Tensor:
    """
    Convert arbitrary group labels to a contiguous 1-D torch.long tensor (0..G-1).

    Args:
        index: Any iterable of labels or tensor/ndarray.
        device: Target device; if None, resolved via _resolve_device().

    Returns:
        torch.LongTensor with shape (N,)

    NOTE(review): the integer / near-integer fast paths below return the values
    as-is without re-factorizing, so the output is only contiguous 0..G-1 when
    the input labels already are — confirm whether callers rely on value
    identity here.
    """
    target = _resolve_device(device)
    # ---------- Fast path: torch.Tensor ----------
    if isinstance(index, torch.Tensor):
        t = index.reshape(-1)
        if t.dtype in (
            torch.int64,
            torch.int32,
            torch.int16,
            torch.int8,
            getattr(torch, "uint8", torch.uint8),
            torch.bool,
        ):
            # Integer-like dtypes: cast straight to long.
            return t.to(device=target, dtype=torch.long)
        if t.dtype in (torch.float16, torch.float32, torch.float64, torch.bfloat16):
            # Floats within 1e-6 of whole numbers are rounded to ints.
            t64 = t.to(dtype=torch.float64)
            rounded = torch.round(t64)
            if torch.allclose(t64, rounded, rtol=0.0, atol=1e-6):
                return rounded.to(device=target, dtype=torch.long)
            # Genuinely fractional: treat values as opaque string labels.
            arr = np.array([str(x.item()) for x in t], dtype=object)
        else:
            # Exotic dtypes (e.g. complex): stringify each element.
            arr = np.array([str(x.item()) if hasattr(x, "item") else str(x) for x in t], dtype=object)
    else:
        # ---------- Non-torch: go through numpy ----------
        arr = _to_1d_numpy_object_array(index)
        # Pure integers (incl. bool)
        if arr.dtype != object and np.issubdtype(arr.dtype, np.integer):
            return torch.from_numpy(arr.astype(np.int64, copy=False)).to(device=target)
        # Floats nearly equal to integers
        if arr.dtype != object and np.issubdtype(arr.dtype, np.floating):
            arr64 = arr.astype(np.float64, copy=False)
            rounded = np.rint(arr64)
            if np.allclose(arr64, rounded, rtol=0.0, atol=1e-6):
                return torch.from_numpy(rounded.astype(np.int64)).to(device=target)
            # fall through
        # Try numeric string coercion (e.g. ["0", "1", "2"])
        try:
            coerced = arr.astype(np.int64)
            return torch.from_numpy(coerced).to(device=target)
        except Exception:
            pass
        if arr.dtype != object:
            arr = arr.astype(object)
    # ---------- Factorization (UUIDs / mixed types / arbitrary labels) ----------
    try:
        _, inv = np.unique(arr, return_inverse=True)
    except Exception:
        # Mixed-type object arrays can break np.unique's comparisons; compare as strings.
        sarr = np.array([str(x) for x in arr], dtype=object)
        _, inv = np.unique(sarr, return_inverse=True)
    inv = inv.astype(np.int64, copy=False)
    return torch.from_numpy(inv).to(device=target)
@torch.no_grad()
def group_mean_std(
    scores: torch.Tensor,
    gidx: torch.Tensor,
    eps: float = 1e-6,
    device: torch.device | str | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Compute per-group mean/std/count in pure PyTorch.

        mean_g = sum / count
        std_g  = sqrt( max( (sum2 - sum^2/count) / max(count-1, 1), eps ) )

    Singleton groups fallback to mean=0, std=1.

    Args:
        scores: (N,) float tensor.
        gidx  : (N,) long/int tensor with group indices (0..G-1).
        eps   : Numerical floor for variance.
        device: Target device; if None, resolved via _resolve_device().

    Returns:
        mean_g: (G,) float32
        std_g : (G,) float32
        count : (G,) float32
    """
    target = _resolve_device(device)
    scores = scores.reshape(-1).to(device=target, dtype=torch.float32)
    gidx = gidx.reshape(-1).to(device=target, dtype=torch.long)
    if scores.numel() != gidx.numel():
        raise ValueError(f"scores and gidx length mismatch: {scores.numel()} vs {gidx.numel()}")
    if gidx.numel() == 0:
        # Return empty tensors on the selected device
        empty = torch.empty(0, device=target, dtype=torch.float32)
        return empty, empty, empty
    num_groups = int(torch.max(gidx).item()) + 1

    def _scatter_sum(values: torch.Tensor) -> torch.Tensor:
        return torch.zeros(num_groups, device=target, dtype=torch.float32).index_add_(0, gidx, values)

    count = _scatter_sum(torch.ones_like(scores, dtype=torch.float32))
    total = _scatter_sum(scores)
    total_sq = _scatter_sum(scores * scores)
    safe_count = count.clamp_min(1.0)
    mean = total / safe_count
    # Bessel-corrected variance with denominator max(count-1, 1).
    var = (total_sq - total * total / safe_count) / (count - 1.0).clamp_min(1.0)
    std = torch.clamp(var, min=eps).sqrt()
    # Singleton groups: mean=0, std=1
    singletons = count <= 1.0
    if torch.any(singletons):
        mean = torch.where(singletons, torch.zeros_like(mean), mean)
        std = torch.where(singletons, torch.ones_like(std), std)
    return mean, std, count
| verl__utils__groupwise.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil

# Use `__name__` so the logger participates in the standard dotted logging
# hierarchy (the previous `__file__` named the logger after a filesystem path,
# which defeats hierarchical logger configuration).
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_SFT_LOGGING_LEVEL", "WARN"))

# Scheme prefix marking a path as living on HDFS rather than the local FS.
_HDFS_PREFIX = "hdfs://"
# Absolute path of the `hdfs` CLI, or None when it is not installed.
_HDFS_BIN_PATH = shutil.which("hdfs")
def exists(path: str, **kwargs) -> bool:
    r"""Works like os.path.exists() but supports hdfs.

    Test whether a path exists. Returns False for broken symbolic links.

    Args:
        path (str): path to test

    Returns:
        bool: True if the path exists, False otherwise
    """
    if not _is_non_local(path):
        return os.path.exists(path)
    return _exists(path, **kwargs)
def _exists(file_path: str):
    """hdfs-capable check of whether `file_path` exists."""
    if not file_path.startswith("hdfs"):
        return os.path.exists(file_path)
    # `hdfs dfs -test -e` exits 0 when the path exists.
    return _run_cmd(_hdfs_cmd(f"-test -e {file_path}")) == 0
def makedirs(name, mode=0o777, exist_ok=False, **kwargs) -> None:
    r"""Works like os.makedirs() but supports hdfs.

    Super-mkdir; create a leaf directory and all intermediate ones, recursively.
    For local paths, an OSError is raised when the target already exists and
    `exist_ok` is False.

    Args:
        name (str): directory to create
        mode (int): file mode bits (local paths only)
        exist_ok (bool): if True, do not raise an exception if the directory already exists
        kwargs: keyword arguments for hdfs
    """
    if not _is_non_local(name):
        os.makedirs(name, mode=mode, exist_ok=exist_ok)
        return
    # TODO(haibin.lin):
    # - handle OSError for hdfs(?)
    # - support exist_ok for hdfs(?)
    _mkdir(name, **kwargs)
def _mkdir(file_path: str) -> bool:
    """hdfs-capable mkdir -p; always returns True."""
    if file_path.startswith("hdfs"):
        _run_cmd(_hdfs_cmd(f"-mkdir -p {file_path}"))
        return True
    os.makedirs(file_path, exist_ok=True)
    return True
def copy(src: str, dst: str, **kwargs) -> bool:
    r"""Works like shutil.copy() for file, and shutil.copytree for dir, and supports hdfs.

    Copy data and mode bits ("cp src dst"). Return the file's destination.
    The destination may be a directory.

    If source and destination are the same file, a SameFileError will be
    raised.

    Arg:
        src (str): source file path
        dst (str): destination file path
        kwargs: keyword arguments for hdfs copy

    Returns:
        str: destination file path
    """
    if _is_non_local(src) or _is_non_local(dst):
        # TODO(haibin.lin):
        # - handle SameFileError for hdfs files(?)
        # - return file destination for hdfs files
        return _copy(src, dst)
    # Purely local: directories go through copytree, files through copy.
    if os.path.isdir(src):
        return shutil.copytree(src, dst, **kwargs)
    return shutil.copy(src, dst, **kwargs)
def _copy(from_path: str, to_path: str, timeout: int = None) -> bool:
    """Copy a single file between local FS and/or HDFS.

    Dispatches to `hdfs dfs -cp/-put/-get` when either side is on HDFS,
    otherwise falls back to `shutil.copy`.

    Args:
        from_path: source path (local or hdfs://...)
        to_path: destination path (local or hdfs://...)
        timeout: optional timeout in seconds forwarded to the hdfs command runner

    Returns:
        bool: True if the copy succeeded.
    """
    if to_path.startswith("hdfs"):
        # hdfs -> hdfs uses `-cp`; local -> hdfs uploads with `-put`.
        sub_cmd = "-cp -f" if from_path.startswith("hdfs") else "-put -f"
        returncode = _run_cmd(_hdfs_cmd(f"{sub_cmd} {from_path} {to_path}"), timeout=timeout)
    elif from_path.startswith("hdfs"):
        # hdfs -> local download. (The original embedded a backslash line
        # continuation inside the f-string, injecting a run of spaces into the
        # command; the shell tokenizer treated it identically, so normalizing
        # it here is behavior-preserving.)
        returncode = _run_cmd(_hdfs_cmd(f"-get {from_path} {to_path}"), timeout=timeout)
    else:
        try:
            shutil.copy(from_path, to_path)
            returncode = 0
        except shutil.SameFileError:
            # Copying a file onto itself counts as success.
            returncode = 0
        except Exception as e:
            logger.warning(f"copy {from_path} {to_path} failed: {e}")
            returncode = -1
    return returncode == 0
def _run_cmd(cmd: str, timeout=None):
    """Run a shell command and return its exit code (0 on success).

    The previous implementation used `os.system`, which silently ignored the
    `timeout` argument; `subprocess.run` honors it.

    Args:
        cmd: shell command line to execute
        timeout: optional timeout in seconds; subprocess.TimeoutExpired is
            raised if the command does not finish in time

    Returns:
        int: the command's return code
    """
    import subprocess

    return subprocess.run(cmd, shell=True, timeout=timeout).returncode
def _hdfs_cmd(cmd: str) -> str:
    """Build a full `hdfs dfs <cmd>` command line.

    NOTE(review): `_HDFS_BIN_PATH` is None when the `hdfs` binary is not on
    PATH, which would yield the literal string "None dfs ..." — presumably
    callers only reach this on hosts with the hdfs CLI installed; verify.
    """
    return f"{_HDFS_BIN_PATH} dfs {cmd}"
def _is_non_local(path: str):
    """Return True when `path` points at HDFS (carries the "hdfs://" scheme prefix)."""
    return path.startswith(_HDFS_PREFIX)
| verl__utils__hdfs_io.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities to check if packages are available.
We assume package availability won't change during runtime.
"""
import importlib
import importlib.util
import os
import warnings
from functools import cache, wraps
from typing import Optional
@cache
def is_megatron_core_available():
    """Return True if the ``megatron.core`` package can be located (cached)."""
    try:
        return importlib.util.find_spec("megatron.core") is not None
    except ModuleNotFoundError:
        # find_spec raises when the parent package itself is missing.
        return False
@cache
def is_vllm_available():
    """Return True if the ``vllm`` package can be located (cached)."""
    try:
        return importlib.util.find_spec("vllm") is not None
    except ModuleNotFoundError:
        return False
@cache
def is_sglang_available():
    """Return True if the ``sglang`` package can be located (cached)."""
    try:
        return importlib.util.find_spec("sglang") is not None
    except ModuleNotFoundError:
        return False
@cache
def is_nvtx_available():
    """Return True if the ``nvtx`` package can be located (cached)."""
    try:
        return importlib.util.find_spec("nvtx") is not None
    except ModuleNotFoundError:
        return False
@cache
def is_trl_available():
    """Return True if the ``trl`` package can be located (cached)."""
    try:
        return importlib.util.find_spec("trl") is not None
    except ModuleNotFoundError:
        return False
def import_external_libs(external_libs=None):
    """Import user-specified module(s) by name, purely for their side effects.

    Args:
        external_libs: a module name, a list of module names, or None (no-op).
    """
    if external_libs is None:
        return
    import importlib

    libs = external_libs if isinstance(external_libs, list) else [external_libs]
    for lib_name in libs:
        importlib.import_module(lib_name)
# URI-style prefixes recognized by `load_module` to disambiguate importable
# package paths from filesystem paths.
PKG_PATH_PREFIX = "pkg://"
FILE_PATH_PREFIX = "file://"
def load_module(module_path: str, module_name: Optional[str] = None) -> object:
    """Load a module from a path.

    Args:
        module_path (str):
            The path to the module. Either
            - `pkg_path`, e.g.,
                - "pkg://verl.utils.dataset.rl_dataset"
                - "pkg://verl/utils/dataset/rl_dataset"
            - or `file_path` (absolute or relative), e.g.,
                - "file://verl/utils/dataset/rl_dataset.py"
                - "/path/to/verl/utils/dataset/rl_dataset.py"
        module_name (str, optional):
            The name of the module to added to ``sys.modules``. If not provided, the module will not be added,
            thus will not be cached and directly ``import``able.
    """
    if not module_path:
        return None
    if module_path.startswith(PKG_PATH_PREFIX):
        # Package path: normalize "/" separators to "." and import normally.
        module_name = module_path[len(PKG_PATH_PREFIX) :].replace("/", ".")
        module = importlib.import_module(module_name)
    else:
        if module_path.startswith(FILE_PATH_PREFIX):
            module_path = module_path[len(FILE_PATH_PREFIX) :]
        if not os.path.exists(module_path):
            raise FileNotFoundError(f"Custom module file not found: {module_path=}")
        # Use the provided module_name for the spec, or derive a unique name to avoid collisions.
        spec_name = module_name if module_name else f"custom_module_{hash(os.path.abspath(module_path))}"
        spec = importlib.util.spec_from_file_location(spec_name, module_path)
        if spec is None or spec.loader is None:
            raise ImportError(f"Could not load module from {module_path=}")
        module = importlib.util.module_from_spec(spec)
        try:
            spec.loader.exec_module(module)
        except Exception as e:
            raise RuntimeError(f"Error loading module from {module_path=}") from e
    if module_name is None:
        return module
    import sys

    # Avoid overwriting an existing module with a different object.
    _missing = object()
    registered = sys.modules.get(module_name, _missing)
    if registered is not _missing and registered is not module:
        raise RuntimeError(
            f"Module name '{module_name}' already in `sys.modules` and points to a different module."
        )
    sys.modules[module_name] = module
    return module
def _get_qualified_name(func):
"""Get full qualified name including module and class (if any)."""
module = func.__module__
qualname = func.__qualname__
return f"{module}.{qualname}"
def deprecated(replacement: str = ""):
    """Decorator factory marking a function or class as deprecated.

    Emits a FutureWarning each time the function is called (or the class is
    instantiated), optionally pointing users at `replacement`.
    """

    def decorator(obj):
        qualified_name = _get_qualified_name(obj)
        suffix = f" Please use '{replacement}' instead." if replacement else ""
        if not isinstance(obj, type):
            # Plain function/callable: warn, then delegate.
            @wraps(obj)
            def wrapped(*args, **kwargs):
                warnings.warn(
                    f"Warning: Function '{qualified_name}' is deprecated." + suffix,
                    category=FutureWarning,
                    stacklevel=2,
                )
                return obj(*args, **kwargs)

            return wrapped

        # Class: wrap __init__ so the warning fires on instantiation.
        original_init = obj.__init__

        @wraps(original_init)
        def wrapped_init(self, *args, **kwargs):
            warnings.warn(
                f"Warning: Class '{qualified_name}' is deprecated." + suffix,
                category=FutureWarning,
                stacklevel=2,
            )
            return original_init(self, *args, **kwargs)

        obj.__init__ = wrapped_init
        return obj

    return decorator
def load_extern_object(module_path: str, object_name: str) -> object:
    """Load an object from a module path.

    Args:
        module_path (str): See :func:`load_module`.
        object_name (str):
            The name of the object to load with ``getattr(module, object_name)``.
    """
    module = load_module(module_path)
    if hasattr(module, object_name):
        return getattr(module, object_name)
    raise AttributeError(f"Object not found in module: {object_name=}, {module_path=}.")
def load_class_from_fqn(fqn: str, description: str = "class") -> type:
    """Load a class from its fully qualified name.

    Args:
        fqn: Fully qualified class name (e.g., 'mypackage.module.ClassName').
        description: Description for error messages (e.g., 'AgentLoopManager').

    Returns:
        The loaded class.

    Raises:
        ValueError: If fqn format is invalid (missing dot separator).
        ImportError: If the module cannot be imported.
        AttributeError: If the class is not found in the module.

    Example:
        >>> cls = load_class_from_fqn("verl.experimental.agent_loop.AgentLoopManager")
        >>> instance = cls(config=config, ...)
    """
    if "." not in fqn:
        raise ValueError(
            f"Invalid {description} '{fqn}'. Expected fully qualified class name (e.g., 'mypackage.module.ClassName')."
        )
    # Split outside the try blocks so each handler only catches its own failure
    # (previously an AttributeError raised by module-level code during import
    # would have been misreported as "class not found").
    module_path, class_name = fqn.rsplit(".", 1)
    try:
        module = importlib.import_module(module_path)
    except ImportError as e:
        raise ImportError(f"Failed to import module '{module_path}' for {description}: {e}") from e
    try:
        return getattr(module, class_name)
    except AttributeError as e:
        raise AttributeError(f"Class '{class_name}' not found in module '{module_path}': {e}") from e
# Backward-compat shim kept for existing callers; the `deprecated` decorator
# emits a FutureWarning pointing at the replacement API.
@deprecated(replacement="load_module(file_path); getattr(module, type_name)")
def load_extern_type(file_path: str, type_name: str) -> type:
    """DEPRECATED. Directly use `load_extern_object` instead."""
    return load_extern_object(file_path, type_name)
| verl__utils__import_utils.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os

import torch

logger = logging.getLogger(__name__)

# Check if Triton is available
_TRITON_AVAILABLE = False
try:
    import triton
    import triton.language as tl

    _TRITON_AVAILABLE = True
except ImportError:
    logger.debug("Triton not available, FP8 Triton kernels will not be used")

# Environment variable to control Triton FP8 usage (set to "1" to disable)
_DISABLE_TRITON_FP8 = os.environ.get("VERL_DISABLE_TRITON_FP8", "0").lower() in ("1", "true", "yes")

# FP8 constants: e4m3fn format with a symmetric clamping range derived from its
# finite maximum.
FP8_DTYPE = torch.float8_e4m3fn
FP8_MAX = torch.finfo(FP8_DTYPE).max
FP8_MIN = -FP8_MAX
def ceil_div(x: int, y: int) -> int:
    """Perform ceiling division of two integers."""
    # Negated floor division of the negation == ceiling division, without
    # the rounding error a float `math.ceil(x / y)` could introduce.
    return -(-x // y)
def is_triton_available() -> bool:
    """Check if Triton is available for FP8 kernels (snapshot taken at import time)."""
    return _TRITON_AVAILABLE
if _TRITON_AVAILABLE:

    @triton.jit
    def _blockwise_cast_to_fp8_kernel(
        X,
        Y,
        S,
        stride_xm,
        stride_xn,
        stride_ym,
        stride_yn,
        stride_sm,
        stride_sn,
        M,
        N,
        eps,
        fp8_min,
        fp8_max,
        BLOCK_M: tl.constexpr = 128,
        BLOCK_N: tl.constexpr = 128,
    ):
        """Triton kernel for blockwise FP8 quantization.

        Each program instance handles one block of size (BLOCK_M, BLOCK_N).
        Computes per-block scale and quantizes to FP8 in a single pass.

        Refer to https://github.com/THUDM/slime/blob/main/slime/backends/megatron_utils/kernels/fp8_kernel.py
        """
        # int64 program ids avoid offset overflow when addressing large tensors
        pid_m = tl.cast(tl.program_id(axis=0), tl.int64)
        pid_n = tl.cast(tl.program_id(axis=1), tl.int64)
        # Compute block offsets
        off_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
        off_n = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
        # Create masks for boundary handling
        mask_m = off_m < M
        mask_n = off_n < N
        mask = mask_m[:, None] & mask_n[None, :]
        # Load input block and convert to float32 for precision
        x = tl.load(X + off_m[:, None] * stride_xm + off_n[None, :] * stride_xn, mask=mask, other=0.0).to(tl.float32)
        # Compute block-wise absolute maximum with epsilon for numerical stability
        _absmax = tl.maximum(tl.max(tl.abs(x)), eps)
        # Compute scale: scale = absmax / fp8_max
        x_s = _absmax / fp8_max
        # Compute inverse scale for quantization
        s_inv = 1.0 / x_s
        # Quantize: clamp(x * s_inv, fp8_min, fp8_max)
        y_q = tl.clamp(x * s_inv, fp8_min, fp8_max).to(Y.dtype.element_ty)
        # Store quantized values and scale
        tl.store(Y + off_m[:, None] * stride_ym + off_n[None, :] * stride_yn, y_q, mask=mask)
        # One scale per (pid_m, pid_n) block; x_s is the dequantization multiplier
        tl.store(S + pid_m * stride_sm + pid_n * stride_sn, x_s)
def blockwise_cast_to_fp8_triton(
    x: torch.Tensor,
    weight_block_size: list[int] | tuple[int, int] | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Quantize a 2D tensor to FP8 using blockwise quantization with Triton.

    This function provides high-performance FP8 quantization with minimal memory overhead.
    All computations (abs, max, scale, clamp) are performed in a single Triton kernel,
    eliminating intermediate tensor allocations.

    Args:
        x: Input tensor of shape (M, N), must be 2D.
        weight_block_size: Block size for quantization as [BLOCK_M, BLOCK_N].
            Defaults to [128, 128] if None.

    Returns:
        Tuple of (quantized_tensor, scale_tensor):
            - quantized_tensor: FP8 quantized tensor of shape (M, N)
            - scale_tensor: Per-block scale factors of shape (ceil(M/BLOCK_M), ceil(N/BLOCK_N)).
              This is the inverse scale (multiply to dequantize).
    """
    assert x.dim() == 2, f"Expected 2D tensor, got {x.dim()}D"
    # Default block size
    BLOCK_M, BLOCK_N = 128, 128
    if weight_block_size is not None:
        BLOCK_M, BLOCK_N = weight_block_size[0], weight_block_size[1]
    M, N = x.shape
    # Pre-allocate output tensors (only memory allocation in this function)
    y = torch.empty(M, N, device=x.device, dtype=FP8_DTYPE)
    s = torch.empty(ceil_div(M, BLOCK_M), ceil_div(N, BLOCK_N), dtype=torch.float32, device=x.device)

    # Grid: one program per block
    def grid(meta):
        return (triton.cdiv(M, meta["BLOCK_M"]), triton.cdiv(N, meta["BLOCK_N"]))

    # Tune kernel parameters based on memory layout
    # NOTE(review): warp/stage counts look hand-tuned — confirm on target GPUs.
    if x.is_contiguous():
        kwargs = {"BLOCK_M": BLOCK_M, "BLOCK_N": BLOCK_N, "num_warps": 8, "num_stages": 2}
    else:
        kwargs = {"BLOCK_M": BLOCK_M, "BLOCK_N": BLOCK_N, "num_warps": 1, "num_stages": 4}
    # Launch kernel
    _blockwise_cast_to_fp8_kernel[grid](
        x,
        y,
        s,
        *x.stride(),
        *y.stride(),
        *s.stride(),
        M,
        N,
        1e-10,  # eps for numerical stability
        FP8_MIN,
        FP8_MAX,
        **kwargs,
    )
    return y, s
def scaled_fp8_blockwise_triton(
    data_hp: torch.Tensor,
    weight_block_size: list[int] | tuple[int, int],
) -> tuple[torch.Tensor, torch.Tensor]:
    """High-performance FP8 blockwise quantization using the Triton kernel.

    This is the recommended function to use for FP8 quantization when Triton is
    available. The input is padded up to block-size multiples, quantized, then
    cropped back to its original shape.

    Args:
        data_hp: Input high-precision tensor of shape (M, N).
        weight_block_size: Block size for quantization as [BLOCK_M, BLOCK_N].

    Returns:
        Tuple of (fp8_data, descale):
            - fp8_data: FP8 quantized tensor of original shape
            - descale: Per-block descale factors (inverse of scale, for dequantization)

    Raises:
        RuntimeError: If Triton is not available.
    """
    if not _TRITON_AVAILABLE:
        raise RuntimeError("Triton is required for scaled_fp8_blockwise_triton but is not available")
    original_shape = data_hp.shape
    # How much each dimension must grow to become a block-size multiple.
    pad_dim0 = (weight_block_size[0] - original_shape[0] % weight_block_size[0]) % weight_block_size[0]
    pad_dim1 = (weight_block_size[1] - original_shape[1] % weight_block_size[1]) % weight_block_size[1]
    needs_padding = pad_dim0 > 0 or pad_dim1 > 0
    if needs_padding:
        logger.debug(
            f"Padding weight from {data_hp.shape} to "
            f"({data_hp.shape[0] + pad_dim0}, {data_hp.shape[1] + pad_dim1}) "
            f"for blockwise FP8 quantization"
        )
        data_hp = torch.nn.functional.pad(data_hp, (0, pad_dim1, 0, pad_dim0), mode="constant", value=0)
    # Single-kernel quantization on the (possibly padded) tensor.
    fp_data, scale = blockwise_cast_to_fp8_triton(data_hp, weight_block_size)
    if needs_padding:
        # Crop back to the caller's shape; the per-block scale grid stays padded-sized.
        fp_data = fp_data[: original_shape[0], : original_shape[1]].contiguous()
    # The kernel already emits the inverse scale expected by vLLM/SGLang, so it
    # is returned unchanged as the descale.
    return fp_data, scale
def _scaled_fp8_blockwise_pytorch(
    data_hp: torch.Tensor,
    weight_block_size: list[int] | tuple[int, int],
) -> tuple[torch.Tensor, torch.Tensor]:
    """PyTorch implementation of blockwise FP8 quantization.

    Memory-optimized implementation that:
    - Uses in-place operations where possible
    - Explicitly deletes intermediate tensors
    - Minimizes peak memory usage during quantization

    Args:
        data_hp: Input high-precision tensor of shape (M, N).
        weight_block_size: Block size for quantization as [BLOCK_M, BLOCK_N].
            Unlike the Triton path, this fallback requires square blocks.

    Returns:
        Tuple of (fp8_data, descale):
            - fp8_data: FP8 quantized tensor (original, unpadded shape)
            - descale: Per-block descale factors for dequantization.
              NOTE(review): shaped (blk_m, blk_n, 1) here, whereas the Triton
              path returns (blk_m, blk_n) — confirm consumers handle both.
    """
    block_size0 = weight_block_size[0]
    block_size1 = weight_block_size[1]
    assert block_size0 == block_size1, "Block sizes must be equal"
    # Save unpadded shape for later cropping
    original_shape = data_hp.shape
    # Pad dimensions to be multiples of block size if needed
    pad_dim0 = (block_size0 - data_hp.shape[0] % block_size0) % block_size0
    pad_dim1 = (block_size1 - data_hp.shape[1] % block_size1) % block_size1
    if pad_dim0 > 0 or pad_dim1 > 0:
        logger.debug(
            f"Padding weight from {data_hp.shape} to "
            f"({data_hp.shape[0] + pad_dim0}, {data_hp.shape[1] + pad_dim1}) "
            f"for blockwise FP8 quantization"
        )
        data_hp = torch.nn.functional.pad(data_hp, (0, pad_dim1, 0, pad_dim0), mode="constant", value=0)
    # FP8
    max_dtype = FP8_MAX
    padded_shape = data_hp.shape
    blk_m, blk_n = data_hp.shape[0] // block_size0, data_hp.shape[1] // block_size1
    # Reshape and permute - these are views, no memory allocation
    # (the .contiguous() below does materialize the permuted layout)
    data_hp = data_hp.reshape(blk_m, block_size0, blk_n, block_size1)
    data_hp = data_hp.permute(0, 2, 1, 3).contiguous()
    # Flatten to (BLK_M, BLK_N, BLOCK_SIZE_M * BLOCK_SIZE_N) in float32 for precision
    data_hp = data_hp.to(torch.float32).flatten(start_dim=2)
    # Calculate max absolute value per block - use fused abs+amax
    max_abs = data_hp.abs().amax(dim=-1, keepdim=True)
    # Compute scale in-place where possible
    scale_fp = torch.empty_like(max_abs)
    torch.div(max_dtype, max_abs, out=scale_fp)
    # Handle edge cases: all-zero blocks (division by 0) and inf blocks
    scale_fp = torch.where(max_abs == 0, torch.ones_like(scale_fp), scale_fp)
    scale_fp = torch.where(max_abs == torch.inf, torch.ones_like(scale_fp), scale_fp)
    del max_abs  # Free max_abs memory
    # Compute descale before modifying data
    descale_fp = torch.reciprocal(scale_fp)
    # Scale and clamp in a memory-efficient way
    data_hp.mul_(scale_fp)
    del scale_fp  # Free scale memory
    data_hp.clamp_(min=-max_dtype, max=max_dtype)
    # Convert to FP8
    fp_data = data_hp.to(FP8_DTYPE)
    del data_hp  # Free float32 data
    # Reshape back to original layout
    fp_data = fp_data.reshape(blk_m, blk_n, block_size0, block_size1).permute(0, 2, 1, 3).reshape(padded_shape)
    # Remove padding to restore original shape
    if original_shape[0] != padded_shape[0] or original_shape[1] != padded_shape[1]:
        fp_data = fp_data[: original_shape[0], : original_shape[1]].contiguous()
    return fp_data, descale_fp
def scaled_fp8_blockwise(
    data_hp: torch.Tensor,
    weight_block_size: list[int] | tuple[int, int],
) -> tuple[torch.Tensor, torch.Tensor]:
    """Quantize a 2-D high-precision tensor to FP8 with blockwise scaling.

    Dispatches to the Triton kernel when Triton is available and not
    explicitly disabled via the environment variable
    ``VERL_DISABLE_TRITON_FP8=1``; otherwise falls back to the
    memory-optimized pure-PyTorch implementation.

    Args:
        data_hp: Input tensor of shape (M, N) in high precision
            (bf16/fp16/fp32).
        weight_block_size: Quantization block size as [BLOCK_M, BLOCK_N].

    Returns:
        Tuple ``(fp8_data, descale)`` where ``fp8_data`` is the FP8
        quantized tensor and ``descale`` holds the per-block descale
        factors used for dequantization.
    """
    assert len(data_hp.shape) == 2, "Only 2d input tensor is supported"
    # Prefer the high-performance Triton path; otherwise use the fallback.
    prefer_triton = _TRITON_AVAILABLE and not _DISABLE_TRITON_FP8
    impl = scaled_fp8_blockwise_triton if prefer_triton else _scaled_fp8_blockwise_pytorch
    return impl(data_hp, weight_block_size)
| verl__utils__kernel__fp8_kernel.py |
#
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementations of the linear cross entropy with token entropy kernel.
"""
import typing
from dataclasses import dataclass
import torch
import torch.distributed as dist
from verl.utils.device import get_device_capability, get_device_name, is_cuda_available
try:
    import triton
    import triton.language as tl

    HAVE_TRITON = True
    # TMA path needs CUDA, compute capability >= 9, and a Triton build that
    # exposes tl.make_tensor_descriptor.
    SUPPORT_CUDA_TMA = is_cuda_available and get_device_capability()[0] >= 9 and hasattr(tl, "make_tensor_descriptor")
except ImportError:
    HAVE_TRITON = False
    SUPPORT_CUDA_TMA = False

from verl.utils.device import get_torch_device

if not HAVE_TRITON:
    from contextlib import contextmanager
    from unittest.mock import MagicMock

    # Stand-in for triton.jit / triton.autotune when Triton is absent: the
    # decorated kernels only need to be importable, never executed.
    @contextmanager
    def null_decorator(*args, **kwargs):
        if len(kwargs) == 0 and len(args) == 1 and callable(args[0]):
            # bare usage: @decorator
            return args[0]
        else:
            # parameterized usage: @decorator(...)
            def inner(func):
                return func

            return inner

    triton = MagicMock()
    triton.jit = null_decorator
    triton.autotune = null_decorator
    tl = MagicMock()
elif SUPPORT_CUDA_TMA:
    # TMA descriptors require a global memory allocation
    def alloc_fn(size: int, alignment: int, stream: typing.Optional[int]):
        # NOTE(review): `alignment` is not used here; torch.empty on int8 is
        # presumably aligned enough for Triton's requirement — confirm.
        return torch.empty(size, device=get_device_name(), dtype=torch.int8)

    # https://github.com/triton-lang/triton/commit/43625fc968b693ab51884ca95adbcf3e43483fd0
    # Triton 3.5.0 stores allocators in ContextVar; values do not propagate to new
    # threads by default. Some execution paths in verl use thread pools (e.g.,
    # concurrent.futures), so we set a ContextVar *default* to avoid falling
    # back to NullAllocator in worker threads.
    try:
        import contextvars

        import triton.runtime._allocation as _triton_allocation

        if isinstance(getattr(_triton_allocation, "_allocator", None), contextvars.ContextVar):
            _triton_allocation._allocator = contextvars.ContextVar(
                _triton_allocation._allocator.name,
                default=alloc_fn,
            )
    except (ImportError, AttributeError):
        # Older/newer Triton without this private module: rely solely on
        # triton.set_allocator below.
        pass

    triton.set_allocator(alloc_fn)
@dataclass
class EntropyReductionEnum:
    """
    Enum for the reduction method of cross entropy.

    NOTE: this is a plain dataclass used as a namespace of integer
    constants (not an ``enum.Enum``); the values are passed to and
    compared as raw ints inside the Triton kernels.
    """

    _None = 0  # no reduction: per-token logprobs are returned
    _Sum = 1  # sum over tokens
    _Mean = 2  # mean over tokens
def get_entropy_reduction_enum_number(reduction: str) -> int:
    """
    Map a reduction name ("none" | "sum" | "mean") to its integer enum value.

    Raises:
        ValueError: if ``reduction`` is not one of the supported names.
    """
    name_to_enum = {
        "none": EntropyReductionEnum._None,
        "sum": EntropyReductionEnum._Sum,
        "mean": EntropyReductionEnum._Mean,
    }
    if reduction not in name_to_enum:
        raise ValueError(f"Invalid reduction: {reduction}")
    return name_to_enum[reduction]
def get_entropy_reduction_enum(ce_reduction: int) -> EntropyReductionEnum:
    """
    Map an integer reduction code (0/1/2) to its enum value.

    Raises:
        ValueError: if ``ce_reduction`` is not 0, 1, or 2.
    """
    code_to_enum = {
        0: EntropyReductionEnum._None,
        1: EntropyReductionEnum._Sum,
        2: EntropyReductionEnum._Mean,
    }
    if ce_reduction not in code_to_enum:
        raise ValueError(f"Invalid ce_reduction: {ce_reduction}")
    return code_to_enum[ce_reduction]
@dataclass
class BackwardEnum:
    """
    Enum for the backward method.

    NOTE: like EntropyReductionEnum, this is a dataclass used as a
    namespace of integer constants, not an ``enum.Enum``.
    """

    _Total_Fuse_MN = (
        0  # Fuse d_logits & d_hidden & d_weight, no intermediate storage, requires fp32 for d_hidden & d_weight
    )
    _Total_Separate = 1  # Store d_logits, no special requirements for d_hidden & d_weight
    _Split_Dlogits_N = 2  # split d_logits along its N dimension, aka. vocab_size
    _Split_Dlogits_M = 3  # split d_logits along its M dimension, aka. num_tokens
@dataclass
class Config:
    """Configuration for efficient entropy kernel operations.

    Args:
        _backward (BackwardEnum): Backward computation method. Defaults to BackwardEnum._Split_Dlogits_N.
        _use_triton (bool): Whether to use Triton kernels for computation. Defaults to True.
    """

    _backward: BackwardEnum = BackwardEnum._Split_Dlogits_N
    _use_triton: bool = True


# Module-level singleton; mutated in place by set_backward_method().
_config = Config()
def set_backward_method(backward_method: BackwardEnum) -> None:
    """
    Set the backward method used by the entropy kernels.

    Args:
        backward_method: one of the ``BackwardEnum`` strategy constants.
    """
    # No `global` statement needed: we mutate an attribute of the existing
    # module-level Config instance rather than rebinding the name `_config`.
    _config._backward = backward_method
@triton.autotune(
    configs=[triton.Config({"BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 256, "BLOCK_SIZE_K": 32}, num_stages=3, num_warps=8)],
    key=["num_tokens", "hidden_size", "vocab_size"],
)
@triton.jit
def efficient_entropy_kernel_general_mainloop(
    rank,
    hidden_ptr,
    weight_ptr,
    labels_ptr,
    num_tokens,
    hidden_size,
    vocab_size,
    vocab_per_split,
    stride_hidden_m: tl.int64,
    stride_hidden_k: tl.int64,
    stride_weight_n: tl.int64,
    stride_weight_k: tl.int64,
    max_ptr,
    stride_max_m: tl.int64,
    stride_max_n: tl.int64,
    accu_ptr,
    stride_accu_m: tl.int64,
    stride_accu_n: tl.int64,
    entropy_b_ptr,
    stride_entropy_b_m: tl.int64,
    stride_entropy_b_n: tl.int64,
    global_logprobs_ptr,
    stride_global_logprobs: tl.int64,
    global_logprobs_scalar_ptr,
    rcp_temperature: tl.float32,
    # Meta-parameters
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    USE_TMA: tl.constexpr,
):
    """
    forward mainloop

    Computes logits = hidden @ weight.T tile-by-tile and, for each
    (token-block, vocab-split) pair, maintains an online-softmax running
    triple: row maximum (_max), accumulated exp-sum (_accu), and
    accumulated sum(logits * exp_logits) (_entropy_b), plus the logit at
    each token's label position (_logprobs).  Partial results are written
    to max_ptr / accu_ptr / entropy_b_ptr with one column per split; an
    epilogue kernel later reduces across splits.
    """
    pid = tl.program_id(axis=0)
    num_splits = (vocab_size + vocab_per_split - 1) // vocab_per_split
    num_pid_m = tl.cdiv(num_tokens, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(vocab_per_split, BLOCK_SIZE_N)
    pid_m = pid % num_pid_m
    pid_n = pid // num_pid_m

    # Zero the scalar output once; the epilogue atomically accumulates into it.
    if pid_m == 0 and pid_n == 0:
        tl.store(global_logprobs_scalar_ptr, 0.0)

    # create pointers for the first blocks of hidden
    start_offs_am = pid_m * BLOCK_SIZE_M
    offs_am = start_offs_am + tl.arange(0, BLOCK_SIZE_M)
    offs_k = tl.arange(0, BLOCK_SIZE_K)
    if USE_TMA:
        # using TMA and device-side descriptor creation
        hidden_desc = tl.make_tensor_descriptor(
            hidden_ptr,
            shape=[num_tokens, hidden_size],
            strides=[stride_hidden_m, 1],
            block_shape=[BLOCK_SIZE_M, BLOCK_SIZE_K],
        )
        weight_desc = tl.make_tensor_descriptor(
            weight_ptr,
            shape=[vocab_size, hidden_size],
            strides=[stride_weight_n, 1],
            block_shape=[BLOCK_SIZE_N, BLOCK_SIZE_K],
        )
    else:
        hidden_ptrs = hidden_ptr + (offs_am[:, None] * stride_hidden_m + offs_k[None, :] * stride_hidden_k)

    # load labels for this block
    labels = tl.load(labels_ptr + offs_am, mask=offs_am < num_tokens)

    # traverse over N dimension
    # _max = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
    _max = tl.full((BLOCK_SIZE_M,), -float("inf"), dtype=tl.float32)
    _accu = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
    _entropy_b = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
    _logprobs = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
    for n in range(0, num_pid_n):
        start_offs_bn = pid_n * vocab_per_split + n * BLOCK_SIZE_N
        offs_bn = start_offs_bn + tl.arange(0, BLOCK_SIZE_N)
        logits = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
        if not USE_TMA:
            # weight_ptrs = weight_ptr + (offs_k[:, None] * stride_weight_k + offs_bn[None, :] * stride_weight_n)
            weight_ptrs = weight_ptr + (offs_bn[:, None] * stride_weight_n + offs_k[None, :] * stride_weight_k)
        # iterate over K dimension
        for k in range(0, tl.cdiv(hidden_size, BLOCK_SIZE_K)):
            if USE_TMA:
                # load the next block of hidden and weight
                start_offs_k = k * BLOCK_SIZE_K
                _hidden = hidden_desc.load([start_offs_am, start_offs_k])
                _weight = weight_desc.load([start_offs_bn, start_offs_k])
            else:
                # load the next block of hidden and weight
                _hidden = tl.load(
                    hidden_ptrs,
                    mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_am[:, None] < num_tokens),
                    other=0.0,
                )
                _weight = tl.load(
                    weight_ptrs,
                    mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K)
                    & (offs_bn[:, None] < (min((pid_n + 1) * vocab_per_split, vocab_size))),
                    other=0.0,
                )
                # advance the ptrs to the next K block
                hidden_ptrs += BLOCK_SIZE_K * stride_hidden_k
                weight_ptrs += BLOCK_SIZE_K * stride_weight_k
            # GEMM
            logits = tl.dot(_hidden, _weight.trans(), logits)
        if not USE_TMA:
            # reset hidden_ptrs for next iteration
            hidden_ptrs -= hidden_size * stride_hidden_k

        # scale logits by temperature
        logits *= rcp_temperature

        # update global maximum
        _max_old = _max
        m_pid_n = tl.max(logits, axis=1)
        _max = tl.maximum(_max_old, m_pid_n)
        # online-softmax: rescale the running sums to the new maximum
        exp_logits = tl.exp(logits - _max[:, None])
        coeff = tl.exp(_max_old - _max)
        _accu = coeff * _accu + tl.sum(exp_logits, axis=1)
        _entropy_b = _entropy_b * coeff + tl.sum(logits * exp_logits, axis=1)

        # rank * vocab_size maps this shard's local vocab indices into the
        # global label space (labels are global ids).
        label_mask = (offs_bn + rank * vocab_size)[None, :] == labels[:, None]
        _logprobs += tl.sum(logits * label_mask, axis=1)

    # store maximum
    offs_max_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_max_n = pid_n
    maximum_ptrs = max_ptr + offs_max_n * stride_max_n + offs_max_m * stride_max_m
    tl.store(maximum_ptrs, _max, mask=(offs_max_m < num_tokens) & (offs_max_n < num_splits))
    # store entropy
    accu_ptrs = accu_ptr + offs_max_n * stride_accu_n + offs_max_m * stride_accu_m
    tl.store(accu_ptrs, _accu, mask=(offs_max_m < num_tokens) & (offs_max_n[None] < num_splits))
    entropy_b_ptrs = entropy_b_ptr + offs_max_n * stride_entropy_b_n + offs_max_m * stride_entropy_b_m
    tl.store(entropy_b_ptrs, _entropy_b, mask=(offs_max_m < num_tokens) & (offs_max_n < num_splits))

    # store logprobs: only the split that owns a token's label writes its
    # picked logit, so the plain store below is race-free.
    vocab_left_idx = pid_n * vocab_per_split + rank * vocab_size
    vocab_right_idx = min((pid_n + 1) * vocab_per_split, vocab_size) + rank * vocab_size
    mask = (labels >= vocab_left_idx) & (labels < vocab_right_idx)
    mask &= offs_am < num_tokens
    global_logprobs_ptrs = global_logprobs_ptr + offs_am * stride_global_logprobs
    # tl.atomic_add(global_logprobs_ptrs, _logprobs, mask=mask)
    tl.store(global_logprobs_ptrs, _logprobs, mask=mask)
@triton.autotune(configs=[triton.Config({"BLOCK_SIZE_M": 16, "BLOCK_SIZE_N": 64})], key=["num_tokens", "num_splits"])
@triton.jit
def efficient_entropy_triton_kernel_epilogue(
    max_ptr,
    stride_max_m: tl.int64,
    stride_max_n: tl.int64,
    num_tokens,
    num_splits,
    global_max_ptr,
    stride_global_max: tl.int64,
    accu_ptr,
    stride_accu_m: tl.int64,
    stride_accu_n: tl.int64,
    global_accu_ptr,
    stride_global_accu: tl.int64,
    entropy_b_ptr,
    stride_entropy_b_m: tl.int64,
    stride_entropy_b_n: tl.int64,
    global_entropy_b_ptr,
    stride_global_entropy_b: tl.int64,
    global_entropy_ptr,
    stride_global_entropy: tl.int64,
    global_logprobs_ptr,
    stride_global_logprobs: tl.int64,
    global_logprobs_scalar_ptr,
    reduction: int,
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
):
    """
    forward epilogue (single-device path)

    Reduces the per-split partials (max / exp-sum / sum(logits*exp)) across
    the split dimension, then writes per-token entropy and logprobs,
    applying the requested reduction (0=none, 1=sum, 2=mean) to logprobs.
    """
    pid_m = tl.program_id(axis=0)
    offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)

    # NOTE: the running max starts at 0 rather than -inf; the log-sum-exp
    # identity holds for any reference point, so the result is still
    # mathematically correct (just a looser reference when all logits are
    # negative).
    global_max = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
    global_accu = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
    global_entropy_b = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
    for pid_n in range(0, tl.cdiv(num_splits, BLOCK_SIZE_N)):
        offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
        max_ptrs = max_ptr + offs_m[:, None] * stride_max_m + offs_n[None, :] * stride_max_n
        _max = tl.load(max_ptrs, mask=(offs_m[:, None] < num_tokens) & (offs_n[None, :] < num_splits), other=0.0)
        accu_ptrs = accu_ptr + offs_m[:, None] * stride_accu_m + offs_n[None, :] * stride_accu_n
        _accu = tl.load(accu_ptrs, mask=(offs_m[:, None] < num_tokens) & (offs_n[None, :] < num_splits), other=0.0)
        entropy_b_ptrs = entropy_b_ptr + offs_m[:, None] * stride_entropy_b_m + offs_n[None, :] * stride_entropy_b_n
        _entropy_b = tl.load(
            entropy_b_ptrs, mask=(offs_m[:, None] < num_tokens) & (offs_n[None, :] < num_splits), other=0.0
        )

        # local reduction: rescale the running sums onto the new maximum
        _max_old = global_max
        _local_max = tl.max(_max, axis=1)
        global_max = tl.maximum(global_max, _local_max)
        _scale = tl.exp(_max - global_max[:, None])
        _coeff = tl.exp(_max_old - global_max)
        global_accu = _coeff * global_accu + tl.sum(_scale * _accu, axis=1)
        global_entropy_b = _coeff * global_entropy_b + tl.sum(_scale * _entropy_b, axis=1)

    # store
    maximum_ptrs = global_max_ptr + offs_m * stride_global_max
    tl.store(maximum_ptrs, global_max, mask=offs_m < num_tokens)

    # store entropy_b: normalize so it becomes the expected logit E_p[logits]
    global_entropy_b = tl.fdiv(global_entropy_b, global_accu)  # entropy_b
    tl.store(global_entropy_b_ptr + offs_m * stride_global_entropy_b, global_entropy_b, mask=offs_m < num_tokens)

    # store entropy: H = logZ - E_p[logits], with logZ = log(accu) + max
    global_accu_ptrs = global_accu_ptr + offs_m * stride_global_accu
    tl.store(global_accu_ptrs, global_accu, mask=offs_m < num_tokens)
    global_entropy = tl.log(global_accu) + global_max - global_entropy_b  # entropy_a
    global_entropy_ptrs = global_entropy_ptr + offs_m * stride_global_entropy
    tl.store(global_entropy_ptrs, global_entropy, mask=offs_m < num_tokens)

    # update logprobs: logZ - picked_logit = -log p, negated to log p
    global_logprobs_ptrs = global_logprobs_ptr + offs_m * stride_global_logprobs
    global_logprobs = tl.load(global_logprobs_ptrs, mask=offs_m < num_tokens)
    global_logprobs = global_max + tl.log(global_accu) - global_logprobs
    global_logprobs = -1 * global_logprobs
    if reduction == 0:
        tl.store(global_logprobs_ptrs, global_logprobs, mask=offs_m < num_tokens)
    elif reduction == 1:
        global_logprobs_scalar = tl.sum(global_logprobs, axis=0)
        tl.atomic_add(global_logprobs_scalar_ptr, global_logprobs_scalar)
    elif reduction == 2:
        global_logprobs_scalar = tl.sum(global_logprobs, axis=0) / num_tokens.to(tl.float32)
        tl.atomic_add(global_logprobs_scalar_ptr, global_logprobs_scalar)
@triton.autotune(configs=[triton.Config({"BLOCK_SIZE_M": 16, "BLOCK_SIZE_N": 64})], key=["num_tokens", "num_splits"])
@triton.jit
def efficient_entropy_triton_kernel_epilogue_tp(
    num_tokens,
    num_splits,
    reduced_max_ptr,
    stride_reduced_max_m: tl.int64,
    stride_reduced_max_n: tl.int64,
    original_max_ptr,
    stride_original_max_m: tl.int64,
    stride_original_max_n: tl.int64,
    accu_ptr,
    stride_accu_m: tl.int64,
    stride_accu_n: tl.int64,
    entropy_b_ptr,
    stride_entropy_b_m: tl.int64,
    stride_entropy_b_n: tl.int64,
    global_max_ptr,
    stride_global_max: tl.int64,
    global_accu_ptr,
    stride_global_accu: tl.int64,
    global_entropy_b_ptr,
    stride_global_entropy_b: tl.int64,
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
):
    """
    Tensor-parallel forward epilogue.

    Like the single-device epilogue, but the per-split maxima have already
    been MAX-all-reduced across ranks: ``reduced_max_ptr`` holds the global
    maxima while ``original_max_ptr`` holds this rank's pre-reduction
    maxima, needed to rescale the local exp-sums onto the shared reference.
    Only writes the per-token max / exp-sum / sum(logits*exp); the final
    normalization and entropy/logprob computation happen in
    efficient_entropy_triton_epilogue_tp_update after a SUM all-reduce.
    """
    pid_m = tl.program_id(axis=0)
    offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)

    global_max = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
    global_accu = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
    global_entropy_b = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
    for pid_n in range(0, tl.cdiv(num_splits, BLOCK_SIZE_N)):
        offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
        _reduced_max = tl.load(
            reduced_max_ptr + offs_m[:, None] * stride_reduced_max_m + offs_n[None, :] * stride_reduced_max_n,
            mask=(offs_m[:, None] < num_tokens) & (offs_n[None, :] < num_splits),
            other=0.0,
        )
        _original_max = tl.load(
            original_max_ptr + offs_m[:, None] * stride_original_max_m + offs_n[None, :] * stride_original_max_n,
            mask=(offs_m[:, None] < num_tokens) & (offs_n[None, :] < num_splits),
            other=0.0,
        )
        _accu = tl.load(
            accu_ptr + offs_m[:, None] * stride_accu_m + offs_n[None, :] * stride_accu_n,
            mask=(offs_m[:, None] < num_tokens) & (offs_n[None, :] < num_splits),
            other=0.0,
        )

        # local reduce-max
        _max_old = global_max
        _local_max = tl.max(_reduced_max, axis=1)
        global_max = tl.maximum(global_max, _local_max)

        # update accumulate: local sums were computed relative to the
        # pre-reduction maxima, so scale with exp(original - global)
        _coeff = tl.exp(_max_old - global_max)
        _scale = tl.exp(_original_max - global_max[:, None])
        global_accu = _coeff * global_accu + tl.sum(_scale * _accu, axis=1)

        # update entropy_b
        _entropy_b = tl.load(
            entropy_b_ptr + offs_m[:, None] * stride_entropy_b_m + offs_n[None, :] * stride_entropy_b_n,
            mask=(offs_m[:, None] < num_tokens) & (offs_n[None, :] < num_splits),
            other=0.0,
        )
        global_entropy_b = _coeff * global_entropy_b + tl.sum(_scale * _entropy_b, axis=1)

    # store
    tl.store(global_max_ptr + offs_m * stride_global_max, global_max, mask=offs_m < num_tokens)
    tl.store(global_accu_ptr + offs_m * stride_global_accu, global_accu, mask=offs_m < num_tokens)
    tl.store(global_entropy_b_ptr + offs_m * stride_global_entropy_b, global_entropy_b, mask=offs_m < num_tokens)
@triton.autotune(configs=[triton.Config({"BLOCK_SIZE_M": 16})], key=["num_tokens"])
@triton.jit
def efficient_entropy_triton_epilogue_tp_update(
    num_tokens,
    logprobs_ptr,
    stride_logprobs: tl.int64,
    maximum_ptr,
    stride_maximum: tl.int64,
    accumulate_ptr,
    stride_accumulate: tl.int64,
    entropy_b_ptr,
    stride_entropy_b: tl.int64,
    entropy_ptr,
    stride_entropy: tl.int64,
    logprobs_scalar_ptr,
    reduction: int,
    BLOCK_SIZE_M: tl.constexpr,
):
    """
    Final tensor-parallel update: after `accumulate` and `entropy_b` have
    been SUM-all-reduced across ranks, normalize entropy_b, compute the
    per-token entropy and logprobs, and apply the requested reduction
    (0=none, 1=sum, 2=mean) to logprobs.
    """
    pid_m = tl.program_id(axis=0)
    offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)

    maximum = tl.load(maximum_ptr + offs_m * stride_maximum, mask=offs_m < num_tokens)
    accumulate = tl.load(accumulate_ptr + offs_m * stride_accumulate, mask=offs_m < num_tokens)
    entropy_b = tl.load(entropy_b_ptr + offs_m * stride_entropy_b, mask=offs_m < num_tokens)
    # normalize: entropy_b becomes the expected logit E_p[logits]
    entropy_b = tl.fdiv(entropy_b, accumulate)
    tl.store(entropy_b_ptr + offs_m * stride_entropy_b, entropy_b, mask=offs_m < num_tokens)

    # H = logZ - E_p[logits], with logZ = log(accumulate) + maximum
    entropy = tl.log(accumulate) + maximum - entropy_b
    tl.store(entropy_ptr + offs_m * stride_entropy, entropy, mask=offs_m < num_tokens)

    # logZ - picked_logit = -log p; negated to get the log-probability
    logprobs = tl.load(logprobs_ptr + offs_m * stride_logprobs, mask=offs_m < num_tokens)
    logprobs = maximum + tl.log(accumulate) - logprobs
    logprobs = -1 * logprobs
    if reduction == 0:
        tl.store(logprobs_ptr + offs_m * stride_logprobs, logprobs, mask=offs_m < num_tokens)
    elif reduction == 1:
        logprobs_scalar = tl.sum(logprobs, axis=0)
        tl.atomic_add(logprobs_scalar_ptr, logprobs_scalar)
    elif reduction == 2:
        logprobs_scalar = tl.sum(logprobs, axis=0) / num_tokens.to(tl.float32)
        tl.atomic_add(logprobs_scalar_ptr, logprobs_scalar)
# Lazily-created side stream and event pair for the tensor-parallel forward
# path; initialized on first use inside efficient_entropy_forward.
_dedicated_stream, _dedicated_events = None, None
def efficient_entropy_forward(
    hidden: torch.Tensor,
    weight: torch.Tensor,
    labels: torch.Tensor,
    reduction: typing.Optional[int] = 2,
    temperature: typing.Optional[float] = 1.0,
    dist_process_group: typing.Optional[dist.ProcessGroup] = None,
) -> list[torch.Tensor]:
    """
    forward host function

    Launches the fused linear + cross-entropy/entropy forward kernels.

    Args:
        hidden: (num_tokens, hidden_size) contiguous CUDA tensor.
        weight: (vocab_size, hidden_size) contiguous CUDA tensor; when
            ``dist_process_group`` is given this is assumed to be the local
            vocab shard of the tensor-parallel group.
        labels: (num_tokens,) contiguous CUDA tensor of label ids.
        reduction: 0=none, 1=sum, 2=mean (see EntropyReductionEnum).
        temperature: logits are multiplied by 1/temperature in the kernels.
        dist_process_group: optional tensor-parallel process group.

    Returns:
        Tuple (logprobs, entropy, maximum, accumulate, entropy_b); logprobs
        is a scalar tensor for sum/mean reduction, otherwise per-token.
    """
    assert hidden.is_cuda and weight.is_cuda and labels.is_cuda
    assert weight.device == hidden.device and labels.device == hidden.device
    assert hidden.dim() == 2 and weight.dim() == 2 and labels.dim() == 1
    assert hidden.is_contiguous() and weight.is_contiguous() and labels.is_contiguous()
    assert hidden.shape[0] == labels.shape[0] and hidden.shape[1] == weight.shape[1]

    _rank = 0 if dist_process_group is None else dist.get_rank(dist_process_group)
    _world_size = 1 if dist_process_group is None else dist.get_world_size(dist_process_group)  # NOTE(review): unused

    # One-time creation of a side stream + two events, used below to overlap
    # the logprobs all-reduce with the TP epilogue kernel.
    if dist_process_group is not None and not hasattr(efficient_entropy_forward, "_initialized"):
        global _dedicated_stream, _dedicated_events
        _dedicated_stream = get_torch_device().Stream(hidden.device)
        _dedicated_events = [get_torch_device().Event() for _ in range(2)]
        efficient_entropy_forward._initialized = True

    num_tokens, hidden_size = hidden.shape
    num_tokens = labels.shape[0]  # NOTE(review): redundant — asserted equal to hidden.shape[0] above
    vocab_size, hidden_size = weight.shape
    assert hidden_size % 128 == 0

    REDUCTION = get_entropy_reduction_enum(reduction)

    if REDUCTION == EntropyReductionEnum._None:
        if dist_process_group is None:
            logprobs = torch.empty((num_tokens,), device=hidden.device, dtype=torch.float32)
        else:
            # zero-init: the TP path SUM-all-reduces directly into this buffer
            logprobs = torch.zeros((num_tokens,), device=hidden.device, dtype=torch.float32)
    elif REDUCTION in (EntropyReductionEnum._Sum, EntropyReductionEnum._Mean):
        logprobs = torch.empty((), device=hidden.device, dtype=torch.float32)
    else:
        raise ValueError(f"Invalid reduction: {reduction}")
    entropy = torch.empty((num_tokens,), device=hidden.device, dtype=torch.float32)
    assert logprobs.is_contiguous() and entropy.is_contiguous()

    maximum = torch.empty_like(entropy)
    # accumulate and entropy_b share a single buffer so the TP path can
    # all-reduce both with one collective call.
    accumulate_and_entropy_b = torch.empty((num_tokens * 2,), device=hidden.device, dtype=torch.float32)
    accumulate_and_entropy_b_view = accumulate_and_entropy_b.view(2, num_tokens)
    accumulate = accumulate_and_entropy_b_view[0, :]
    entropy_b = accumulate_and_entropy_b_view[1, :]
    assert maximum.is_contiguous() and accumulate.is_contiguous() and entropy_b.is_contiguous()

    vocab_per_split = 1024
    assert vocab_per_split % 128 == 0
    num_splits = (vocab_size + vocab_per_split - 1) // vocab_per_split

    # per-(token, split) partial results produced by the mainloop kernel
    _max = torch.empty((num_tokens, num_splits), device=hidden.device, dtype=torch.float32)
    _accu = torch.empty((num_tokens, num_splits), device=hidden.device, dtype=torch.float32)
    _entropy_b = torch.empty((num_tokens, num_splits), device=hidden.device, dtype=torch.float32)
    if REDUCTION == EntropyReductionEnum._None:
        _logprobs = logprobs
    else:
        _logprobs = torch.empty((num_tokens,), device=hidden.device, dtype=torch.float32)
    assert _accu.is_contiguous() and _entropy_b.is_contiguous() and _max.is_contiguous()
    assert _accu.is_cuda and _entropy_b.is_cuda and _max.is_cuda

    if _config._use_triton:
        # 1D kernel launch, then split the tile
        def mainloop_grid(meta):
            return (triton.cdiv(num_tokens, meta["BLOCK_SIZE_M"]) * num_splits,)

        efficient_entropy_kernel_general_mainloop[mainloop_grid](
            _rank,
            hidden,
            weight,
            labels,
            num_tokens,
            hidden_size,
            vocab_size,
            vocab_per_split,
            hidden.stride(0),
            hidden.stride(1),
            weight.stride(0),
            weight.stride(1),
            _max,
            _max.stride(0),
            _max.stride(1),
            _accu,
            _accu.stride(0),
            _accu.stride(1),
            _entropy_b,
            _entropy_b.stride(0),
            _entropy_b.stride(1),
            _logprobs,
            _logprobs.stride(0),
            logprobs,
            1.0 / temperature,
            USE_TMA=SUPPORT_CUDA_TMA and hidden.stride(1) == 1 and weight.stride(1) == 1,
        )
    else:
        raise AssertionError("Triton is required for efficient entropy kernel")

    # reduction on maximum and maximum_indices
    def epilogue_grid(meta):
        return (triton.cdiv(num_tokens, meta["BLOCK_SIZE_M"]),)

    if dist_process_group is None:
        efficient_entropy_triton_kernel_epilogue[epilogue_grid](
            _max,
            _max.stride(0),
            _max.stride(1),
            num_tokens,
            num_splits,
            maximum,
            maximum.stride(0),
            _accu,
            _accu.stride(0),
            _accu.stride(1),
            accumulate,
            accumulate.stride(0),
            _entropy_b,
            _entropy_b.stride(0),
            _entropy_b.stride(1),
            entropy_b,
            entropy_b.stride(0),
            entropy,
            entropy.stride(0),
            _logprobs,
            _logprobs.stride(0),
            logprobs,
            REDUCTION,
        )
    else:
        # tensor-parallel
        # keep this rank's pre-reduction maxima: the TP epilogue needs them
        # to rescale local exp-sums onto the globally reduced maxima.
        _max_backup = _max.clone()
        dist.all_reduce(_max, op=dist.ReduceOp.MAX, group=dist_process_group)

        # overlap the logprobs SUM all-reduce (side stream) with the
        # epilogue kernel launched on the current stream below
        get_torch_device().current_stream().record_event(_dedicated_events[0])
        with get_torch_device().stream(_dedicated_stream):
            _dedicated_stream.wait_event(_dedicated_events[0])
            dist.all_reduce(_logprobs, op=dist.ReduceOp.SUM, group=dist_process_group)
            _dedicated_stream.record_event(_dedicated_events[1])

        efficient_entropy_triton_kernel_epilogue_tp[epilogue_grid](
            num_tokens,
            num_splits,
            _max,
            _max.stride(0),
            _max.stride(1),
            _max_backup,
            _max_backup.stride(0),
            _max_backup.stride(1),
            _accu,
            _accu.stride(0),
            _accu.stride(1),
            _entropy_b,
            _entropy_b.stride(0),
            _entropy_b.stride(1),
            maximum,
            maximum.stride(0),
            accumulate,
            accumulate.stride(0),
            entropy_b,
            entropy_b.stride(0),
        )
        get_torch_device().current_stream().wait_event(_dedicated_events[1])

        # single collective covers both accumulate and entropy_b (shared buffer)
        dist.all_reduce(accumulate_and_entropy_b, op=dist.ReduceOp.SUM, group=dist_process_group)

        # update logprobs & entropy
        efficient_entropy_triton_epilogue_tp_update[epilogue_grid](
            num_tokens,
            _logprobs,
            _logprobs.stride(0),
            maximum,
            maximum.stride(0),
            accumulate,
            accumulate.stride(0),
            entropy_b,
            entropy_b.stride(0),
            entropy,
            entropy.stride(0),
            logprobs,
            REDUCTION,
        )

    return (logprobs, entropy, maximum, accumulate, entropy_b)
# NOTE: merge d_weight & d_hidden here, split along M & N
@triton.autotune(
    configs=[
        triton.Config(
            {"BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 128, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 16},
            num_stages=3,
            num_warps=8,
        )
    ],
    key=["num_tokens", "hidden_size", "vocab_size"],
)
@triton.jit
def efficient_entropy_backward_kernel_general_mainloop_MN(
    num_tokens: int,
    hidden_size: int,
    vocab_size: int,
    rank: int,
    hidden_ptr,
    stride_hidden_m: tl.int64,
    stride_hidden_k: tl.int64,
    weight_ptr,
    stride_weight_n: tl.int64,
    stride_weight_k: tl.int64,
    labels_ptr,
    stride_labels: tl.int64,
    maximum_ptr,
    stride_maximum: tl.int64,
    accu_ptr,
    stride_accu: tl.int64,
    d_entropy_ptr,
    stride_d_entropy: tl.int64,
    d_logprobs_ptr,
    stride_d_logprobs: tl.int64,
    reduction: int,
    entropy_b_ptr,
    stride_entropy_b: tl.int64,
    d_hidden_ptr,
    stride_d_hidden_m: tl.int64,
    stride_d_hidden_k: tl.int64,
    d_weight_ptr,
    stride_d_weight_n: tl.int64,
    stride_d_weight_k: tl.int64,
    rcp_temperature: tl.float32,
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
    USE_TMA: tl.constexpr,
):
    """
    backward mainloop, where d_logits & d_hidden & d_weight are fused

    Recomputes the logits tile for this (M, N) block, forms d_logits from
    the saved forward statistics (maximum / accumulate / entropy_b), then
    accumulates d_weight (d_logits.T @ hidden) and d_hidden
    (d_logits @ weight) over the K dimension via atomic adds.
    """
    # block swizzling
    # pid = tl.program_id(axis=0)
    # num_pid_m = tl.cdiv(num_tokens, BLOCK_SIZE_M)
    # pid_m = pid % num_pid_m
    # pid_n = pid // num_pid_m
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(num_tokens, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(vocab_size, BLOCK_SIZE_N)
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m

    start_offs_am = pid_m * BLOCK_SIZE_M
    offs_am = start_offs_am + tl.arange(0, BLOCK_SIZE_M)
    start_offs_bn = pid_n * BLOCK_SIZE_N
    offs_bn = start_offs_bn + tl.arange(0, BLOCK_SIZE_N)
    offs_k = tl.arange(0, BLOCK_SIZE_K)

    if USE_TMA:
        # using TMA and device-side descriptor creation
        hidden_desc = tl.make_tensor_descriptor(
            hidden_ptr,
            shape=[num_tokens, hidden_size],
            strides=[stride_hidden_m, 1],
            block_shape=[BLOCK_SIZE_M, BLOCK_SIZE_K],
        )
        weight_desc = tl.make_tensor_descriptor(
            weight_ptr,
            shape=[vocab_size, hidden_size],
            strides=[stride_weight_n, 1],
            block_shape=[BLOCK_SIZE_N, BLOCK_SIZE_K],
        )

    maximum_ptrs = maximum_ptr + offs_am * stride_maximum
    maximum = tl.load(maximum_ptrs, mask=offs_am < num_tokens, other=0.0)
    accu_ptrs = accu_ptr + offs_am * stride_accu
    accu = tl.load(accu_ptrs, mask=offs_am < num_tokens, other=1e-6)  # epsilon to avoid division by zero
    accu_rcp = tl.fdiv(1.0, accu)
    d_entropy_ptrs = d_entropy_ptr + offs_am * stride_d_entropy
    d_entropy = tl.load(d_entropy_ptrs, mask=offs_am < num_tokens, other=0.0)

    if reduction == 0:  # none
        d_logprobs_ptrs = d_logprobs_ptr + offs_am * stride_d_logprobs
        d_logprobs = tl.load(d_logprobs_ptrs, mask=offs_am < num_tokens, other=0.0)
    elif reduction == 1:  # sum
        d_logprobs = tl.load(d_logprobs_ptr)
        d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
    else:  # mean
        d_logprobs = tl.fdiv(tl.load(d_logprobs_ptr), num_tokens.to(tl.float32))
        d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
    # forward negated the logprobs, so the incoming gradient is negated too
    d_logprobs = -1 * d_logprobs

    entropy_b_ptrs = entropy_b_ptr + offs_am * stride_entropy_b
    entropy_b = tl.load(entropy_b_ptrs, mask=offs_am < num_tokens, other=0.0)

    if not USE_TMA:
        hidden_ptrs = hidden_ptr + (offs_am[:, None] * stride_hidden_m + offs_k[None, :] * stride_hidden_k)
        # weight_ptrs = weight_ptr + (offs_k[:, None] * stride_weight_k + offs_bn[None, :] * stride_weight_n)
        weight_ptrs = weight_ptr + (offs_bn[:, None] * stride_weight_n + offs_k[None, :] * stride_weight_k)
    labels_ptrs = labels_ptr + offs_am * stride_labels
    labels = tl.load(labels_ptrs, mask=offs_am < num_tokens, other=0)
    d_hidden_ptrs = d_hidden_ptr + offs_am[:, None] * stride_d_hidden_m + offs_k[None, :] * stride_d_hidden_k
    # d_weight_ptrs = d_weight_ptr + offs_k[:, None] * stride_d_weight_k + offs_bn[None, :] * stride_d_weight_n
    d_weight_ptrs = d_weight_ptr + offs_bn[:, None] * stride_d_weight_n + offs_k[None, :] * stride_d_weight_k

    # first K loop: recompute the logits tile for this (M, N) block
    logits = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    for k in range(0, tl.cdiv(hidden_size, BLOCK_SIZE_K)):
        if USE_TMA:
            start_offs_k = k * BLOCK_SIZE_K
            _hidden = hidden_desc.load([start_offs_am, start_offs_k])
            _weight = weight_desc.load([start_offs_bn, start_offs_k])
        else:
            _hidden = tl.load(
                hidden_ptrs,
                mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_am[:, None] < num_tokens),
                other=0.0,
            )
            _weight = tl.load(
                weight_ptrs,
                mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_bn[:, None] < vocab_size),
                other=0.0,
            )
            hidden_ptrs += BLOCK_SIZE_K * stride_hidden_k
            weight_ptrs += BLOCK_SIZE_K * stride_weight_k
        logits = tl.dot(_hidden, _weight.T, logits)
    if not USE_TMA:
        # rewind pointers for the second K loop
        hidden_ptrs -= hidden_size * stride_hidden_k
        weight_ptrs -= hidden_size * stride_weight_k

    # scale logits by temperature
    logits *= rcp_temperature

    # d_logits = d_logprobs * (softmax - onehot)
    #          + d_entropy * (-softmax) * (logits - entropy_b)
    exp_logits = tl.exp(logits - maximum[:, None])
    mask = (offs_bn + rank * vocab_size)[None, :] == labels[:, None]
    d_logits = d_logprobs[:, None] * (exp_logits * accu_rcp[:, None] - mask)
    d_logits += d_entropy[:, None] * (-exp_logits * accu_rcp[:, None]) * (logits - entropy_b[:, None])
    # scale d_logits by temperature
    d_logits *= rcp_temperature

    # loop for d_weight & d_hidden
    for k in range(0, tl.cdiv(hidden_size, BLOCK_SIZE_K)):
        start_offs_k = k * BLOCK_SIZE_K
        if USE_TMA:
            _hidden = hidden_desc.load([start_offs_am, start_offs_k])
        else:
            _hidden = tl.load(
                hidden_ptrs,
                mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_am[:, None] < num_tokens),
                other=0.0,
            )
        # _d_weight = tl.dot(tl.trans(_hidden).to(tl.float32), d_logits)
        # tl.atomic_add(d_weight_ptrs,
        #               _d_weight,
        #               mask=(offs_k[:, None] < hidden_size - k * BLOCK_SIZE_K) & (offs_bn[None, :] < vocab_size))
        _d_weight = tl.dot(d_logits.trans(), _hidden.to(tl.float32))
        tl.atomic_add(
            d_weight_ptrs,
            _d_weight,
            mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_bn[:, None] < vocab_size),
        )
        if USE_TMA:
            _weight = weight_desc.load([start_offs_bn, start_offs_k])
        else:
            # _weight = tl.load(
            #     weight_ptrs,
            #     mask=(offs_k[:, None] < hidden_size - k * BLOCK_SIZE_K) & (offs_bn[None, :] < vocab_size),
            #     other=0.0
            # )
            # _d_hidden = tl.dot(d_logits, tl.trans(_weight).to(tl.float32))
            _weight = tl.load(
                weight_ptrs,
                mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_bn[:, None] < vocab_size),
                other=0.0,
            )
        _d_hidden = tl.dot(d_logits, _weight.to(tl.float32))
        tl.atomic_add(
            d_hidden_ptrs,
            _d_hidden,
            mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_am[:, None] < num_tokens),
        )
        if not USE_TMA:
            hidden_ptrs += BLOCK_SIZE_K * stride_hidden_k
            weight_ptrs += BLOCK_SIZE_K * stride_weight_k
        d_hidden_ptrs += BLOCK_SIZE_K * stride_d_hidden_k
        d_weight_ptrs += BLOCK_SIZE_K * stride_d_weight_k
@triton.autotune(
    configs=[
        triton.Config(
            {"BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 128, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 16},
            num_stages=3,
            num_warps=8,
        ),
    ],
    key=["num_tokens", "hidden_size", "vocab_size"],
)
@triton.jit
def efficient_entropy_backward_kernel_d_hidden(
    num_tokens: int,
    hidden_size: int,
    vocab_size: int,
    rank: int,  # vocab-parallel rank; offsets local vocab columns to global label ids
    hidden_ptr,
    stride_hidden_m: tl.int64,
    stride_hidden_k: tl.int64,
    weight_ptr,
    stride_weight_n: tl.int64,
    stride_weight_k: tl.int64,
    labels_ptr,
    stride_labels: tl.int64,
    maximum_ptr,  # per-token row max of logits (saved by forward) for stable exp
    stride_maximum: tl.int64,
    accu_ptr,  # per-token softmax normalizer (saved by forward)
    stride_accu: tl.int64,
    d_entropy_ptr,
    stride_d_entropy: tl.int64,
    d_logprobs_ptr,
    stride_d_logprobs: tl.int64,
    reduction: int,  # 0 = none (per-token grad), 1 = sum, otherwise mean
    entropy_b_ptr,
    stride_entropy_b: tl.int64,
    d_hidden_ptr,
    stride_d_hidden_m: tl.int64,
    stride_d_hidden_k: tl.int64,
    rcp_temperature: tl.float32,  # 1 / temperature; applied to logits and to d_logits
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
):
    """
    backward d_hidden

    Computes one (BLOCK_SIZE_M x BLOCK_SIZE_K) tile of d_hidden = d_logits @ weight
    without ever materializing d_logits in global memory: for every vocab block the
    logits tile is recomputed from hidden @ weight^T, d_logits is derived in
    registers, and the partial product with the weight slice is accumulated.

    Grid layout: axis 0 encodes (pid_m, pid_k) as pid = pid_k * num_pid_m + pid_m.
    """
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(num_tokens, BLOCK_SIZE_M)
    pid_m = pid % num_pid_m
    pid_k = pid // num_pid_m
    offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_k = tl.arange(0, BLOCK_SIZE_K)
    # Hidden-dimension slice this program is responsible for writing.
    result_offs_k = pid_k * BLOCK_SIZE_K + offs_k
    # Per-token statistics produced by the forward pass.
    maximum = tl.load(maximum_ptr + offs_m * stride_maximum, mask=offs_m < num_tokens, other=0.0)
    accu = tl.load(accu_ptr + offs_m * stride_accu, mask=offs_m < num_tokens, other=1e-6)
    accu_rcp = tl.fdiv(1.0, accu)
    d_entropy = tl.load(d_entropy_ptr + offs_m * stride_d_entropy, mask=offs_m < num_tokens, other=0.0)
    if reduction == 0:
        # "none": one incoming gradient per token.
        d_logprobs = tl.load(d_logprobs_ptr + offs_m * stride_d_logprobs, mask=offs_m < num_tokens, other=0.0)
    elif reduction == 1:
        # "sum": single scalar gradient broadcast across the token block.
        d_logprobs = tl.load(d_logprobs_ptr)
        d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
    else:
        # "mean": scalar gradient scaled by 1/num_tokens.
        d_logprobs = tl.fdiv(tl.load(d_logprobs_ptr), num_tokens.to(tl.float32))
        d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
    # Sign flip so the d_logits formula below yields d_logprobs * (one_hot - softmax).
    d_logprobs = -1 * d_logprobs
    entropy_b = tl.load(entropy_b_ptr + offs_m * stride_entropy_b, mask=offs_m < num_tokens, other=0.0)
    labels = tl.load(labels_ptr + offs_m * stride_labels, mask=offs_m < num_tokens, other=0)
    # iterate over vocab_size
    d_hidden = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_K), dtype=tl.float32)
    for n in range(0, tl.cdiv(vocab_size, BLOCK_SIZE_N)):
        offs_n = n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
        # Reset pointers to the first K-block for this vocab tile.
        hidden_ptrs = hidden_ptr + (offs_m[:, None] * stride_hidden_m + offs_k[None, :] * stride_hidden_k)
        weight_ptrs = weight_ptr + (offs_n[:, None] * stride_weight_n + offs_k[None, :] * stride_weight_k)
        # iterate over hidden_size to get logits
        logits = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
        for k in range(0, tl.cdiv(hidden_size, BLOCK_SIZE_K)):
            _hidden = tl.load(
                hidden_ptrs,
                mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_m[:, None] < num_tokens),
                other=0.0,
            )
            _weight = tl.load(
                weight_ptrs,
                mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_n[:, None] < vocab_size),
                other=0.0,
            )
            logits = tl.dot(_hidden, _weight.trans(), logits)
            hidden_ptrs += BLOCK_SIZE_K * stride_hidden_k
            weight_ptrs += BLOCK_SIZE_K * stride_weight_k
        # scale logits by temperature
        logits *= rcp_temperature
        # softmax numerator; accu_rcp turns it into the softmax probability.
        exp_logits = tl.exp(logits - maximum[:, None])
        # One-hot mask: local column -> global vocab id before comparing with labels.
        mask = (offs_n + rank * vocab_size)[None, :] == labels[:, None]
        # d_logits = d_logprobs * (softmax - one_hot) + d_entropy * (-softmax) * (logits - entropy_b)
        d_logits = d_logprobs[:, None] * (exp_logits * accu_rcp[:, None] - mask)
        d_logits += d_entropy[:, None] * (-exp_logits * accu_rcp[:, None]) * (logits - entropy_b[:, None])
        # scale d_logits
        d_logits *= rcp_temperature
        # calculate d_hidden
        # Re-load only this program's K slice of the weight tile and accumulate
        # d_hidden += d_logits @ weight[offs_n, result_offs_k].
        weight_ptrs = weight_ptr + (offs_n[:, None] * stride_weight_n + result_offs_k[None, :] * stride_weight_k)
        _weight = tl.load(
            weight_ptrs, mask=(result_offs_k[None, :] < hidden_size) & (offs_n[:, None] < vocab_size), other=0.0
        )
        # Cast d_logits to the weight element type so tl.dot operand dtypes match.
        d_hidden = tl.dot(d_logits.to(weight_ptr.dtype.element_ty), _weight, d_hidden)
    # write back
    tl.store(
        d_hidden_ptr + offs_m[:, None] * stride_d_hidden_m + result_offs_k[None, :] * stride_d_hidden_k,
        d_hidden,
        mask=(offs_m[:, None] < num_tokens) & (result_offs_k[None, :] < hidden_size),
    )
@triton.autotune(
    configs=[
        triton.Config(
            {"BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 128, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 16},
            num_stages=3,
            num_warps=8,
        ),
    ],
    key=["num_tokens", "hidden_size", "vocab_size"],
)
@triton.jit
def efficient_entropy_backward_kernel_d_weight(
    num_tokens: int,
    hidden_size: int,
    vocab_size: int,
    rank: int,  # vocab-parallel rank; offsets local vocab columns to global label ids
    hidden_ptr,
    stride_hidden_m: tl.int64,
    stride_hidden_k: tl.int64,
    weight_ptr,
    stride_weight_n: tl.int64,
    stride_weight_k: tl.int64,
    labels_ptr,
    stride_labels: tl.int64,
    maximum_ptr,  # per-token row max of logits (saved by forward)
    stride_maximum: tl.int64,
    accu_ptr,  # per-token softmax normalizer (saved by forward)
    stride_accu: tl.int64,
    d_entropy_ptr,
    stride_d_entropy: tl.int64,
    d_logprobs_ptr,
    stride_d_logprobs: tl.int64,
    reduction: int,  # 0 = none, 1 = sum, otherwise mean
    entropy_b_ptr,
    stride_entropy_b: tl.int64,
    d_weight_ptr,
    stride_d_weight_n: tl.int64,
    stride_d_weight_k: tl.int64,
    rcp_temperature: tl.float32,  # 1 / temperature
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
):
    """
    backward d_weight

    Computes one (BLOCK_SIZE_N x BLOCK_SIZE_K) tile of d_weight = d_logits^T @ hidden
    without materializing d_logits: for every token block the logits tile is
    recomputed, d_logits is derived in registers, and the partial outer product
    with the hidden slice is accumulated.

    Grid layout: axis 0 encodes (pid_n, pid_k) as pid = pid_k * num_pid_n + pid_n.
    """
    pid = tl.program_id(axis=0)
    num_pid_n = tl.cdiv(vocab_size, BLOCK_SIZE_N)
    pid_n = pid % num_pid_n
    pid_k = pid // num_pid_n
    offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    offs_k = tl.arange(0, BLOCK_SIZE_K)
    # Hidden-dimension slice this program writes.
    result_offs_k = pid_k * BLOCK_SIZE_K + offs_k
    d_weight = tl.zeros((BLOCK_SIZE_N, BLOCK_SIZE_K), dtype=tl.float32)
    # Accumulate contributions from every token block.
    for m in range(0, tl.cdiv(num_tokens, BLOCK_SIZE_M)):
        offs_m = m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
        maximum = tl.load(maximum_ptr + offs_m * stride_maximum, mask=offs_m < num_tokens, other=0.0)
        accu = tl.load(accu_ptr + offs_m * stride_accu, mask=offs_m < num_tokens, other=1e-6)
        accu_rcp = tl.fdiv(1.0, accu)
        d_entropy = tl.load(d_entropy_ptr + offs_m * stride_d_entropy, mask=offs_m < num_tokens, other=0.0)
        if reduction == 0:
            # "none": per-token incoming gradient.
            d_logprobs = tl.load(d_logprobs_ptr + offs_m * stride_d_logprobs, mask=offs_m < num_tokens, other=0.0)
        elif reduction == 1:
            # "sum": scalar gradient broadcast across the token block.
            d_logprobs = tl.load(d_logprobs_ptr)
            d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
        else:
            # "mean": scalar gradient scaled by 1/num_tokens.
            d_logprobs = tl.fdiv(tl.load(d_logprobs_ptr), num_tokens.to(tl.float32))
            d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
        # Sign flip so d_logits below is d_logprobs * (one_hot - softmax).
        d_logprobs = -1 * d_logprobs
        entropy_b = tl.load(entropy_b_ptr + offs_m * stride_entropy_b, mask=offs_m < num_tokens, other=0.0)
        labels = tl.load(labels_ptr + offs_m * stride_labels, mask=offs_m < num_tokens, other=0)
        hidden_ptrs = hidden_ptr + (offs_m[:, None] * stride_hidden_m + offs_k[None, :] * stride_hidden_k)
        weight_ptrs = weight_ptr + (offs_n[:, None] * stride_weight_n + offs_k[None, :] * stride_weight_k)
        # Recompute the logits tile for this (token block, vocab block).
        logits = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
        for k in range(0, tl.cdiv(hidden_size, BLOCK_SIZE_K)):
            _hidden = tl.load(
                hidden_ptrs,
                mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_m[:, None] < num_tokens),
                other=0.0,
            )
            _weight = tl.load(
                weight_ptrs,
                mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_n[:, None] < vocab_size),
                other=0.0,
            )
            logits = tl.dot(_hidden, _weight.trans(), logits)
            hidden_ptrs += BLOCK_SIZE_K * stride_hidden_k
            weight_ptrs += BLOCK_SIZE_K * stride_weight_k
        logits *= rcp_temperature
        exp_logits = tl.exp(logits - maximum[:, None])
        # One-hot mask against global label ids (vocab-parallel offset by rank).
        mask = (offs_n + rank * vocab_size)[None, :] == labels[:, None]
        d_logits = d_logprobs[:, None] * (exp_logits * accu_rcp[:, None] - mask)
        d_logits += d_entropy[:, None] * (-exp_logits * accu_rcp[:, None]) * (logits - entropy_b[:, None])
        d_logits *= rcp_temperature
        # Load only this program's K slice of hidden and accumulate
        # d_weight += d_logits^T @ hidden[offs_m, result_offs_k].
        hidden_ptrs = hidden_ptr + (offs_m[:, None] * stride_hidden_m + result_offs_k[None, :] * stride_hidden_k)
        _hidden = tl.load(
            hidden_ptrs, mask=(result_offs_k[None, :] < hidden_size) & (offs_m[:, None] < num_tokens), other=0.0
        )
        d_weight = tl.dot(d_logits.to(d_weight_ptr.dtype.element_ty).trans(), _hidden, d_weight)
    # write back
    tl.store(
        d_weight_ptr + offs_n[:, None] * stride_d_weight_n + result_offs_k[None, :] * stride_d_weight_k,
        d_weight,
        mask=(offs_n[:, None] < vocab_size) & (result_offs_k[None, :] < hidden_size),
    )
# NOTE: split tile from d_logits' perspective
@triton.autotune(
    configs=[
        triton.Config(
            {"BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 256, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 16},
            num_stages=3,
            num_warps=8,
        ),
    ],
    key=["num_tokens", "hidden_size", "vocab_size"],
)
@triton.jit
def efficient_entropy_backward_kernel_general_d_logits(
    num_tokens: int,
    hidden_size: int,
    vocab_size: int,
    rank: int,  # vocab-parallel rank; offsets local vocab columns to global label ids
    hidden_ptr,
    stride_hidden_m: tl.int64,
    stride_hidden_k: tl.int64,
    weight_ptr,
    stride_weight_n: tl.int64,
    stride_weight_k: tl.int64,
    labels_ptr,
    stride_labels: tl.int64,
    maximum_ptr,  # per-token row max of logits (saved by forward)
    stride_maximum: tl.int64,
    accu_ptr,  # per-token softmax normalizer (saved by forward)
    stride_accu: tl.int64,
    d_entropy_ptr,
    stride_d_entropy: tl.int64,
    d_logprobs_ptr,
    stride_d_logprobs: tl.int64,
    reduction: int,  # 0 = none, 1 = sum, otherwise mean
    entropy_b_ptr,
    stride_entropy_b,
    d_logits_ptr,
    stride_d_logits_m: tl.int64,
    stride_d_logits_n: tl.int64,
    rcp_temperature: tl.float32,  # 1 / temperature
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
    USE_TMA: tl.constexpr,  # host enables this only when the K dimension is contiguous
):
    """
    backward d_logits

    Recomputes the logits tile for one (token-block, vocab-block) pair, derives
    d_logits in registers, and materializes it into `d_logits_ptr` (the host then
    finishes d_hidden / d_weight with two dense matmuls).
    """
    # block swizzling
    # pid = tl.program_id(axis=0)
    # num_pid_m = tl.cdiv(num_tokens, BLOCK_SIZE_M)
    # pid_m = pid % num_pid_m
    # pid_n = pid // num_pid_m
    # Grouped ordering of program ids to improve L2 reuse across the M dimension.
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(num_tokens, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(vocab_size, BLOCK_SIZE_N)
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m
    start_offs_am = pid_m * BLOCK_SIZE_M
    offs_am = start_offs_am + tl.arange(0, BLOCK_SIZE_M)
    start_offs_bn = pid_n * BLOCK_SIZE_N
    offs_bn = start_offs_bn + tl.arange(0, BLOCK_SIZE_N)
    offs_k = tl.arange(0, BLOCK_SIZE_K)
    # Per-token statistics produced by the forward pass.
    maximum_ptrs = maximum_ptr + offs_am * stride_maximum
    maximum = tl.load(maximum_ptrs, mask=offs_am < num_tokens, other=0.0)
    accu_ptrs = accu_ptr + offs_am * stride_accu
    accu = tl.load(accu_ptrs, mask=offs_am < num_tokens, other=1e-6)  # epsilon to avoid division by zero
    accu_rcp = tl.fdiv(1.0, accu)
    d_entropy_ptrs = d_entropy_ptr + offs_am * stride_d_entropy
    d_entropy = tl.load(d_entropy_ptrs, mask=offs_am < num_tokens, other=0.0)
    if reduction == 0:  # none
        d_logprobs_ptrs = d_logprobs_ptr + offs_am * stride_d_logprobs
        d_logprobs = tl.load(d_logprobs_ptrs, mask=offs_am < num_tokens, other=0.0)
    elif reduction == 1:  # sum
        d_logprobs = tl.load(d_logprobs_ptr)
        d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
    else:  # mean
        d_logprobs = tl.fdiv(tl.load(d_logprobs_ptr), num_tokens.to(tl.float32))
        d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
    # Sign flip so d_logits below equals d_logprobs * (one_hot - softmax).
    d_logprobs = -1 * d_logprobs
    entropy_b_ptrs = entropy_b_ptr + offs_am * stride_entropy_b
    entropy_b = tl.load(entropy_b_ptrs, mask=offs_am < num_tokens, other=0.0)
    labels_ptrs = labels_ptr + offs_am * stride_labels
    labels = tl.load(labels_ptrs, mask=offs_am < num_tokens, other=0)
    logits = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    if USE_TMA:
        # using TMA and device-side descriptor creation
        # NOTE: descriptors hard-code the minor stride to 1; the host only selects
        # USE_TMA when hidden.stride(1) == 1 and weight.stride(1) == 1.
        hidden_desc = tl.make_tensor_descriptor(
            hidden_ptr,
            shape=[num_tokens, hidden_size],
            strides=[stride_hidden_m, 1],
            block_shape=[BLOCK_SIZE_M, BLOCK_SIZE_K],
        )
        weight_desc = tl.make_tensor_descriptor(
            weight_ptr,
            shape=[vocab_size, hidden_size],
            strides=[stride_weight_n, 1],
            block_shape=[BLOCK_SIZE_N, BLOCK_SIZE_K],
        )
    else:
        hidden_ptrs = hidden_ptr + (offs_am[:, None] * stride_hidden_m + offs_k[None, :] * stride_hidden_k)
        # weight_ptrs = weight_ptr + (offs_k[:, None] * stride_weight_k + offs_bn[None, :] * stride_weight_n)
        weight_ptrs = weight_ptr + (offs_bn[:, None] * stride_weight_n + offs_k[None, :] * stride_weight_k)
    # Recompute logits = hidden @ weight^T for this tile, one K-block at a time.
    for k in range(0, tl.cdiv(hidden_size, BLOCK_SIZE_K)):
        if USE_TMA:
            start_offs_k = k * BLOCK_SIZE_K
            _hidden = hidden_desc.load([start_offs_am, start_offs_k])
            _weight = weight_desc.load([start_offs_bn, start_offs_k])
        else:
            _hidden = tl.load(
                hidden_ptrs,
                mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_am[:, None] < num_tokens),
                other=0.0,
            )
            _weight = tl.load(
                weight_ptrs,
                mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_bn[:, None] < vocab_size),
                other=0.0,
            )
            hidden_ptrs += BLOCK_SIZE_K * stride_hidden_k
            weight_ptrs += BLOCK_SIZE_K * stride_weight_k
        logits = tl.dot(_hidden, _weight.T, logits)
    if not USE_TMA:
        # Rewind pointers to their pre-loop position (they are not read again below).
        hidden_ptrs -= hidden_size * stride_hidden_k
        weight_ptrs -= hidden_size * stride_weight_k
    # scale logits by temperature
    logits *= rcp_temperature
    exp_logits = tl.exp(logits - maximum[:, None])
    # One-hot mask against global label ids (vocab-parallel offset by rank).
    mask = (offs_bn + rank * vocab_size)[None, :] == labels[:, None]
    # d_logits = d_logprobs * (softmax - one_hot) + d_entropy * (-softmax) * (logits - entropy_b)
    d_logits = d_logprobs[:, None] * (exp_logits * accu_rcp[:, None] - mask)
    d_logits += d_entropy[:, None] * (-exp_logits * accu_rcp[:, None]) * (logits - entropy_b[:, None])
    # scale d_logits by temperature
    d_logits *= rcp_temperature
    # store d_logits
    d_logits_ptrs = d_logits_ptr + offs_am[:, None] * stride_d_logits_m + offs_bn[None, :] * stride_d_logits_n
    tl.store(
        d_logits_ptrs,
        d_logits,  # will be implicitly converted to d_logits_ptrs.dtype.element_ty
        mask=(offs_am[:, None] < num_tokens) & (offs_bn[None, :] < vocab_size),
    )
@triton.autotune(
    configs=[
        triton.Config(
            {"BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 256, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 16},
            num_stages=3,
            num_warps=8,
        ),
    ],
    key=["num_tokens", "hidden_size", "vocab_size"],
)
@triton.jit
def efficient_entropy_backward_kernel_general_d_logits_split_N(
    split_idx: int,  # which vocab chunk of width `vocab_per_split` this launch covers
    num_tokens: int,
    hidden_size: int,
    vocab_size: int,
    vocab_per_split: int,
    rank: int,  # vocab-parallel rank; offsets local vocab columns to global label ids
    hidden_ptr,
    stride_hidden_m: tl.int64,
    stride_hidden_k: tl.int64,
    weight_ptr,
    stride_weight_n: tl.int64,
    stride_weight_k: tl.int64,
    labels_ptr,
    stride_labels: tl.int64,
    maximum_ptr,  # per-token row max of logits (saved by forward)
    stride_maximum: tl.int64,
    accu_ptr,  # per-token softmax normalizer (saved by forward)
    stride_accu: tl.int64,
    d_entropy_ptr,
    stride_d_entropy: tl.int64,
    d_logprobs_ptr,
    stride_d_logprobs: tl.int64,
    reduction: int,  # 0 = none, 1 = sum, otherwise mean
    entropy_b_ptr,
    stride_entropy_b,
    d_logits_ptr,  # (num_tokens, vocab_per_split) scratch buffer, reused across splits
    stride_d_logits_m: tl.int64,
    stride_d_logits_n: tl.int64,
    rcp_temperature: tl.float32,  # 1 / temperature
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
    USE_TMA: tl.constexpr,  # host enables this only when the K dimension is contiguous
):
    """
    backward d_logits, one vocab chunk at a time.

    Same math as efficient_entropy_backward_kernel_general_d_logits, but only the
    columns [split_idx * vocab_per_split, (split_idx + 1) * vocab_per_split) are
    computed and written to the (smaller) per-split d_logits buffer.
    """
    # Grouped program-id ordering for L2 reuse; N is indexed within the split.
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(num_tokens, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(vocab_per_split, BLOCK_SIZE_N)
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m
    start_offs_am = pid_m * BLOCK_SIZE_M
    offs_am = start_offs_am + tl.arange(0, BLOCK_SIZE_M)
    # Absolute vocab offset of this tile (split base + block offset).
    start_offs_bn = split_idx * vocab_per_split + pid_n * BLOCK_SIZE_N
    offs_bn = start_offs_bn + tl.arange(0, BLOCK_SIZE_N)
    offs_k = tl.arange(0, BLOCK_SIZE_K)
    # Per-token statistics produced by the forward pass.
    maximum = tl.load(maximum_ptr + offs_am * stride_maximum, mask=offs_am < num_tokens, other=0.0)
    accu = tl.load(accu_ptr + offs_am * stride_accu, mask=offs_am < num_tokens, other=1e-6)
    accu_rcp = tl.fdiv(1.0, accu)
    d_entropy = tl.load(d_entropy_ptr + offs_am * stride_d_entropy, mask=offs_am < num_tokens, other=0.0)
    if reduction == 0:
        # "none": per-token incoming gradient.
        d_logprobs = tl.load(d_logprobs_ptr + offs_am * stride_d_logprobs, mask=offs_am < num_tokens, other=0.0)
    elif reduction == 1:
        # "sum": scalar gradient broadcast across the token block.
        d_logprobs = tl.load(d_logprobs_ptr)
        d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
    else:
        # "mean": scalar gradient scaled by 1/num_tokens.
        d_logprobs = tl.fdiv(tl.load(d_logprobs_ptr), num_tokens.to(tl.float32))
        d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
    # Sign flip so d_logits below equals d_logprobs * (one_hot - softmax).
    d_logprobs = -1 * d_logprobs
    entropy_b = tl.load(entropy_b_ptr + offs_am * stride_entropy_b, mask=offs_am < num_tokens, other=0.0)
    labels = tl.load(labels_ptr + offs_am * stride_labels, mask=offs_am < num_tokens, other=0)
    logits = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    if USE_TMA:
        # using TMA and device-side descriptor creation
        # NOTE: descriptors hard-code the minor stride to 1; the host only selects
        # USE_TMA when hidden.stride(1) == 1 and weight.stride(1) == 1.
        hidden_desc = tl.make_tensor_descriptor(
            hidden_ptr,
            shape=[num_tokens, hidden_size],
            strides=[stride_hidden_m, 1],
            block_shape=[BLOCK_SIZE_M, BLOCK_SIZE_K],
        )
        weight_desc = tl.make_tensor_descriptor(
            weight_ptr,
            shape=[vocab_size, hidden_size],
            strides=[stride_weight_n, 1],
            block_shape=[BLOCK_SIZE_N, BLOCK_SIZE_K],
        )
    else:
        hidden_ptrs = hidden_ptr + (offs_am[:, None] * stride_hidden_m + offs_k[None, :] * stride_hidden_k)
        weight_ptrs = weight_ptr + (offs_bn[:, None] * stride_weight_n + offs_k[None, :] * stride_weight_k)
    # The last split may be ragged: clamp the valid vocab range for the weight mask.
    vocab_right_bound = min((split_idx + 1) * vocab_per_split, vocab_size)
    # Recompute logits = hidden @ weight^T for this tile, one K-block at a time.
    for k in range(0, tl.cdiv(hidden_size, BLOCK_SIZE_K)):
        if USE_TMA:
            start_offs_k = k * BLOCK_SIZE_K
            _hidden = hidden_desc.load([start_offs_am, start_offs_k])
            _weight = weight_desc.load([start_offs_bn, start_offs_k])
        else:
            _hidden = tl.load(
                hidden_ptrs,
                mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_am[:, None] < num_tokens),
                other=0.0,
            )
            _weight = tl.load(
                weight_ptrs,
                mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_bn[:, None] < vocab_right_bound),
                other=0.0,
            )
            hidden_ptrs += BLOCK_SIZE_K * stride_hidden_k
            weight_ptrs += BLOCK_SIZE_K * stride_weight_k
        logits = tl.dot(_hidden, _weight.T, logits)
    logits *= rcp_temperature
    exp_logits = tl.exp(logits - maximum[:, None])
    # One-hot mask against global label ids (vocab-parallel offset by rank).
    mask = (offs_bn + rank * vocab_size)[None, :] == labels[:, None]
    d_logits = d_logprobs[:, None] * (exp_logits * accu_rcp[:, None] - mask)
    d_logits += d_entropy[:, None] * (-exp_logits * accu_rcp[:, None]) * (logits - entropy_b[:, None])
    d_logits *= rcp_temperature
    # filter d_logits with mask
    # Store is indexed relative to the split buffer, not the full vocab;
    # `mask` is deliberately rebound here from the one-hot mask to the store mask.
    result_offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    mask = (offs_am[:, None] < num_tokens) & (result_offs_n[None, :] < vocab_per_split)
    tl.store(
        d_logits_ptr + offs_am[:, None] * stride_d_logits_m + result_offs_n[None, :] * stride_d_logits_n, d_logits, mask
    )
def efficient_entropy_backward(
    dlogprobs: torch.Tensor,
    dentropy: torch.Tensor,
    hidden: torch.Tensor,
    weight: torch.Tensor,
    labels: torch.Tensor,
    maximum: torch.Tensor,
    acc: torch.Tensor,
    entropy_b: torch.Tensor,
    reduction: typing.Optional[int] = 2,
    should_return_fp32_grad: bool = False,
    temperature: typing.Optional[float] = 1.0,
    dist_process_group: typing.Optional[dist.ProcessGroup] = None,
) -> list[torch.Tensor]:
    """
    backward host function

    Dispatches one of the backward strategies selected by ``_config._backward``:

    * ``_Total_Fuse_MN`` — fully fused kernel; d_hidden/d_weight are accumulated
      with atomics and therefore allocated as zero-initialized fp32 buffers.
    * ``_Total_Separate`` — materialize the full (num_tokens, vocab_size)
      d_logits once, then finish with two dense matmuls.
    * ``_Split_Dlogits_N`` — materialize d_logits chunk-by-chunk along the vocab
      dimension to bound peak memory, accumulating d_hidden across chunks and
      writing each d_weight row-slice directly.

    Args:
        dlogprobs: incoming gradient of the logprobs output; shape (num_tokens,)
            for reduction "none", otherwise a 0-dim scalar.
        dentropy: incoming gradient of the entropy output, shape (num_tokens,).
        hidden: (num_tokens, hidden_size) activations from the forward pass.
        weight: (vocab_size, hidden_size) projection matrix.
        labels: (num_tokens,) global label ids.
        maximum / acc / entropy_b: per-token statistics saved by the forward pass.
        reduction: enum-convertible reduction id (default 2 = mean).
        should_return_fp32_grad: force fp32 gradient buffers even for the
            non-fused paths.
        temperature: forward softmax temperature; kernels receive 1/temperature.
        dist_process_group: optional process group for vocab-parallel execution.

    Returns:
        (d_hidden, d_weight) gradients matching hidden and weight shapes.
    """
    assert hidden.is_cuda and weight.is_cuda and labels.is_cuda
    assert weight.device == hidden.device and labels.device == hidden.device
    assert hidden.dim() == 2 and weight.dim() == 2 and labels.dim() == 1
    assert hidden.is_contiguous() and weight.is_contiguous() and labels.is_contiguous()
    assert hidden.shape[0] == labels.shape[0] and hidden.shape[1] == weight.shape[1]
    _rank = 0 if dist_process_group is None else dist.get_rank(dist_process_group)
    _world_size = 1 if dist_process_group is None else dist.get_world_size(dist_process_group)
    num_tokens, hidden_size = hidden.shape
    # num_tokens is re-read from labels; equal to hidden.shape[0] by the assert above.
    num_tokens = labels.shape[0]
    vocab_size, hidden_size = weight.shape
    assert hidden_size % 128 == 0
    REDUCTION = get_entropy_reduction_enum(reduction)
    if REDUCTION == EntropyReductionEnum._None:
        assert dlogprobs.shape == (num_tokens,)
    else:
        # sum / mean reductions propagate a single scalar gradient.
        assert dlogprobs.dim() == 0
    assert dlogprobs.is_contiguous() and dentropy.is_contiguous()
    assert dlogprobs.is_cuda and dentropy.is_cuda
    assert dlogprobs.device == hidden.device and dlogprobs.device == dentropy.device
    assert dentropy.shape == (num_tokens,)
    d_hidden, d_weight = None, None
    if _config._backward == BackwardEnum._Total_Fuse_MN or should_return_fp32_grad:
        # Fused path accumulates with atomic adds, so buffers must start at zero
        # and stay in fp32.
        d_hidden = torch.zeros_like(hidden, dtype=torch.float32, device=hidden.device)
        d_weight = torch.zeros_like(weight, dtype=torch.float32, device=weight.device)
    else:
        # Non-fused paths overwrite the buffers entirely; no zero-init needed.
        d_hidden = torch.empty_like(hidden, dtype=hidden.dtype, device=hidden.device)
        d_weight = torch.empty_like(weight, dtype=hidden.dtype, device=weight.device)
    assert d_hidden.is_contiguous() and d_weight.is_contiguous()
    assert maximum.is_contiguous() and acc.is_contiguous()
    assert maximum.device == hidden.device and acc.device == hidden.device
    assert maximum.shape == labels.shape == acc.shape
    assert maximum.is_cuda and acc.is_cuda
    # Default chunking (overridden inside the _Split_Dlogits_N branch below).
    vocab_per_split = 1024
    assert vocab_per_split % 128 == 0
    num_splits = (vocab_size + vocab_per_split - 1) // vocab_per_split
    assert entropy_b.is_contiguous() and entropy_b.is_cuda
    assert entropy_b.shape == (num_tokens,)
    if _config._backward == BackwardEnum._Total_Fuse_MN:
        # --- Triton doesn't materialize d_logits at all. Split tiles at the perspective of d_logits.
        def mainloop_grid(meta):
            # One program per (token-block, vocab-block) tile.
            return (triton.cdiv(num_tokens, meta["BLOCK_SIZE_M"]) * triton.cdiv(vocab_size, meta["BLOCK_SIZE_N"]),)

        efficient_entropy_backward_kernel_general_mainloop_MN[mainloop_grid](
            num_tokens,
            hidden_size,
            vocab_size,
            _rank,
            hidden,
            hidden.stride(0),
            hidden.stride(1),
            weight,
            weight.stride(0),
            weight.stride(1),
            labels,
            labels.stride(0),
            maximum,
            maximum.stride(0),
            acc,
            acc.stride(0),
            dentropy,
            dentropy.stride(0),
            dlogprobs,
            # Scalar dlogprobs (sum/mean) has no stride; the kernel broadcasts it.
            dlogprobs.stride(0) if REDUCTION == EntropyReductionEnum._None else 0,
            REDUCTION,
            entropy_b,
            entropy_b.stride(0),
            d_hidden,
            d_hidden.stride(0),
            d_hidden.stride(1),
            d_weight,
            d_weight.stride(0),
            d_weight.stride(1),
            1.0 / temperature,
            USE_TMA=SUPPORT_CUDA_TMA and hidden.stride(1) == 1 and weight.stride(1) == 1,
        )
    elif _config._backward == BackwardEnum._Total_Separate:
        # Materialize the full d_logits, then finish with two dense matmuls.
        _d_logits = torch.empty((num_tokens, vocab_size), device=hidden.device, dtype=hidden.dtype).contiguous()
        assert _d_logits.is_contiguous()
        if _config._use_triton:

            def d_logits_grid(meta):
                return (triton.cdiv(num_tokens, meta["BLOCK_SIZE_M"]) * triton.cdiv(vocab_size, meta["BLOCK_SIZE_N"]),)

            efficient_entropy_backward_kernel_general_d_logits[d_logits_grid](
                num_tokens,
                hidden_size,
                vocab_size,
                _rank,
                hidden,
                hidden.stride(0),
                hidden.stride(1),
                weight,
                weight.stride(0),
                weight.stride(1),
                labels,
                labels.stride(0),
                maximum,
                maximum.stride(0),
                acc,
                acc.stride(0),
                dentropy,
                dentropy.stride(0),
                dlogprobs,
                dlogprobs.stride(0) if REDUCTION == EntropyReductionEnum._None else 0,
                REDUCTION,
                entropy_b,
                entropy_b.stride(0),
                _d_logits,
                _d_logits.stride(0),
                _d_logits.stride(1),
                1.0 / temperature,
                USE_TMA=SUPPORT_CUDA_TMA and hidden.stride(1) == 1 and weight.stride(1) == 1,
            )
            # d_hidden = d_logits @ weight ; d_weight = d_logits^T @ hidden
            torch.matmul(_d_logits, weight, out=d_hidden)
            torch.matmul(_d_logits.T, hidden, out=d_weight)
        else:
            raise AssertionError("Triton is required for efficient entropy kernel")
    elif _config._backward == BackwardEnum._Split_Dlogits_N:
        # Chunk the vocab dimension so only (num_tokens, vocab_per_split) of
        # d_logits is live at once.
        vocab_per_split = 9504
        num_splits = (vocab_size + vocab_per_split - 1) // vocab_per_split
        _d_logits = torch.empty((num_tokens, vocab_per_split), device=hidden.device, dtype=hidden.dtype).contiguous()
        assert _d_logits.is_contiguous()

        def d_logits_grid(meta):
            return (triton.cdiv(num_tokens, meta["BLOCK_SIZE_M"]) * triton.cdiv(vocab_per_split, meta["BLOCK_SIZE_N"]),)

        for split_idx in range(num_splits):
            efficient_entropy_backward_kernel_general_d_logits_split_N[d_logits_grid](
                split_idx,
                num_tokens,
                hidden_size,
                vocab_size,
                vocab_per_split,
                _rank,
                hidden,
                hidden.stride(0),
                hidden.stride(1),
                weight,
                weight.stride(0),
                weight.stride(1),
                labels,
                labels.stride(0),
                maximum,
                maximum.stride(0),
                acc,
                acc.stride(0),
                dentropy,
                dentropy.stride(0),
                dlogprobs,
                dlogprobs.stride(0) if REDUCTION == EntropyReductionEnum._None else 0,
                REDUCTION,
                entropy_b,
                entropy_b.stride(0),
                _d_logits,
                _d_logits.stride(0),
                _d_logits.stride(1),
                1.0 / temperature,
                USE_TMA=SUPPORT_CUDA_TMA and hidden.stride(1) == 1 and weight.stride(1) == 1,
            )
            if split_idx == (num_splits - 1):
                # The last chunk may be ragged; trim the buffer to the valid columns.
                vocab_right_bound = min((split_idx + 1) * vocab_per_split, vocab_size) - split_idx * vocab_per_split
                _d_logits = _d_logits[:, :vocab_right_bound].contiguous()
            if split_idx == 0:
                # First chunk writes d_hidden directly; later chunks accumulate.
                torch.matmul(
                    _d_logits, weight[split_idx * vocab_per_split : (split_idx + 1) * vocab_per_split, :], out=d_hidden
                )
            else:
                d_hidden += torch.matmul(
                    _d_logits, weight[split_idx * vocab_per_split : (split_idx + 1) * vocab_per_split, :]
                )
            # Each chunk owns a disjoint row-slice of d_weight, so write in place.
            torch.matmul(
                _d_logits.T, hidden, out=d_weight[split_idx * vocab_per_split : (split_idx + 1) * vocab_per_split, :]
            )
    elif _config._backward == BackwardEnum._Split_Dlogits_M:
        raise NotImplementedError("BackwardEnum._Split_Dlogits_M is not implemented yet")
    return d_hidden, d_weight
| verl__utils__kernel__kernels.py |
#
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
import torch
import torch.distributed as dist
class LinearCrossEntropy(torch.autograd.Function):
    """Fused linear projection + cross-entropy/entropy autograd function.

    Instead of materializing the full (tokens, vocab) logits tensor in autograd,
    both forward and backward delegate to the fused Triton kernels in
    ``.kernels``, which recompute logits tile-by-tile.
    """

    @staticmethod
    def forward(
        ctx,
        hidden: torch.Tensor,
        weight: torch.Tensor,
        labels: torch.Tensor,
        temperature: typing.Optional[float] = 1.0,
        reduction: typing.Optional[str] = "none",
        dist_process_group: typing.Optional[dist.ProcessGroup] = None,
    ) -> list[torch.Tensor]:
        """Compute per-token log-probabilities and entropy of ``hidden @ weight.T``.

        Args:
            ctx: autograd context; stores tensors and settings for backward.
            hidden (torch.Tensor): (batch_size, num_tokens, hidden_size); flattened
                internally to (batch_size * num_tokens, hidden_size).
            weight (torch.Tensor): (vocab_size, hidden_size) projection matrix.
            labels (torch.Tensor): (batch_size, num_tokens); flattened internally
                to (batch_size * num_tokens,).
            temperature (typing.Optional[float], optional): softmax temperature the
                kernel divides logits by. Defaults to 1.0.
            reduction (typing.Optional[str], optional): reduction name, converted
                (case-insensitively) to the kernel enum. Defaults to "none".
            dist_process_group (typing.Optional[dist.ProcessGroup], optional):
                process group for vocab-parallel execution; None means single rank.
                Defaults to None.

        Returns:
            typing.List[torch.Tensor]: (logprobs, entropy) as produced by
            ``kernels.efficient_entropy_forward``.
        """
        assert isinstance(temperature, float), f"temperature must be a float, but got {type(temperature)}"
        assert isinstance(reduction, str), f"reduction must be a str, but got {type(reduction)}"
        with torch.cuda.nvtx.range("LinearCrossEntropy-forward"):
            # Imported lazily to avoid a hard dependency at module import time.
            from . import kernels

            REDUCTION = kernels.get_entropy_reduction_enum_number(reduction.lower())

            # Flatten batch dims; the original shape is restored on the gradient.
            original_hidden_shape = hidden.shape
            if len(hidden.shape) != 2:
                hidden = hidden.view(-1, hidden.shape[-1])  # (batch_size * num_tokens, hidden_size)
            if len(labels.shape) != 1:
                labels = labels.view(-1)

            logprobs, entropy, _maximum, _accumulate, _entropy_b = kernels.efficient_entropy_forward(
                hidden, weight, labels, REDUCTION, temperature, dist_process_group
            )

            # Save activations plus the forward statistics the backward kernels need.
            ctx.save_for_backward(hidden, weight, labels, _maximum, _accumulate, _entropy_b)
            ctx.original_hidden_shape = original_hidden_shape
            ctx.REDUCTION = REDUCTION
            ctx.dist_process_group = dist_process_group
            ctx.should_return_fp32_grad = False
            ctx.temperature = temperature

        return logprobs, entropy

    @staticmethod
    def backward(ctx, dlogprobs: torch.Tensor, dentropy: torch.Tensor) -> list[torch.Tensor]:
        """Route (dlogprobs, dentropy) through the fused backward kernels.

        Returns gradients for (hidden, weight) and None for the non-tensor
        forward arguments (labels, temperature, reduction, dist_process_group).
        """
        from . import kernels

        with torch.cuda.nvtx.range("LinearCrossEntropy-backward"):
            (hidden, weight, labels, _maximum, _accumulate, _entropy_b) = ctx.saved_tensors
            REDUCTION = ctx.REDUCTION
            dist_process_group = ctx.dist_process_group
            should_return_fp32_grad = ctx.should_return_fp32_grad
            temperature = ctx.temperature

            d_hidden, d_weight = kernels.efficient_entropy_backward(
                dlogprobs,
                dentropy,
                hidden,
                weight,
                labels,
                _maximum,
                _accumulate,
                _entropy_b,
                REDUCTION,
                should_return_fp32_grad,
                temperature,
                dist_process_group,
            )
            # Undo the forward's flattening so the gradient matches the input shape.
            d_hidden = d_hidden.view(ctx.original_hidden_shape)

        return (d_hidden, d_weight, None, None, None, None)


# Functional entry point: linear_cross_entropy(hidden, weight, labels, ...).
linear_cross_entropy = LinearCrossEntropy.apply
| verl__utils__kernel__linear_cross_entropy.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A Ray logger will receive logging info from different processes.
"""
import datetime
import logging
import numbers
import pprint
import torch
def concat_dict_to_str(dict: dict, step):
    """Render the numeric entries of *dict* as a single ``step:N - k:v - ...`` string.

    Non-numeric values are silently skipped. (The parameter name shadows the
    builtin ``dict``; it is kept for backward compatibility with callers.)
    """
    numeric_parts = (f"{k}:{pprint.pformat(v)}" for k, v in dict.items() if isinstance(v, numbers.Number))
    return " - ".join([f"step:{step}", *numeric_parts])
class LocalLogger:
    """
    A minimal metric logger that writes to the console only.

    Args:
        print_to_console (bool): Whether to print to the console.
    """

    def __init__(self, print_to_console=True):
        self.print_to_console = print_to_console

    def flush(self):
        """No-op; present for interface compatibility with buffered loggers."""
        pass

    def log(self, data, step):
        """Print the numeric entries of *data* for the given *step*, if enabled."""
        if not self.print_to_console:
            return
        print(concat_dict_to_str(data, step=step), flush=True)
class DecoratorLoggerBase:
    """
    Base class for decorators that emit log messages.

    Dispatches to ``logging`` when a logger is supplied, otherwise falls back
    to plain ``print``.

    Args:
        role (str): The role (the name) of the logger, prefixed to every message.
        logger (logging.Logger): Logger to use; None means print to stdout.
        level (int): The logging level used with the logger backend.
        rank (int): The rank of the calling process.
        log_only_rank_0 (bool): If True, suppress output on non-zero ranks.
    """

    def __init__(
        self, role: str, logger: logging.Logger = None, level=logging.DEBUG, rank: int = 0, log_only_rank_0: bool = True
    ):
        self.role = role
        self.logger = logger
        self.level = level
        self.rank = rank
        self.log_only_rank_0 = log_only_rank_0
        # Choose the backend once, up front: print when no logger was given.
        self.logging_function = self.log_by_print if logger is None else self.log_by_logging

    def log_by_print(self, log_str):
        """Print the role-prefixed message, honoring the rank filter."""
        if self.rank == 0 or not self.log_only_rank_0:
            print(f"{self.role} {log_str}", flush=True)

    def log_by_logging(self, log_str):
        """Emit the role-prefixed message via the configured logger.

        Raises:
            ValueError: if no logger instance was configured.
        """
        if self.logger is None:
            raise ValueError("Logger is not initialized")
        if self.rank == 0 or not self.log_only_rank_0:
            self.logger.log(self.level, f"{self.role} {log_str}")
def print_rank_0(message):
    """If distributed is initialized, print only on rank 0; otherwise always print."""
    should_print = True
    if torch.distributed.is_initialized():
        should_print = torch.distributed.get_rank() == 0
    if should_print:
        print(message, flush=True)
def print_with_rank(message: str, rank: int = 0, log_only_rank_0: bool = False):
    """Print *message* prefixed with ``[Rank N]``.

    Nothing is printed when ``log_only_rank_0`` is True and *rank* is non-zero.

    Args:
        message (str): text to print.
        rank (int, optional): rank of the calling process. Defaults to 0.
        log_only_rank_0 (bool, optional): suppress non-zero ranks. Defaults to False.
    """
    if log_only_rank_0 and rank != 0:
        return
    print(f"[Rank {rank}] {message}", flush=True)
def print_with_rank_and_timer(message: str, rank: int = 0, log_only_rank_0: bool = False):
    """Print *message* prefixed with a timestamp and ``[Rank N]``.

    Nothing is printed when ``log_only_rank_0`` is True and *rank* is non-zero.

    Args:
        message (str): text to print.
        rank (int, optional): rank of the calling process. Defaults to 0.
        log_only_rank_0 (bool, optional): suppress non-zero ranks. Defaults to False.
    """
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    if log_only_rank_0 and rank != 0:
        return
    print(f"[{stamp}] [Rank {rank}] {message}", flush=True)
def log_with_rank(message: str, rank, logger: logging.Logger, level=logging.INFO, log_only_rank_0: bool = False):
    """Log *message* prefixed with ``[Rank N]`` through *logger*.

    Nothing is logged when ``log_only_rank_0`` is True and *rank* is non-zero.

    Args:
        message (str): The message to log.
        rank (int): The rank of the process.
        logger (logging.Logger): The logger instance to use for logging.
        level (int, optional): The logging level. Defaults to logging.INFO.
        log_only_rank_0 (bool, optional): If True, only log for rank 0. Defaults to False.
    """
    if log_only_rank_0 and rank != 0:
        return
    logger.log(level, f"[Rank {rank}] {message}")
| verl__utils__logger__aggregate_logger.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import torch
def set_basic_config(level):
    """
    Configure the global logging format and level. Invoked when verl is imported.
    """
    fmt = "%(levelname)s:%(asctime)s:%(message)s"
    logging.basicConfig(format=fmt, level=level)
def log_to_file(string):
    """Print `string` and, when a local ``logs`` directory exists, append it to a
    per-rank log file ``logs/log_<rank>``.

    The rank falls back to 0 when torch.distributed is not initialized; the
    original unconditionally called ``get_rank()``, which raises in
    non-distributed runs.
    """
    print(string)
    if os.path.isdir("logs"):
        rank = torch.distributed.get_rank() if torch.distributed.is_initialized() else 0
        with open(f"logs/log_{rank}", "a+") as f:
            f.write(string + "\n")
| verl__utils__logging_utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import megatron.core
import torch
from megatron.core import dist_checkpointing, mpu
from megatron.core.dist_checkpointing.serialization import (
get_default_load_sharded_strategy,
get_default_save_sharded_strategy,
)
from megatron.core.dist_checkpointing.strategies.fully_parallel import (
FullyParallelLoadStrategyWrapper,
FullyParallelSaveStrategyWrapper,
)
from packaging import version
def save_dist_checkpointing(
    sharded_state_dict,
    ckpt_path,
    async_save=False,
    content_metadata=None,
):
    """Save a Megatron sharded state dict to `ckpt_path` using the torch_dist
    backend wrapped in a fully-parallel save strategy over the DP group
    (context-parallel included).

    Args:
        sharded_state_dict: The sharded state dict to persist.
        ckpt_path: Destination checkpoint directory.
        async_save: If True, request an asynchronous sharded save.
        content_metadata: Optional metadata; only forwarded on
            Megatron-Core >= 0.14.0, which introduced the kwarg.

    Returns:
        The value returned by ``dist_checkpointing.save`` (an async request
        handle when ``async_save`` is set).
    """
    strategy = FullyParallelSaveStrategyWrapper(
        get_default_save_sharded_strategy("torch_dist"),
        mpu.get_data_parallel_group(with_context_parallel=True),
    )
    save_kwargs = {
        "sharded_strategy": strategy,
        "async_sharded_save": async_save,
        "validate_access_integrity": True,
    }
    # content_metadata is only accepted by Megatron-Core >= 0.14.0:
    # https://github.com/NVIDIA/Megatron-LM/blob/core_v0.14.0/megatron/core/optimizer/distrib_optimizer.py#L1109-L1123
    supports_content_metadata = version.parse(megatron.core.__version__) >= version.parse("0.14.0")
    if content_metadata is not None and supports_content_metadata:
        save_kwargs["content_metadata"] = content_metadata
    return dist_checkpointing.save(sharded_state_dict, ckpt_path, **save_kwargs)
def load_dist_checkpointing(sharded_state_dict, ckpt_dir):
    """Load a Megatron sharded state dict from `ckpt_dir` using a fully-parallel
    load strategy over the DP group (context-parallel included).

    Args:
        sharded_state_dict: Template sharded state dict describing what to load.
        ckpt_dir: Checkpoint directory to read from.

    Returns:
        The loaded state dict.
    """
    strategy = FullyParallelLoadStrategyWrapper(
        get_default_load_sharded_strategy(ckpt_dir),
        mpu.get_data_parallel_group(with_context_parallel=True),
    )
    # Allowlist optimizer classes so torch.load(weights_only=True) can
    # deserialize them. Best-effort: transformer_engine may be absent.
    try:
        import transformer_engine as te

        torch.serialization.add_safe_globals([torch.optim.AdamW])
        torch.serialization.add_safe_globals([te.pytorch.optimizers.fused_adam.FusedAdam])
    except Exception:
        pass
    return dist_checkpointing.load(sharded_state_dict, ckpt_dir, sharded_strategy=strategy)
| verl__utils__megatron__dist_checkpointing.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from verl.utils.device import get_device_id
class MemoryBuffer:
    """A pre-allocated flat device buffer from which tensor views are carved.

    Attributes:
        numel: number of usable elements.
        numel_padded: allocated size (>= numel), allowing alignment padding.
        dtype: element dtype of the backing storage.
        data: the flat backing tensor on the current device.
    """

    def __init__(self, numel, numel_padded, dtype):
        self.numel = numel
        self.numel_padded = numel_padded
        self.dtype = dtype
        self.data = torch.zeros(self.numel_padded, dtype=self.dtype, device=get_device_id(), requires_grad=False)

    def zero(self):
        """Reset the buffer to zero."""
        self.data.zero_()

    def get(self, shape, start_index):
        """Return a view of `shape` into the flat buffer starting at `start_index`.

        The view aliases the underlying storage: mutating `data` mutates the
        returned tensor and vice versa.
        """
        end_index = start_index + shape.numel()
        assert end_index <= self.numel, "requested tensor is out of the buffer range."
        flat_slice = self.data[start_index:end_index]
        return flat_slice.view(shape)
| verl__utils__megatron__memory.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from megatron.core.optimizer import OptimizerConfig
from megatron.core.optimizer import get_megatron_optimizer as get_megatron_optimizer_native
from megatron.core.optimizer_param_scheduler import OptimizerParamScheduler
from verl.utils.logger import print_rank_0
def init_megatron_optim_config(
    optim_config: dict, use_distributed_optimizer: bool = True, fp16: bool = False
) -> OptimizerConfig:
    """Build a Megatron ``OptimizerConfig`` from verl's optimizer config.

    Args:
        optim_config: verl optimizer config exposing optimizer/lr/min_lr/
            clip_grad/weight_decay and optionally override_optimizer_config.
        use_distributed_optimizer: Whether to enable Megatron's distributed optimizer.
        fp16: If True configure fp16 (with loss scaling); otherwise bf16.

    Returns:
        OptimizerConfig: The populated Megatron optimizer configuration.
    """
    optim_args = {
        "optimizer": optim_config.optimizer,
        "lr": optim_config.lr,
        "min_lr": optim_config.min_lr,
        "clip_grad": optim_config.clip_grad,
        "weight_decay": optim_config.weight_decay,
        "use_distributed_optimizer": use_distributed_optimizer,
    }
    if fp16:
        precision_args = {
            "bf16": False,
            "fp16": True,
            "params_dtype": torch.float16,
            "initial_loss_scale": 32768,
            "min_loss_scale": 1,
            "use_precision_aware_optimizer": True,
            "store_param_remainders": False,
        }
    else:  # bf16 mode
        precision_args = {
            "bf16": True,
            "params_dtype": torch.bfloat16,
        }
    optim_args.update(precision_args)
    # User-supplied overrides win over everything above.
    override_config = optim_config.get("override_optimizer_config", {})
    if override_config:
        for key, value in override_config.items():
            optim_args[key] = value
        print_rank_0(f"optimizer config after override: {optim_args}")
    return OptimizerConfig(**optim_args)
def get_megatron_optimizer(
    model,
    config: OptimizerConfig,
):
    """Construct the Megatron base optimizer for the given model chunks.

    Thin wrapper over Megatron-Core's ``get_megatron_optimizer``.
    """
    return get_megatron_optimizer_native(config=config, model_chunks=model)
def get_megatron_optimizer_param_scheduler(
    optimizer,
    config,
):
    """
    Get the optimizer parameter scheduler for Megatron.

    Decay defaults to the total number of training steps when not set
    explicitly; warmup steps may be derived from `lr_warmup_steps_ratio`
    when no positive `lr_warmup_steps` is given. Weight decay is held
    constant (start == end).
    """
    lr_decay_steps = config.lr_decay_steps
    if config.get("lr_decay_steps", None) is None:
        # Fall back to the full training horizon.
        lr_decay_steps = config.total_training_steps
    wsd_decay_steps = config.get("lr_wsd_decay_steps", None)
    lr_warmup_steps = config.lr_warmup_steps
    warmup_unset = config.get("lr_warmup_steps", None) is None or config.lr_warmup_steps <= 0
    if config.get("lr_warmup_steps_ratio", None) is not None and warmup_unset:
        lr_warmup_steps = int(config.lr_warmup_steps_ratio * lr_decay_steps)
    use_ckpt_sched = config.use_checkpoint_opt_param_scheduler
    return OptimizerParamScheduler(
        optimizer,
        init_lr=config.lr_warmup_init,
        max_lr=config.lr,
        min_lr=config.min_lr,
        lr_warmup_steps=lr_warmup_steps,
        lr_decay_steps=lr_decay_steps,
        lr_decay_style=config.lr_decay_style,
        start_wd=config.weight_decay,
        end_wd=config.weight_decay,
        wd_incr_steps=config.total_training_steps,
        wd_incr_style=config.weight_decay_incr_style,
        use_checkpoint_opt_param_scheduler=use_ckpt_sched,
        override_opt_param_scheduler=(not use_ckpt_sched),
        wsd_decay_steps=wsd_decay_steps,
        lr_wsd_decay_style=config.lr_wsd_decay_style,
    )
def get_megatron_last_lr(optimizer):
    """
    Return the current learning rate of the optimizer's first param group.

    Note: this reads directly from the optimizer's ``param_groups``, not from
    the parameter scheduler (the previous docstring was inaccurate).
    """
    return optimizer.param_groups[0]["lr"]
| verl__utils__megatron__optimizer.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from megatron.core import parallel_state as mpu
from .sequence_parallel import pad_to_sequence_parallel
def compute_transformers_input_shapes(batches, meta_info):
    """Pre-compute the hidden-state shape of each micro-batch after padding removal.

    For each micro-batch, padding tokens are stripped via flash-attn's
    `unpad_input`; with sequence parallelism the token count is padded to a
    multiple of the TP world size and divided across TP ranks.

    Args:
        batches: Iterable of dicts with "input_ids" and "attention_mask".
        meta_info: Dict with "sequence_parallel" (bool) and "hidden_size" (int).

    Returns:
        list[torch.Size]: One (tokens, 1, hidden_size) shape per micro-batch.
    """
    from flash_attn.bert_padding import unpad_input  # flash 2 is a must for Megatron

    hidden_size = meta_info["hidden_size"]
    sequence_parallel = meta_info["sequence_parallel"]
    input_shapes = []
    for model_inputs in batches:
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs["attention_mask"]
        # (total_nnz, 1): only the non-padding tokens remain
        input_ids_rmpad = unpad_input(input_ids.unsqueeze(dim=-1), attention_mask)[0]
        if sequence_parallel:
            input_ids_rmpad = pad_to_sequence_parallel(input_ids_rmpad)
            tokens_per_tp_rank = input_ids_rmpad.shape[0] // mpu.get_tensor_model_parallel_world_size()
            input_shapes.append(torch.Size([tokens_per_tp_rank, 1, hidden_size]))
        else:
            input_shapes.append(torch.Size([input_ids_rmpad.shape[0], 1, hidden_size]))
    return input_shapes
def make_batch_generator(batches, vpp_size):
    """
    Create iterator(s) over micro-batches for Megatron pipeline schedules.

    With virtual pipeline parallelism (vpp_size > 1) every virtual stage needs
    its own pass over the same micro-batches, so a list of independent
    iterators is returned. Otherwise a single iterator suffices.

    Args:
        batches: An iterable (e.g., list) of micro-batches.
        vpp_size (int): The virtual pipeline model parallel size.

    Returns:
        A single iterator (no VPP) or a list of vpp_size iterators (VPP).
    """
    if vpp_size <= 1:
        # no vpp
        return iter(batches)
    # One independent iterator per virtual pipeline chunk.
    return [iter(batches) for _ in range(vpp_size)]
| verl__utils__megatron__pipeline_parallel.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from enum import Enum
import torch
try:
from megatron.core.transformer.moe.moe_utils import (
apply_router_token_dropping,
compute_routing_scores_for_aux_loss,
group_limited_topk,
)
from megatron.core.transformer.moe.token_dispatcher import MoEAlltoAllTokenDispatcher
except ImportError:
warnings.warn("NPU not support router replay for now.", stacklevel=2)
MoEAlltoAllTokenDispatcher = None
from megatron.core.transformer.moe.router import TopKRouter
from megatron.core.transformer.transformer_config import TransformerConfig
# https://github.com/THUDM/slime/blob/main/slime/utils/routing_replay.py
class RouterReplayAction(Enum):
    """Modes consumed by the patched routing path to control replay behavior."""

    RECORD = "record"  # compute routing normally and record the chosen top-k indices
    REPLAY_FORWARD = "replay_forward"  # override routing with the layer's target_topk_idx
    REPLAY_BACKWARD = "replay_backward"  # consume queued indices (popped FIFO) during recompute
class RouterReplay:
    """
    Manages recording and replaying of MoE routing decisions.

    Each constructed instance registers itself in the class-level
    ``router_instances`` list (one entry per MoE layer, in instantiation
    order). The static methods fan out over all registered routers so
    callers can control recording/replay globally.
    """

    # Static variable holding all router instances, one per MoE layer.
    router_instances = []

    def __init__(self):
        """Initializes a RouterReplay instance for a specific layer."""
        self.target_topk_idx = None  # indices to force during forward replay
        self.recorded_topk_idx = None  # indices captured during recording
        self.router_replay_action = None  # active RouterReplayAction for this layer
        self.replay_backward_list = []  # FIFO queue of indices for backward-pass replay
        self.layer_number = None  # global layer index when known
        RouterReplay.router_instances.append(self)

    @staticmethod
    def set_replay_data(all_layers_topk_indices: list):
        """
        Distributes the topk indices for all layers to their respective RouterReplay instances.

        :param all_layers_topk_indices: One tensor of topk indices per layer, ordered
            the same way the routers were instantiated.
        """
        provided = len(all_layers_topk_indices)
        expected = len(RouterReplay.router_instances)
        if provided != expected:
            raise ValueError(
                f"The number of replay tensors ({provided}) "
                f"does not match the number of router instances ({expected})."
            )
        for router, layer_indices in zip(RouterReplay.router_instances, all_layers_topk_indices):
            router.set_target_indices(layer_indices)

    @staticmethod
    def get_recorded_data() -> list:
        """
        Collects the recorded topk indices from all RouterReplay instances.

        :return: A list of tensors, one per layer, in registration order.
        """
        recorded = []
        for router in RouterReplay.router_instances:
            recorded.append(router.get_recorded_indices())
        return recorded

    @staticmethod
    def clear_global_indices():
        """Clears the recorded and target topk indices in all instances."""
        for router in RouterReplay.router_instances:
            router.clear_indices()

    @staticmethod
    def set_global_router_replay_action(router_replay_action: RouterReplayAction):
        """Sets the router replay action for all router instances."""
        for router in RouterReplay.router_instances:
            router.set_router_replay_action(router_replay_action)

    @staticmethod
    def clear_global_router_replay_action():
        """Clears the router replay action for all router instances."""
        for router in RouterReplay.router_instances:
            router.clear_router_replay_action()

    def set_target_indices(self, topk_indices: torch.Tensor):
        """Sets the target topk indices for replay and queues them for backward replay."""
        self.target_topk_idx = topk_indices
        self.replay_backward_list.append(topk_indices)

    def get_recorded_indices(self):
        """Returns the recorded topk indices."""
        return self.recorded_topk_idx

    def record_indices(self, topk_indices: torch.Tensor):
        """Records the topk indices."""
        self.recorded_topk_idx = topk_indices

    def clear_indices(self):
        """Clears recorded indices, target indices, and the backward queue."""
        self.recorded_topk_idx = None
        self.target_topk_idx = None
        self.replay_backward_list = []

    def set_router_replay_action(self, router_replay_action: RouterReplayAction):
        """Sets the router replay action for this layer."""
        self.router_replay_action = router_replay_action

    def clear_router_replay_action(self):
        """Clears the router replay action for this layer."""
        self.router_replay_action = None
def _patched_topk_routing_with_score_function(
    logits: torch.Tensor,
    topk: int,
    use_pre_softmax: bool,
    num_groups: int,
    group_topk: int,
    score_function: str,
    expert_bias: torch.Tensor,
    fused: bool,
    router_replay: RouterReplay,
    scaling_factor: float,
):
    """
    Patched version of topk_routing_with_score_function that supports router replay.

    Depending on ``router_replay.router_replay_action``, the top-k expert
    selection is computed normally (and optionally recorded), or overridden
    with previously stored indices (forward/backward replay).

    Args:
        logits: Router logits of shape [num_tokens, num_experts].
        topk: Number of experts selected per token.
        use_pre_softmax: For softmax scoring, apply softmax before top-k.
        num_groups: Number of expert groups for group-limited top-k.
        group_topk: Number of groups to keep; falsy disables group-limited routing.
        score_function: "softmax" or "sigmoid".
        expert_bias: Optional bias added to sigmoid scores for expert selection only.
        fused: Accepted for signature compatibility; not used in this patched path.
        router_replay: Per-layer RouterReplay instance, or None for default routing.
        scaling_factor: Optional multiplier applied to the routing probabilities.

    Returns:
        (routing_probs, routing_map): dense [num_tokens, num_experts] probabilities
        and a boolean mask of the selected experts.
    """
    num_tokens, num_experts = logits.shape

    def _compute_topk(scores, topk, num_groups=None, group_topk=None):
        # Plain top-k, optionally restricted to a limited set of expert groups.
        if group_topk:
            return group_limited_topk(
                scores=scores,
                topk=topk,
                num_tokens=num_tokens,
                num_experts=num_experts,
                num_groups=num_groups,
                group_topk=group_topk,
            )
        else:
            return torch.topk(scores, k=topk, dim=1)

    def compute_topk(scores, topk, num_groups=None, group_topk=None):
        # Default behavior if no replay is active
        routing_action = router_replay.router_replay_action if router_replay is not None else None
        if routing_action is None:
            return _compute_topk(scores, topk, num_groups=num_groups, group_topk=group_topk)
        if routing_action == RouterReplayAction.RECORD:
            # Compute normally, then stash the chosen indices on the replay object.
            probs, top_indices = _compute_topk(scores, topk, num_groups=num_groups, group_topk=group_topk)
            if router_replay is not None:
                router_replay.record_indices(top_indices)
            return probs, top_indices
        elif routing_action == RouterReplayAction.REPLAY_FORWARD:
            if router_replay is None or router_replay.target_topk_idx is None:
                # Fallback if replay data is not available
                return _compute_topk(scores, topk, num_groups=num_groups, group_topk=group_topk)
            # Use the provided indices for replay
            top_indices = router_replay.target_topk_idx
            # Ensure indices are on the correct device
            top_indices = top_indices.to(scores.device)
            # Gather the scores for the replayed indices to get the probabilities
            probs = scores.gather(1, top_indices)
            return probs, top_indices
        elif routing_action == RouterReplayAction.REPLAY_BACKWARD:
            if router_replay is None or not router_replay.replay_backward_list:
                # Fallback if replay data is not available
                return _compute_topk(scores, topk, num_groups=num_groups, group_topk=group_topk)
            # Consume the queued indices FIFO (one entry per recompute/backward pass)
            top_indices = router_replay.replay_backward_list.pop(0)
            # Ensure indices are on the correct device
            top_indices = top_indices.to(scores.device)
            # Gather the scores for the replayed indices to get the probabilities
            probs = scores.gather(1, top_indices)
            return probs, top_indices
        else:  # Unknown action, fallback
            return _compute_topk(scores, topk, num_groups=num_groups, group_topk=group_topk)

    if score_function == "softmax":
        if use_pre_softmax:
            scores = torch.softmax(logits, dim=-1, dtype=torch.float32).type_as(logits)
            probs, top_indices = compute_topk(scores, topk, num_groups, group_topk)
        else:
            scores, top_indices = compute_topk(logits, topk, num_groups, group_topk)
            probs = torch.softmax(scores, dim=-1, dtype=torch.float32).type_as(logits)
    elif score_function == "sigmoid":
        scores = torch.sigmoid(logits.float()).type_as(logits)
        if expert_bias is not None:
            # The bias only influences which experts are chosen; the returned
            # probabilities are gathered from the unbiased scores.
            scores_for_routing = scores + expert_bias
            _, top_indices = compute_topk(scores_for_routing, topk, num_groups, group_topk)
            scores = torch.gather(scores, dim=1, index=top_indices).type_as(logits)
        else:
            scores, top_indices = compute_topk(scores, topk, num_groups, group_topk)
        # Normalize over the selected experts (epsilon guards against a zero sum).
        probs = scores / (scores.sum(dim=-1, keepdim=True) + 1e-20) if topk > 1 else scores
    else:
        raise ValueError(f"Invalid score_function: {score_function}")
    if scaling_factor:
        probs = probs * scaling_factor
    if torch.are_deterministic_algorithms_enabled():
        # Deterministic path: use index_put_ instead of scatter
        # build [num_tokens, num_experts] from [num_tokens, topk]
        routing_probs = torch.zeros_like(logits)
        rows = torch.arange(num_tokens, device=logits.device).unsqueeze(1)
        routing_probs.index_put_((rows, top_indices), probs, accumulate=False)
        routing_map = torch.zeros_like(logits, dtype=logits.dtype)
        routing_map.index_put_((rows, top_indices), torch.ones_like(probs, dtype=routing_map.dtype), accumulate=False)
        routing_map = routing_map.bool()
    else:
        # TODO Try using element-wise operations instead of scatter?
        routing_probs = torch.zeros_like(logits).scatter(1, top_indices, probs)
        routing_map = torch.zeros_like(logits).int().scatter(1, top_indices, 1).bool()
    return routing_probs, routing_map
def patched_routing(self, logits: torch.Tensor, *args, **kwargs):
    """Top-k routing function
    Args:
        logits (torch.Tensor): Logits tensor after gating; the first two
            dimensions are (seq_length, bsz) and it is flattened internally
            to [num_tokens, num_experts].
    Returns:
        probs (torch.Tensor): The probabilities of token to experts assignment.
        routing_map (torch.Tensor): The mapping of token to experts assignment,
            with shape [num_tokens, num_experts].
    """
    seq_length, bsz = logits.shape[:2]
    logits = logits.view(-1, self.config.num_moe_experts)
    # Apply Z-Loss
    logits = self.apply_z_loss(logits)
    # Calculate probs and routing_map for token dispatching
    if self.routing_type == "sinkhorn":
        probs, routing_map = self.sinkhorn_load_balancing(logits)
    else:
        # Replay-aware top-k routing: records or replays expert choices
        # depending on the action set on self.router_replay.
        probs, routing_map = _patched_topk_routing_with_score_function(
            logits=logits,
            topk=self.topk,
            use_pre_softmax=self.config.moe_router_pre_softmax,
            num_groups=self.config.moe_router_num_groups,
            group_topk=self.config.moe_router_group_topk,
            scaling_factor=self.config.moe_router_topk_scaling_factor,
            score_function=self.score_function,
            expert_bias=self.expert_bias,
            fused=self.config.moe_router_fusion,
            router_replay=self.router_replay,
        )
    # Apply token dropping to probs and routing_map.
    if self.config.moe_expert_capacity_factor is not None:
        probs, routing_map = apply_router_token_dropping(
            probs,
            routing_map,
            router_topk=self.topk,
            capacity_factor=self.config.moe_expert_capacity_factor,
            drop_policy=self.config.moe_token_drop_policy,
            pad_to_capacity=self.config.moe_pad_expert_input_to_capacity,
        )
    # Apply each aux loss type and attach aux loss autograd function to probs
    if self.training and torch.is_grad_enabled() and self.is_aux_loss_enabled():
        # Calculate scores and routing_map for aux loss
        routing_map_for_aux_loss, scores_for_aux_loss = compute_routing_scores_for_aux_loss(
            logits, self.topk, self.score_function, fused=self.config.moe_router_fusion
        )
        probs = self._apply_aux_loss(probs, scores_for_aux_loss, routing_map_for_aux_loss)
        probs = self._apply_seq_aux_loss(probs, scores_for_aux_loss, routing_map_for_aux_loss, seq_length, bsz)
        probs = self._apply_global_aux_loss(probs, scores_for_aux_loss, routing_map_for_aux_loss)
    # Update expert bias and tokens_per_expert
    # Prevent extra local tokens accumulation on evaluation or activation recomputation
    if self.enable_expert_bias and torch.is_grad_enabled():
        with torch.no_grad():
            self.local_tokens_per_expert += routing_map.sum(dim=0)
    return probs, routing_map
def apply_router_replay_patch():
    """
    Applies the monkey patch for MoE Router Replay functionality.
    This patch dynamically adds the 'enable_routing_replay' attribute to TransformerConfig
    and modifies the TopKRouter to support recording and replaying of routing decisions.
    """
    print("Applying Router Replay Patch...")
    # Clear router instances to avoid state leakage between model initializations.
    RouterReplay.router_instances.clear()
    # Step 1: Patch TransformerConfig to include the feature flag
    if not hasattr(TransformerConfig, "enable_routing_replay"):
        # Add class attribute with default value
        TransformerConfig.enable_routing_replay = False
        # Store original __init__ method
        original_tf_config_init = TransformerConfig.__init__

        # Define new __init__ method that safely handles enable_routing_replay parameter
        def patched_tf_config_init(self, *args, **kwargs):
            # Simple solution: remove the unknown parameter before calling original constructor
            enable_routing_replay = kwargs.pop("enable_routing_replay", TransformerConfig.enable_routing_replay)
            # Call original constructor with remaining kwargs
            original_tf_config_init(self, *args, **kwargs)
            # Set the instance attribute
            self.enable_routing_replay = enable_routing_replay

        # Apply the patch
        TransformerConfig.__init__ = patched_tf_config_init
    # Step 2: Patch TopKRouter only once to ensure idempotency.
    if hasattr(TopKRouter, "_router_replay_patched"):
        return
    original_init = TopKRouter.__init__
    original_set_layer_number = TopKRouter.set_layer_number

    def patched_set_layer_number(self, layer_number: int):
        # Propagate the layer number to the router's replay object so that
        # recorded indices can be associated with a global layer index.
        original_set_layer_number(self, layer_number)
        if self.router_replay is not None:
            self.router_replay.layer_number = layer_number

    # Step 3: Define the new __init__ method
    def patched_init(self, *args, **kwargs):
        original_init(self, *args, **kwargs)
        self.router_replay = None
        if self.config.enable_routing_replay:
            self.router_replay = RouterReplay()

    # Step 4: Patch MoEAlltoAllTokenDispatcher.preprocess to handle router replay
    # When router replay is enabled, duplicate indices in top_indices can cause
    # routing_map.sum() < num_tokens * topk, leading to split size mismatch in alltoall.
    if MoEAlltoAllTokenDispatcher is not None and not hasattr(MoEAlltoAllTokenDispatcher, "_preprocess_patched"):
        original_preprocess = MoEAlltoAllTokenDispatcher.preprocess

        def patched_preprocess(self, routing_map):
            """Patched preprocess that handles router replay correctly for alltoall dispatcher."""
            # Call original preprocess
            result = original_preprocess(self, routing_map)
            # Fix num_out_tokens when router replay is enabled
            if (
                getattr(self.config, "enable_routing_replay", False)
                and not self.drop_and_pad
                and self.config.moe_expert_capacity_factor is None
                and not (
                    getattr(self.config, "moe_router_padding_for_quantization", None)
                    or getattr(self.config, "moe_router_padding_for_fp8", None)
                )
            ):
                # With router replay, duplicate indices can reduce the actual routed
                # token count, so derive it from the routing map instead.
                self.num_out_tokens = int(routing_map.sum().item())
            return result

        MoEAlltoAllTokenDispatcher.preprocess = patched_preprocess
        MoEAlltoAllTokenDispatcher._preprocess_patched = True
    # Step 5: Apply the patches
    TopKRouter.__init__ = patched_init
    TopKRouter.routing = patched_routing
    TopKRouter.set_layer_number = patched_set_layer_number
    TopKRouter._router_replay_patched = True
| verl__utils__megatron__router_replay_patch.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Router Replay Utilities
Utilities for handling router replay functionality in Megatron models.
"""
import warnings
from typing import Optional
import torch
try:
from megatron.core.pipeline_parallel.utils import is_vp_first_stage, is_vp_last_stage
except ImportError:
warnings.warn("NPU not support router replay for now.", stacklevel=2)
pass
from megatron.core import parallel_state as mpu
from megatron.core.pipeline_parallel.schedules import get_schedule_table
from megatron.core.tensor_parallel import gather_from_sequence_parallel_region, scatter_to_sequence_parallel_region
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.transformer.transformer_layer import get_transformer_layer_offset
from verl.models.mcore.util import (
postprocess_packed_seqs,
preprocess_packed_seqs,
preprocess_thd_no_padding,
)
from verl.utils.device import get_device_name
from verl.utils.megatron.router_replay_patch import RouterReplay, RouterReplayAction
device_name = get_device_name()
# from megatron.core.transformer.transformer_block import get_num_layers_to_build
def get_num_layers_to_build(
    config: TransformerConfig, vp_stage: Optional[int] = None, pp_rank: Optional[int] = None
) -> int:
    """
    Determine the number of transformer layers to build for the current pipeline stage.

    Vendored from megatron.core.transformer.transformer_block.get_num_layers_to_build
    (see the commented import above) so it can be used without that import.

    Args:
        config (TransformerConfig): Configuration object containing transformer model parameters.
        vp_stage (Optional[int]): Virtual pipeline stage number.
        pp_rank (Optional[int]): Pipeline parallel rank. Defaults to the current
            rank from Megatron parallel state when None.
    Returns:
        int: The number of layers to be built for the current pipeline stage.
    """
    # If we have a custom PP layout, straightforwardly
    # return the number of decoders in the layout array.
    if hasattr(config, "pipeline_model_parallel_layout") and config.pipeline_model_parallel_layout is not None:
        from megatron.core.transformer.enums import LayerType

        return config.pipeline_model_parallel_layout.get_num_layers_to_build(
            layer_type=LayerType.decoder, vp_stage=vp_stage
        )
    # Fallback for legacy tests.
    if pp_rank is None:
        pp_rank = mpu.get_pipeline_model_parallel_rank()
    is_first_pp_stage = pp_rank == 0
    is_last_pp_stage = pp_rank == config.pipeline_model_parallel_size - 1
    if config.num_layers_in_first_pipeline_stage is not None or config.num_layers_in_last_pipeline_stage is not None:
        # Uneven first/last stage sizes are incompatible with standalone embedding/loss stages.
        assert not (config.account_for_embedding_in_pipeline_split or config.account_for_loss_in_pipeline_split), (
            " \
        Does not support standalone embedding stage and standalone loss stage with uneven pp"
        )
        # Number of layers to distribute over rest of pipeline stages
        layers_to_distribute = config.num_layers
        # Number of pipeline stages left for distributing transformer layers
        pipeline_stages_left = config.pipeline_model_parallel_size
        # If the uneven first (last) pipeline stage is enabled, remove the specified number
        # of layers to calculate the number of layers on each middle pipeline stage.
        if config.num_layers_in_first_pipeline_stage is not None:
            layers_to_distribute -= config.num_layers_in_first_pipeline_stage
            pipeline_stages_left -= 1
        if config.num_layers_in_last_pipeline_stage is not None:
            layers_to_distribute -= config.num_layers_in_last_pipeline_stage
            pipeline_stages_left -= 1
        # If pp_size <= 2, we do not have any intermediate pipeline stages, and we do not
        # need to check if the left over layers are divisible by the left over stages.
        if pipeline_stages_left > 0:
            assert layers_to_distribute % pipeline_stages_left == 0, (
                "With uneven pipelineing the left over layers must be divisible by left over stages"
            )
            num_layers_per_pipeline_rank = layers_to_distribute // pipeline_stages_left
        else:
            num_layers_per_pipeline_rank = 0
        # If the uneven first (last) pipeline stage is enabled, return the specified number
        # of layers for all virtual pipeline parallel stages within the first (last) pipeline
        # parallel stage.
        if is_first_pp_stage and config.num_layers_in_first_pipeline_stage is not None:
            num_layers_per_pipeline_rank = config.num_layers_in_first_pipeline_stage
        if is_last_pp_stage and config.num_layers_in_last_pipeline_stage is not None:
            num_layers_per_pipeline_rank = config.num_layers_in_last_pipeline_stage
    else:
        # Include the embedding layer and loss layer into pipeline parallelism partition
        num_layers = config.num_layers
        if config.account_for_embedding_in_pipeline_split:
            num_layers += 1
        if config.account_for_loss_in_pipeline_split:
            num_layers += 1
        assert num_layers % config.pipeline_model_parallel_size == 0, (
            "num_layers should be divisible by pipeline_model_parallel_size"
        )
        num_layers_per_pipeline_rank = num_layers // config.pipeline_model_parallel_size
    vp_size = config.virtual_pipeline_model_parallel_size
    if vp_size is not None and config.pipeline_model_parallel_size > 1:
        # Interleaved pipeline parallelism:
        # Number of layers in each model chunk is the number of layers in the stage,
        # divided by the number of model chunks in a stage.
        # With 8 layers, 2 stages, and 4 model chunks, we want an assignment of
        # layers to stages like (each list is a model chunk):
        # Stage 0: [0] [2] [4] [6]
        # Stage 1: [1] [3] [5] [7]
        # With 8 layers, 2 stages, and 2 virtual stages, we want an assignment of
        # layers to stages like (each list is a model chunk):
        # Stage 0: [0, 1] [4, 5]
        # Stage 1: [2, 3] [6, 7]
        assert num_layers_per_pipeline_rank % vp_size == 0, (
            f"num_layers_per_pipeline_rank {num_layers_per_pipeline_rank} \
        should be divisible by vp_size {vp_size}"
        )
        num_layers_per_virtual_stage = num_layers_per_pipeline_rank // vp_size
        num_layers_to_build = num_layers_per_virtual_stage
    else:
        # Non-interleaved pipeline parallelism:
        # Each stage gets a contiguous set of layers.
        num_layers_to_build = num_layers_per_pipeline_rank
    # The embedding (or loss) layer cannot function as a standalone transformer layer
    # Reduce the number of layers to construct by 1 on the first (or last) stage if the
    # embedding (or loss) layer is included in the pipeline parallelism partition and placement.
    if config.account_for_embedding_in_pipeline_split:
        if is_vp_first_stage(vp_stage, vp_size) and is_first_pp_stage:
            num_layers_to_build -= 1
            assert num_layers_to_build >= 0, "Not enough layers in the first virtual pipeline stage"
    if config.account_for_loss_in_pipeline_split:
        if is_vp_last_stage(vp_stage, vp_size) and is_last_pp_stage:
            num_layers_to_build -= 1
            assert num_layers_to_build >= 0, "Not enough layers in the last virtual pipeline stage"
    return num_layers_to_build
def merge_router_topk_indices(attention_mask, input_ids, mini_layer_topk_idx_list, tf_config, vp_rank=None):
    """
    Collect the router top-k indices recorded by every local router instance, gather them across
    sequence-parallel ranks, realign them with the original (batch, seq_len) layout, and append the
    result to the caller-supplied list.

    Args:
        attention_mask (torch.Tensor): Attention mask of shape [batch_size, seq_len]; determines the
            valid token positions during pack/unpack.
        input_ids (torch.Tensor): Input token IDs of shape [batch_size, seq_len]; used together with
            attention_mask for sequence packing/unpacking.
        mini_layer_topk_idx_list (list): Python list to which the merged top-k indices tensor is appended.
        tf_config: Megatron/Transformer engine configuration object used to locate the router instances
            of the current micro-batch.
        vp_rank (Optional[int]): Virtual pipeline stage rank override; None selects the default stage.

    Returns:
        None: Side effects only — appends a tensor of shape [1, dynamic_bs_all, layer_num, topk]
        to mini_layer_topk_idx_list.
    """
    with torch.no_grad():
        routers = RouterReplayHelper.get_micro_batch_router_list(tf_config, vp_rank)
        # Each router recorded indices of shape (dynamic_bs, topk); stack to
        # (layer_num, dynamic_bs, topk) and move layers behind the token dim.
        per_layer = [router.recorded_topk_idx.to(torch.uint8) for router in routers]
        stacked = torch.stack(per_layer).permute(1, 0, 2).to(device_name)
        # Gather the sequence-parallel shards -> (dynamic_bs_all, layer_num, topk),
        # then prepend a singleton batch dim.
        gathered = gather_from_sequence_parallel_region(stacked, tensor_parallel_output_grad=False)
        gathered = gathered.unsqueeze(0).contiguous()
        bsz, seqlen = attention_mask.shape[:2]
        # Recompute the packing metadata so the gathered indices can be unpacked
        # back to the padded [batch, seq] layout.
        _, packed_seq_params = preprocess_packed_seqs(input_ids, attention_mask, pre_process=True)
        unpacked = postprocess_packed_seqs(
            gathered, packed_seq_params, attention_mask, bsz, seqlen, post_process=True
        )
        mini_layer_topk_idx_list.append(unpacked.cpu())
def set_router_replay_data(layers_topk_idx, attention_mask, tf_config, vp_rank=None):
    """
    Scatter the packed router top-k indices back to sequence-parallel ranks and update each local
    RouterReplay instance with target indices for replay mode.

    This function prepares the per-layer, per-sample top-k routing decisions (recorded during an earlier
    forward) so that subsequent replay passes can follow exactly the same routing.

    Args:
        layers_topk_idx (torch.Tensor): Router top-k indices with shape [bs, max_seq_len, layer_num, topk].
            This should be the merged output produced by merge_router_topk_indices.
        attention_mask (torch.Tensor): Attention mask [batch_size, seq_len] used for pack/unpack alignment.
        tf_config: Megatron/Transformer engine configuration object.
        vp_rank (Optional[int]): Virtual pipeline stage rank override. NOTE(review): downstream helpers
            appear to default to VP stage 0 when this is None — confirm intended semantics.

    Returns:
        None: The function updates internal RouterReplay instances in-place.
    """
    with torch.no_grad():
        # Re-pack the padded indices into the packed (token-major) layout used by the model.
        if layers_topk_idx.is_nested:
            layers_topk_idx_rmpad, _ = preprocess_thd_no_padding(layers_topk_idx, pre_process=True)
        else:
            layers_topk_idx_rmpad, _ = preprocess_packed_seqs(layers_topk_idx, attention_mask, pre_process=True)
        layers_topk_idx_rmpad = layers_topk_idx_rmpad.contiguous()  # 1, dynamic_bs_all, layer_num, topk
        # Inverse of the gather done in merge_router_topk_indices: split packed tokens
        # across sequence-parallel ranks -> 1, dynamic_bs_split, layer_num, topk
        layers_topk_idx_rmpad_split = scatter_to_sequence_parallel_region(
            layers_topk_idx_rmpad.to(device_name).squeeze(dim=0)
        ).unsqueeze(dim=0)
        # 1, dynamic_bs_split, layer_num, topk -> layer_num, dynamic_bs_split, topk
        layers_topk_idx_reshape = layers_topk_idx_rmpad_split.permute(0, 2, 1, 3).squeeze(
            dim=0
        )  # layer_num, dynamic_bs_split, topk
        num_layers_in_data = layers_topk_idx_reshape.shape[0]
        # If the data covers every layer of the model, address it with each router's
        # global layer_number; otherwise fall back to the local offset-based range.
        use_global_layer_index = getattr(tf_config, "num_layers", None) == num_layers_in_data
        local_rank_info = get_current_rank_layer_info(tf_config, vp_rank)
        offset, _ = local_rank_info["start"], local_rank_info["end"]
        router_instances_list = RouterReplayHelper.get_micro_batch_router_list(tf_config, vp_rank)
        for i, router in enumerate(router_instances_list):
            layer_idx = None
            if use_global_layer_index:
                # layer_number is treated as 1-based here — TODO confirm against RouterReplay.
                layer_number = getattr(router, "layer_number", None)
                if layer_number is not None:
                    layer_idx = layer_number - 1
            if layer_idx is None:
                layer_idx = i + offset
            if layer_idx < 0 or layer_idx >= num_layers_in_data:
                raise ValueError(
                    f"router replay layer index {layer_idx} out of range for data with {num_layers_in_data} layers"
                )
            router.set_target_indices(layers_topk_idx_reshape[layer_idx].to(torch.int64))
def reorder_and_merge_vpp_layers(
    micro_batch_tensor_list,
    num_microbatches: int,
    vpp_size: int,
    microbatch_group_size_per_vp_stage: int,
) -> torch.Tensor:
    """
    Reorder and merge per-VPP layer blocks into a contiguous layer dimension.

    Entries of micro_batch_tensor_list are grouped by model chunk (VPP stage) according to the
    virtual-microbatch schedule, concatenated along the batch dimension within each chunk, and the
    per-chunk layer slices are then merged along the layer dimension.

    Args:
        micro_batch_tensor_list: List of input tensors, one per virtual microbatch.
        num_microbatches (int): Number of microbatches per pipeline stage (bs).
        vpp_size (int): Virtual pipeline parallel size (number of model chunks).
        microbatch_group_size_per_vp_stage (int): Number of consecutive microbatches processed per VPP stage.

    Returns:
        torch.Tensor: Output tensor of shape [bs, max_token_len, layer_num, topk].
    """
    # Schedule table maps virtual_microbatch_id -> (microbatch_id, model_chunk_id).
    schedule_table = get_schedule_table(num_microbatches, vpp_size, microbatch_group_size_per_vp_stage)
    # Bucket the per-virtual-microbatch tensors by model chunk id so that entries
    # of the same chunk become contiguous.
    buckets = [[] for _ in range(vpp_size)]
    for virtual_idx, (_microbatch_id, chunk_id) in enumerate(schedule_table):
        buckets[chunk_id].append(micro_batch_tensor_list[virtual_idx])
    # Concatenate microbatches within each chunk (dim 0), then merge the
    # per-chunk layer slices into a single layer dimension (dim 2).
    per_chunk = [torch.cat(bucket, dim=0) for bucket in buckets]
    return torch.cat(per_chunk, dim=2)
def get_current_rank_layer_info(tf_config, vp_rank=None):
    """Return the local transformer-layer range for the requested virtual pipeline stage.

    Args:
        tf_config: Configuration object forwarded to get_num_layers_to_build and
            get_transformer_layer_offset.
        vp_rank (Optional[int]): Virtual pipeline stage to query; stage 0 is
            assumed when None.

    Returns:
        dict: {"start": first global layer index, "end": one past the last local
        layer, "count": number of local layers}.
    """
    vp_stage = 0 if vp_rank is None else vp_rank
    count = get_num_layers_to_build(tf_config, vp_stage=vp_stage)
    start = get_transformer_layer_offset(tf_config, vp_stage=vp_stage)
    return {"start": start, "end": start + count, "count": count}
def pp_gather(local_layers_router_map, tf_config):
    # TODO: Consider non-uniform layer allocation cases.
    """
    Gather local router maps from all pipeline-parallel ranks into one global router map.

    Args:
        local_layers_router_map (torch.Tensor): Local router map of shape
            [bs, max_seq_len, local_num_layers, topk].
        tf_config: Configuration providing pipeline_model_parallel_size and
            virtual_pipeline_model_parallel_size.

    Returns:
        torch.Tensor: Global router map of shape [bs, max_seq_len, num_layers, topk] on CPU,
        or the input unchanged when pipeline parallelism is disabled.
    """
    pp_size = tf_config.pipeline_model_parallel_size
    if pp_size <= 1:
        return local_layers_router_map
    pp_group = mpu.get_pipeline_model_parallel_group()
    world_size = torch.distributed.get_world_size(pp_group)
    local_map = local_layers_router_map.to(device_name)
    gathered = [torch.empty_like(local_map) for _ in range(world_size)]
    torch.distributed.all_gather(
        tensor=local_map,
        tensor_list=gathered,
        group=pp_group,
        async_op=False,
    )
    vp_size = tf_config.virtual_pipeline_model_parallel_size
    if vp_size is None:
        # Non-interleaved schedule: each PP rank holds a contiguous slice of layers,
        # so concatenating in PP-rank order restores the global layer order.
        return torch.cat(gathered, dim=2).to("cpu")
    # Interleaved schedule: compute, per PP stage, the cumulative layer offsets of
    # its VP chunks, then reassemble the global layer order (vp-major, pp-minor).
    offsets = []
    for pp_stage in range(pp_size):
        stage_offsets = [0]
        for vp_stage in range(vp_size):
            n_layers = get_num_layers_to_build(tf_config, vp_stage, pp_stage)
            stage_offsets.append(stage_offsets[-1] + n_layers)
        offsets.append(stage_offsets)
    pieces = []
    for vp_stage in range(vp_size):
        for pp_stage in range(pp_size):
            lo = offsets[pp_stage][vp_stage]
            hi = offsets[pp_stage][vp_stage + 1]
            pieces.append(gathered[pp_stage][:, :, lo:hi, :])
    return torch.cat(pieces, dim=2).to("cpu")
class RouterReplayHelper:
    """Helper class to query router replay state and locate local RouterReplay instances."""

    @staticmethod
    def get_micro_batch_router_list(tf_config, vp_rank=None):
        """
        Return the list of RouterReplay instances corresponding to the current micro-batch and local
        (pp_rank, vp_stage) layer range.

        The returned slice is taken from the global RouterReplay.router_instances list, starting at
        the cumulative layer count of all earlier VP stages.

        Args:
            tf_config: Configuration object used to compute layer assignments.
            vp_rank (Optional[int]): Explicit virtual pipeline stage to query; stage 0 is assumed
                when None and virtual pipeline parallelism is enabled.

        Returns:
            list: A contiguous sublist of RouterReplay.router_instances for the local layer range.
        """
        vp_size = tf_config.virtual_pipeline_model_parallel_size
        offset = 0
        if vp_size is not None:
            vp_rank = 0 if vp_rank is None else vp_rank
            # Skip the layers owned by earlier virtual pipeline stages.
            for earlier_stage in range(vp_size):
                if earlier_stage == vp_rank:
                    break
                offset += get_num_layers_to_build(tf_config, earlier_stage)
        num_layers_to_build = get_num_layers_to_build(tf_config, vp_rank)
        return RouterReplay.router_instances[offset : offset + num_layers_to_build]

    @staticmethod
    def _local_action_is(tf_config, action, vp_rank=None) -> bool:
        """Return True iff local routers exist and the first one's router_replay_action equals `action`."""
        routers = RouterReplayHelper.get_micro_batch_router_list(tf_config, vp_rank)
        # bool(...) guarantees a real bool; the previous `list and cmp` form leaked
        # the empty list itself when no local routers exist, violating `-> bool`.
        return bool(routers) and routers[0].router_replay_action == action

    @staticmethod
    def is_r2_record_action(tf_config, vp_rank=None) -> bool:
        """Return True if the current router_replay_action is RECORD (R2) for the local router instances."""
        return RouterReplayHelper._local_action_is(tf_config, RouterReplayAction.RECORD, vp_rank)

    @staticmethod
    def is_replay_forward_action(tf_config, vp_rank=None) -> bool:
        """Return True if the current router_replay_action is REPLAY_FORWARD for the local router instances."""
        return RouterReplayHelper._local_action_is(tf_config, RouterReplayAction.REPLAY_FORWARD, vp_rank)

    @staticmethod
    def is_replay_backward_action(tf_config, vp_rank=None) -> bool:
        """Return True if the current router_replay_action is REPLAY_BACKWARD for the local router instances."""
        return RouterReplayHelper._local_action_is(tf_config, RouterReplayAction.REPLAY_BACKWARD, vp_rank)
| verl__utils__megatron__router_replay_utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from megatron.core import parallel_state as mpu
def mark_parameter_as_sequence_parallel(parameter):
    """Tag *parameter* as sequence-parallel by setting its ``sequence_parallel`` attribute to True."""
    parameter.sequence_parallel = True
def is_sequence_parallel_param(param):
    """Return whether *param* has been marked sequence-parallel.

    Idiomatic replacement for `hasattr(...) and param.sequence_parallel`: a single
    getattr with a False default, preserving the original truthiness behavior.
    """
    return getattr(param, "sequence_parallel", False)
def pad_to_sequence_parallel(unpad_tokens: torch.Tensor):
    """Pad the tokens such that the total length is a multiple of the TP/SP world size.

    Args:
        unpad_tokens: (total_nnz, ...). Tokens after removing padding; only 1-D and 2-D
            inputs are supported.

    Returns:
        The padded tokens: (total_nnz + pad_size, ...); the input is returned unchanged
        when no padding is needed.

    Raises:
        NotImplementedError: if ``unpad_tokens.ndim`` is not 1 or 2.
    """
    total_nnz = unpad_tokens.shape[0]
    sp_world_size = mpu.get_tensor_model_parallel_world_size()
    pad_size = 0 if total_nnz % sp_world_size == 0 else sp_world_size - total_nnz % sp_world_size
    if pad_size > 0:
        if unpad_tokens.ndim == 1:
            unpad_tokens = F.pad(unpad_tokens, (0, pad_size))
        elif unpad_tokens.ndim == 2:
            unpad_tokens = F.pad(unpad_tokens, (0, 0, 0, pad_size))
        else:
            # BUGFIX: `ndim` is an int attribute, not a callable — `unpad_tokens.ndim()`
            # raised TypeError instead of the intended NotImplementedError message.
            raise NotImplementedError(f"Padding dim {unpad_tokens.ndim} is not supported")
    return unpad_tokens
| verl__utils__megatron__sequence_parallel.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for using tensor_parallel in megatron
"""
from typing import TYPE_CHECKING
import torch
import torch.distributed as dist
from megatron.core import parallel_state as mpu
from torch.nn import init
if TYPE_CHECKING:
from megatron.core import ModelParallelConfig
def update_kwargs_with_config(dictionary: dict, config: "ModelParallelConfig"):
    """Store *config* under the ``"config"`` key of *dictionary* and return the same dict."""
    dictionary["config"] = config
    return dictionary
def get_default_kwargs_for_model_parallel_config():
    """Return the baseline keyword arguments used to construct a ModelParallelConfig."""
    return {
        "params_dtype": torch.float32,
        "use_cpu_initialization": False,
        "perform_initialization": True,
        "gradient_accumulation_fusion": False,
        "sequence_parallel": False,
    }
def get_default_model_parallel_config():
    """Build a ModelParallelConfig populated with this module's default kwargs."""
    from megatron.core import ModelParallelConfig

    defaults = get_default_kwargs_for_model_parallel_config()
    return ModelParallelConfig(**defaults)
def get_common_default_kwargs_for_parallel_linear():
    """Return the default kwargs shared by column- and row-parallel linear layers."""
    return {
        "init_method": init.xavier_normal_,
        "stride": 1,
        "keep_master_weight_for_test": False,
        "config": get_default_model_parallel_config(),
    }
def get_default_kwargs_for_column_parallel_linear():
    """Return default kwargs for a ColumnParallelLinear, with async TP all-reduce disabled."""
    from megatron.core import ModelParallelConfig

    config_kwargs = get_default_kwargs_for_model_parallel_config()
    # Column-parallel layers additionally disable the async all-reduce path.
    config_kwargs["async_tensor_model_parallel_allreduce"] = False
    kwargs = get_common_default_kwargs_for_parallel_linear()
    kwargs["config"] = ModelParallelConfig(**config_kwargs)
    return kwargs
def get_default_kwargs_for_row_parallel_linear():
    """Return default kwargs for a RowParallelLinear (identical to the common defaults)."""
    return get_common_default_kwargs_for_parallel_linear()
def get_default_kwargs_for_parallel_embedding():
    """Return default kwargs for a VocabParallelEmbedding."""
    from megatron.core import ModelParallelConfig

    config = ModelParallelConfig(**get_default_kwargs_for_model_parallel_config())
    return {
        "init_method": init.xavier_normal_,
        "config": config,
    }
def is_tensor_parallel_param(param):
    """Return whether *param* is marked as tensor-model-parallel.

    Idiomatic replacement for `hasattr(...) and param.tensor_model_parallel`: one
    getattr with a False default, preserving the original truthiness behavior.
    """
    return getattr(param, "tensor_model_parallel", False)
def get_tensor_parallel_partition_dim(param):
    """Return the dimension along which *param* is partitioned across TP ranks."""
    assert is_tensor_parallel_param(param)
    return param.partition_dim
def get_tensor_parallel_partition_stride(param):
    """Return the partition stride of a tensor-parallel *param*."""
    assert is_tensor_parallel_param(param)
    return param.partition_stride
class _VocabParallelEntropy(torch.autograd.Function):
    """Autograd function computing per-token entropy over vocab-sharded logits.

    Each tensor-parallel rank holds a slice of the vocabulary dimension; the
    all-reduces below combine partial results across the TP group.
    """

    @staticmethod
    def forward(ctx, vocab_parallel_logits: torch.Tensor) -> torch.Tensor:
        @torch.compile(dynamic=True)
        def mul_reduce(a, b):
            return (a * b).sum(dim=-1, keepdim=True)

        # Global max over the (sharded) vocab dimension, for numerical stability.
        logits_max = vocab_parallel_logits.max(dim=-1, keepdim=True).values
        dist.all_reduce(logits_max, op=dist.ReduceOp.MAX, group=mpu.get_tensor_model_parallel_group())
        normalized_vocab_parallel_logits = vocab_parallel_logits - logits_max
        # NOTE: exp_/div_ below mutate in place — normalized_exp_logits becomes softmax_logits.
        normalized_exp_logits = normalized_vocab_parallel_logits.exp_()
        normalized_sum_exp_logits = normalized_exp_logits.sum(dim=-1, keepdim=True)
        dist.all_reduce(normalized_sum_exp_logits, group=mpu.get_tensor_model_parallel_group())
        softmax_logits = normalized_exp_logits.div_(normalized_sum_exp_logits)
        # sum_j softmax_j * logits_j, reduced across the sharded vocab.
        sum_softmax_times_logits = mul_reduce(softmax_logits, vocab_parallel_logits)
        dist.all_reduce(sum_softmax_times_logits, group=mpu.get_tensor_model_parallel_group())
        # H = logsumexp(logits) - sum_j softmax_j * logits_j
        entropy = logits_max + normalized_sum_exp_logits.log() - sum_softmax_times_logits
        ctx.save_for_backward(vocab_parallel_logits, softmax_logits, sum_softmax_times_logits)
        return entropy.squeeze(dim=-1)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
        vocab_parallel_logits, softmax_logits, sum_softmax_times_logits = ctx.saved_tensors
        # reuse softmax_logits as grad buffer: dH/dlogits = -softmax * (logits - sum(softmax*logits))
        vocab_parallel_logits.sub_(sum_softmax_times_logits)
        softmax_logits.mul_(vocab_parallel_logits)
        softmax_logits.mul_(grad_output.unsqueeze(dim=-1))
        # recover vocab_parallel_logits (undo the in-place sub above)
        vocab_parallel_logits.add_(sum_softmax_times_logits)
        softmax_logits.mul_(-1)
        return softmax_logits
def vocab_parallel_entropy(vocab_parallel_logits: torch.Tensor) -> torch.Tensor:
    """Compute per-token entropy when the logits are sharded across tensor-parallel ranks.

    Args:
        vocab_parallel_logits: (total_nnz, vocab_size // tp_size)

    Returns:
        torch.Tensor: (total_nnz,)
    """
    return _VocabParallelEntropy.apply(vocab_parallel_logits)
def vocab_parallel_log_probs_from_logits(logits, labels):
    """Return per-token log-probs of *labels* under TP-sharded *logits*.

    TODO(zhangchi.usc1992): We may change the implementation later
    """
    from megatron.core import tensor_parallel

    # Cross entropy is the negative log-likelihood, so negate it back.
    return -tensor_parallel.vocab_parallel_cross_entropy(vocab_parallel_logits=logits, target=labels)
def vocab_parallel_log_probs_from_logits_response_rmpad(input_ids, attention_mask, logits_rmpad, response_length):
    """Similar to log_probs_from_logits_response_rmpad, but the logits_rmpad is now split across the
    tensor parallel region.

    This will further reduce the peak memory usage during training.

    Args:
        input_ids: [batch_size, seqlen]
        attention_mask: [batch_size, seqlen]
        logits_rmpad: [total_nnz, vocab_size // tp_size]
        response_length: int

    Returns:
        [batch_size, response_length] log-probs of the response tokens.
    """
    from flash_attn.bert_padding import pad_input, unpad_input

    batch_size, seqlen = input_ids.shape
    # Remove padding so log-probs are only computed on real tokens.
    input_ids_rmpad, indices, *_ = unpad_input(input_ids.unsqueeze(-1), attention_mask=attention_mask)
    input_ids_rmpad = input_ids_rmpad.squeeze(-1)
    # Shift labels left by one: logits at position t predict the token at t+1.
    input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=0)
    full_log_probs_rmpad = vocab_parallel_log_probs_from_logits(
        logits=logits_rmpad, labels=input_ids_rmpad_rolled
    )  # (total_nnz,)
    # Re-pad back to [batch_size, seqlen], then keep only the response positions.
    full_output = pad_input(
        hidden_states=full_log_probs_rmpad.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen
    )
    output = full_output.squeeze(-1)[:, -response_length - 1 : -1]  # [batch_size, response_length]
    return output
| verl__utils__megatron__tensor_parallel.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for PEFT (Parameter-Efficient Fine-Tuning) of Megatron in VERL."""
import os
from pathlib import Path
from typing import Iterator
import torch
# Map megatron lora target modules to HF-style module names for vLLM
MEGATRON_TO_HF_MODULES = {
    # Fused megatron modules expand to multiple HF projections.
    "linear_qkv": ["q_proj", "k_proj", "v_proj"],
    "linear_proj": ["o_proj"],
    "linear_fc1": ["gate_proj", "up_proj"],
    "linear_fc2": ["down_proj"],
    "router": ["gate"],
    # Canonical LoRA mappings
    "linear_q": ["q_proj"],
    "linear_k": ["k_proj"],
    "linear_v": ["v_proj"],
    "linear_fc1_up": ["up_proj"],
    "linear_fc1_gate": ["gate_proj"],
    # MLA mappings
    "linear_kv_down_proj": ["kv_a_proj_with_mqa"],
    "linear_kv_up_proj": ["kv_b_proj"],
    "linear_q_down_proj": ["q_a_proj"],
    "linear_q_up_proj": ["q_b_proj"],
    "linear_q_proj": ["q_proj"],
}
# Modules with stacked parameters that need .base_layer suffix in vLLM
# (consumed by add_base_layer_suffix; entries are name suffixes to match).
STACKED_PARAMS = [
    ".q_proj.weight",
    ".q_proj.bias",
    ".k_proj.weight",
    ".k_proj.bias",
    ".v_proj.weight",
    ".v_proj.bias",
    ".o_proj.weight",
    ".o_proj.bias",
    ".gate_proj.weight",
    ".up_proj.weight",
    ".down_proj.weight",
    ".mlp.gate.weight",
    ".mlp.gate.bias",
    ".mlp.gate.e_score_correction_bias",
    ".kv_a_proj_with_mqa.weight",
    ".kv_b_proj.weight",
    ".q_a_proj.weight",
    ".q_b_proj.weight",
]
def _get_rank_checkpoint_path(base_path: str) -> str:
    """Return the rank-specific checkpoint path following Megatron's convention.

    The directory name always encodes the tensor-parallel rank, adds the pipeline
    rank when PP > 1, and appends the expert rank when EP > 1, e.g.
    ``base_path/mp_rank_{tp:02d}_{pp:03d}_{ep:03d}``.

    Args:
        base_path: Base checkpoint directory.

    Returns:
        Rank-specific subdirectory path.
    """
    from megatron.core import mpu

    tp_rank = mpu.get_tensor_model_parallel_rank()
    pp_rank = mpu.get_pipeline_model_parallel_rank()
    ep_rank = mpu.get_expert_model_parallel_rank()
    suffix = f"mp_rank_{tp_rank:02d}"
    if mpu.get_pipeline_model_parallel_world_size() > 1:
        suffix += f"_{pp_rank:03d}"
    rank_path = os.path.join(base_path, suffix)
    if mpu.get_expert_model_parallel_world_size() > 1:
        rank_path += f"_{ep_rank:03d}"
    return rank_path
def get_adapter_state_dict(model):
    """Extract only adapter parameters from a model.

    Args:
        model: PyTorch model (possibly wrapped in DDP/Float16Module).

    Returns:
        Dict mapping adapter parameter names to cloned tensors.
    """
    from verl.utils.megatron_utils import unwrap_model

    # Unwrap model from DDP/Float16Module.
    unwrapped = unwrap_model(model)
    if isinstance(unwrapped, list):
        unwrapped = unwrapped[0]
    # Only parameters living under an `.adapter.` submodule are collected.
    return {
        name: param.data.clone()
        for name, param in unwrapped.named_parameters()
        if ".adapter." in name.lower()
    }
def save_adapter_checkpoint(
    model: torch.nn.Module | list[torch.nn.Module],
    checkpoint_path: str,
    rank: int = 0,
):
    """Save only adapter parameters to checkpoint.

    Much cheaper than a full-model save under PEFT, as adapters typically
    represent <1% of total parameters. Each rank writes its own file named
    ``checkpoint_path/mp_rank_{...}_adapter.pt``.

    Args:
        model: Model or list of models (adapters are read from the first one).
        checkpoint_path: Base path to save checkpoint (rank-specific files created).
        rank: Process rank (used for logging only).
    """
    models = model if isinstance(model, list) else [model]
    # Get adapter state from first model.
    adapter_state = get_adapter_state_dict(models[0])
    if not adapter_state:
        if rank == 0:
            print("Warning: No adapter parameters found to save")
        return
    Path(checkpoint_path).mkdir(parents=True, exist_ok=True)
    adapter_file = _get_rank_checkpoint_path(checkpoint_path) + "_adapter.pt"
    torch.save({"adapter_state_dict": adapter_state}, adapter_file)
    if rank == 0:
        print(f"Saved {len(adapter_state)} adapter parameters to {checkpoint_path} (distributed)")
def load_adapter_checkpoint(
    model: torch.nn.Module | list[torch.nn.Module],
    checkpoint_path: str,
    strict: bool = True,
):
    """Load adapter parameters from a distributed adapter checkpoint.

    Reads the rank-specific file ``checkpoint_path/mp_rank_{...}_adapter.pt``
    (as written by save_adapter_checkpoint) and loads it into every model chunk.

    Args:
        model: Model or list of models (VPP may produce multiple chunks).
        checkpoint_path: Base path to checkpoint directory.
        strict: Whether to raise when the checkpoint contains unexpected keys.

    Raises:
        FileNotFoundError: if the rank-specific adapter file does not exist.
        RuntimeError: if ``strict`` and the checkpoint holds unexpected parameter names.
    """
    from megatron.core import mpu

    from verl.utils.megatron_utils import unwrap_model

    # Get rank-specific path.
    rank_path = _get_rank_checkpoint_path(checkpoint_path)
    adapter_file = rank_path + "_adapter.pt"
    if not os.path.isfile(adapter_file):
        raise FileNotFoundError(f"Adapter checkpoint not found: {adapter_file}")
    checkpoint = torch.load(adapter_file, map_location="cpu")
    adapter_state = checkpoint.get("adapter_state_dict", {})
    if not adapter_state:
        print("Warning: No adapter parameters found in checkpoint")
        return
    models = model if isinstance(model, list) else [model]
    # Load adapter parameters into each model chunk (for VPP there may be several).
    # (The previous version also accumulated an unused `loaded_count`; removed.)
    for m in models:
        unwrapped = unwrap_model(m)
        if isinstance(unwrapped, list):
            unwrapped = unwrapped[0]
        # strict=False here because the checkpoint holds only adapter params;
        # missing (base-model) keys are expected, unexpected keys are not.
        _, unexpected = unwrapped.load_state_dict(adapter_state, strict=False)
        if strict and unexpected:
            raise RuntimeError(f"Error loading adapter checkpoint:\nUnexpected keys: {unexpected}")
    if (
        mpu.get_data_parallel_rank() == 0
        and mpu.get_tensor_model_parallel_rank() == 0
        and mpu.get_pipeline_model_parallel_rank() == 0
    ):
        print(f"Loaded {len(adapter_state)} adapter parameters from {checkpoint_path}")
def count_adapter_parameters(model):
    """Count the number of trainable adapter parameters.

    Args:
        model: PyTorch model.

    Returns:
        Tuple of (adapter_params, total_params, percentage).
    """
    from verl.utils.megatron_utils import unwrap_model

    unwrapped = unwrap_model(model)
    if isinstance(unwrapped, list):
        unwrapped = unwrapped[0]
    adapter_params = 0
    total_params = 0
    for name, param in unwrapped.named_parameters():
        numel = param.numel()
        total_params += numel
        lowered = name.lower()
        # A parameter counts as "adapter" when it is trainable and its name
        # mentions lora or adapter.
        if ("lora" in lowered or "adapter" in lowered) and param.requires_grad:
            adapter_params += numel
    percentage = 100 * adapter_params / total_params if total_params > 0 else 0
    return adapter_params, total_params, percentage
def print_adapter_info(model):
    """Print a short summary of adapter vs. total parameter counts for *model*."""
    adapter_params, total_params, percentage = count_adapter_parameters(model)
    separator = "=" * 60
    print(f"\n{separator}")
    print("PEFT Adapter Information:")
    print(f"  Total parameters: {total_params:,}")
    print(f"  Adapter parameters: {adapter_params:,}")
    print(f"  Trainable percentage: {percentage:.2f}%")
    print(f"{separator}\n")
def convert_megatron_to_hf_target_modules(megatron_modules: list[str]) -> list[str]:
    """Convert megatron lora target modules to HF-style module names.

    Names without a known mapping pass through unchanged; duplicates are removed
    while preserving first-seen order.

    Args:
        megatron_modules: List of megatron-style module names.

    Returns:
        List of HF-style module names with duplicates removed.
    """
    converted: list[str] = []
    for module in megatron_modules:
        converted.extend(MEGATRON_TO_HF_MODULES.get(module, [module]))
    # dict.fromkeys keeps insertion order, deduplicating in one pass.
    return list(dict.fromkeys(converted))
def build_peft_config_for_vllm(lora_config: dict) -> dict:
    """Build a peft_config dict compatible with vLLM's PEFTHelper from megatron lora config.

    Args:
        lora_config: Megatron lora configuration dictionary.

    Returns:
        A dictionary compatible with vLLM's PEFTHelper.from_dict().
    """
    from peft import TaskType

    megatron_targets = lora_config.get("target_modules", ["linear_qkv", "linear_proj", "linear_fc1", "linear_fc2"])
    megatron_excludes = lora_config.get("exclude_modules", [])
    return {
        "task_type": TaskType.CAUSAL_LM,
        "r": lora_config.get("rank", 0),
        "lora_alpha": lora_config.get("alpha", 32),
        "target_modules": convert_megatron_to_hf_target_modules(megatron_targets),
        "exclude_modules": convert_megatron_to_hf_target_modules(megatron_excludes),
        "bias": "none",
        "lora_dropout": lora_config.get("dropout", 0.0),
    }
# vLLM needs to target all-linear no matter about specific LoRA config
def add_base_layer_suffix(
    params: Iterator[tuple[str, torch.Tensor]],
    model_type: str,
) -> Iterator[tuple[str, torch.Tensor]]:
    """Yield param pairs with a base-layer suffix added to the param name.

    For stacked-parameter modules, vLLM expects the base weight under a
    ``base_layer`` submodule, e.g. ``q_proj.weight`` -> ``q_proj.base_layer.weight``.

    Args:
        params: Iterator of (param_name, tensor)
        model_type: The type of the model (e.g., "llama").
    """
    # TODO: other models may have more special treatment, or integrate this into Megatron-Bridge
    stacked_params = [".embed_tokens.weight", *STACKED_PARAMS] if model_type == "llama" else STACKED_PARAMS
    for name, param in params:
        matched = next((candidate for candidate in stacked_params if name.endswith(candidate)), "")
        if matched:
            # Insert "base_layer." right before the final name component.
            leaf = matched.rsplit(".", 1)[-1]
            name = f"{name[: -len(leaf)]}base_layer.{leaf}"
        yield name, param
# Explicit public API of this module; names not listed here are internal helpers.
__all__ = [
    "get_adapter_state_dict",
    "save_adapter_checkpoint",
    "load_adapter_checkpoint",
    "count_adapter_parameters",
    "print_adapter_info",
    "convert_megatron_to_hf_target_modules",
    "build_peft_config_for_vllm",
    "add_base_layer_suffix",
]
| verl__utils__megatron_peft_utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pretrain utilities."""
import gc
import inspect
import logging
import os
import warnings
from dataclasses import dataclass
from typing import Any
import torch
import torch.nn.functional as F
from megatron.core import ModelParallelConfig, mpu, parallel_state, tensor_parallel
from megatron.core.distributed import DistributedDataParallel as DDP
from megatron.core.distributed import DistributedDataParallelConfig
from megatron.core.enums import ModelType
from megatron.core.optimizer import ChainedOptimizer
from megatron.core.parallel_state import get_global_memory_buffer
from megatron.core.transformer import MLATransformerConfig, TransformerConfig
from megatron.core.transformer.module import Float16Module
from megatron.core.transformer.multi_token_prediction import MTPLossLoggingHelper
from megatron.core.utils import get_attr_wrapped_model
from transformers import PretrainedConfig
import verl.utils.megatron.tensor_parallel as tp_utils
from verl.utils.device import get_device_id, get_device_name, get_torch_device
from verl.utils.fs import local_mkdir_safe
from verl.utils.model import normalize_model_name
from verl.utils.torch_dtypes import PrecisionType
from verl.workers.config import HFModelConfig, McoreEngineConfig
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def get_model_config(model):
    """Fetch the ``config`` attribute from a (possibly wrapped) Megatron model."""
    model_config = get_attr_wrapped_model(model, "config", allow_none=False)
    return model_config
def get_model(
    model_provider_func,
    model_type=ModelType.encoder_or_decoder,
    wrap_with_ddp=True,
    use_distributed_optimizer=True,
    transformer_config=None,
    override_ddp_config=None,
):
    """Build the model.

    Creates one module per (virtual) pipeline stage via ``model_provider_func``
    with ``pre_process``/``post_process`` derived from the pipeline topology,
    moves the chunks to the accelerator (unless CPU initialization is
    requested), applies ``Float16Module`` wrapping for fp16/bf16, and
    optionally wraps each chunk in Megatron's ``DistributedDataParallel``.

    Args:
        model_provider_func: callable building a single model chunk; called
            with ``pre_process``/``post_process`` (plus ``vp_stage`` on the
            virtual-pipeline path).
        model_type: Megatron ``ModelType``; ``encoder_and_decoder`` is
            rejected by assertion below.
        wrap_with_ddp: wrap every chunk in Megatron DDP when True.
        use_distributed_optimizer: recorded in the DDP config.
        transformer_config: when set and ``use_cpu_initialization`` is True,
            chunks are left on CPU.
        override_ddp_config: optional dict merged over the default DDP config.

    Returns:
        A list of model chunks, wrapped as requested.
    """
    # Build model.
    if (
        mpu.get_pipeline_model_parallel_world_size() > 1
        and mpu.get_virtual_pipeline_model_parallel_world_size() is not None
    ):
        assert model_type != ModelType.encoder_and_decoder, (
            "Interleaved schedule not supported for model with both encoder and decoder"
        )
        model = []
        # Newer Megatron versions accept a ``vp_stage`` kwarg on the pipeline
        # stage predicates; detect support by inspecting the signature.
        has_vp_stage = inspect.signature(mpu.is_pipeline_first_stage).parameters.get("vp_stage", None) is not None
        for i in range(mpu.get_virtual_pipeline_model_parallel_world_size()):
            mpu.set_virtual_pipeline_model_parallel_rank(i)
            # Set pre_process and post_process only after virtual rank is set.
            extra_kwargs = {} if not has_vp_stage else {"ignore_virtual": False, "vp_stage": i}
            pre_process = mpu.is_pipeline_first_stage(**extra_kwargs)
            post_process = mpu.is_pipeline_last_stage(**extra_kwargs)
            this_model = model_provider_func(pre_process=pre_process, post_process=post_process, vp_stage=i)
            this_model.model_type = model_type
            model.append(this_model)
        mpu.set_virtual_pipeline_model_parallel_rank(0)
    else:
        pre_process = mpu.is_pipeline_first_stage()
        post_process = mpu.is_pipeline_last_stage()
        add_encoder = True
        add_decoder = True
        assert model_type != ModelType.encoder_and_decoder, "Model type encoder_and_decoder is not supported"
        # NOTE(review): the branch below is unreachable given the assert just
        # above; it is kept verbatim from upstream Megatron pretrain utilities.
        if model_type == ModelType.encoder_and_decoder:
            if mpu.get_pipeline_model_parallel_world_size() > 1:
                assert mpu.get_pipeline_model_parallel_split_rank() is not None, (
                    "Split rank needs to be specified for model with both encoder and decoder"
                )
                rank = mpu.get_pipeline_model_parallel_rank()
                split_rank = mpu.get_pipeline_model_parallel_split_rank()
                world_size = mpu.get_pipeline_model_parallel_world_size()
                pre_process = rank == 0 or rank == split_rank
                post_process = (rank == (split_rank - 1)) or (rank == (world_size - 1))
                add_encoder = mpu.is_pipeline_stage_before_split()
                add_decoder = mpu.is_pipeline_stage_after_split()
            model = model_provider_func(
                pre_process=pre_process, post_process=post_process, add_encoder=add_encoder, add_decoder=add_decoder
            )
        else:
            model = model_provider_func(pre_process=pre_process, post_process=post_process)
        model.model_type = model_type
    if not isinstance(model, list):
        model = [model]
    # Set tensor model parallel attributes if not set.
    # Only parameters that are already tensor model parallel have these
    # attributes set for them. We should make sure the default attributes
    # are set for all params so the optimizer can use them.
    for model_module in model:
        for param in model_module.parameters():
            tensor_parallel.set_defaults_if_not_set_tensor_model_parallel_attributes(param)
    # Print number of parameters.
    if mpu.get_data_parallel_rank() == 0:
        print(
            " > number of parameters on (tensor, pipeline) model parallel rank ({}, {}): {}".format(
                mpu.get_tensor_model_parallel_rank(),
                mpu.get_pipeline_model_parallel_rank(),
                sum([sum([p.nelement() for p in model_module.parameters()]) for model_module in model]),
            ),
            flush=True,
        )
    # GPU allocation.
    if transformer_config is None or (not transformer_config.use_cpu_initialization):
        for model_module in model:
            model_module.to(f"{get_device_name()}:{get_device_id()}")
    # Fp16 conversion.
    config: TransformerConfig = get_model_config(model[0])
    # fp8 is explicitly disabled on the retrieved model config.
    config.fp8 = None
    tfconfig: TransformerConfig = model[0].config
    if config.fp16 or config.bf16:  # the ModelParallelConfig in GPTModel
        model = [Float16Module(config, model_module) for model_module in model]
    if wrap_with_ddp:
        ddp_models = []
        ddp_config_dict = {
            "use_distributed_optimizer": use_distributed_optimizer,
            "grad_reduce_in_fp32": True,
            "overlap_grad_reduce": False,
        }
        if override_ddp_config is not None:
            ddp_config_dict.update(override_ddp_config)
        ddp_config = DistributedDataParallelConfig(**ddp_config_dict)
        for model_chunk_idx, model_chunk in enumerate(model):
            ddp_model = DDP(
                config=tfconfig,
                module=model_chunk,
                # Bucketing is kept only for the first model chunk.
                disable_bucketing=(model_chunk_idx > 0),
                ddp_config=ddp_config,
            )
            ddp_models.append(ddp_model)
        model = ddp_models
        # # Broadcast params from data parallel src rank to other data parallel ranks.
        # # if args.data_parallel_random_init:
        for model_module in model:
            model_module.broadcast_params()
    return model
@dataclass
class McoreModuleWrapperConfig:
    """Configuration for Mcore module wrapper.

    Controls how ``make_megatron_module`` builds and wraps the Megatron model.
    """

    # Triggers the value-model post-creation hook (``make_value_model``) in
    # ``make_megatron_module``.
    is_value_model: bool = False
    # Passed through to model construction (``init_mcore_model``).
    share_embeddings_and_output_weights: bool = False
    # Wrap the created model chunks in Megatron DistributedDataParallel.
    wrap_with_ddp: bool = True
    # Recorded in the DDP config used for wrapping.
    use_distributed_optimizer: bool = True
def make_megatron_module(
    wrap_config: McoreModuleWrapperConfig,
    tf_config: TransformerConfig,
    hf_config: PretrainedConfig,
    bridge: Any = None,
    provider: Any = None,
    override_model_config: dict[str, Any] | None = None,
    override_ddp_config: dict[str, Any] | None = None,
    peft_cls: Any = None,
    peft_config: Any = None,
):
    """Create a Megatron module via a bridge/provider when given, else directly.

    Three construction paths:
      * ``bridge`` + ``provider``: Megatron-Bridge path — PEFT and
        post-creation hooks are registered on the provider before DDP wrapping.
      * ``bridge`` only (mbridge): ``bridge.get_model`` with post-creation
        callbacks.
      * neither: ``init_mcore_model`` through the local ``get_model`` helper.

    Args:
        wrap_config: wrapping options (value model, DDP, tied embeddings, ...).
        tf_config: Megatron ``TransformerConfig``; on the provider path it is
            re-read from the created model before returning.
        hf_config: HuggingFace model config.
        bridge: optional bridge object exposing ``get_model``.
        provider: optional Megatron-Bridge provider exposing pre-wrap hook
            registration and ``provide_distributed_model``.
        override_model_config: optional model-config overrides
            (``moe_config.freeze_moe_router`` is honored).
        override_ddp_config: optional DDP-config overrides.
        peft_cls: optional PEFT transform callable (provider path only).
        peft_config: optional PEFT config; its ``adapter_path`` is honored.

    Returns:
        Tuple ``(model, tf_config)``.
    """
    if override_model_config is None:
        override_model_config = {}
    if bridge is not None:
        if provider is None:
            from verl.models.mcore.mbridge import freeze_moe_router, make_value_model

            value_model_hook = make_value_model
        else:
            from verl.models.mcore.bridge import freeze_moe_router, make_value_model

            # Multimodal configs keep the LM sizes under ``text_config``.
            hidden_size = (
                hf_config.text_config.hidden_size if hasattr(hf_config, "text_config") else hf_config.hidden_size
            )
            value_model_hook = make_value_model(hidden_size, provider.sequence_parallel)
        post_model_creation_callbacks = []
        if wrap_config.is_value_model:
            post_model_creation_callbacks.append(value_model_hook)
        if override_model_config.get("moe_config", {}).get("freeze_moe_router", False):
            post_model_creation_callbacks.append(freeze_moe_router)
        if provider is not None:
            # When using PEFT with Megatron-Bridge, we must apply PEFT transformation
            # BEFORE wrapping the model in DDP. This is required because:
            # 1. PEFT freezes base model parameters (requires_grad=False)
            # 2. DDP must be aware of which parameters are trainable when building gradient buckets
            # 3. The distributed optimizer must only track trainable (adapter) parameters
            # See Megatron-Bridge docs: training/peft.md
            # Register PEFT transformation as pre-wrap hook if peft_cls is specified
            # This must happen BEFORE DDP wrapping to avoid KeyError with frozen parameters
            if peft_cls is not None:
                from verl.utils.megatron_peft_utils import load_adapter_checkpoint, print_adapter_info

                def peft_pre_wrap_hook(model):
                    """Pre-wrap hook that applies PEFT transformation."""
                    # Apply PEFT transformation - this will freeze base model and add adapters
                    # The PEFT callable handles both freezing and transformation
                    transformed_model = peft_cls(model, training=True)
                    # Set parameters to save (adapter-only checkpointing)
                    peft_cls.set_params_to_save(transformed_model)
                    # Load adapter weights if adapter_path is specified
                    adapter_path = getattr(peft_config, "adapter_path", None)
                    if adapter_path is not None and adapter_path:
                        print(f"Loading adapter weights from: {adapter_path}")
                        load_adapter_checkpoint(transformed_model, adapter_path)
                    # Print PEFT statistics
                    if torch.distributed.get_rank() == 0:
                        print_adapter_info(transformed_model)
                    return transformed_model

                provider.register_pre_wrap_hook(peft_pre_wrap_hook)
            # Register post-creation callbacks (make_value_model, freeze_moe_router) as pre-wrap hooks
            for callback in post_model_creation_callbacks:
                provider.register_pre_wrap_hook(callback)
            # Create DDP config if needed
            ddp_config = None
            if wrap_config.wrap_with_ddp:
                from megatron.bridge.training.config import DistributedDataParallelConfig

                ddp_config_dict = {
                    "use_distributed_optimizer": wrap_config.use_distributed_optimizer,
                }
                # Apply any DDP config overrides
                if override_ddp_config is not None:
                    ddp_config_dict.update(override_ddp_config)
                ddp_config = DistributedDataParallelConfig(**ddp_config_dict)
                ddp_config.finalize()
            # Now call provide_distributed_model with all hooks registered
            # Hooks will be applied automatically before DDP wrapping
            model = provider.provide_distributed_model(
                wrap_with_ddp=wrap_config.wrap_with_ddp,
                ddp_config=ddp_config,
                fp16=provider.fp16,
                bf16=provider.bf16,
            )
            # Extract TransformerConfig from the created model
            tf_config = get_model_config(model[0] if isinstance(model, list) else model)
        else:
            model = bridge.get_model(
                post_model_creation_callbacks=post_model_creation_callbacks,
                wrap_with_ddp=wrap_config.wrap_with_ddp,
                fp16=tf_config.fp16,
                bf16=tf_config.bf16,
                ddp_config=override_ddp_config,
            )
        if isinstance(tf_config, MLATransformerConfig):
            # Keep the same behavior as hf_to_mcore_config_dpskv3
            from verl.models.mcore.patch import apply_patch

            apply_patch()
    else:

        def megatron_model_provider(pre_process, post_process, vp_stage=None):
            # Builds one pipeline-stage chunk with verl's native mcore initializer.
            from verl.models.mcore import init_mcore_model

            parallel_model = init_mcore_model(
                tf_config,
                hf_config,
                pre_process,
                post_process,
                share_embeddings_and_output_weights=wrap_config.share_embeddings_and_output_weights,
                value=wrap_config.is_value_model,
                freeze_moe_router=override_model_config.get("moe_config", {}).get("freeze_moe_router", False),
                vp_stage=vp_stage,
            )
            parallel_model.to(get_device_name())
            return parallel_model

        model = get_model(
            megatron_model_provider,
            wrap_with_ddp=wrap_config.wrap_with_ddp,
            use_distributed_optimizer=wrap_config.use_distributed_optimizer,
            override_ddp_config=override_ddp_config,
        )
    return model, tf_config
# Wrapper classes that ``unwrap_model`` strips via their ``.module`` attribute.
ALL_MODULE_WRAPPER_CLASSNAMES = (DDP, Float16Module)
def unwrap_model(model, module_instances=ALL_MODULE_WRAPPER_CLASSNAMES):
    """Strip wrapper layers (DDP / Float16Module) from a model or list of model chunks.

    Returns a list when given a list, otherwise a single unwrapped module.
    """
    was_list = isinstance(model, list)
    chunks = model if was_list else [model]

    def _strip(module):
        # Follow ``.module`` until no known wrapper class remains.
        while isinstance(module, module_instances):
            module = module.module
        return module

    unwrapped = [_strip(chunk) for chunk in chunks]
    return unwrapped if was_list else unwrapped[0]
def convert_config(hf_config: PretrainedConfig, megatron_config) -> TransformerConfig:
    """[Deprecated] convert config

    Builds a Megatron ``TransformerConfig`` from a HuggingFace config plus the
    live mpu parallel state. Hard-codes llama-style choices (SiLU activation,
    RMSNorm, gated linear unit); prefer the model-specific config converters.

    Args:
        hf_config (PretrainedConfig): HuggingFace model configuration.
        megatron_config: object providing ``params_dtype``.

    Returns:
        TransformerConfig: the equivalent Megatron transformer config.
    """
    warnings.warn("[deprecated] use config converter for more model support", stacklevel=2)
    print(f"megatron config {megatron_config}")
    dt = PrecisionType.to_dtype(megatron_config.params_dtype)
    print(f"pipeline_dtype=megatron_config {dt}")
    # Qwen2 models always use qkv bias; otherwise honor the HF attention_bias flag.
    qkv_bias = True if "Qwen2ForCausalLM" in hf_config.architectures else getattr(hf_config, "attention_bias", False)
    # Overlap p2p communication only when virtual pipeline parallelism is active.
    overlap_p2p_comm = (
        mpu.get_virtual_pipeline_model_parallel_world_size() is not None
        and mpu.get_virtual_pipeline_model_parallel_world_size() > 1
    )
    batch_p2p_comm = False
    transformer_config = TransformerConfig(
        num_layers=hf_config.num_hidden_layers,
        hidden_size=hf_config.hidden_size,
        num_attention_heads=hf_config.num_attention_heads,
        num_query_groups=hf_config.num_key_value_heads,
        ffn_hidden_size=hf_config.intermediate_size,
        # max_position_embeddings=hf_config.max_position_embeddings,
        activation_func=F.silu,
        normalization="RMSNorm",
        # rotary_percent=False, # default,
        gated_linear_unit=True,  # for llama
        use_cpu_initialization=True,
        apply_residual_connection_post_layernorm=False,  # check what's this mean
        add_bias_linear=False,
        tensor_model_parallel_size=mpu.get_tensor_model_parallel_world_size(),
        pipeline_model_parallel_size=mpu.get_pipeline_model_parallel_world_size(),
        virtual_pipeline_model_parallel_size=mpu.get_virtual_pipeline_model_parallel_world_size(),
        context_parallel_size=mpu.get_context_parallel_world_size(),
        overlap_p2p_comm=overlap_p2p_comm,
        batch_p2p_comm=batch_p2p_comm,
        pipeline_dtype=dt,
        params_dtype=dt,
        sequence_parallel=mpu.get_tensor_model_parallel_world_size() > 1,
        variable_seq_lengths=True,
        masked_softmax_fusion=True,
        moe_token_dispatcher_type="alltoall",
        attention_dropout=hf_config.attention_dropout,
        hidden_dropout=getattr(hf_config, "hidden_dropout", 0.0),
        add_qkv_bias=qkv_bias,
        bf16=dt is torch.bfloat16,
    )
    return transformer_config
def mcore_model_parallel_config(
    sequence_parallel: bool,
    params_dtype: torch.dtype,
) -> ModelParallelConfig:
    """[Deprecated] Build a ``ModelParallelConfig`` from the current mpu state.

    Emits a DeprecationWarning; use ``hf_to_mcore_config_dense()`` from
    ``verl.models.mcore.config_converter`` instead.
    """
    warnings.warn(
        "Code should not reach this point. This function is deprecated and will be removed. Please use "
        "hf_to_mcore_config_dense() from verl.models.mcore.config_converter instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    # Parallel sizes are read from the live mpu state.
    parallel_sizes = {
        "tensor_model_parallel_size": mpu.get_tensor_model_parallel_world_size(),
        "pipeline_model_parallel_size": mpu.get_pipeline_model_parallel_world_size(),
        "virtual_pipeline_model_parallel_size": mpu.get_virtual_pipeline_model_parallel_world_size(),
        "context_parallel_size": mpu.get_context_parallel_world_size(),
    }
    return ModelParallelConfig(
        sequence_parallel=sequence_parallel,
        params_dtype=params_dtype,
        pipeline_dtype=params_dtype,
        bf16=True,
        fp16=False,
        timers=None,
        **parallel_sizes,
    )
@torch.no_grad()
def offload_megatron_model_to_cpu(models):
    """
    Offload model weights (and grad buffers) to CPU to free accelerator memory.

    In megatron, the model and optimizer storage are:
    - bf16 parameter data chunked in model parallel group
    - fp32 grad chunked in model parallel group
    - fp32 main_parameter chunked in model and dp group
    - fp32 optimizer state chunked in model and dp group

    For DDP-wrapped chunks, the contiguous param/grad buffers are copied into
    pinned CPU memory and the device storage is resized to zero; plain modules
    (e.g. the ref module) simply move their tensors to CPU.
    """
    for model_chunk in models:
        if isinstance(model_chunk, DDP):
            model_chunk_all_buffers = [model_chunk.buffers, model_chunk.expert_parallel_buffers]
            for buffers in model_chunk_all_buffers:
                for buffer in buffers:
                    # offload parameters
                    if buffer.param_data.storage().size() > 0:
                        # Keep a pinned CPU copy and record the original size so
                        # load_megatron_model_to_gpu can restore the storage.
                        buffer.param_data.cpu_data = buffer.param_data.data.cpu().pin_memory()
                        buffer.param_data_size = buffer.param_data.storage().size()
                        buffer.param_data.storage().resize_(0)
                        assert buffer.param_data_size == buffer.param_data.cpu_data.storage().size()
                    if buffer.grad_data.storage().size() > 0:
                        # if the grad_data size is already zero, we assume that it is already offloaded
                        buffer.grad_data_size = buffer.grad_data.storage().size()
                        buffer.grad_data.storage().resize_(0)
        else:
            # we need this for ref module
            for _, param in model_chunk.named_parameters():
                param.data = param.data.to("cpu", non_blocking=True)
                if param.grad is not None:
                    param.grad = param.grad.to("cpu", non_blocking=True)
    gc.collect()
    get_torch_device().empty_cache()
@torch.no_grad()
def load_megatron_model_to_gpu(models, load_grad=True):
    """Restore weights previously offloaded by ``offload_megatron_model_to_cpu``.

    Args:
        models: list of model chunks (Megatron DDP-wrapped or plain modules).
        load_grad: when False, gradient buffers stay offloaded
            (useful for pure inference).
    """
    for model_chunk in models:
        if isinstance(model_chunk, DDP):
            model_chunk_all_buffers = [model_chunk.buffers, model_chunk.expert_parallel_buffers]
            for buffers in model_chunk_all_buffers:
                for buffer in buffers:
                    # sometimes, we don't want to load grad for pure inference
                    if load_grad and hasattr(buffer, "grad_data_size"):
                        buffer.grad_data.storage().resize_(buffer.grad_data_size)
                        buffer.grad_data.zero_()
                    # Zero storage size means the params were offloaded; restore
                    # the recorded size before copying back.
                    if buffer.param_data.storage().size() == 0:
                        buffer.param_data.storage().resize_(buffer.param_data_size)
                        # copy data from cpu to cuda
                        buffer.param_data.copy_(buffer.param_data.cpu_data, non_blocking=True)
        else:
            # we need this for ref module
            device_id = get_device_id()
            for _, param in model_chunk.named_parameters():
                param.data = param.data.to(device_id, non_blocking=True)
                if param.grad is not None:
                    param.grad = param.grad.to(device_id, non_blocking=True)
    gc.collect()
    get_torch_device().empty_cache()
@torch.no_grad()
def offload_megatron_copy_params(optimizers):
    """
    Offload optimizer parameters to CPU. Supports both Megatron optimizers
    and `ChainedOptimizer`, which wraps a list of underlying optimizers.

    Args:
        optimizers: The optimizer or ChainedOptimizer instance.
    """

    def _sub_optimizers(opt):
        # ChainedOptimizer is just a container of real optimizers.
        return opt.chained_optimizers if isinstance(opt, ChainedOptimizer) else [opt]

    def _move_to_cpu(item):
        # Tensors move directly; (nested) lists are walked; None is skipped.
        if item is None:
            return
        if isinstance(item, list):
            for member in item:
                _move_to_cpu(member)
        else:
            item.data = item.data.to("cpu", non_blocking=True)

    # Offload each underlying optimizer's shard_fp32_from_float16_groups, when present.
    for sub_opt in _sub_optimizers(optimizers):
        if hasattr(sub_opt, "shard_fp32_from_float16_groups"):
            _move_to_cpu(sub_opt.shard_fp32_from_float16_groups)
@torch.no_grad()
def load_megatron_copy_params(optimizers):
    """
    Load optimizer parameters back to GPU. Handles ChainedOptimizer.

    Args:
        optimizers: Optimizer or ChainedOptimizer instance.
    """

    def _sub_optimizers(opt):
        # ChainedOptimizer is just a container of real optimizers.
        return opt.chained_optimizers if isinstance(opt, ChainedOptimizer) else [opt]

    def _move_to_gpu(item):
        # Tensors move directly; (nested) lists are walked; None is skipped.
        if item is None:
            return
        if isinstance(item, list):
            for member in item:
                _move_to_gpu(member)
        else:
            item.data = item.data.to(get_device_id(), non_blocking=True)

    # Reload each underlying optimizer's shard_fp32_from_float16_groups, when present.
    for sub_opt in _sub_optimizers(optimizers):
        if hasattr(sub_opt, "shard_fp32_from_float16_groups"):
            _move_to_gpu(sub_opt.shard_fp32_from_float16_groups)
@torch.no_grad()
def offload_megatron_optimizer(optimizers):
    """Offload optimizer state (fp32 param copies and moment tensors) to CPU
    and release accelerator caches. Handles ChainedOptimizer and
    HybridDeviceOptimizer layouts."""

    def _iter_opts(opt):
        # ChainedOptimizer wraps a list of underlying optimizers.
        if isinstance(opt, ChainedOptimizer):
            return opt.chained_optimizers
        return [opt]

    for _opt in _iter_opts(optimizers):
        offload_megatron_copy_params(_opt)
        ## worker may hold zero parameter when enabling custom pipeline layout
        if _opt.optimizer is not None:
            # HybridDeviceOptimizer: offload all sub-optimizer states to CPU
            # TODO: this should be a method in Megatron-LM's HybridDeviceOptimizer
            hdo = _opt.optimizer
            if all(hasattr(hdo, attr) for attr in ("sub_optimizers", "inner_param_to_orig_param", "state")):
                for optimizer in hdo.sub_optimizers:
                    for param, state in optimizer.state.items():
                        for k, v in state.items():
                            if not isinstance(v, torch.Tensor):
                                continue
                            # Keep the outer (hdo) state dict and the inner optimizer
                            # state pointing at the same CPU tensor.
                            orig_param = hdo.inner_param_to_orig_param.get(param, param)
                            hdo.state[orig_param][k] = state[k] = v.to("cpu")
            else:
                # Plain Adam-style state: move first/second moments to CPU.
                opt_state_dict_values = _opt.optimizer.state.values()
                for v in opt_state_dict_values:
                    if "exp_avg" in v:
                        v["exp_avg"] = v["exp_avg"].to("cpu", non_blocking=True)
                    if "exp_avg_sq" in v:
                        v["exp_avg_sq"] = v["exp_avg_sq"].to("cpu", non_blocking=True)
    try:
        # Free TransformerEngine's dummy weight gradients cache
        # https://github.com/NVIDIA/TransformerEngine/blob/release_v2.10/transformer_engine/pytorch/module/base.py#L64
        from transformer_engine.pytorch.module.base import _dummy_wgrads

        _dummy_wgrads.clear()
    except ImportError:
        pass
    # Free Megatron-LM's global memory buffer
    get_global_memory_buffer().buffer.clear()
    gc.collect()
    get_torch_device().empty_cache()
@torch.no_grad()
def load_megatron_optimizer(optimizers):
    """Move offloaded optimizer state back onto the accelerator
    (inverse of ``offload_megatron_optimizer``)."""

    def _sub_optimizers(opt):
        # ChainedOptimizer is just a container of real optimizers.
        return opt.chained_optimizers if isinstance(opt, ChainedOptimizer) else [opt]

    for sub_opt in _sub_optimizers(optimizers):
        load_megatron_copy_params(sub_opt)
        # Worker may hold zero parameters when a custom pipeline layout is enabled.
        inner = sub_opt.optimizer
        if inner is None:
            continue
        if hasattr(inner, "_move_new_state_to_right_device"):
            # HybridDeviceOptimizer: only the GPU-resident state should move back.
            inner._move_new_state_to_right_device()
        else:
            # Plain Adam-style state: move first/second moments to the device.
            for state in inner.state.values():
                if "exp_avg" in state:
                    state["exp_avg"] = state["exp_avg"].to(get_device_id(), non_blocking=True)
                if "exp_avg_sq" in state:
                    state["exp_avg_sq"] = state["exp_avg_sq"].to(get_device_id(), non_blocking=True)
    gc.collect()
    get_torch_device().empty_cache()
def get_dist_checkpoint_path(checkpoint_path):
    """Ensure ``<checkpoint_path>/dist_ckpt`` exists and return that path."""
    local_mkdir_safe(checkpoint_path)
    dist_ckpt_dir = os.path.join(checkpoint_path, "dist_ckpt")
    local_mkdir_safe(dist_ckpt_dir)
    return dist_ckpt_dir
def get_hf_model_checkpoint_path(checkpoint_path):
    """Ensure ``<checkpoint_path>/huggingface`` exists and return that path."""
    local_mkdir_safe(checkpoint_path)
    hf_dir = os.path.join(checkpoint_path, "huggingface")
    local_mkdir_safe(hf_dir)
    return hf_dir
def get_transformer_config_checkpoint_path(checkpoint_path):
    """Return the ``transformer_config.json`` path under *checkpoint_path*, creating the directory."""
    target_dir = checkpoint_path
    os.makedirs(target_dir, exist_ok=True)
    config_file = os.path.join(target_dir, "transformer_config.json")
    return config_file
def convert_megatron_model_to_transformers_model(
    name,
    param,
    config: PretrainedConfig,
    tp_size: int,
    num_query_groups: int,
    convert_qkv_gate_up_by_trunk_concat=False,
):
    """Convert megatron model to transformers model.

    Maps one Megatron-named parameter (or a list of parameters for pre-split
    q/k/v and gate/up) to the corresponding HuggingFace parameter name(s).

    Args:
        name: Megatron parameter name (e.g. ``decoder.layers.N.self_attention...``).
        param: the tensor, or a list of tensors for pre-split fused layers.
        config: HuggingFace model config (head/hidden sizes are read from it).
        tp_size: tensor-parallel size the fused weights were sharded with.
        num_query_groups: number of query groups in the megatron model.
        convert_qkv_gate_up_by_trunk_concat: when True, de-interleave fused
            qkv / gate_up tensors into separate q/k/v and gate/up entries.

    Returns:
        ``(keys, values)`` views over the produced HF-named parameter dict.

    Raises:
        ValueError: if the parameter name is not recognized.
    """
    new_params = {}

    def convert_qkv_shard(full_tensor, q_name, k_name, v_name):
        # De-interleave a fused qkv tensor, laid out per TP shard and per query
        # group, into contiguous q, k and v tensors.
        nonlocal config
        nonlocal tp_size
        nonlocal num_query_groups
        q_shard_list = []
        k_shard_list = []
        v_shard_list = []
        hidden_size_per_head = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        if config.num_key_value_heads >= tp_size:
            # Each TP shard owns at least one full KV head.
            q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size
            kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
            total_size = q_size_tp + 2 * kv_size_tp
            for i in range(tp_size):
                num_query_groups_per_partition = num_query_groups // tp_size
                qkv_part = full_tensor[i * total_size : (i + 1) * total_size]
                q_size_chunk = q_size_tp // num_query_groups_per_partition
                kv_size_chunk = kv_size_tp // num_query_groups_per_partition
                for qkv_part_chunk in qkv_part.chunk(num_query_groups_per_partition):
                    q_part = qkv_part_chunk[:q_size_chunk]
                    k_part = qkv_part_chunk[q_size_chunk : q_size_chunk + kv_size_chunk]
                    v_part = qkv_part_chunk[q_size_chunk + kv_size_chunk :]
                    q_shard_list.append(q_part)
                    k_shard_list.append(k_part)
                    v_shard_list.append(v_part)
        else:
            # KV heads were replicated across TP shards: keep only one copy of
            # each k/v slice, taking it from every tp_size-th shard.
            q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size
            kv_size_tp = hidden_size_per_head
            total_size = q_size_tp + 2 * kv_size_tp
            for i in range(tp_size):
                num_query_groups_per_partition = num_query_groups // tp_size
                qkv_part = full_tensor[i * total_size : (i + 1) * total_size]
                q_size_chunk = q_size_tp // num_query_groups_per_partition
                kv_size_chunk = kv_size_tp // num_query_groups_per_partition
                for qkv_part_chunk in qkv_part.chunk(num_query_groups_per_partition):
                    q_part = qkv_part_chunk[:q_size_chunk]
                    k_part = qkv_part_chunk[q_size_chunk : q_size_chunk + kv_size_chunk]
                    v_part = qkv_part_chunk[q_size_chunk + kv_size_chunk :]
                    q_shard_list.append(q_part)
                    if i * config.num_key_value_heads % tp_size == 0:
                        k_shard_list.append(k_part)
                        v_shard_list.append(v_part)
        new_params[q_name] = torch.cat(q_shard_list, dim=0)
        new_params[k_name] = torch.cat(k_shard_list, dim=0)
        new_params[v_name] = torch.cat(v_shard_list, dim=0)

    def convert_gate_up_shard(full_tensor, gate_name, up_name):
        # De-interleave a fused gate_up tensor: each TP shard holds
        # [gate; up] stacked on dim 0.
        nonlocal config
        nonlocal tp_size
        intermediate_size_tp = config.intermediate_size // tp_size
        gate_weight_list = []
        up_weight_list = []
        for i in range(tp_size):
            gate_up_weight_tp = full_tensor[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)]
            gate_weight_tp = gate_up_weight_tp[:intermediate_size_tp]
            up_weight_tp = gate_up_weight_tp[intermediate_size_tp:]
            gate_weight_list.append(gate_weight_tp)
            up_weight_list.append(up_weight_tp)
        new_params[gate_name] = torch.cat(gate_weight_list, dim=0)
        new_params[up_name] = torch.cat(up_weight_list, dim=0)

    if name == "embedding.word_embeddings.weight":
        new_params["model.embed_tokens.weight"] = param
    elif "self_attention" in name:
        # Expected shape: decoder.layers.<N>.self_attention.<component>.<param_type>
        splitted_name = name.split(".")
        layer_number = splitted_name[2]
        component = splitted_name[4]
        param_type = splitted_name[5]
        if component == "linear_proj":
            new_params[f"model.layers.{layer_number}.self_attn.o_proj.weight"] = param
        elif component == "linear_qkv" and not isinstance(param, list):
            if param_type == "layer_norm_weight":
                new_params[f"model.layers.{layer_number}.input_layernorm.weight"] = param
            else:
                if convert_qkv_gate_up_by_trunk_concat:
                    convert_qkv_shard(
                        param,
                        f"model.layers.{layer_number}.self_attn.q_proj.{param_type}",
                        f"model.layers.{layer_number}.self_attn.k_proj.{param_type}",
                        f"model.layers.{layer_number}.self_attn.v_proj.{param_type}",
                    )
                else:
                    new_params[f"model.layers.{layer_number}.self_attn.qkv_proj.{param_type}"] = param
        elif component == "q_layernorm" or component == "k_layernorm":
            hf_component = component.replace("layer", "")
            new_params[f"model.layers.{layer_number}.self_attn.{hf_component}.weight"] = param
        else:
            # Pre-split q/k/v supplied as a 3-element list.
            assert isinstance(param, list) and len(param) == 3
            assert param_type == "weight" or param_type == "bias"
            new_params[f"model.layers.{layer_number}.self_attn.q_proj.{param_type}"] = param[0]
            new_params[f"model.layers.{layer_number}.self_attn.k_proj.{param_type}"] = param[1]
            new_params[f"model.layers.{layer_number}.self_attn.v_proj.{param_type}"] = param[2]
    elif "mlp" in name:
        # Expected shape: decoder.layers.<N>.mlp.<component>.<param_type>
        splitted_name = name.split(".")
        layer_number = splitted_name[2]
        component = splitted_name[4]
        param_type = splitted_name[5]
        if component == "linear_fc1" and not isinstance(param, list):
            if param_type == "layer_norm_weight":
                new_params[f"model.layers.{layer_number}.post_attention_layernorm.weight"] = param
            elif param_type == "weight":
                if convert_qkv_gate_up_by_trunk_concat:
                    convert_gate_up_shard(
                        param,
                        f"model.layers.{layer_number}.mlp.gate_proj.weight",
                        f"model.layers.{layer_number}.mlp.up_proj.weight",
                    )
                else:
                    new_params[f"model.layers.{layer_number}.mlp.gate_up_proj.weight"] = param
        elif component == "linear_fc1" and isinstance(param, list):
            # Pre-split gate/up supplied as a 2-element list.
            assert len(param) == 2
            assert param_type == "weight" or param_type == "bias"
            new_params[f"model.layers.{layer_number}.mlp.gate_proj.weight"] = param[0]
            new_params[f"model.layers.{layer_number}.mlp.up_proj.weight"] = param[1]
        elif component == "linear_fc2":
            new_params[f"model.layers.{layer_number}.mlp.down_proj.weight"] = param
    elif name == "decoder.final_layernorm.weight":
        new_params["model.norm.weight"] = param
    elif name == "output_layer.weight":
        new_params["lm_head.weight"] = param
    else:
        raise ValueError(f"Unknown param name: {name}")
    return new_params.keys(), new_params.values()
def broadcast_from_megatron_pp(tensor: torch.Tensor):
    """Broadcast a tensor held by exactly one pipeline-parallel rank to all PP ranks.

    Metadata (shape, dtype, TP attributes) is all-gathered first so non-owner
    ranks can allocate a matching empty tensor before the broadcast.

    Args:
        tensor: the local tensor, or ``None`` on ranks that do not own it.

    Returns:
        The broadcast tensor, present on every PP rank.

    Raises:
        ValueError: if the tensor is non-None on more than one PP rank.
    """
    # tensor is not None only in one of the pp ranks
    if tensor is not None:
        shape = tensor.shape
        dtype = tensor.dtype
        tensor_parallel = getattr(tensor, "tensor_model_parallel", None)
        partition_dim = getattr(tensor, "partition_dim", None)
        tensor_spec = (shape, dtype, tensor_parallel, partition_dim)
    else:
        tensor_spec = None
    tensor_spec_output = [None] * mpu.get_pipeline_model_parallel_world_size()
    torch.distributed.all_gather_object(
        object_list=tensor_spec_output, obj=tensor_spec, group=mpu.get_pipeline_model_parallel_group()
    )
    # find the src rank
    target_tensor_spec = None
    src_rank = None
    for rank, tensor_spec in enumerate(tensor_spec_output):
        if tensor_spec is not None:
            if target_tensor_spec is None:
                target_tensor_spec = tensor_spec
            else:
                raise ValueError("A tensor exists on two pp ranks")
            src_rank = rank
    assert target_tensor_spec is not None
    if tensor is None:
        # Allocate a receive buffer matching the owner's spec, restoring the
        # Megatron TP attributes when present.
        tensor = torch.empty(size=target_tensor_spec[0], dtype=target_tensor_spec[1], device=get_device_id())
        if target_tensor_spec[2] is not None:
            tensor.tensor_model_parallel = target_tensor_spec[2]
        if target_tensor_spec[3] is not None:
            tensor.partition_dim = target_tensor_spec[3]
    global_rank = torch.distributed.get_global_rank(group=mpu.get_pipeline_model_parallel_group(), group_rank=src_rank)
    torch.distributed.broadcast(tensor=tensor, src=global_rank, group=mpu.get_pipeline_model_parallel_group())
    return tensor
def broadcast_str_from_megatron_pp(obj: Any):
    """Broadcast an object held by exactly one pipeline-parallel rank to the whole PP group.

    Args:
        obj: the local object, or ``None`` on ranks that do not own it.

    Returns:
        The owner rank's object, on every PP rank.

    Raises:
        ValueError: if more than one PP rank holds a non-None object.
    """
    obj_output = [None] * mpu.get_pipeline_model_parallel_world_size()
    torch.distributed.all_gather_object(object_list=obj_output, obj=obj, group=mpu.get_pipeline_model_parallel_group())
    src_rank = None
    target_obj = None
    for rank, item in enumerate(obj_output):
        if item is not None:
            if target_obj is not None:
                raise ValueError("An object exists on two pp ranks")
            target_obj = item
            src_rank = rank
    assert target_obj is not None, "No valid object found to broadcast."
    global_rank = torch.distributed.get_global_rank(group=mpu.get_pipeline_model_parallel_group(), group_rank=src_rank)
    # NOTE(review): every rank already has target_obj from the all_gather above,
    # so this broadcast looks redundant — confirm before simplifying.
    obj_output = [None] * torch.distributed.get_world_size(group=mpu.get_pipeline_model_parallel_group())
    obj_output[0] = target_obj
    torch.distributed.broadcast_object_list(
        object_list=obj_output, src=global_rank, group=mpu.get_pipeline_model_parallel_group()
    )
    return obj_output[0]
def default_tp_concat_fn(
    layer_name_mapping,
    name,
    train_params,
    infer_params,
    model_config,
    hf_config=None,
    convert_qkv_gate_up_by_simple_split=False,
):
    """Concatenate TP-sharded parameter pieces into full (or split) tensors.

    Args:
        layer_name_mapping: mapping whose "qkv_layer_name" / "gate_proj_layer_name"
            entries identify fused QKV and gate/up projections by substring match.
        name: name of the parameter.
        train_params: training parameter; its TP partition dim is consulted on the
            generic concat path.
        infer_params (Iterable[torch.Tensor]): a iterator towards list of parameters
            all-gathered from micro_dp_group.
        model_config: huggingface model_config.
        hf_config: full HF config; only consulted for vision-model head counts.
        convert_qkv_gate_up_by_simple_split: if True, return the split pieces
            ([q, k, v] / [gate, up]) instead of one concatenated tensor for
            fused layers.

    Returns:
        A concatenated ``torch.Tensor``, or a list of tensors when
        ``convert_qkv_gate_up_by_simple_split`` is set for fused layers.

    TODO(zhangchi.usc1992): currently, the implementation is adhoc. We can move this
        function to the model definition so that it is model-agnostic. If the model
        doesn't implement this function, we can throw an error to force user disable
        TP HybridEngine.
    """
    from megatron.core import mpu

    train_tp_size = mpu.get_tensor_model_parallel_world_size()
    if layer_name_mapping.get("qkv_layer_name") in name and "layer_norm" not in name:
        # if the tensor is qkv, for each param on tp, split into q, k, v
        # concat q, k, v separately.
        q_lst = []
        k_lst = []
        v_lst = []
        num_attention_heads = model_config.num_attention_heads
        num_key_value_heads = model_config.num_key_value_heads
        if "vision_model" in name:
            num_attention_heads = hf_config.vision_config.num_heads
            num_key_value_heads = hf_config.vision_config.num_heads
        assert num_attention_heads % num_key_value_heads == 0
        num_q_per_kv = num_attention_heads // num_key_value_heads
        assert infer_params[0].shape[0] % (num_q_per_kv + 2) == 0, (
            f"param '{name}' shape '{infer_params[0].shape}' dim0 is not divisible by {num_q_per_kv + 2}"
        )
        kv_size_per_tp = infer_params[0].shape[0] // (num_q_per_kv + 2)
        # NOTE: the previous unconditional `split_size = [...]` assignment here was
        # dead code — it was always overwritten inside the loop below before use.
        for infer_param in infer_params:
            num_query_groups_per_partition = num_key_value_heads // train_tp_size
            for chunk in infer_param.chunk(num_query_groups_per_partition):
                split_size = [
                    kv_size_per_tp * num_q_per_kv // num_query_groups_per_partition,
                    kv_size_per_tp // num_query_groups_per_partition,
                    kv_size_per_tp // num_query_groups_per_partition,
                ]
                q, k, v = chunk.split(split_size)
                q_lst.append(q)
                k_lst.append(k)
                v_lst.append(v)
        q = torch.cat(q_lst, dim=0)
        k = torch.cat(k_lst, dim=0)
        v = torch.cat(v_lst, dim=0)
        infer_params = torch.cat((q, k, v), dim=0) if not convert_qkv_gate_up_by_simple_split else [q, k, v]
    elif (
        layer_name_mapping.get("gate_proj_layer_name") in name
        and "layer_norm" not in name
        and "vision_model.projection" not in name
    ):
        # if the tensor is gate and proj: each shard is [gate; up] on dim 0.
        gate_lst = []
        up_lst = []
        for infer_param in infer_params:
            gate, up = infer_param.chunk(2)
            gate_lst.append(gate)
            up_lst.append(up)
        gate = torch.cat(gate_lst, dim=0)
        up = torch.cat(up_lst, dim=0)
        infer_params = torch.cat((gate, up), dim=0) if not convert_qkv_gate_up_by_simple_split else [gate, up]
    elif "mlp.experts.linear_fc2.weight" in name:  # moe
        # Expert down-projection shards concatenate along dim 1.
        infer_params = torch.cat(infer_params, dim=1)
    else:
        # concat tensor along the TP partition dim recorded on the train param.
        infer_params = torch.cat(infer_params, dim=tp_utils.get_tensor_parallel_partition_dim(train_params))
    return infer_params
def per_tensor_generator(
    actor_module,
    model_config,
    weight_converter,
    transformer_config,
    layer_name_mapping,
    convert_qkv_gate_up_by_simple_split=True,
):
    """Stream the full model as (hf_name, tensor) pairs, one tensor at a time.

    Walks every parameter of a pipeline/tensor/expert-parallel Megatron actor,
    broadcasts each tensor across pipeline-parallel ranks, all-gathers across
    tensor/expert-tensor/expert-parallel groups, merges shards with
    ``default_tp_concat_fn`` and renames to the HuggingFace convention via
    ``weight_converter``.

    Args:
        actor_module: list of (possibly wrapped) Megatron model chunks, one per
            virtual pipeline stage.
        model_config: model config; ``tie_word_embeddings`` is consulted to skip
            the output layer when weights are shared with the embedding.
        weight_converter: converts Megatron param names/layouts to HF ones.
        transformer_config: Megatron TransformerConfig used to normalize names
            across pipeline stages.
        layer_name_mapping: mapping of special layer-name fragments (qkv,
            gate/up) consumed by the concat function.
        convert_qkv_gate_up_by_simple_split: if True, fused qkv / gate_up
            tensors are kept as split lists instead of being concatenated.

    Yields:
        (converted_name, detached_full_tensor) tuples, in a globally consistent
        order on every rank.
    """
    from megatron.core import parallel_state as mpu

    pp_rank = mpu.get_pipeline_model_parallel_rank()
    ep_size = mpu.get_expert_model_parallel_world_size()
    etp_size = mpu.get_expert_tensor_parallel_world_size()
    ep_group = mpu.get_expert_model_parallel_group()
    etp_group = mpu.get_expert_tensor_parallel_group()
    vpp_size = len(actor_module)
    all_gather_group = mpu.get_tensor_model_parallel_group()
    all_gather_group_size = torch.distributed.get_world_size(group=all_gather_group)

    def tensor_generator():
        # Lazily walk the local virtual-pipeline chunks, yielding (name, param).
        for scan_vpp_idx in range(vpp_size):
            existing_keys = set()
            model = unwrap_model(actor_module[scan_vpp_idx])
            for name, param in model.named_parameters():
                existing_keys.add(name)
                yield name, param
            # note
            # there is a bug in megatron GPTModel
            # decoder.layers[n].mlp.router.expert_bias" in GPTModel is not registered in named_parameter, but in
            # state_dict(). for now we patch it by adding those keys to extra_keys.
            extra_keys = [x for x in model.state_dict().keys() if "_extra_state" not in x and x not in existing_keys]
            for name in extra_keys:
                yield name, model.state_dict()[name].to(get_device_id())

    # we need first make all rank get full model information
    meta_info = []
    for scan_vpp_idx in range(vpp_size):
        existing_keys = set()
        model = unwrap_model(actor_module[scan_vpp_idx])
        for idx, (name, _) in enumerate(model.named_parameters()):
            existing_keys.add(name)
            meta_info.append((pp_rank, scan_vpp_idx, idx, name))
        extra_keys = [x for x in model.state_dict().keys() if "_extra_state" not in x and x not in existing_keys]
        # NOTE(review): `idx` here is the last index left over from the loop
        # above, so every extra key shares it; it is never used for lookup below.
        for name in extra_keys:
            meta_info.append((pp_rank, scan_vpp_idx, idx, name))

    obj_spec_output = [None] * mpu.get_pipeline_model_parallel_world_size()
    torch.distributed.all_gather_object(
        object_list=obj_spec_output, obj=meta_info, group=mpu.get_pipeline_model_parallel_group()
    )
    # Flatten the per-pp-rank metadata lists into one global, ordered layer list.
    layer_list_meta = [item for sublist in obj_spec_output for item in sublist]

    gen_func = tensor_generator()

    # lazy load tensor for full model
    for cur_pp_rank, scan_vpp_idx, idx, name in layer_list_meta:
        if model_config.tie_word_embeddings and ("output_layers" in name):
            import warnings

            warnings.warn(
                "Current model sharing word and embedding weights, skip output layer conversion", stacklevel=2
            )
            continue
        # Only the owning pp rank materializes the tensor; all other ranks pass
        # None and receive the data via the pp broadcast below.
        if cur_pp_rank == pp_rank:
            try:
                cur_name, cur_tensor = next(gen_func)
            except StopIteration:
                cur_name, cur_tensor = None, None
            cur_name = normalize_model_name(name, cur_pp_rank, scan_vpp_idx, transformer_config)
        else:
            cur_tensor, cur_name = None, None

        # pp broadcast model tensor and name
        cur_name = broadcast_str_from_megatron_pp(cur_name)
        broad_pp_tensor = broadcast_from_megatron_pp(cur_tensor)

        # (xya): this is a hack to fix the name of the parameters
        while cur_name.startswith("module."):
            cur_name = cur_name[len("module.") :]

        # EP
        if ".mlp.experts.linear_fc" in cur_name and ep_size > 1:
            # Expert-parallel weight: gather one shard per EP rank and emit each
            # global expert under its own (renumbered) weight name.
            num_experts = weight_converter.mcore_config.num_moe_experts
            num_experts_per_rank = num_experts // ep_size
            infer_params = [torch.empty_like(broad_pp_tensor) for _ in range(ep_size)]
            torch.distributed.all_gather(infer_params, broad_pp_tensor, group=ep_group)
            name_prefix, local_expert_id = cur_name.split(".weight")
            local_expert_id = int(local_expert_id)
            global_expert_ids = [num_experts_per_rank * ep_rank + local_expert_id for ep_rank in range(ep_size)]
            global_expert_names = [f"{name_prefix}.weight{expert_id}" for expert_id in global_expert_ids]
            for name, param in zip(global_expert_names, infer_params, strict=True):
                if etp_size > 1:
                    # gather etp
                    etp_params = [torch.empty_like(param) for _ in range(etp_size)]
                    torch.distributed.all_gather(etp_params, param, group=etp_group)
                    params = etp_params
                else:
                    params = [param]
                merge_params = default_tp_concat_fn(
                    layer_name_mapping,
                    name,
                    broad_pp_tensor,
                    params,
                    model_config,
                    weight_converter.hf_config,
                    convert_qkv_gate_up_by_simple_split,
                )
                if not isinstance(merge_params, list):
                    merge_params = [merge_params]
                converted_names, converted_params = weight_converter.convert_param(name, merge_params)
                yield from zip(converted_names, [param.detach() for param in converted_params], strict=True)
            continue

        # tp all gather
        if tp_utils.is_tensor_parallel_param(broad_pp_tensor):
            # allocate a new tensor with proper size
            if all_gather_group_size <= 1:
                infer_params = [broad_pp_tensor]
            else:
                infer_params = [torch.empty_like(broad_pp_tensor) for _ in range(all_gather_group_size)]
                torch.distributed.all_gather(infer_params, broad_pp_tensor, group=mpu.get_tensor_model_parallel_group())
            infer_params = default_tp_concat_fn(
                layer_name_mapping,
                cur_name,
                broad_pp_tensor,
                infer_params,
                model_config,
                weight_converter.hf_config,
                convert_qkv_gate_up_by_simple_split,
            )
        else:
            infer_params = broad_pp_tensor

        if not isinstance(infer_params, list):
            infer_params = [infer_params]
        converted_names, converted_params = weight_converter.convert_param(cur_name, infer_params)
        yield from zip(converted_names, [param.detach() for param in converted_params], strict=True)
def get_transformer_layer_offset(pipeline_rank, vp_stage, config: TransformerConfig):
    """
    Get the index offset of any pipeline stage, given the level of pipelining.
    Make pipeline_rank and vp_stage as two arguments to make it more flexible,
    which is able to fetch layer offset for any pipeline stage.
    The original function only returns the layer offset for current pipeline stage.
    Extension to https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/transformer/transformer_layer.py::get_transformer_layer_offset

    Args:
        pipeline_rank (int): pipeline stage to query (not necessarily the current one).
        vp_stage (int | None): virtual pipeline stage; must be provided whenever
            virtual pipeline parallelism is enabled.
        config (TransformerConfig): Megatron transformer config.

    Returns:
        int: global index of the first decoder layer owned by the given stage.
    """
    # Older Megatron builds do not accept `vp_stage` in is_pipeline_first_stage;
    # probe the signature and only pass the extra kwargs when supported.
    has_vp_stage = (
        inspect.signature(parallel_state.is_pipeline_first_stage).parameters.get("vp_stage", None) is not None
    )
    extra_kwargs = {} if not has_vp_stage else {"ignore_virtual": False, "vp_stage": vp_stage}
    if config.pipeline_model_parallel_size > 1:
        # An explicit per-stage layout, when configured, overrides all heuristics.
        if hasattr(config, "pipeline_model_parallel_layout") and config.pipeline_model_parallel_layout:
            from megatron.core.transformer.enums import LayerType

            offset = config.pipeline_model_parallel_layout.get_layer_offset(
                layer_type=LayerType.decoder, vp_stage=vp_stage
            )
        elif (
            config.num_layers_in_first_pipeline_stage is not None
            or config.num_layers_in_last_pipeline_stage is not None
        ):
            # Calculate number of pipeline stages to distribute the remaining Transformer
            # layers after deducting the Transformer layers in the first or the last stages
            middle_pipeline_stages = config.pipeline_model_parallel_size
            middle_pipeline_stages -= sum(
                [
                    1 if x is not None else 0
                    for x in (
                        config.num_layers_in_first_pipeline_stage,
                        config.num_layers_in_last_pipeline_stage,
                    )
                ]
            )

            # Calculate layers to distribute in each pipeline stage. If the
            # num_layers_in_first_pipeline_stage and num_layers_in_last_pipeline_stage
            # are not set, we will not enable uneven pipeline. All layers will be treated
            # as middle layers.
            num_layers_in_first_pipeline_stage = (
                0 if config.num_layers_in_first_pipeline_stage is None else config.num_layers_in_first_pipeline_stage
            )
            num_layers_in_last_pipeline_stage = (
                0 if config.num_layers_in_last_pipeline_stage is None else config.num_layers_in_last_pipeline_stage
            )

            middle_num_layers = (
                config.num_layers - num_layers_in_first_pipeline_stage - num_layers_in_last_pipeline_stage
            )

            if (vp_size := config.virtual_pipeline_model_parallel_size) is not None:
                assert vp_stage is not None, "vp_stage must be provided if virtual pipeline model parallel size is set"

                # Calculate number of layers in each virtual model chunk
                # If the num_layers_in_first_pipeline_stage and
                # num_layers_in_last_pipeline_stage are not set, all pipeline stages
                # will be treated as middle pipeline stages in the calculation
                num_layers_per_virtual_model_chunk_in_first_pipeline_stage = (
                    0
                    if config.num_layers_in_first_pipeline_stage is None
                    else config.num_layers_in_first_pipeline_stage // vp_size
                )

                num_layers_per_virtual_model_chunk_in_last_pipeline_stage = (
                    0
                    if config.num_layers_in_last_pipeline_stage is None
                    else config.num_layers_in_last_pipeline_stage // vp_size
                )

                num_layers_per_vritual_model_chunk_in_middle_pipeline_stage = middle_num_layers // vp_size

                # First stage + middle stage + last stage
                total_virtual_chunks = (
                    num_layers_per_virtual_model_chunk_in_first_pipeline_stage
                    + num_layers_per_vritual_model_chunk_in_middle_pipeline_stage
                    + num_layers_per_virtual_model_chunk_in_last_pipeline_stage
                )

                # Calculate the layer offset with interleaved uneven pipeline parallelism
                if pipeline_rank == 0:
                    offset = vp_stage * total_virtual_chunks
                else:
                    offset = (
                        vp_stage * total_virtual_chunks
                        + num_layers_per_virtual_model_chunk_in_first_pipeline_stage
                        + (pipeline_rank - 1)
                        * (num_layers_per_vritual_model_chunk_in_middle_pipeline_stage // middle_pipeline_stages)
                    )
            else:
                # Uneven pipeline without virtual stages: middle stages share the
                # remaining layers evenly.
                if middle_pipeline_stages > 0:
                    num_layers_per_pipeline_rank = middle_num_layers // middle_pipeline_stages
                else:
                    num_layers_per_pipeline_rank = 0

                middle_pipeline_rank = (
                    pipeline_rank if config.num_layers_in_first_pipeline_stage is None else pipeline_rank - 1
                )

                if pipeline_rank == 0:
                    offset = 0
                else:
                    offset = (middle_pipeline_rank * num_layers_per_pipeline_rank) + num_layers_in_first_pipeline_stage
        else:
            num_layers = config.num_layers

            # Increase the number of layers by one if we include the embedding (loss)
            # layer into pipeline parallelism partition and placement
            if config.account_for_embedding_in_pipeline_split:
                num_layers += 1

            if config.account_for_loss_in_pipeline_split:
                num_layers += 1

            num_layers_per_pipeline_rank = num_layers // config.pipeline_model_parallel_size

            if (vp_size := config.virtual_pipeline_model_parallel_size) is not None:
                assert vp_stage is not None, "vp_stage must be provided if virtual pipeline model parallel size is set"

                num_layers_per_virtual_rank = num_layers_per_pipeline_rank // vp_size
                total_virtual_chunks = num_layers // vp_size
                offset = vp_stage * total_virtual_chunks + (pipeline_rank * num_layers_per_virtual_rank)

                # Reduce the offset of embedding layer from the total layer number
                # NOTE(review): is_pipeline_first_stage reflects the *current*
                # process's stage, not `pipeline_rank` — confirm intended.
                if config.account_for_embedding_in_pipeline_split and not parallel_state.is_pipeline_first_stage(
                    **extra_kwargs
                ):
                    offset -= 1
            else:
                offset = pipeline_rank * num_layers_per_pipeline_rank

                # Reduce the offset of embedding layer from the total layer number
                if config.account_for_embedding_in_pipeline_split and not parallel_state.is_pipeline_first_stage(
                    **extra_kwargs
                ):
                    offset -= 1
    else:
        # No pipeline parallelism: every layer lives on this stage.
        offset = 0
    return offset
def register_megatron_training_hooks(model: list[torch.nn.Module], optimizer):
    """Wire Megatron training callbacks (grad scaling, grad/param sync) into each chunk's config.

    Mirrors the setup in Megatron-LM's training loop (see link below). Mutates the
    config objects returned by ``get_model_config`` for the chunks in ``model``.

    Args:
        model: list of DDP/FSDP-wrapped model chunks (one per virtual pipeline stage).
        optimizer: Megatron optimizer providing ``scale_loss`` and a ``config``
            carrying the overlap settings.
    """
    from megatron.core.distributed import finalize_model_grads
    from megatron.core.utils import get_model_config

    # Megatron's FSDP adapter is absent in older releases; fall back to DDP so
    # the isinstance check below still evaluates.
    try:
        from megatron.core.distributed.fsdp.mcore_fsdp_adapter import FullyShardedDataParallel as megatron_FSDP
    except ImportError:
        megatron_FSDP = DDP

    # register some callbacks for megatron training, following https://github.com/NVIDIA/Megatron-LM/blob/core_v0.15.0rc7/megatron/training/training.py#L2039-L2057
    for one_model in model:
        config = get_model_config(one_model)
        config.grad_scale_func = optimizer.scale_loss
        config.finalize_model_grads_func = finalize_model_grads

    # NOTE(review): `one_model`/`config` below are the values from the last loop
    # iteration; this assumes all chunks share equivalent ddp_config/config — confirm.
    overlap_param_gather = getattr(optimizer.config, "overlap_param_gather", False)
    overlap_grad_reduce = getattr(one_model.ddp_config, "overlap_grad_reduce", False)
    align_grad_reduce = True  # default to True, seldom to be false
    align_param_gather = getattr(one_model.ddp_config, "align_param_gather", False)
    if isinstance(model[0], megatron_FSDP | DDP) and overlap_grad_reduce:
        assert config.no_sync_func is None, (
            "When overlap_grad_reduce is True, config.no_sync_func must be None; "
            "a custom no_sync_func is not supported when overlapping grad-reduce"
        )
        # Megatron expects a bare callable (not a list) when there is one chunk.
        config.no_sync_func = [model_chunk.no_sync for model_chunk in model]
        if len(model) == 1:
            config.no_sync_func = config.no_sync_func[0]
        if align_grad_reduce:
            config.grad_sync_func = [model_chunk.start_grad_sync for model_chunk in model]
            if len(model) == 1:
                config.grad_sync_func = config.grad_sync_func[0]
    if overlap_param_gather and align_param_gather:
        config.param_sync_func = [model_chunk.start_param_sync for model_chunk in model]
        if len(model) == 1:
            config.param_sync_func = config.param_sync_func[0]
def mapping_string_to_attn_backend(args: dict) -> dict:
    """Replace a string-valued ``attention_backend`` entry with the AttnBackend enum, in place.

    Entries that are absent or already enum-valued are left untouched.
    Returns the same dict for convenience.
    """
    backend = args.get("attention_backend")
    if isinstance(backend, str):
        from megatron.core.transformer.enums import AttnBackend

        args["attention_backend"] = AttnBackend[backend]
    return args
def get_megatron_mtp_loss(n_micro_batch):
    """Collect Multi-Token-Prediction (MTP) loss metrics from Megatron's tracker.

    Args:
        n_micro_batch: number of micro batches; each contributes 1/n of the loss,
            matching the scaling used by Megatron-LM.

    Returns:
        dict mapping "mtp_losses/<name>" to a python float. Empty when no MTP
        metrics were tracked. The MTP loss is a global value (identical across
        micro batches), so it is reported once per call.
    """
    mtp_loss_scale = 1.0 / n_micro_batch
    # track_mtp_metrics fills this dict with entries like
    # {'mtp_1 loss': tensor(value, device='cuda:0')}.
    total_loss_dict = {}
    MTPLossLoggingHelper.track_mtp_metrics(
        loss_scale=mtp_loss_scale, iteration=0, writer=None, wandb_writer=None, total_loss_dict=total_loss_dict
    )
    return {
        f"mtp_losses/{key.replace(' ', '_')}": value.cpu().item() for key, value in total_loss_dict.items()
    }
def get_megatron_module_device(models: list[Any]) -> str:
    """Best-effort detection of where a Megatron model's weights currently live.

    Returns "cpu" for an empty model list, for a parameter-less module without
    buffers, or when the first buffer's parameter storage has been released
    (i.e. the weights were offloaded); otherwise the accelerator device name.
    """
    if not models:
        return "cpu"
    chunk = models[0]
    if not chunk.buffers:
        # No DDP grad/param buffers: fall back to the first parameter's device.
        for param in chunk.module.parameters():
            return param.device.type
        return "cpu"  # module has no parameters at all
    first_buffer = chunk.buffers[0]
    # A zero-sized storage means param_data was freed/offloaded to host memory.
    return "cpu" if first_buffer.param_data.storage().size() == 0 else get_device_name()
def check_mtp_config(model_config: HFModelConfig, engine_config: McoreEngineConfig):
    """Reconcile MTP (multi-token prediction) settings between the model and the engine.

    Mutates both configs in place:
    - propagates the loss scaling factor into the transformer overrides unless
      the user already set one;
    - zeroes the scaling factor when MTP is enabled for inference only
      (``enable_train`` off), which freezes the MTP parameters;
    - disables MTP when the checkpoint has no MTP layers, and conversely strips
      the MTP layers from hf_config when MTP is turned off.
    """
    has_mtp = getattr(model_config.hf_config, "num_nextn_predict_layers", 0) > 0
    enable_mtp = model_config.mtp.enable
    overrides = engine_config.override_transformer_config
    if "mtp_loss_scaling_factor" not in overrides:
        overrides["mtp_loss_scaling_factor"] = model_config.mtp.mtp_loss_scaling_factor
    if enable_mtp and not model_config.mtp.enable_train:
        # disable parameter update by configure the loss scale to 0
        overrides["mtp_loss_scaling_factor"] = 0
    # hf_config is adjusted before model construction; the runtime patch is
    # applied after initialization.
    if enable_mtp and not has_mtp:
        logger.error("enable mtp while model has no mtp layer, ignore model.mtp.enable")
        model_config.mtp.enable = False
        model_config.mtp.enable_train = False
    elif has_mtp and not enable_mtp:
        model_config.hf_config.num_nextn_predict_layers = 0
def patch_engine_mtp(module, model_config):
    """Apply the MTP runtime patches to one model chunk or a list of chunks.

    Patches the post-processing path of each (unwrapped) chunk and, when
    ``model_config.mtp.detach_encoder`` is set, also patches the MTP layer's
    embedding lookup so encoder gradients are detached.

    Args:
        module: a single model chunk or a list of chunks.
        model_config: model config carrying the ``mtp`` options.
    """
    logger.warning("Applying mtp patch...")
    from verl.models.mcore.mtp_patch import patch_mtp_layer_get_embeddings, patch_postprocess

    # Fix: removed a leftover debug `print(module)` that dumped the entire
    # model representation to stdout on every call.
    modules = module if isinstance(module, list) else [module]
    for m in modules:
        patch_postprocess(m)
        if model_config.mtp.detach_encoder:
            patch_mtp_layer_get_embeddings(m)
| verl__utils__megatron_utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains utilities to manipulate torch memory buffers
"""
from typing import Optional
import torch
from torch import nn
from verl.utils.device import get_device_name
class MemoryBuffer:
    """A contiguous 1-D tensor of a single dtype that backs multiple logical tensors.

    Several tensors share the underlying storage by taking `view`s at different
    offsets, which is why the buffer is restricted to one dtype.
    """

    def __init__(self, numel: int, numel_padded: int, dtype: torch.dtype, source: Optional[torch.Tensor] = None):
        self.numel = numel          # usable element count
        self.numel_padded = numel_padded  # allocated element count (>= numel, for alignment)
        self.dtype = dtype
        # Reuse caller-provided storage when given, otherwise allocate a fresh
        # zero-filled buffer on the current accelerator.
        self.data = (
            source
            if source is not None
            else torch.zeros(self.numel_padded, dtype=self.dtype, device=get_device_name(), requires_grad=False)
        )

    def zero(self):
        """Fill the whole buffer with zeros in place."""
        self.data.zero_()

    def get(self, shape, start_index):
        """Return a tensor of `shape` viewing the flat buffer from `start_index`."""
        end_index = start_index + shape.numel()
        assert end_index <= self.numel, "requested tensor is out of the buffer range."
        return self.data[start_index:end_index].view(shape)
def calc_padded_numel(shape: torch.Size, dtype: torch.dtype):
    """Return ``shape.numel()`` rounded up so a buffer slot stays 128-bit aligned.

    Args:
        shape: tensor shape whose element count is padded.
        dtype: element dtype; its bit width determines how many elements fit
            into one 128-bit alignment unit.

    Returns:
        int: the smallest multiple of the alignment unit >= ``shape.numel()``.
    """
    # torch.finfo only covers floating/complex dtypes and raises TypeError for
    # integer dtypes; fall back to torch.iinfo so integer buffers work too.
    if dtype.is_floating_point or dtype.is_complex:
        bits = torch.finfo(dtype).bits
    else:
        bits = torch.iinfo(dtype).bits
    align_numel = 128 // bits
    numel = shape.numel()
    return (numel + align_numel - 1) // align_numel * align_numel
def get_weight_buffer_meta_from_module(module: nn.Module) -> dict[str, dict]:
    """Describe every parameter of `module` as {name: {"shape", "dtype"}}, in name order."""
    return {
        name: {"shape": param.shape, "dtype": param.dtype}
        for name, param in sorted(module.named_parameters())
    }
def build_memory_buffer(weight_buffer_meta: dict[str, dict]) -> dict[torch.dtype, MemoryBuffer]:
    """Allocate one contiguous MemoryBuffer per dtype, sized to hold every tensor.

    Args:
        weight_buffer_meta: mapping from tensor name to {"shape", "dtype"}.

    Returns:
        dict mapping each dtype to a MemoryBuffer large enough for all tensors
        of that dtype (including per-tensor alignment padding).
    """
    totals: dict[torch.dtype, int] = {}
    for _name, meta_info in sorted(weight_buffer_meta.items()):
        shape = meta_info["shape"]
        dtype = meta_info["dtype"]
        assert isinstance(shape, torch.Size)
        assert isinstance(dtype, torch.dtype)
        # Each tensor occupies a padded slot so later views stay 128-bit aligned.
        totals[dtype] = totals.get(dtype, 0) + calc_padded_numel(shape, dtype)
    return {dtype: MemoryBuffer(numel, numel, dtype) for dtype, numel in totals.items()}
def build_memory_reference_from_module(
    module: torch.nn.Module, memory_buffers: dict[torch.dtype, MemoryBuffer], maintain_weight=True
):
    """Rebind every parameter of `module` to a view inside `memory_buffers`.

    Walks parameters in name order (matching build_memory_buffer's layout) and
    points each ``param.data`` at the corresponding buffer slice.

    Args:
        module: module whose parameters are moved into the shared buffers.
        memory_buffers: per-dtype buffers produced by build_memory_buffer.
        maintain_weight: when True, copy the current weights into the buffer
            before rebinding so values are preserved.
    """
    offsets = dict.fromkeys(memory_buffers, 0)
    for _name, param in sorted(module.named_parameters()):
        dtype = param.dtype
        view = memory_buffers[dtype].get(shape=param.shape, start_index=offsets[dtype])
        # Advance past the padded slot occupied by this parameter.
        offsets[dtype] += calc_padded_numel(param.shape, dtype)
        if maintain_weight:
            view.copy_(param.data)
        param.data = view
def build_memory_reference(weight_buffer_meta: dict[str, dict], memory_buffers: dict[torch.dtype, MemoryBuffer]):
    """Create named views into pre-allocated memory buffers.

    The buffers must come from build_memory_buffer with the same metadata, so
    the name-sorted traversal here reproduces the same layout.

    Args:
        weight_buffer_meta: mapping from tensor name to {"shape", "dtype"}.
        memory_buffers: per-dtype buffers to slice views from.

    Returns:
        dict mapping each tensor name to its view inside the matching buffer.
    """
    offsets = dict.fromkeys(memory_buffers, 0)
    weight_buffers = {}
    for name, meta_info in sorted(weight_buffer_meta.items()):
        shape = meta_info["shape"]
        dtype = meta_info["dtype"]
        weight_buffers[name] = memory_buffers[dtype].get(shape, start_index=offsets[dtype])
        offsets[dtype] += calc_padded_numel(shape, dtype)
    return weight_buffers
class MemoryBufferModuleWrapper:
    """Wraps an nn.Module so its parameters live in contiguous per-dtype buffers.

    Deliberately NOT an nn.Module itself: wrapping in a module would change the
    checkpoint parameter names.
    """

    def __init__(self, module: nn.Module):
        super().__init__()
        self.module = module
        # Describe every parameter, allocate matching buffers, then rebind the
        # module's parameters to views inside those buffers.
        self.weight_buffer_meta = get_weight_buffer_meta_from_module(self.module)
        self.memory_buffers = build_memory_buffer(self.weight_buffer_meta)
        build_memory_reference_from_module(self.module, self.memory_buffers)

    def get_memory_buffers(self):
        """Per-dtype MemoryBuffer mapping backing the wrapped module."""
        return self.memory_buffers

    def get_weight_buffer_meta(self):
        """Name -> {"shape", "dtype"} metadata for the wrapped module's parameters."""
        return self.weight_buffer_meta
class MegatronMemoryBufferForRollout:
    """Shared-memory weight store bridging a Megatron actor and an inference engine.

    Assumptions:
    - the inference engine runs tp + dp, the actor runs tp + pp + dp, and both
      share the same tp size;
    - memory_buffers: one dict[dtype, MemoryBuffer] per pp model;
    - weight_buffers: one dict[name, tensor-view] per pp model;
    - named_parameters: a flat dict with names normalized across pp/vpp. These
      names may not match the inference engine directly — layout mismatches
      (e.g. qkv transpose) are the caller's responsibility;
    - weight_buffers, named_parameters and memory_buffers alias the same GPU
      memory, and weight sync transfers data through the memory buffers.
    """

    def __init__(self, transform_memory_param_fn):
        self._memory_buffers = []
        self._weight_buffers = []
        self._named_parameters = {}
        # Callable turning the per-pp weight buffers into flat named parameters.
        self.transform_memory_param_fn = transform_memory_param_fn

    def initialize_weight_buffer(self, weight_buffer_meta_pp: list[dict[str, dict]]):
        """Allocate one large per-dtype buffer for each pp model.

        Args:
            weight_buffer_meta_pp: per-pp-model mapping from tensor name to
                {"shape", "dtype"}.
        """
        self.weight_buffer_meta_pp = weight_buffer_meta_pp
        for meta in weight_buffer_meta_pp:
            self._memory_buffers.append(build_memory_buffer(meta))
        # Views are materialized lazily by build_memory_reference().
        self._weight_buffers.extend([None] * len(weight_buffer_meta_pp))

    def build_memory_reference(self):
        """Create named views into the buffers and derive the flat parameter dict."""
        for i, meta in enumerate(self.weight_buffer_meta_pp):
            self._weight_buffers[i] = build_memory_reference(meta, self._memory_buffers[i])
        self._named_parameters = self.transform_memory_param_fn(self._weight_buffers)

    @property
    def named_parameters(self):
        return self._named_parameters

    @property
    def weight_buffers(self):
        return self._weight_buffers

    @property
    def memory_buffers(self):
        return self._memory_buffers
| verl__utils__memory_buffer.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import inspect
import logging
import os
from datetime import datetime
from pathlib import Path
import torch
from verl.utils.device import get_torch_device, is_cuda_available
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def aggressive_empty_cache(force_sync: bool = True, max_retries: int = 3) -> None:
    """Aggressively release GPU memory that PyTorch reserved but no longer allocates.

    Runs up to ``max_retries`` passes of garbage collection + cache clearing,
    stopping early once a pass frees little reserved memory.

    Args:
        force_sync: whether to synchronize the device after clearing the cache.
        max_retries: maximum number of cleanup passes.
    """
    device = get_torch_device()
    if not device.is_available():
        return

    for attempt in range(max_retries):
        # Snapshot usage so we can report how much this pass released.
        reserved_before = device.memory_reserved()
        allocated_before = device.memory_allocated()

        gc.collect()
        device.empty_cache()
        if force_sync:
            device.synchronize()

        reserved_freed = reserved_before - device.memory_reserved()
        allocated_freed = allocated_before - device.memory_allocated()

        logger.info(
            f"Memory cleanup attempt {attempt + 1}: Freed {reserved_freed / 1024**3:.2f} GB reserved, "
            f"{allocated_freed / 1024**3:.2f} GB allocated"
        )

        # Another pass is unlikely to help once a sweep frees under 1 GB.
        if reserved_freed < 1024**3:
            break
def reset_memory_stats() -> None:
    """Clear peak and accumulated GPU memory statistics, if a device is present."""
    device = get_torch_device()
    if device.is_available():
        device.reset_peak_memory_stats()
        device.reset_accumulated_memory_stats()
def get_memory_info() -> dict:
    """Return a summary of GPU memory usage in GiB (empty dict when no device)."""
    device = get_torch_device()
    if not device.is_available():
        return {}
    gib = 1024**3
    reserved = device.memory_reserved()
    allocated = device.memory_allocated()
    props = device.get_device_properties(device.current_device())
    return {
        "total_memory_gb": props.total_memory / gib,
        "reserved_memory_gb": reserved / gib,
        "allocated_memory_gb": allocated / gib,
        # "cached" = reserved by the allocator but not currently allocated.
        "cached_memory_gb": (reserved - allocated) / gib,
        "max_memory_allocated_gb": device.max_memory_allocated() / gib,
        "max_memory_reserved_gb": device.max_memory_reserved() / gib,
    }
def log_memory_usage(stage: str = "current") -> None:
    """Emit one INFO line summarizing current GPU memory usage for `stage`."""
    if not get_torch_device().is_available():
        return
    info = get_memory_info()
    fields = [
        ("Total", info["total_memory_gb"]),
        ("Allocated", info["allocated_memory_gb"]),
        ("Reserved", info["reserved_memory_gb"]),
        ("Cached", info["cached_memory_gb"]),
    ]
    body = ", ".join(f"{label}: {value:.2f} GB" for label, value in fields)
    logger.info(f"Memory usage [{stage}]: {body}")
def _optimize_memory(memory_fraction: float, force_sync: bool, mode: str) -> None:
    """Shared implementation: cap per-process GPU memory use, then clear caches."""
    if not get_torch_device().is_available():
        return
    get_torch_device().set_per_process_memory_fraction(memory_fraction)
    aggressive_empty_cache(force_sync=force_sync)
    logger.info(f"Optimized GPU memory usage for {mode}")


def optimize_memory_for_inference() -> None:
    """Optimize GPU memory usage for inference.

    Inference may use almost all device memory (95%) and performs a
    synchronized cache sweep.
    """
    # Refactor: the inference/training variants were copy-paste duplicates;
    # both now delegate to _optimize_memory with their respective settings.
    _optimize_memory(0.95, force_sync=True, mode="inference")


def optimize_memory_for_training() -> None:
    """Optimize GPU memory usage for training.

    Training leaves more headroom (90%) and clears caches without forcing a
    device synchronization.
    """
    _optimize_memory(0.9, force_sync=False, mode="training")
def enable_memory_visualize(
    trace_alloc_max_entries: int = 200_000,
    stack_depth: int = 32,
    context: str = "all",
    stacks: str = "all",
    devices=None,
    record_context: bool = True,
):
    """
    Enables memory history recording for CUDA allocations. This function
    should be called before any large-scale CUDA allocations. For DDP or
    multi-process setups, it must be called on each rank.

    Args:
        trace_alloc_max_entries (int): Maximum number of allocation entries
            to record.
        stack_depth (int): The depth of the call stack to capture for each
            allocation. (Supported by some PyTorch versions).
        context (str): The type of memory events to record.
            'alloc': records only allocation events.
            'state': records memory state changes.
            'all': records both.
        stacks (str): The type of call stacks to record.
            'python': records Python stacks.
            'cpp': records C++ stacks (available in some versions).
            'all': records both.
        devices (Union[int, list[int], None]): The device for which to enable
            memory history. `None` enables it for the current default device.
        record_context (bool): Whether to record context information for
            allocations. Required by older PyTorch versions.
    """
    # Memory history recording is CUDA-specific functionality
    if not is_cuda_available:
        logger.warning("[memory_visualize] Memory history recording is only available on CUDA devices")
        return

    f = get_torch_device().memory._record_memory_history
    # The _record_memory_history API changed across torch versions; inspect the
    # signature so we only pass keyword arguments this build understands.
    params = set(inspect.signature(f).parameters.keys())

    def _one_call(dev_kw=None):
        # Assemble version-appropriate kwargs, try the modern API, and fall
        # back to the legacy boolean API on TypeError.
        kwargs = {}
        if "context" in params:
            kwargs["context"] = context
        if "stacks" in params:
            kwargs["stacks"] = stacks
        if "max_entries" in params:
            kwargs["max_entries"] = trace_alloc_max_entries
        elif "trace_alloc_max_entries" in params:
            kwargs["trace_alloc_max_entries"] = trace_alloc_max_entries
        if "stack_depth" in params:
            kwargs["stack_depth"] = stack_depth
        if dev_kw is not None:
            if "device" in params:
                kwargs["device"] = dev_kw
            elif "devices" in params:
                kwargs["devices"] = dev_kw if isinstance(dev_kw, list) else [dev_kw]
        if "record_context" in params:
            kwargs["record_context"] = record_context
        try:
            f(**kwargs)
            return "native", kwargs
        except TypeError:
            # Legacy API: _record_memory_history(enabled=..., ...)
            try:
                if "trace_alloc_max_entries" in params and "record_context" in params:
                    f(enabled=True, trace_alloc_max_entries=trace_alloc_max_entries, record_context=True)
                    return "legacy", {
                        "enabled": True,
                        "trace_alloc_max_entries": trace_alloc_max_entries,
                        "record_context": True,
                    }
                else:
                    f(enabled=True)
                    return "legacy-min", {"enabled": True}
            except Exception:
                raise

    # A scalar/None device takes the single-call path; an iterable of devices
    # enables recording per device.
    if devices is None or isinstance(devices, str | int | torch.device):
        mode, used = _one_call(devices if devices is not None else None)
    else:
        mode, used = "multi-device", {}
        for d in list(devices):
            _mode, _used = _one_call(d)
            used[f"dev{d}"] = _used

    # Reset statistics so the recording covers only allocations made after this call.
    device = get_torch_device()
    if device.is_available():
        device.reset_peak_memory_stats()
        device.synchronize()

    rank = int(os.environ.get("RANK", "0") or 0)
    logger.info(f"[memory_visualize][rank {rank}] recording enabled ({mode}); args={used}")
class MemorySnapshotSampler:
    """Dumps GPU memory snapshots for offline inspection.

    Useful for monitoring memory usage over a long-running process. The dumped
    pickle files can be visualized with https://docs.pytorch.org/memory_viz

    Args:
        out_dir (str): The directory where the snapshots will be saved.
        tag (str): A tag for the snapshot filenames.
    """

    def __init__(self, out_dir: str = "./mem_snapshots", tag: str = "periodic"):
        self.out_dir = out_dir
        self.tag = tag

    def dump_memory_snapshot(self, out_dir: str = "./mem_snapshots", tag: str = "snapshot", sub_dir: str = None):
        """Dump a CUDA memory snapshot to a pickle file.

        Snapshots are grouped in a per-timestamp (or caller-chosen) subdirectory
        so the files from all ranks of one dump land together.

        Args:
            out_dir (str): Destination directory; created if missing.
            tag (str): Prefix for the snapshot filename.
            sub_dir (str): Optional subdirectory name; defaults to a
                minute-resolution timestamp.
        """
        subdir_name = datetime.now().strftime("%Y%m%d-%H%M") if sub_dir is None else sub_dir
        out_path = Path(out_dir) / subdir_name
        out_path.mkdir(parents=True, exist_ok=True)

        # Rank and pid make filenames unique across distributed workers.
        rank = os.environ.get("RANK", "0")
        pid = os.getpid()
        # todo(chenyang): check whether we need to sync all ranks before dump
        path = out_path / f"{tag}_rank{rank}_pid{pid}.pickle"

        device = get_torch_device()
        if not device.is_available():
            logger.warning("[memory_visualize] is only available on CUDA devices.")
            return
        try:
            device.synchronize()
            # Memory snapshot is CUDA-specific functionality
            device.memory._dump_snapshot(str(path))
            logger.info(f"[memory_visualize] dumped: {path}")
        except Exception as e:
            logger.info(f"[memory_visualize][warn] dump failed: {e}")
| verl__utils__memory_utils.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Metrics utils.
"""
from enum import Enum
from typing import Any, Optional, Union
import numpy as np
import torch
def reduce_metrics(metrics: dict[str, Union["Metric", list[Any]]]) -> dict[str, Any]:
    """Reduce each metric entry in place to a single value.

    The reduction is chosen from the key name: keys containing "max" use
    np.max, keys containing "min" use np.min, everything else uses np.mean
    (keys with both "max" and "min" resolve to max). Metric instances use
    their own configured aggregation instead.

    Args:
        metrics: mapping from metric name to a list of values or a Metric.

    Returns:
        The same dictionary, with every entry replaced by its reduced value.

    Example:
        >>> reduce_metrics({
        ...     "loss": [1.0, 2.0, 3.0],
        ...     "max_reward": [5.0, 8.0, 6.0],
        ...     "min_error": [0.1, 0.05, 0.2],
        ... })
        {'loss': 2.0, 'max_reward': 8.0, 'min_error': 0.05}
    """
    for key, values in metrics.items():
        if isinstance(values, Metric):
            metrics[key] = values.aggregate()
        else:
            reducer = np.max if "max" in key else np.min if "min" in key else np.mean
            metrics[key] = reducer(values)
    return metrics
class AggregationType(Enum):
    """Reduction modes supported by the Metric aggregator."""

    MEAN = "mean"
    SUM = "sum"
    MIN = "min"
    MAX = "max"
# Runtime tuple used for isinstance checks against accepted scalar-like values.
NumericType = int, float, torch.Tensor, np.ndarray
# Static-typing alias mirroring NumericType, for use in annotations.
Numeric = int | float | torch.Tensor | np.ndarray
class Metric:
    """
    A metric aggregator for collecting and aggregating numeric values.

    This class accumulates numeric values (int, float, or scalar tensors) and computes
    an aggregate statistic based on the specified aggregation type (MEAN, SUM, MIN, or MAX).

    Args:
        aggregation: The aggregation method to use. Can be a string ("mean", "sum", "min", "max")
            or an AggregationType enum value.
        value: Optional initial value(s) to add. Can be a single numeric value or a list of values.

    Example:
        >>> metric = Metric(aggregation="mean", value=1.0)
        >>> metric.append(2.0)
        >>> metric.append(3.0)
        >>> metric.aggregate()
        2.0
    """

    def __init__(self, aggregation: str | AggregationType, value: Optional[Numeric | list[Numeric]] = None) -> None:
        self.aggregation = AggregationType(aggregation) if isinstance(aggregation, str) else aggregation
        if not isinstance(self.aggregation, AggregationType):
            raise ValueError(f"Unsupported aggregation type: {aggregation}")
        self.values = []
        if value is not None:
            # Fixed: list initializers are documented as valid, but previously
            # went straight to append() and raised "Unsupported value type".
            if isinstance(value, list):
                self.extend(value)
            else:
                self.append(value)

    def append(self, value: Union[Numeric, "Metric"]) -> None:
        """Add one scalar value; another Metric is merged via extend()."""
        if isinstance(value, Metric):
            self.extend(value)
            return
        if isinstance(value, torch.Tensor):
            # Only 0-d / single-element tensors can be collapsed to a Python scalar.
            if value.numel() != 1:
                raise ValueError("Only scalar tensors can be converted to float")
            value = value.detach().item()
        if not isinstance(value, NumericType):
            raise ValueError(f"Unsupported value type: {type(value)}")
        self.values.append(value)

    def extend(self, values: Union["Metric", list[Numeric]]) -> None:
        """Append every value from a list or from another compatible Metric."""
        if isinstance(values, Metric):
            if values.aggregation != self.aggregation:
                raise ValueError(f"Aggregation type mismatch: {self.aggregation} != {values.aggregation}")
            values = values.values
        for value in values:
            self.append(value)

    def aggregate(self) -> float:
        """Reduce all collected values with this Metric's aggregation mode."""
        return self._aggregate(self.values, self.aggregation)

    @classmethod
    def _aggregate(cls, values: list[Numeric], aggregation: AggregationType) -> float:
        # Dispatch table covers every AggregationType member, which __init__ validates.
        reducers = {
            AggregationType.MEAN: np.mean,
            AggregationType.SUM: np.sum,
            AggregationType.MIN: np.min,
            AggregationType.MAX: np.max,
        }
        return reducers[aggregation](values)

    @classmethod
    def aggregate_dp(cls, metric_lists: list["Metric"]) -> float:
        """Aggregate the same metric collected independently on each dp rank.

        MEAN/SUM first average element-wise over ranks, then reduce over steps;
        MIN/MAX reduce over all values from all ranks.
        """
        if not metric_lists:
            raise ValueError("Cannot aggregate an empty list of metrics.")
        value_lists = [ml.values for ml in metric_lists]
        if not all(len(ls) == len(value_lists[0]) for ls in value_lists):
            raise ValueError(
                f"All Metric instances must have the same number of values "
                f"for dp aggregation: {[len(ls) for ls in value_lists]}"
            )
        value_arrays = np.array(value_lists)  # [num_dp, num_grad_accumulation]
        aggregation = metric_lists[0].aggregation
        if aggregation in (AggregationType.SUM, AggregationType.MEAN):
            # mean over dp ranks first, then reduce along accumulation steps
            return cls._aggregate(values=np.mean(value_arrays, axis=0), aggregation=aggregation)
        # MIN / MAX over all values from every rank
        return cls._aggregate(values=value_arrays.flatten(), aggregation=aggregation)

    @classmethod
    def from_dict(cls, data: dict[str, Numeric], aggregation: str | AggregationType) -> dict[str, "Metric"]:
        """Wrap each value of *data* in its own Metric with the given aggregation."""
        return {key: cls(value=value, aggregation=aggregation) for key, value in data.items()}

    def init_list(self) -> "Metric":
        """Return a fresh, empty Metric carrying the same aggregation mode."""
        return Metric(aggregation=self.aggregation)
| verl__utils__metric__utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities to create common models from huggingface
"""
import json
import os
import re
import warnings
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
from tensordict.tensorclass import NonTensorData
from torch import nn
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForImageTextToText,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelForVision2Seq,
GenerationConfig,
MistralForSequenceClassification,
PretrainedConfig,
PreTrainedModel,
)
from transformers.modeling_outputs import CausalLMOutputWithPast
from verl.models.registry import ModelRegistry
from verl.utils.import_utils import is_trl_available
class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable as an nn.Module, e.g. for use in nn.Sequential."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, *args, **kwargs):
        # Delegate straight to the wrapped callable.
        return self.fn(*args, **kwargs)
def squeeze(x):
    """Drop the trailing dimension of *x* if it has size 1 (no-op otherwise)."""
    return x.squeeze(dim=-1)
def update_model_config(module_config, override_config_kwargs):
    """Recursively apply override values onto a Huggingface config object.

    A dict-valued override descends into the matching sub-config attribute;
    any other value is set directly with setattr.

    Args:
        module_config: The module config from Huggingface Transformers.
        override_config_kwargs: The kwargs to override the module config.
    """
    for attr_name, override in override_config_kwargs.items():
        if isinstance(override, dict):
            # Recurse into the nested config section.
            update_model_config(getattr(module_config, attr_name), override)
        else:
            setattr(module_config, attr_name, override)
def get_huggingface_actor_config(
    model_name: str, override_config_kwargs=None, trust_remote_code=False
) -> PretrainedConfig:
    """Load a HF model config and apply (possibly nested) overrides.

    Args:
        model_name: HF hub id or local path of the model.
        override_config_kwargs: Optional dict of config overrides; nested dicts
            descend into sub-configs.
        trust_remote_code: Forwarded to AutoConfig.from_pretrained.

    Returns:
        The mutated PretrainedConfig instance.
        (Fixed: the previous annotation claimed a plain dict.)
    """
    if override_config_kwargs is None:
        override_config_kwargs = {}
    assert isinstance(override_config_kwargs, dict), (
        f"override_config_kwargs must be a dict, got {type(override_config_kwargs)}"
    )
    module_config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code)
    update_model_config(module_config, override_config_kwargs)
    return module_config
def get_generation_config(
    model: str,
    trust_remote_code: bool = False,
) -> Optional[GenerationConfig]:
    """Best-effort lookup of a model's generation config.

    Falls back to deriving one from the model config when the repo ships no
    generation_config.json; returns None when neither can be found.
    """
    try:
        return GenerationConfig.from_pretrained(model)
    except OSError:
        # No generation_config.json shipped with the model; derive one below.
        pass
    try:
        model_config = get_huggingface_actor_config(
            model,
            trust_remote_code=trust_remote_code,
        )
        return GenerationConfig.from_model_config(model_config)
    except OSError:
        # Model config unavailable as well; caller must handle None.
        return None
def create_huggingface_actor(model_name: str, override_config_kwargs=None, automodel_kwargs=None) -> nn.Module:
    """Instantiate a causal-LM actor from a HF config (no weights are loaded).

    Args:
        model_name: HF hub id or local path used to resolve the config.
        override_config_kwargs: Optional nested dict of config overrides.
        automodel_kwargs: Extra kwargs for AutoModelForCausalLM.from_config
            (e.g. torch_dtype, trust_remote_code).

    Returns:
        A freshly initialized nn.Module built from the config alone.
    """
    if override_config_kwargs is None:
        override_config_kwargs = {}
    if automodel_kwargs is None:
        automodel_kwargs = {}
    assert isinstance(override_config_kwargs, dict), (
        f"override_config_kwargs must be a dict, got {type(override_config_kwargs)}"
    )
    trust_remote_code = automodel_kwargs.get("trust_remote_code", False)
    module_config = get_huggingface_actor_config(
        model_name, override_config_kwargs, trust_remote_code=trust_remote_code
    )
    return AutoModelForCausalLM.from_config(module_config, **automodel_kwargs)
def create_huggingface_critic(model_name: str, override_config_kwargs=None, automodel_kwargs=None) -> nn.Module:
    """Build a critic by replacing an actor's lm_head with a scalar value head.

    Args:
        model_name: HF hub id or local path used to resolve the config.
        override_config_kwargs: Optional nested dict of config overrides.
        automodel_kwargs: Extra kwargs for model construction; its torch_dtype
            (default float32) is also used for the value head.

    Returns:
        The critic module whose lm_head emits one scalar per token.
    """
    critic_module: nn.Module = create_huggingface_actor(
        model_name, override_config_kwargs=override_config_kwargs, automodel_kwargs=automodel_kwargs
    )
    value_head_dtype = (automodel_kwargs or {}).get("torch_dtype", torch.float32)
    # One scalar output per position; LambdaLayer(squeeze) drops the trailing dim.
    critic_module.lm_head = nn.Sequential(
        nn.Linear(critic_module.config.hidden_size, 1, dtype=value_head_dtype),
        LambdaLayer(fn=squeeze),
    )
    return critic_module
def get_model_size(model: nn.Module, scale="auto"):
    """Count model parameters and express the count in a human-readable unit.

    Args:
        model: Module whose parameters are counted.
        scale: "B", "M", "K", "" or "auto" (pick the largest fitting unit).

    Returns:
        Tuple of (scaled parameter count, scale suffix).

    Raises:
        NotImplementedError: for an unknown scale string.
    """
    n_params = sum(p.numel() for p in model.parameters())

    if scale == "auto":
        # Pick the largest unit the raw count exceeds.
        if n_params > 1e9:
            scale = "B"
        elif n_params > 1e6:
            scale = "M"
        elif n_params > 1e3:
            scale = "K"
        else:
            scale = ""

    divisors = {"B": 1e9, "M": 1e6, "K": 1e3, "": None}
    if scale not in divisors:
        raise NotImplementedError(f"Unknown scale {scale}")
    if divisors[scale] is not None:
        n_params = n_params / divisors[scale]
    return n_params, scale
def print_model_size(model: nn.Module, name: str = None):
    """Print a one-line, auto-scaled parameter count for *model*.

    Falls back to the model's class name when *name* is not given.
    """
    n_params, scale = get_model_size(model, scale="auto")
    label = name if name is not None else model.__class__.__name__
    print(f"{label} contains {n_params:.2f}{scale} parameters")
def create_random_mask(
    input_ids: torch.Tensor,
    max_ratio_of_valid_token: float,
    max_ratio_of_left_padding: float,
    min_ratio_of_valid_token: float = 0,
):
    """Create a random mask given input_ids. Supports left and right padding.

    For each row, a left-padding length and a valid-token length are sampled;
    everything outside the resulting valid span is zeroed.

    Args:
        input_ids: Tensor of shape (batch_size, seq_len); only its shape/device are used.
        max_ratio_of_valid_token: Upper bound on valid tokens as a fraction of seq_len.
        max_ratio_of_left_padding: Upper bound on left padding as a fraction of seq_len.
        min_ratio_of_valid_token: Lower bound on valid tokens (at least 1 token).

    Returns:
        int64 mask with the same shape as input_ids (1 = valid, 0 = padding).
    """
    assert max_ratio_of_valid_token > 0 and max_ratio_of_valid_token <= 1.0
    assert max_ratio_of_left_padding >= 0 and max_ratio_of_left_padding < 1.0
    assert min_ratio_of_valid_token <= max_ratio_of_valid_token

    batch_size, sequence_length = input_ids.shape
    max_num_valid_tokens = int(sequence_length * max_ratio_of_valid_token)
    min_num_valid_tokens = max(1, int(sequence_length * min_ratio_of_valid_token))
    max_left_padding = int(sequence_length * max_ratio_of_left_padding)
    assert max_num_valid_tokens + max_left_padding <= sequence_length
    # Fixed: the second clause previously compared the *ratio* against
    # sequence_length, which made it vacuous.
    assert max_num_valid_tokens > 0 and max_num_valid_tokens <= sequence_length

    masks = torch.ones_like(input_ids, dtype=torch.int64)
    for i in range(batch_size):
        num_left_padding = np.random.randint(low=0, high=max_left_padding + 1, dtype=np.int64)
        num_valid = np.random.randint(low=min_num_valid_tokens, high=max_num_valid_tokens + 1, dtype=np.int64)
        # Zero the left padding and everything past the valid span
        # (slice assignment instead of per-index loops).
        masks[i, :num_left_padding] = 0
        masks[i, num_left_padding + num_valid :] = 0
    return masks
def compute_position_id_with_mask(mask):
    """Derive position ids from an attention mask.

    Valid tokens receive consecutive positions starting at 0; leading
    padding positions are clamped to 0.
    """
    return torch.clamp(mask.cumsum(dim=-1) - 1, min=0)
def convert_weight_keys(state_dict: dict[str, torch.Tensor], model: "PreTrainedModel"):
    """Map HF-renamed state-dict keys back to the original checkpoint names.

    Newer transformers may rename weights at load time via the model's
    `_checkpoint_conversion_mapping` (https://github.com/huggingface/transformers/pull/38385);
    this reverses that mapping so exported weights match the checkpoint layout.

    Fixed: the attribute may exist but be None on some model classes — now
    guarded instead of crashing on `.items()`.
    """
    mapping = getattr(model, "_checkpoint_conversion_mapping", None)
    if not mapping:
        # No conversion performed (attribute absent, None, or empty).
        return state_dict

    reverse_key_mapping = {v: k for k, v in mapping.items()}
    original_weights = {}
    for key, value in state_dict.items():
        for pattern, replacement in reverse_key_mapping.items():
            # Strip regex-only tokens (anchor, capture groups) from the replacement.
            replacement = replacement.lstrip("^")
            replacement = re.sub(r"\(.*\)", "", replacement)
            key, n_replace = re.subn(pattern, replacement, key)
            # Early exit of the loop: each key matches at most one pattern.
            if n_replace > 0:
                break
        original_weights[key] = value
    return original_weights
def check_exclude_modules(config, key: str) -> bool:
    """Return True when *key* matches the adapter config's exclude_modules.

    Adapted from https://github.com/huggingface/peft/blob/main/src/peft/tuners/tuners_utils.py

    A string exclude_modules is treated as a full-match regex. An iterable
    matches either on exact membership or on a ".<name>" suffix. An absent
    or empty exclude_modules never matches.

    Args:
        config: Adapter config (e.g. LoraConfig) to read exclude_modules from.
        key: Module key name to test.
    """
    excludes = getattr(config, "exclude_modules", None)
    if not excludes:
        return False
    if isinstance(excludes, str):
        return re.fullmatch(excludes, key) is not None
    if key in excludes:
        return True
    return any(key.endswith(f".{exclude_key}") for exclude_key in excludes)
def check_target_modules(config, key: str) -> bool:
    """
    A helper method to check if the passed module's key name matches any of the target modules in the adapter_config.
    Adapted from https://github.com/huggingface/peft/blob/main/src/peft/tuners/tuners_utils.py

    A string target_modules is a full-match regex; an iterable matches on exact
    membership or on a ".<name>" suffix. When the config restricts adaptation
    to specific layer indices (layers_to_transform), a name that matched is
    additionally required to carry one of those layer numbers.

    Args:
        config (`LoraConfig` | `LycorisConfig`): A config to match target modules from
        key (`str`): A key to search any matches in config

    Returns:
        True of match object if key matches any target modules from config, False if no match found
    """
    if isinstance(config.target_modules, str):
        # Full-match regex against the whole key.
        target_module_found = re.fullmatch(config.target_modules, key)
    elif key in config.target_modules:
        # this module is specified directly in target_modules
        target_module_found = True
    else:
        # Suffix match, e.g. "q_proj" matches "model.layers.0.q_proj".
        target_module_found = any(key.endswith(f".{target_key}") for target_key in config.target_modules)

    layer_indexes = getattr(config, "layers_to_transform", None)
    layers_pattern = getattr(config, "layers_pattern", None)

    # An empty list of layer indexes means "no restriction"; a bare int or a
    # non-empty list restricts which layer numbers are adapted.
    is_using_layer_indexes = layer_indexes is not None and (
        len(layer_indexes) != 0 if isinstance(layer_indexes, list) else True
    )
    if is_using_layer_indexes and target_module_found:
        layer_index = None
        # TODO: It's still unclear how empty layers_pattern (None, [], or "") should behave
        # For now, empty layers_pattern means any layer pattern is ok
        if layers_pattern is None or len(layers_pattern) == 0:
            # Default: the layer number is the digit segment after any single segment.
            layer_index = re.match(r".*\.[^.]*\.(\d+)\.", key)
        else:
            layers_pattern = [layers_pattern] if isinstance(layers_pattern, str) else layers_pattern
            for pattern in layers_pattern:
                # First pattern that yields a layer number wins.
                layer_index = re.match(rf".*\.{pattern}\.(\d+)\.", key)
                if layer_index is not None:
                    break

        if layer_index is None:
            # Matched by name but carries no recognizable layer number: reject.
            target_module_found = False
        else:
            layer_index = int(layer_index.group(1))
            if isinstance(layer_indexes, int):
                target_module_found = layer_index == layer_indexes
            else:
                target_module_found = layer_index in layer_indexes

    return target_module_found
def normalize_model_name(name, pp_rank, vpp_rank, transformer_config, layer_name="layers"):
    """
    Transform the model name in each model_chunk in each pp stage into the name in inference engine

    The layer index embedded in *name* is shifted by this pp/vpp stage's global
    layer offset so per-stage local indices become global layer numbers.
    Names without a *layer_name* segment are returned unchanged.
    """
    from verl.utils.megatron_utils import get_transformer_layer_offset

    layer_offset = get_transformer_layer_offset(pp_rank, vpp_rank, transformer_config)

    if layer_name in name:  # belongs to an intermediate layer
        split_name = name.split(".")
        # Find the segment equal to layer_name; the next segment is the local
        # layer index. (Fixed: the loop variable previously reused and
        # shadowed the `name` parameter.)
        for segment_idx, segment in enumerate(split_name):
            if segment == layer_name:
                break
        layer_num_idx = segment_idx + 1
        # check the name
        assert len(split_name) >= layer_num_idx + 1, f"split_name = {split_name}"
        assert split_name[layer_num_idx].isdigit(), f"split_name = {split_name}"
        # increment the local index by layer_offset to get the global index
        split_name[layer_num_idx] = str(int(split_name[layer_num_idx]) + layer_offset)
        name = ".".join(split_name)  # weight name in inference_tp_model
    return name
def normalize_pp_vpp_params(params, num_hidden_layers, layer_name="layers"):
    """
    Normalize the pp vpp params into a complete named parameters.
    This is useful when gather parameters from pp ranks and passed to a model without pp

    params: Iterable[List[Dict[str, param]]]
        params contains a list of pp, with a list of vpp named_parameters in each vpp chunk.
    output: Dict[str, param]
    """
    pp_size = len(params)
    for pp_rank in range(len(params)):
        vpp_size = len(params[pp_rank])
        for vpp_rank in range(vpp_size):
            for name, param in params[pp_rank][vpp_rank].items():
                # NOTE(review): this call passes 6 positional arguments, but
                # normalize_model_name (defined above) accepts
                # (name, pp_rank, vpp_rank, transformer_config, layer_name=...)
                # — executing this would raise TypeError. The two functions
                # appear to have drifted apart; confirm which signature is
                # current before relying on this generator.
                normalized_name = normalize_model_name(
                    name, pp_rank, vpp_rank, pp_size, vpp_size, num_hidden_layers, layer_name=layer_name
                )
                yield normalized_name, param
def get_parallel_model_from_config(
    config, megatron_config, pre_process=None, post_process=None, share_embeddings_and_output_weights=False, value=False
):
    """Instantiate the registered Megatron-parallel model class for *config*.

    Args:
        config: HF model config carrying the architectures list.
        megatron_config: A megatron.core ModelParallelConfig instance.
        pre_process / post_process: Pipeline-stage flags forwarded to the model.
        share_embeddings_and_output_weights: Tie input/output embeddings.
        value: Select the value-model variant of the architecture.

    Returns:
        The constructed parallel model.
    """
    from megatron.core import ModelParallelConfig

    assert isinstance(megatron_config, ModelParallelConfig)
    model_class = _get_parallel_model_architecture_from_config(config, value)
    return model_class(
        config,
        megatron_config,
        pre_process=pre_process,
        post_process=post_process,
        share_embeddings_and_output_weights=share_embeddings_and_output_weights,
    )
def _get_parallel_model_architecture_from_config(config: "PretrainedConfig", value=False) -> type[nn.Module]:
    """Resolve the first registered parallel model class for config.architectures.

    Args:
        config: HF config whose `architectures` list is scanned in order.
        value: Request the value-model variant from the registry.

    Raises:
        ValueError: when none of the listed architectures is registered.
    """
    architectures = getattr(config, "architectures", [])
    for arch in architectures:
        model_cls = ModelRegistry.load_model_cls(arch, value)
        # Removed: stray debug print that fired on every iteration.
        if model_cls is not None:
            return model_cls
    raise ValueError(
        f"Model architectures {architectures} are not supported for now. Supported architectures: "
        f"{ModelRegistry.get_supported_archs()}"
    )
def _load_hf_model(config, model_config, is_value_model):
    """Helper function containing the loading hf model logic.

    Downloads the checkpoint if it lives on HDFS, then loads the HF model with
    real weights only on the designated source rank (all other ranks build it
    with empty/meta weights to save host memory).

    Returns:
        Tuple of (architectures, model, state_dict, is_value_model); the
        is_value_model flag may be forced True by the mistral7b-rm workaround.
    """
    from accelerate import init_empty_weights
    from megatron.core import parallel_state as mpu

    from verl.models.mcore.saver import _megatron_calc_global_rank

    assert hasattr(model_config, "architectures"), "architectures cannot be empty when load weight!"
    architectures = getattr(model_config, "architectures", [])

    # get auto class
    auto_cls = get_hf_auto_model_class(model_config)

    if config.model.path.startswith("hdfs:"):
        from verl.utils.fs import copy_to_local

        print(f"start download from {config.model.path}")
        local_model_path = copy_to_local(src=config.model.path, use_shm=config.model.get("use_shm", False))
        print("finish download")
    else:
        local_model_path = config.model.path
        print(f"load from local dir {local_model_path}")

    # Only the src_rank (tp0/dp0/pp0 at this cp rank) holds real weights on CPU;
    # every other rank initializes the model with empty weights.
    src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=0, cp_rank=mpu.get_context_parallel_rank())
    cpu_init_weights = lambda: torch.device("cpu")
    init_context = init_empty_weights if torch.distributed.get_rank() != src_rank else cpu_init_weights
    with init_context(), warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # TODO: to find a better way to load mistral7b-rm lm_head
        if "mistral7b-rm" in config.model.path:
            model = MistralForSequenceClassification.from_pretrained(
                local_model_path,
                torch_dtype="auto",
                # device_map="auto", # disable auto device_map, the HF weight is only loaded to CPU in src_rank
                # low_cpu_mem_usage=True
            )  # use score head instead of lm_head
            state_dict = model.state_dict()
            # The reward model stores its head under "score"; alias it to lm_head.
            state_dict["lm_head.weight"] = state_dict["score.weight"]
            state_dict["model.embed_tokens.weight"] = state_dict["model.embed_tokens.weight"][
                :32000
            ]  # workaround, 32001 -> 32000
            is_value_model = True
        else:
            model = auto_cls.from_pretrained(
                local_model_path,
                torch_dtype="auto",
                # device_map="auto", # disable auto device_map, the HF weight is only loaded to CPU in src_rank
                # low_cpu_mem_usage=True
            )
            state_dict = model.state_dict()

    return architectures, model, state_dict, is_value_model
def get_hf_model_path(config):
    """Resolve config.model.path to a local filesystem path.

    HDFS paths are downloaded first (optionally into shared memory);
    anything else is assumed to already be local.
    """
    model_path = config.model.path
    if model_path.startswith("hdfs:"):
        from verl.utils.fs import copy_to_local

        return copy_to_local(src=model_path, use_shm=config.model.get("use_shm", False))
    return model_path
def load_megatron_model_weights(config, model_config, parallel_model, params_dtype, is_value_model=False):
    """Load weights for verl customized model.

    Loads the HF checkpoint once, then dispatches each listed architecture to
    its registered weight loader, which shards the state dict into the
    parallel model chunks. Returns the HF model config.
    """
    architectures, hf_model, state_dict, is_value_model = _load_hf_model(config, model_config, is_value_model)

    from verl.models.weight_loader_registry import get_weight_loader

    print(f"before weight loader: architectures = {architectures}...")
    for arch in architectures:
        print(f"call weight loader arch = {arch}, model config = {hf_model.config}")
        weight_loader = get_weight_loader(arch)
        weight_loader(
            state_dict=state_dict,
            wrapped_models=parallel_model,
            config=hf_model.config,
            params_dtype=params_dtype,
            is_value_model=is_value_model,
            tie_word_embeddings=model_config.tie_word_embeddings,
        )
    return hf_model.config
def load_megatron_gptmodel_weights(config, model_config, parallel_model, params_dtype, is_value_model=False):
    """Load weights for mcore GPT model.

    Loads the HF checkpoint and shards its state dict into the mcore GPTModel
    chunks, then drops the HF copy to release host memory.
    """
    _, hf_model, state_dict, is_value_model = _load_hf_model(config, model_config, is_value_model)

    from verl.models.mcore.loader import load_state_dict_to_megatron_gptmodel

    load_state_dict_to_megatron_gptmodel(
        state_dict=state_dict,
        wrapped_models=parallel_model,
        config=hf_model.config,
        params_dtype=params_dtype,
        is_value_model=is_value_model,
    )
    # Free the HF-side weights as soon as they have been sharded in.
    del state_dict, hf_model
# pad input_ids_rmpad, cu_seqlens and max_seqlen_in_batch to be divisible by tp
def pad_packed_inputs(unpad_tokens: torch.Tensor, cu_seqlens, max_seqlen_in_batch, size):
    """pad the tokens such that the total length is a multiple of size.

    This function is useful when applying sequence parallel and context parallel

    Args:
        unpad_tokens: (total_nnz, ...). Tokens after removing padding
        cu_seqlens: (total_nnz + 1,) cumulative sequence lengths
        max_seqlen_in_batch: int
        size: alignment multiple (e.g. tp * cp size)

    Returns:
        Tuple of (padded tokens, padded cu_seqlens, updated max_seqlen_in_batch).
    """
    F = nn.functional

    total_nnz = unpad_tokens.shape[0]
    pad_size = 0 if total_nnz % size == 0 else size - total_nnz % size

    # we assume adding a new data in the batch with seqlen pad_size
    if pad_size > 0:
        if unpad_tokens.ndim == 1:
            unpad_tokens = F.pad(unpad_tokens, (0, pad_size))
        elif unpad_tokens.ndim == 2:
            unpad_tokens = F.pad(unpad_tokens, (0, 0, 0, pad_size))
        else:
            # Fixed: `ndim` is an attribute — calling it raised TypeError
            # instead of the intended NotImplementedError.
            raise NotImplementedError(f"Padding dim {unpad_tokens.ndim} is not supported")

        # the padding behaves like one extra sequence of length pad_size
        cu_seqlens = F.pad(cu_seqlens, (0, 1), value=pad_size + cu_seqlens[-1])
        max_seqlen_in_batch = max(max_seqlen_in_batch, pad_size)

    return unpad_tokens, cu_seqlens, max_seqlen_in_batch
def load_mcore_dist_weights(parallel_model, dist_weight_path, is_value_model=False, prefix=""):
    """Load a Megatron dist-checkpoint into each chunk of *parallel_model*.

    Value models replace the language-model output head, so their
    "output_layer" entries are removed from the sharded state dict before
    loading (the checkpoint does not contain them).
    """
    from megatron.core import dist_checkpointing
    from megatron.core.dist_checkpointing.serialization import StrictHandling

    from verl.utils.megatron_utils import unwrap_model

    # strict = StrictHandling.IGNORE_ALL if is_value_model else StrictHandling.ASSUME_OK_UNEXPECTED
    strict = StrictHandling.ASSUME_OK_UNEXPECTED
    for model_chunk in parallel_model:
        sharded_sd = unwrap_model(model_chunk).sharded_state_dict(prefix=prefix)
        if is_value_model:
            for key in [k for k in sharded_sd if "output_layer" in k]:
                sharded_sd.pop(key)
        dist_checkpointing.load(sharded_sd, dist_weight_path, strict=strict)

    return
def get_parallel_gptmodel_from_config(
    tfconfig, hf_config, pre_process=None, post_process=None, share_embeddings_and_output_weights=False, value=False
):
    """Build a megatron-core GPTModel from a transformer config and HF config.

    Only RMSNorm models are supported, and only "linear" rope scaling. When
    *value* is set and this is the last pipeline stage, the vocab-sized output
    projection is replaced with a scalar value head.
    """
    from megatron.core.models.gpt.gpt_layer_specs import get_gpt_decoder_block_spec
    from megatron.core.models.gpt.gpt_model import GPTModel

    use_te = True
    assert tfconfig.normalization == "RMSNorm", "only RMSNorm is supported for now"
    transformer_layer_spec = get_gpt_decoder_block_spec(tfconfig, use_transformer_engine=use_te)

    rope_scaling_args = {}
    if hf_config.rope_scaling is not None:
        assert hf_config.rope_scaling["type"] == "linear", "only linear scaling is supported for now"
        rope_scaling_args["seq_len_interpolation_factor"] = hf_config.rope_scaling["factor"]

    parallel_model = GPTModel(
        config=tfconfig,
        transformer_layer_spec=transformer_layer_spec,
        vocab_size=hf_config.vocab_size,
        max_sequence_length=hf_config.max_position_embeddings,
        pre_process=pre_process,
        post_process=post_process,
        share_embeddings_and_output_weights=share_embeddings_and_output_weights,
        position_embedding_type="rope",
        rotary_base=hf_config.rope_theta,
        **rope_scaling_args,
    )
    if post_process and value:
        from verl.models.llama.megatron.layers.parallel_linear import LinearForLastLayer

        # Scalar value head replaces the vocab-sized output projection.
        parallel_model.output_layer = LinearForLastLayer(input_size=tfconfig.hidden_size, output_size=1, config=tfconfig)
    return parallel_model
def patch_valuehead_model(model) -> None:
    """Patch a trl AutoModelForCausalLMWithValueHead instance so it behaves
    like a regular PreTrainedModel for saving/loading.

    Adds pass-through implementations of tie_weights and the embedding
    accessors, disables generation, and excludes the wrapped backbone's
    parameters from checkpoint saving.
    """
    from types import MethodType

    from transformers import PreTrainedModel
    from trl import AutoModelForCausalLMWithValueHead

    def tie_weights(self: "AutoModelForCausalLMWithValueHead") -> None:
        if isinstance(self.pretrained_model, PreTrainedModel):
            self.pretrained_model.tie_weights()

    def get_input_embeddings(self: "AutoModelForCausalLMWithValueHead") -> torch.nn.Module:
        if isinstance(self.pretrained_model, PreTrainedModel):
            return self.pretrained_model.get_input_embeddings()

    def get_output_embeddings(self: "AutoModelForCausalLMWithValueHead") -> torch.nn.Module:
        if isinstance(self.pretrained_model, PreTrainedModel):
            return self.pretrained_model.get_output_embeddings()

    def can_generate(self):
        # The value-head variant is used for scoring, never for generation.
        return False

    # The wrapped backbone's weights are persisted separately; skip them here.
    model._keys_to_ignore_on_save = [name for name, _ in model.named_parameters() if "pretrained_model" in name]
    for method in (tie_weights, get_input_embeddings, get_output_embeddings, can_generate):
        setattr(model, method.__name__, MethodType(method, model))
    model._no_split_modules = getattr(model.pretrained_model, "_no_split_modules", [])
def load_valuehead_model(local_path, torch_dtype, model_config, trust_remote_code):
    """Load a value-head (reward/critic) model from *local_path*.

    Tries a plain AutoModelForTokenClassification first; when the checkpoint
    is not of that form, falls back to wrapping a causal-LM / VLM backbone
    with trl's AutoModelForCausalLMWithValueHead.

    Raises:
        RuntimeError: when the fallback is needed but trl is not installed.
    """
    from transformers import AutoModelForCausalLM, AutoModelForTokenClassification, AutoModelForVision2Seq

    try:
        return AutoModelForTokenClassification.from_pretrained(
            pretrained_model_name_or_path=local_path,
            torch_dtype=torch_dtype,
            config=model_config,
            attn_implementation="flash_attention_2",
            trust_remote_code=trust_remote_code,
        )
    # Fixed: was `except BaseException`, which also swallowed
    # KeyboardInterrupt / SystemExit.
    except Exception as e:
        if not is_trl_available():
            raise RuntimeError(
                f"model({local_path}) is not a value head model, please install trl to make it valid"
            ) from e

    from trl import AutoModelForCausalLMWithValueHead

    # VLM configs must be loaded through the vision-to-seq auto class.
    if type(model_config) in AutoModelForVision2Seq._model_mapping.keys():
        module_class = AutoModelForVision2Seq
    else:
        module_class = AutoModelForCausalLM
    ori_model = module_class.from_pretrained(
        pretrained_model_name_or_path=local_path,
        torch_dtype=torch_dtype,
        config=model_config,
        attn_implementation="flash_attention_2",
        trust_remote_code=trust_remote_code,
    )
    model = AutoModelForCausalLMWithValueHead.from_pretrained(ori_model)
    patch_valuehead_model(model)
    return model
# Maps an architecture-name suffix (e.g. "LlamaForCausalLM" ends with
# "ForCausalLM") to the HF auto class used to load it.
# Consumed by get_hf_auto_model_class below.
_architecture_to_auto_class = {
    "ForCausalLM": AutoModelForCausalLM,
    "ForVision2Seq": AutoModelForVision2Seq,
    "ForTokenClassification": AutoModelForTokenClassification,
    "ForSequenceClassification": AutoModelForSequenceClassification,
}
def get_hf_auto_model_class(hf_config):
    """Pick the HF auto class to load a model described by *hf_config*.

    First consults the config's remote-code auto_map; then the result is
    re-evaluated by a VLM type check and an architecture-suffix table, so
    the auto_map choice only survives when neither of those matches.
    """
    # Remote code: the config's auto_map says which auto class the custom
    # architecture is meant to be loaded through.
    has_remote_code = hasattr(hf_config, "auto_map") and any(
        hf_config.architectures[0] in val for val in hf_config.auto_map.values()
    )
    if has_remote_code:
        auto_class = next(k for k, v in hf_config.auto_map.items() if hf_config.architectures[0] in v)
        match auto_class:
            case "AutoModelForVision2Seq":
                actor_module_class = AutoModelForVision2Seq
            case "AutoModelForCausalLM":
                actor_module_class = AutoModelForCausalLM
            case "AutoModelForImageTextToText":
                actor_module_class = AutoModelForImageTextToText
            case _:
                actor_module_class = AutoModel
    else:
        actor_module_class = AutoModel

    # NOTE(review): the choice above is unconditionally re-decided below —
    # it is kept only when the VLM type check fails AND no suffix in
    # _architecture_to_auto_class matches. Confirm this precedence is intended.
    # For VLM models, we use type to check instead of architecture
    if type(hf_config) in AutoModelForImageTextToText._model_mapping.keys():
        actor_module_class = AutoModelForImageTextToText
    else:
        for key, cls in _architecture_to_auto_class.items():
            if key in hf_config.architectures[0]:
                actor_module_class = cls
                break

    return actor_module_class
def extract_multi_modal_inputs(
    batch_data: list[dict[str, torch.Tensor]],
    indices: Optional[list[int]] = None,
) -> dict[str, torch.Tensor | list[torch.Tensor]]:
    """
    Extract and process multi-modal inputs from a batch.

    Args:
        batch_data (list[dict[str, torch.Tensor]]): Batch entries that may each
            carry a dict of multi-modal inputs (None for pure-text samples).
        indices (Optional[list[int]]): When given, only entries at these
            positions are considered; out-of-range indices are skipped.

    Returns:
        dict[str, torch.Tensor | list[torch.Tensor]]: Each input name mapped to
        a concatenated tensor, or — when any entry carries "image_bound"
        (minicpm-o) — to the raw list of per-sample tensors.
    """
    if indices is not None:
        batch_data = [batch_data[i] for i in indices if i < len(batch_data)]

    collected: dict[str, list] = {}
    keep_as_lists = False
    for entry in batch_data:
        if isinstance(entry, NonTensorData):
            entry = entry.data
        if entry is None:
            # Pure-text sample in a mixed text / multi-modal dataset.
            continue
        if "image_bound" in entry:
            # minicpm-o marks its inputs with image_bound; keep them unconcatenated.
            keep_as_lists = True
        for name, value in entry.items():
            if value is not None:
                collected.setdefault(name, []).append(value)

    if keep_as_lists:
        return dict(collected)
    return {name: torch.cat(values, dim=0) for name, values in collected.items()}
def get_lora_rank_from_adapter(adapter_path: str | os.PathLike) -> int:
"""
Extract LoRA rank from adapter configuration file.
Args:
adapter_path: Path to LoRA adapter directory
Returns:
LoRA rank value from adapter_config.json
Raises:
FileNotFoundError: If adapter path or config file doesn't exist
ValueError: If config file is invalid or missing rank
"""
adapter_path = os.path.abspath(os.path.expanduser(str(adapter_path)))
if not os.path.exists(adapter_path):
raise FileNotFoundError(f"LoRA adapter path not found: {adapter_path}")
config_path = os.path.join(adapter_path, "adapter_config.json")
if not os.path.exists(config_path):
raise FileNotFoundError(f"adapter_config.json not found in {adapter_path}")
try:
with open(config_path, encoding="utf-8") as f:
config = json.load(f)
if "r" not in config:
raise ValueError(f"LoRA rank 'r' not found in {config_path}")
return int(config["r"])
except json.JSONDecodeError as e:
raise ValueError(f"Invalid JSON in {config_path}: {e}") from e
except (KeyError, ValueError) as e:
raise ValueError(f"Cannot parse LoRA rank from {config_path}: {e}") from e
@dataclass
class CausalLMOutputForPPO(CausalLMOutputWithPast):
    """CausalLMOutputWithPast extended with PPO-specific per-token tensors."""

    # Log-probabilities of the sampled tokens under the current policy.
    log_probs: Optional[torch.FloatTensor] = None
    # Per-token entropy of the policy distribution.
    entropy: Optional[torch.FloatTensor] = None
| verl__utils__model.py |
# Copyright 2023-2024 SGLang Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
import socket
def is_ipv4(ip_str: str) -> bool:
    """
    Check if the given string is an IPv4 address

    Args:
        ip_str: The IP address string to check

    Returns:
        bool: Returns True if it's an IPv4 address, False otherwise
    """
    try:
        ipaddress.IPv4Address(ip_str)
    except ipaddress.AddressValueError:
        return False
    return True
def is_ipv6(ip_str: str) -> bool:
    """
    Check if the given string is an IPv6 address

    Args:
        ip_str: The IP address string to check

    Returns:
        bool: Returns True if it's an IPv6 address, False otherwise
    """
    try:
        ipaddress.IPv6Address(ip_str)
    except ipaddress.AddressValueError:
        return False
    return True
def is_valid_ipv6_address(address: str) -> bool:
    """Return True when *address* parses as an IPv6 address, else False."""
    try:
        ipaddress.IPv6Address(address)
    except ValueError:
        return False
    return True
def get_free_port(address: str) -> tuple[int, socket.socket]:
    """Bind a new socket to an OS-assigned free port on *address*.

    Args:
        address: IPv4 or IPv6 address to bind to (family is auto-detected).

    Returns:
        (port, bound socket) — the caller owns the socket and must close it;
        keeping it open reserves the port.
    """
    family = socket.AF_INET6 if is_valid_ipv6_address(address) else socket.AF_INET
    sock = socket.socket(family=family, type=socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Fixed: SO_REUSEPORT does not exist on all platforms (e.g. Windows);
    # setting it unconditionally raised AttributeError there.
    if hasattr(socket, "SO_REUSEPORT"):
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    sock.bind((address, 0))
    port = sock.getsockname()[1]
    return port, sock
| verl__utils__net_utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
# Copied from https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/bert_padding.py
class IndexFirstAxis(torch.autograd.Function):
    """Differentiable row selection ``input[indices]`` along the first axis.

    Forward gathers the rows of ``input`` selected by ``indices``; backward
    scatters the incoming gradient into a zero tensor with the original
    first-axis size, so unselected rows receive zero gradient.
    """

    @staticmethod
    def forward(ctx, input, indices):
        # indices are needed again in backward to scatter the gradient back.
        ctx.save_for_backward(indices)
        assert input.ndim >= 2
        # Remember the original first-axis size so backward can rebuild a full tensor.
        ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
        # Total number of elements per row once the trailing dims are flattened.
        second_dim = other_shape.numel()
        # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
        # return input[indices]
        return torch.gather(rearrange(input, "b ... -> b (...)"), 0, repeat(indices, "z -> z d", d=second_dim)).reshape(
            -1, *other_shape
        )

    @staticmethod
    def backward(ctx, grad_output):
        (indices,) = ctx.saved_tensors
        assert grad_output.ndim >= 2
        other_shape = grad_output.shape[1:]
        # Flatten trailing dims so the scatter below operates on a 2-D tensor.
        grad_output = rearrange(grad_output, "b ... -> b (...)")
        grad_input = torch.zeros(
            [ctx.first_axis_dim, grad_output.shape[1]],
            device=grad_output.device,
            dtype=grad_output.dtype,
        )
        # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
        # grad_input[indices] = grad_output
        grad_input.scatter_(0, repeat(indices, "z -> z d", d=grad_output.shape[1]), grad_output)
        # Second return is None: indices is a non-differentiable input.
        return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
index_first_axis = IndexFirstAxis.apply
# Copied from https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/bert_padding.py
class IndexPutFirstAxis(torch.autograd.Function):
    """Differentiable ``output[indices] = values`` into a zero tensor of
    first-axis size ``first_axis_dim``.

    The inverse of :class:`IndexFirstAxis`: forward scatters rows into a
    zero-initialized tensor, backward gathers the gradient rows back out.
    """

    @staticmethod
    def forward(ctx, values, indices, first_axis_dim):
        # indices are needed again in backward to gather the gradient rows.
        ctx.save_for_backward(indices)
        assert indices.ndim == 1
        assert values.ndim >= 2
        output = torch.zeros(first_axis_dim, *values.shape[1:], device=values.device, dtype=values.dtype)
        # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
        output[indices] = values
        # output.scatter_(0, repeat(indices, 'z -> z d', d=values.shape[1]), values)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        (indices,) = ctx.saved_tensors
        # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
        grad_values = grad_output[indices]
        # grad_values = torch.gather(grad_output, 0, repeat(indices, 'z -> z d', d=grad_output.shape[1]))
        # indices and first_axis_dim are non-differentiable inputs, hence None.
        return grad_values, None, None
index_put_first_axis = IndexPutFirstAxis.apply
# Copied from https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/bert_padding.py
def pad_input(hidden_states, indices, batch, seqlen):
    """Scatter unpadded token features back into a padded (batch, seqlen, ...) layout.

    Arguments:
        hidden_states: (total_nnz, ...), features of the tokens selected in attention_mask.
        indices: (total_nnz), flat positions of those tokens in the original padded sequence.
        batch: int, batch size for the padded output.
        seqlen: int, maximum sequence length for the padded output.
    Return:
        hidden_states: (batch, seqlen, ...), zero everywhere except at ``indices``.
    """
    # Scatter into a flat (batch * seqlen, ...) buffer, then fold back to (batch, seqlen, ...).
    flat_output = index_put_first_axis(hidden_states, indices, batch * seqlen)
    return rearrange(flat_output, "(b s) ... -> b s ...", b=batch)
# Copied from https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/bert_padding.py
def unpad_input(hidden_states, attention_mask, unused_mask=None):
    """Strip padding tokens from a batch of padded sequences.

    Arguments:
        hidden_states: (batch, seqlen, ...)
        attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
        unused_mask: (batch, seqlen), bool / int, 1 means the element is allocated but unused.
    Return:
        hidden_states: (total_nnz, ...), tokens selected by attention_mask + unused_mask.
        indices: (total_nnz), flat indices of the selected tokens in the padded input.
        cu_seqlens: (batch + 1), cumulative sequence lengths, used to index into hidden_states.
        max_seqlen_in_batch: int
        seqused: (batch), number of tokens selected in attention_mask + unused_mask per sequence.
    """
    combined_mask = attention_mask if unused_mask is None else (attention_mask + unused_mask)
    selected_lengths = combined_mask.sum(dim=-1, dtype=torch.int32)
    used_seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    flat_indices = torch.nonzero(combined_mask.flatten(), as_tuple=False).flatten()
    longest = selected_lengths.max().item()
    cu_seqlens = F.pad(torch.cumsum(selected_lengths, dim=0, dtype=torch.int32), (1, 0))
    # Index with precomputed integer indices rather than a bool mask: indexing with a
    # bool mask expands the mask and calls nonzero internally, wasting memory, and the
    # custom gather in index_first_axis is faster than plain tensor indexing.
    gathered = index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), flat_indices)
    return (
        gathered,
        flat_indices,
        cu_seqlens,
        longest,
        used_seqlens_in_batch,
    )
| verl__utils__npu_flash_attn_utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import warnings
from dataclasses import dataclass, field
from typing import Any, Optional
from omegaconf import MISSING
from verl.base_config import BaseConfig
@dataclass
class NsightToolConfig(BaseConfig):
    """Nsight tool config.

    Args:
        discrete (bool): True for each task has its own database, False for all tasks
            in one training step share one database.
    """

    # True for each task has its own database, False for all tasks in one training
    # step share one database. (Was a bare string literal, which is a no-op
    # statement, not documentation — converted to a comment + docstring entry.)
    discrete: bool = False
    name: str = "nsight"

    def __post_init__(self) -> None:
        """No validation needed for the Nsight tool config."""
        pass
@dataclass
class TorchProfilerToolConfig(BaseConfig):
    """Torch profiler tool config.

    Args:
        contents (list[str]): Profile contents to collect; options: cuda, cpu,
            memory, shapes, stack.
        discrete (bool): True for each task has its own database, False for all
            tasks in one training step share one database.
    """

    # options: cuda, cpu, memory, shapes, stack
    contents: list[str] = field(default_factory=list)
    discrete: bool = False
    name: str = "torch"

    def __post_init__(self) -> None:
        """config validation logics go here"""
        # Validate the container type first so a non-list value fails with the
        # intended message instead of a TypeError raised by the loop below.
        assert isinstance(self.contents, list), f"Profiler contents must be of type list, got {type(self.contents)}"
        __support_contents = ["cuda", "cpu", "memory", "shapes", "stack"]
        for content in self.contents:
            assert content in __support_contents, (
                f"Profiler contents only supports {__support_contents}, but gets {content}"
            )
@dataclass
class TorchMemoryToolConfig(BaseConfig):
    """Torch memory profiler tool config.

    Args:
        trace_alloc_max_entries (int): Maximum number of memory allocation entries to track.
        stack_depth (int): Stack trace depth for memory allocations.
    """

    trace_alloc_max_entries: int = 100_000
    stack_depth: int = 32
    name: str = "torch_memory"

    def __post_init__(self) -> None:
        """Validate that both limits are positive integers."""
        assert isinstance(self.trace_alloc_max_entries, int), (
            f"trace_alloc_max_entries must be int, got {type(self.trace_alloc_max_entries)}"
        )
        assert isinstance(self.stack_depth, int), f"stack_depth must be int, got {type(self.stack_depth)}"
        assert self.trace_alloc_max_entries > 0, (
            f"trace_alloc_max_entries must be positive, got {self.trace_alloc_max_entries}"
        )
        assert self.stack_depth > 0, f"stack_depth must be positive, got {self.stack_depth}"
@dataclass
class NPUToolConfig(NsightToolConfig):
    """NPU profiler tool config.

    Args:
        contents (list[str]): Profile contents to collect; options: npu, cpu,
            memory, shapes, module, stack.
        level (str): Collection level; optional values: level_none, level0,
            level1, level2.
        analysis (bool): Whether to automatically parse the collected data.
    """

    # options: npu, cpu, memory, shapes, module, stack
    contents: list[str] = field(default_factory=list)
    # Collection level, optional values: level_none, level0, level1, level2.
    level: str = "level0"
    # Whether to automatically parse the data.
    analysis: bool = False
    name: str = "npu"

    def __post_init__(self) -> None:
        """config validation logics go here"""
        # NOTE(review): NsightToolConfig.__post_init__ is not invoked here
        # (it is currently a no-op, so nothing is lost — confirm if it gains logic).
        assert isinstance(self.contents, list), f"Profiler contents must be of type list, got {type(self.contents)}"
        assert isinstance(self.level, str), f"Profiler level must be of type str, got {type(self.level)}"
        assert isinstance(self.analysis, bool), f"Profiler analysis must be of type bool, got {type(self.analysis)}"
        for content in self.contents:
            assert content in ["npu", "cpu", "memory", "shapes", "module", "stack"], (
                f"Profiler contents only supports npu, cpu, memory, shapes, module, stack, but gets {content}"
            )
        assert self.level in ["level_none", "level0", "level1", "level2"], (
            f"Profiler level only supports level0, 1, 2, and level_none, but gets {self.level}"
        )
@dataclass
class ProfilerConfig(BaseConfig):
    """Worker profiler config.

    The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.

    Args:
        tool (Optional[str]): Name of the profiling tool to use (the tool configs in this
            module use names such as "nsight", "torch", "torch_memory", "npu").
        enable (bool): Whether profiling is enabled.
        all_ranks (bool): Whether to profile all ranks.
        ranks (list[int]): The ranks that will be profiled. Defaults to [].
        save_path (Optional[str]): Where profiling output is saved.
        tool_config (Any): Placeholder for a per-tool config; the tool config
            dataclasses above are used directly.
        global_tool_config (Any): Global tool configuration for all profiling tools.
    """

    tool: Optional[str] = MISSING
    enable: bool = False
    all_ranks: bool = False
    ranks: list[int] = field(default_factory=list)
    save_path: Optional[str] = MISSING
    tool_config: Any = MISSING  # Just a placeholder, will use configs above directly
    global_tool_config: Optional[Any] = None  # Global tool configuration for all profiling tools

    def union(self, other: "ProfilerConfig") -> "ProfilerConfig":
        """Combine two configs: enabled if either is, ranks are the set union.

        ``save_path`` and ``tool_config`` are taken from ``self``; both configs
        must use the same tool.
        """
        assert self.tool == other.tool, f"Cannot union ProfilerConfig with different tools: {self.tool} vs {other.tool}"
        return ProfilerConfig(
            tool=self.tool,
            enable=self.enable or other.enable,
            all_ranks=self.all_ranks or other.all_ranks,
            ranks=list(set(self.ranks or []) | set(other.ranks or [])),
            save_path=self.save_path,
            tool_config=self.tool_config,
            global_tool_config=self.global_tool_config or other.global_tool_config,
        )

    def intersect(self, other: "ProfilerConfig") -> "ProfilerConfig":
        """Combine two configs: enabled only if both are, ranks are the set intersection.

        ``save_path`` and ``tool_config`` are taken from ``self``; both configs
        must use the same tool.
        """
        assert self.tool == other.tool, (
            f"Cannot intersect ProfilerConfig with different tools: {self.tool} vs {other.tool}"
        )
        return ProfilerConfig(
            tool=self.tool,
            enable=self.enable and other.enable,
            all_ranks=self.all_ranks and other.all_ranks,
            ranks=list(set(self.ranks or []) & set(other.ranks or [])),
            save_path=self.save_path,
            tool_config=self.tool_config,
            global_tool_config=self.global_tool_config if self.global_tool_config else other.global_tool_config,
        )

    def __post_init__(self) -> None:
        """config validation logics go here"""
        assert isinstance(self.ranks, set | list | tuple), (
            f"Profiler ranks must be of type list, got {type(self.ranks)}"
        )
def build_vllm_profiler_args(profiler_config: ProfilerConfig, tool_config: BaseConfig, rank: int) -> dict:
    """
    Build arguments and environment variables for vLLM profiler.

    Acts as an adapter to bridge verl's unified profiler config and vLLM's specific requirements.
    It sets environment variables for compatibility and constructs arguments for vLLM >= 0.13.0.

    Note: as a side effect this mutates ``os.environ`` (the ``VLLM_TORCH_PROFILER_*``
    variables) for vLLM versions configured through the environment.

    Args:
        profiler_config (ProfilerConfig): The unified profiler configuration.
        tool_config (BaseConfig): The tool configuration.
        rank (int): The rank of the replica.

    Returns:
        dict: A dictionary of arguments to be passed to vLLM's start_profile method.
        Empty when profiling is not configured.
    """
    if not profiler_config or not tool_config or not hasattr(tool_config, "contents"):
        return {}
    contents = tool_config.contents
    # "module" implies stack collection, matching the other profiler adapters.
    # (The `True if X else False` ternaries were redundant — `in` already yields bool.)
    with_stack = "stack" in contents or "module" in contents
    record_shapes = "shapes" in contents
    with_memory = "memory" in contents
    save_path = os.path.join(profiler_config.save_path, f"agent_loop_rollout_replica_{rank}")
    # vLLM < 0.13.0 supports controlling profiler via environment variables
    os.environ["VLLM_TORCH_PROFILER_DIR"] = save_path
    os.environ["VLLM_TORCH_PROFILER_WITH_STACK"] = "1" if with_stack else "0"
    os.environ["VLLM_TORCH_PROFILER_RECORD_SHAPES"] = "1" if record_shapes else "0"
    os.environ["VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY"] = "1" if with_memory else "0"
    # vLLM >= 0.13.0 supports controlling profiler via arguments.
    # While it maintains backward compatibility with environment variables,
    # we provide arguments explicitly to align with the new API style.
    return {
        "profiler_config": json.dumps(
            {
                "profiler": "torch",
                "torch_profiler_dir": save_path,
                "torch_profiler_with_memory": with_memory,
                "torch_profiler_with_stack": with_stack,
                "torch_profiler_record_shapes": record_shapes,
            }
        )
    }
def build_sglang_profiler_args(profiler_config: ProfilerConfig, tool_config: BaseConfig, rank: int) -> dict:
    """
    Build arguments for SGLang profiler.

    Args:
        profiler_config (ProfilerConfig): The unified profiler configuration.
        tool_config (BaseConfig): The tool configuration.
        rank (int): The rank of the replica.

    Returns:
        dict: A dictionary of arguments suitable for starting the SGLang profiler.
        Empty when profiling is not configured.
    """
    misconfigured = not profiler_config or not tool_config or not hasattr(tool_config, "contents")
    if misconfigured:
        return {}
    contents = tool_config.contents
    if "memory" in contents:
        warnings.warn("SGLang profiler does not support memory profiling. Ignoring memory content.", stacklevel=2)
    output_dir = os.path.join(profiler_config.save_path, f"agent_loop_rollout_replica_{rank}")
    wants_stack = "stack" in contents or "module" in contents
    wants_shapes = "shapes" in contents
    return {
        "output_dir": output_dir,
        "with_stack": wants_stack,
        "record_shapes": wants_shapes,
    }
| verl__utils__profiler__config.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional
def mark_start_range(
    message: Optional[str] = None,
    color: Optional[str] = None,
    domain: Optional[str] = None,
    category: Optional[str] = None,
) -> None:
    """No-op fallback used when no hardware profiler backend is available.

    All arguments are accepted for API compatibility and ignored.
    """
    return None
def mark_end_range(range_id: str) -> None:
    """No-op fallback: ``range_id`` is accepted for API compatibility and ignored."""
    return None
def mark_annotate(
    message: Optional[str] = None,
    color: Optional[str] = None,
    domain: Optional[str] = None,
    category: Optional[str] = None,
) -> Callable:
    """No-op fallback decorator factory.

    The returned decorator hands the target function back unchanged; all
    arguments are accepted for API compatibility and ignored.
    """
    return lambda func: func
| verl__utils__profiler__empty_annotations.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Inspired from https://gitee.com/ascend/MindSpeed-RL/blob/master/mindspeed_rl/utils/utils.py
import functools
import logging
import os
from contextlib import contextmanager
from typing import Any, Callable, Optional
import torch_npu
from packaging import version
from torch_npu.npu import mstx
from .config import NPUToolConfig
from .profile import DistProfiler, ProfilerConfig
def mark_start_range(message: Optional[str] = None) -> None:
    """Start a mark range in the profiler.

    Args:
        message (str, optional):
            The message to be displayed in the profiler. Defaults to None.

    Returns:
        The value returned by ``mstx.range_start`` — a range handle to pass to
        :func:`mark_end_range`. NOTE(review): the ``-> None`` annotation looks
        inaccurate given the ``return``; confirm mstx.range_start's return type.
    """
    return mstx.range_start(message=message)
def mark_end_range(range_id: str) -> None:
    """End a mark range in the profiler.

    Args:
        range_id (str):
            The id of the mark range to end, as returned by :func:`mark_start_range`.
    """
    return mstx.range_end(range_id)
def mark_annotate(message: Optional[str] = None) -> Callable:
    """Decorate a function to annotate a mark range along with the function life cycle.

    Args:
        message (str, optional):
            The message to be displayed in the profiler. Defaults to None.

    Returns:
        Callable: A decorator wrapping the target function in an MSTX range
        labeled ``message`` (or the function's own name when ``message`` is None).
    """

    def decorator(func):
        # Fall back to the function name so every annotated range has a label.
        profile_message = message or func.__name__
        return mstx.mstx_range(profile_message)(func)

    return decorator
@contextmanager
def marked_timer(name: str, timing_raw: dict[str, float], *args: Any, **kwargs: Any) -> None:
    """Context manager for timing with MSTX markers.

    This utility function measures the execution time of code within its context,
    accumulates the timing information, and adds MSTX markers for profiling.

    Args:
        name (str): The name/identifier for this timing measurement.
        timing_raw (Dict[str, float]): Dictionary to store timing information.
        *args: Not supported by the MSTX backend; logged and ignored (kept for
            signature compatibility with the NVTX variant).
        **kwargs: Not supported by the MSTX backend; logged and ignored.

    Yields:
        None: This is a context manager that yields control back to the code block.
    """
    if args:
        logging.warning(f"Args are not supported in mstx_profile, but received: {args}")
    if kwargs:
        logging.warning(f"Kwargs are not supported in mstx_profile, but received: {kwargs}")
    mark_range = mark_start_range(message=name)
    from .performance import _timer

    try:
        yield from _timer(name, timing_raw)
    finally:
        # Close the MSTX range even if the timed block raises, so ranges
        # cannot leak open in the captured profile.
        mark_end_range(mark_range)
def get_npu_profiler(
    contents: list[str],
    profile_level: str,
    profile_save_path: str,
    analysis: bool,
    role: Optional[str] = None,
    profile_step: Optional[str] = None,
):
    """Generate and return an NPU profiler object.

    Args:
        contents (list[str]):
            A list of options to control the collection content,
            such as npu, cpu, memory, shapes, module, stack.
            When None, every content category is enabled.
        profile_level (str):
            The collection level, which can be set to level_none,
            level0, level1 and level2.
        profile_save_path (str):
            The path to save the collected data.
        analysis (bool):
            Whether to enables automatic data parsing.
        role (str, optional):
            The role of the current data collection. Defaults to None.
        profile_step(str, optional):
            The current training step. Defaults to None.
    """
    # Map the string level onto torch_npu's ProfilerLevel enum.
    if profile_level == "level_none":
        level = torch_npu.profiler.ProfilerLevel.Level_none
    elif profile_level == "level0":
        level = torch_npu.profiler.ProfilerLevel.Level0
    elif profile_level == "level1":
        level = torch_npu.profiler.ProfilerLevel.Level1
    elif profile_level == "level2":
        level = torch_npu.profiler.ProfilerLevel.Level2
    else:
        raise ValueError(f"level only supports level0, 1, 2, and level_none, but gets {profile_level}")
    # Nest the output directory as <save_path>/<step>/<role> when step/role are given.
    if profile_step:
        profile_save_path = os.path.join(profile_save_path, profile_step)
    if role:
        profile_save_path = os.path.join(profile_save_path, role)
    # The ability to filter communication via mstx_domain_exclude requires torch_npu==2.1 or higher.
    if version.parse(torch_npu.__version__) < version.parse("2.1"):
        raise RuntimeError("torch_npu==2.1 or higher is required to use mstx_domain_exclude")
    experimental_config = torch_npu.profiler._ExperimentalConfig(
        profiler_level=level,
        export_type=torch_npu.profiler.ExportType.Db,
        data_simplification=True,
        msprof_tx=True,
        mstx_domain_exclude=["communication"],
    )
    # Select which activities (NPU and/or CPU) to record; None means record both.
    activites = []
    if contents is None or "npu" in contents:
        activites.append(torch_npu.profiler.ProfilerActivity.NPU)
    if contents is None or "cpu" in contents:
        activites.append(torch_npu.profiler.ProfilerActivity.CPU)
    prof = torch_npu.profiler.profile(
        with_modules=contents is None or "module" in contents,
        with_stack=contents is None or "stack" in contents,
        record_shapes=contents is None or "shapes" in contents,
        profile_memory=contents is None or "memory" in contents,
        activities=activites,
        on_trace_ready=torch_npu.profiler.tensorboard_trace_handler(profile_save_path, analyse_flag=analysis),
        experimental_config=experimental_config,
    )
    return prof
class NPUProfiler(DistProfiler):
    """
    NPU profiler. Initialized in a worker to control the NPU profiler.
    """

    # Class-level counter shared by all instances in the process: in non-discrete
    # mode only the first start() creates/starts the profiler and only the
    # matching stop() (when the count is back to 1) tears it down, so multiple
    # roles share a single profiling session.
    _define_count = 0

    def __init__(self, rank: int, config: ProfilerConfig, tool_config: NPUToolConfig, **kwargs):
        """Initialize the NsightSystemsProfiler.

        Args:
            rank (int): The rank of the current process.
            config (Optional[ProfilerConfig]): Configuration for the profiler. If None, a default configuration is used.
            tool_config (NPUToolConfig): The config to control npu profiler behavior.
        """
        if not config:
            config = ProfilerConfig(ranks=[], enable=False)
        if not tool_config:
            assert not config.enable, "tool_config must be set when profiler is enabled"
        # NOTE(review): if tool_config is None (with config.enable False) the assert
        # above passes but the attribute accesses below raise AttributeError —
        # confirm callers always pass a tool_config.
        self.discrete: bool = tool_config.discrete
        self.profile_npu = None
        self.profile_contents = tool_config.contents
        self.profile_level = tool_config.level
        self.profile_save_path = config.save_path
        self.analysis = tool_config.analysis

    def start(self, **kwargs):
        """Open the shared profiling session (non-discrete mode, first caller only)."""
        role = kwargs.get("role", None)
        if not self.discrete and NPUProfiler._define_count == 0:
            self.profile_npu = get_npu_profiler(
                contents=self.profile_contents,
                profile_level=self.profile_level,
                profile_save_path=self.profile_save_path,
                analysis=self.analysis,
                role=role,
            )
            self.profile_npu.start()
            NPUProfiler._define_count += 1

    def stop(self):
        """Close the shared profiling session (mirrors the bookkeeping in start())."""
        if not self.discrete and NPUProfiler._define_count == 1:
            # NOTE(review): assumes start() ran on this instance; otherwise
            # self.profile_npu is still None here.
            self.profile_npu.step()
            self.profile_npu.stop()
            NPUProfiler._define_count -= 1

    def annotate(self, message: Optional[str] = None, role: Optional[str] = None, **kwargs_outer) -> Callable:
        """Decorate a Worker member function to profile the current rank in the current training step.

        Requires the target function to be a member function of a Worker,
        which has a member field `profiler` with NPUProfiler type.

        Args:
            message (str, optional):
                The message to be displayed in the profiler. Defaults to None.
            role (str, optional):
                The role of the current data collection. Defaults to None.
        """

        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs_inner):
                profile_name = message or func.__name__
                # Snapshot the mode so the start/end bookkeeping below stays
                # consistent even if self.discrete changed mid-call.
                discrete_mode = self.discrete
                if not discrete_mode:
                    # Shared session: only mark this function's range.
                    mark_range = mark_start_range(message=profile_name)
                else:
                    # Discrete mode: each annotated call runs its own profiler session.
                    profile_npu = get_npu_profiler(
                        contents=self.profile_contents,
                        profile_level=self.profile_level,
                        profile_save_path=self.profile_save_path,
                        analysis=self.analysis,
                        role=role,
                    )
                    profile_npu.start()
                    mark_range = mark_start_range(message=profile_name)
                result = func(*args, **kwargs_inner)
                # NOTE(review): if func raises, the range (and in discrete mode the
                # profiler session) is never closed — consider try/finally.
                if not discrete_mode:
                    mark_end_range(mark_range)
                else:
                    mark_end_range(mark_range)
                    profile_npu.step()
                    profile_npu.stop()
                return result

            return wrapper

        return decorator
| verl__utils__profiler__mstx_profile.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from contextlib import contextmanager
from typing import Callable, Optional
import nvtx
import torch
from .config import NsightToolConfig
from .profile import DistProfiler, ProfilerConfig
def mark_start_range(
    message: Optional[str] = None,
    color: Optional[str] = None,
    domain: Optional[str] = None,
    category: Optional[str] = None,
) -> None:
    """Start a mark range in the profiler.

    Args:
        message (str, optional):
            The message to be displayed in the profiler. Defaults to None.
        color (str, optional):
            The color of the range. Defaults to None.
        domain (str, optional):
            The domain of the range. Defaults to None.
        category (str, optional):
            The category of the range. Defaults to None.

    Returns:
        The value returned by ``nvtx.start_range`` — the range id that
        :func:`mark_end_range` expects. NOTE(review): the ``-> None`` annotation
        looks inaccurate given the ``return``; confirm and fix.
    """
    return nvtx.start_range(message=message, color=color, domain=domain, category=category)
def mark_end_range(range_id: str) -> None:
    """End a mark range in the profiler.

    Args:
        range_id (str):
            The id of the mark range to end, as returned by :func:`mark_start_range`.
    """
    return nvtx.end_range(range_id)
def mark_annotate(
    message: Optional[str] = None,
    color: Optional[str] = None,
    domain: Optional[str] = None,
    category: Optional[str] = None,
) -> Callable:
    """Decorate a function to annotate a mark range along with the function life cycle.

    Args:
        message (str, optional):
            The message to be displayed in the profiler. Defaults to None.
        color (str, optional):
            The color of the range. Defaults to None.
        domain (str, optional):
            The domain of the range. Defaults to None.
        category (str, optional):
            The category of the range. Defaults to None.

    Returns:
        Callable: A decorator wrapping the target function in an NVTX range
        labeled ``message`` (or the function's own name when ``message`` is None).
    """

    def decorator(func):
        # Fall back to the function name so every annotated range has a label.
        profile_message = message or func.__name__
        return nvtx.annotate(profile_message, color=color, domain=domain, category=category)(func)

    return decorator
@contextmanager
def marked_timer(
    name: str,
    timing_raw: dict[str, float],
    color: str = None,
    domain: Optional[str] = None,
    category: Optional[str] = None,
):
    """Context manager for timing with NVTX markers.

    This utility function measures the execution time of code within its context,
    accumulates the timing information, and adds NVTX markers for profiling.

    Args:
        name (str): The name/identifier for this timing measurement.
        timing_raw (Dict[str, float]): Dictionary to store timing information.
        color (Optional[str]): Color for the NVTX marker. Defaults to None.
        domain (Optional[str]): Domain for the NVTX marker. Defaults to None.
        category (Optional[str]): Category for the NVTX marker. Defaults to None.

    Yields:
        None: This is a context manager that yields control back to the code block.
    """
    mark_range = mark_start_range(message=name, color=color, domain=domain, category=category)
    from .performance import _timer

    try:
        yield from _timer(name, timing_raw)
    finally:
        # End the NVTX range even when the timed block raises, so ranges
        # cannot leak open in the captured profile.
        mark_end_range(mark_range)
class NsightSystemsProfiler(DistProfiler):
    """Nsight system profiler. Installed in a worker to control the Nsight system profiler."""

    def __init__(self, rank: int, config: Optional[ProfilerConfig], tool_config: Optional[NsightToolConfig], **kwargs):
        """Initialize the NsightSystemsProfiler.

        Args:
            rank (int): The rank of the current process.
            config (Optional[ProfilerConfig]): Configuration for the profiler. If None, a default configuration is used.
            tool_config (Optional[NsightToolConfig]): Controls discrete vs. shared capture sessions.
        """
        # If no configuration is provided, create a default ProfilerConfig with an empty list of ranks
        if not config:
            config = ProfilerConfig(ranks=[])
        if not tool_config:
            assert not config.enable, "tool_config must be provided when profiler is enabled"
        # NOTE(review): when tool_config is None (profiler disabled) the next line
        # raises AttributeError — confirm callers always supply a tool_config.
        self.discrete: bool = tool_config.discrete

    def start(self, **kwargs):
        # Shared (non-discrete) mode: the whole step is profiled as one capture.
        if not self.discrete:
            torch.cuda.profiler.start()

    def stop(self):
        if not self.discrete:
            torch.cuda.profiler.stop()

    def annotate(
        self,
        message: Optional[str] = None,
        color: Optional[str] = None,
        domain: Optional[str] = None,
        category: Optional[str] = None,
        **kwargs_outer,
    ) -> Callable:
        """Decorate a Worker member function to profile the current rank in the current training step.

        Requires the target function to be a member function of a Worker, which has a member field `profiler` with
        NsightSystemsProfiler type.

        Args:
            message (str, optional):
                The message to be displayed in the profiler. Defaults to None.
            color (str, optional):
                The color of the range. Defaults to None.
            domain (str, optional):
                The domain of the range. Defaults to None.
            category (str, optional):
                The category of the range. Defaults to None.
        """

        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs_inner):
                profile_name = message or func.__name__
                # Discrete mode profiles just this call; otherwise rely on the
                # capture opened by start().
                if self.discrete:
                    torch.cuda.profiler.start()
                mark_range = mark_start_range(message=profile_name, color=color, domain=domain, category=category)
                result = func(*args, **kwargs_inner)
                mark_end_range(mark_range)
                if self.discrete:
                    torch.cuda.profiler.stop()
                return result

            return wrapper

        return decorator
| verl__utils__profiler__nvtx_profile.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import functools
import inspect
import logging
from contextlib import contextmanager
from typing import Any, Optional

import torch
import torch.distributed as dist
from codetiming import Timer

from verl.utils.device import get_device_id, get_torch_device
from verl.utils.logger import DecoratorLoggerBase
def _get_current_mem_info(unit: str = "GB", precision: int = 2) -> tuple[str, str, str, str]:
    """Get current memory usage.

    Note that CPU device memory info is always 0.

    Args:
        unit (str, optional): The unit of memory measurement. Defaults to "GB".
        precision (int, optional): The number of decimal places to round memory values. Defaults to 2.

    Returns:
        tuple[str, str, str, str]: Formatted strings for memory allocated, memory reserved,
        memory used, and memory total in the specified unit.
    """
    assert unit in ["GB", "MB", "KB"]
    device = get_torch_device()
    # torch.cpu.memory_allocated() does not exist
    # (CPU path returns fixed two-decimal zeros regardless of `precision`.)
    if device == torch.cpu:
        return "0.00", "0.00", "0.00", "0.00"
    divisor = 1024**3 if unit == "GB" else 1024**2 if unit == "MB" else 1024
    mem_allocated = get_torch_device().memory_allocated()
    mem_reserved = get_torch_device().memory_reserved()
    # use get_torch_device().mem_get_info to profile device memory
    # since vllm's sleep mode works below pytorch
    # see https://github.com/vllm-project/vllm/pull/11743#issuecomment-2754338119
    mem_free, mem_total = get_torch_device().mem_get_info()
    mem_used = mem_total - mem_free
    mem_allocated = f"{mem_allocated / divisor:.{precision}f}"
    mem_reserved = f"{mem_reserved / divisor:.{precision}f}"
    mem_used = f"{mem_used / divisor:.{precision}f}"
    mem_total = f"{mem_total / divisor:.{precision}f}"
    return mem_allocated, mem_reserved, mem_used, mem_total
def log_gpu_memory_usage(head: str, logger: logging.Logger = None, level=logging.DEBUG, rank: int = 0):
    """Log current device memory statistics, optionally restricted to one rank.

    Args:
        head (str): A descriptive header for the memory usage log message.
        logger (logging.Logger, optional): Logger instance to use for logging. If None, prints to stdout.
        level: Logging level to use. Defaults to logging.DEBUG.
        rank (int): The rank of the process to log memory for. Defaults to 0.
            Pass None to log on every rank.
    """
    should_log = (not dist.is_initialized()) or (rank is None) or (dist.get_rank() == rank)
    if not should_log:
        return
    mem_allocated, mem_reserved, mem_used, mem_total = _get_current_mem_info()
    message = (
        f"{head}, memory allocated (GB): {mem_allocated}, memory reserved (GB): {mem_reserved}, "
        f"device memory used/total (GB): {mem_used}/{mem_total}"
    )
    if logger is not None:
        logger.log(msg=message, level=level)
    else:
        print(message)
class GPUMemoryLogger(DecoratorLoggerBase):
    """A decorator class to log GPU memory usage.

    Example:
        >>> from verl.utils.profiler.performance import GPUMemoryLogger
        >>> @GPUMemoryLogger(role="actor")
        >>> def update_actor(self, batch):
        ...     # real actor update logics
        ...     return
    """

    def __init__(self, role: str, logger: logging.Logger = None, level=logging.DEBUG, log_only_rank_0: bool = True):
        """Capture the distributed rank (0 for single-process runs) and init the base logger."""
        if dist.is_initialized() and dist.get_world_size() > 1:
            rank = dist.get_rank()
        else:
            rank = 0
        super().__init__(role, logger, level, rank, log_only_rank_0)

    def __call__(self, decorated_function: callable):
        # functools.wraps preserves the wrapped function's metadata (__name__,
        # __doc__, signature) so decorated functions remain introspectable;
        # the plain inner function previously discarded it.
        @functools.wraps(decorated_function)
        def f(*args, **kwargs):
            return self.log(decorated_function, *args, **kwargs)

        return f

    def log(self, func, *args, **kwargs):
        """Log device memory before and after calling ``func`` and return its result."""
        name = func.__name__
        mem_allocated, mem_reserved, mem_used, mem_total = _get_current_mem_info()
        message = (
            f"Before {name}, memory allocated (GB): {mem_allocated}, memory reserved (GB): {mem_reserved}, "
            f"device memory used/total (GB): {mem_used}/{mem_total}"
        )
        self.logging_function(message)
        output = func(*args, **kwargs)
        mem_allocated, mem_reserved, mem_used, mem_total = _get_current_mem_info()
        message = (
            f"After {name}, memory allocated (GB): {mem_allocated}, memory reserved (GB): {mem_reserved}, "
            f"device memory used/total (GB): {mem_used}/{mem_total}"
        )
        self.logging_function(message)
        return output
def log_print(ctn: Any):
    """Print ``ctn`` prefixed with a timestamp and the caller's file, line, and function."""
    caller = inspect.currentframe().f_back
    location = "{}:{}:{}".format(
        caller.f_code.co_filename.split("/")[-1],
        caller.f_lineno,
        caller.f_code.co_name,
    )
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print(f"[{stamp}-{location}]: {ctn}")
def _timer(name: str, timing_raw: dict[str, float]):
    """Core timing generator shared by the timer context managers.

    Args:
        name (str): The name/identifier for this timing measurement.
        timing_raw (dict[str, float]): Dictionary that accumulates elapsed seconds.
    """
    with Timer(name=name, logger=None) as timer:
        yield
    # Accumulate (do not overwrite) so repeated measurements sum up.
    timing_raw[name] = timing_raw.get(name, 0) + timer.last
@contextmanager
def simple_timer(name: str, timing_raw: dict[str, float]):
    """Context manager for basic timing without NVTX markers.

    Measures the wall-clock time spent inside the ``with`` block and adds it
    to ``timing_raw[name]`` (creating the entry on first use). If the block
    raises, the measurement is discarded.

    Args:
        name (str): The name/identifier for this timing measurement.
        timing_raw (dict[str, float]): Dictionary to store timing information.

    Yields:
        None: Control returns to the wrapped code block.
    """
    with Timer(name=name, logger=None) as timer:
        yield
    if name not in timing_raw:
        timing_raw[name] = 0
    timing_raw[name] += timer.last
@contextmanager
def marked_timer(
    name: str,
    timing_raw: dict[str, float],
    color: str = None,
    domain: Optional[str] = None,
    category: Optional[str] = None,
):
    """Fallback timing context manager used when no hardware profiler is available.

    Behaves exactly like :func:`simple_timer` but additionally accepts (and
    ignores) marker styling arguments, so call sites stay compatible with the
    profiler-enabled implementations.

    Args:
        name (str): The name/identifier for this timing measurement.
        timing_raw (dict[str, float]): Dictionary to store timing information.
        color (Optional[str]): Marker color; unused in this default implementation.
        domain (Optional[str]): Marker domain; unused in this default implementation.
        category (Optional[str]): Marker category; unused in this default implementation.

    Yields:
        None: Control returns to the wrapped code block.
    """
    yield from _timer(name, timing_raw)
def reduce_timing(
    timing_raw: dict[str, float], reduce_op: torch.distributed.ReduceOp = torch.distributed.ReduceOp.AVG
) -> dict[str, float]:
    """Reduce per-rank timing dictionaries across the process group.

    Keys are processed in sorted order so every rank packs its values into the
    tensor with the same layout before the collective runs.

    Args:
        timing_raw (dict[str, float]): Timing information measured on this rank.
        reduce_op: Collective reduction operator (default: average across ranks).

    Returns:
        dict[str, float]: Reduced timing information; returned unchanged when
        torch.distributed is not initialized.
    """
    if not dist.is_initialized():
        return timing_raw

    ordered_keys = sorted(timing_raw)
    values = torch.tensor([timing_raw[k] for k in ordered_keys], dtype=torch.float32, device=get_device_id())
    torch.distributed.all_reduce(values, op=reduce_op)
    return dict(zip(ordered_keys, values.cpu().tolist()))
def topk_reduce_ratio_min_max(timing: float, k: int = 10) -> tuple[float, float, float]:
    """Gather one scalar timing from every rank and summarize the spread.

    Args:
        timing (float): This rank's timing value.
        k (int): Tail size in percent (default: the slowest 10%).

    Returns:
        tuple[float, float, float]: (tail_ratio, min, max) across ranks, where
        tail_ratio is the fraction of ranks above the (100 - k)th percentile.
        Returns (-1.0, -1.0, -1.0) when torch.distributed is not initialized.
    """
    if not dist.is_initialized():
        return -1.0, -1.0, -1.0

    device = get_device_id()
    local = torch.tensor(timing, dtype=torch.float32, device=device)
    gathered = [torch.zeros(1, dtype=torch.float32, device=device) for _ in range(dist.get_world_size())]
    torch.distributed.all_gather(gathered, local)
    stacked = torch.stack(gathered)
    threshold = torch.quantile(stacked, 1 - k / 100)
    tail_ratio = (stacked > threshold).float().mean().cpu().item()
    return tail_ratio, stacked.min().cpu().item(), stacked.max().cpu().item()
def gather_timing(timing_raw: dict[str, float]) -> dict[str, list[float]]:
    """Collect each key's timing value from every rank in the process group.

    Returns:
        dict[str, list[float]]: key -> list of per-rank values (single-element
        lists when torch.distributed is not initialized).
    """
    if not dist.is_initialized():
        return {key: [value] for key, value in timing_raw.items()}

    # Sorted key order keeps the packed value lists aligned across ranks.
    ordered_keys = sorted(timing_raw)
    local_values = [timing_raw[key] for key in ordered_keys]
    gathered: list = [None] * torch.distributed.get_world_size()
    torch.distributed.all_gather_object(gathered, local_values)
    return {key: [per_rank[i] for per_rank in gathered] for i, key in enumerate(ordered_keys)}
| verl__utils__profiler__performance.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Callable, Optional
from ..memory_utils import MemorySnapshotSampler, enable_memory_visualize
from .config import ProfilerConfig, TorchMemoryToolConfig
def mark_start_range(
    message: Optional[str] = None,
    color: Optional[str] = None,
    domain: Optional[str] = None,
    category: Optional[str] = None,
) -> None:
    """Start a profiling range marker (no-op implementation).

    All arguments are accepted for API compatibility with the real profiler
    backends and ignored here.

    Args:
        message (Optional[str]): Message to associate with the range marker.
        color (Optional[str]): Color for the marker visualization.
        domain (Optional[str]): Domain for the marker.
        category (Optional[str]): Category for the marker.
    """
    return None
def mark_end_range(range_id: str) -> None:
    """End a profiling range marker (no-op implementation).

    Args:
        range_id (str): Identifier of the range to end; ignored here.
    """
    return None
def mark_annotate(
    message: Optional[str] = None,
    color: Optional[str] = None,
    domain: Optional[str] = None,
    category: Optional[str] = None,
) -> Callable:
    """Decorator factory that annotates a function with profiling markers (no-op).

    Args:
        message (Optional[str]): Message to associate with the annotation.
        color (Optional[str]): Color for the marker visualization.
        domain (Optional[str]): Domain for the marker.
        category (Optional[str]): Category for the marker.

    Returns:
        Callable: A decorator that returns the wrapped function unchanged.
    """
    return lambda func: func
class DistProfiler:
    """A dispatcher that delegates to specific profilers based on config.tool.

    Supported tools:

    - nsys: NsightSystemsProfiler
    - npu: NPUProfiler (Ascend)
    - torch: PyTorch torch.profiler wrapper
    - torch_memory: Torch CUDA memory snapshot dump

    Args:
        rank (int): This process's rank, used for rank selection.
        config (Optional[ProfilerConfig]): Profiler configuration; a disabled
            default is constructed when omitted.
        tool_config (Optional[object]): Tool-specific configuration; falls back
            to ``config.tool_config`` when not given.
        **kwargs: Forwarded to the selected backend constructor (nsys/npu only).
    """

    def __init__(
        self, rank: int, config: Optional[ProfilerConfig] = None, tool_config: Optional[object] = None, **kwargs
    ):
        # Default config
        if not config:
            config = ProfilerConfig(ranks=[], enable=False, tool_config=None)
        if tool_config is None:
            # Fall back to the tool config embedded in the profiler config.
            tool_config = config.tool_config
        self.config = config
        self.tool_config = tool_config
        self._impl = None  # concrete backend instance, selected below from config.tool
        self._tool = getattr(config, "tool", None)
        self._enable = config.enable
        self._this_step = False  # True only between start() and stop()
        # Normalize rank selection
        self._this_rank = False
        if config.all_ranks:
            self._this_rank = True
        elif config.ranks:
            self._this_rank = rank in config.ranks
        else:
            # default rank 0 if enabled but ranks unspecified
            self._this_rank = (rank == 0) if self._enable else False
        # TorchMemoryProfiler currently do not support discrete mode.
        self._discrete = getattr(tool_config, "discrete", False) if tool_config else False
        # Lazy import to avoid circular deps
        if self._tool == "nsys":
            from .nvtx_profile import NsightSystemsProfiler as _Nsight

            self._impl = _Nsight(rank=rank, config=config, tool_config=tool_config, **kwargs)
        elif self._tool == "npu":
            from .mstx_profile import NPUProfiler as _Npu

            self._impl = _Npu(rank=rank, config=config, tool_config=tool_config, **kwargs)
        elif self._tool == "torch":
            from .torch_profile import Profiler as _Torch

            self._impl = _Torch(rank=rank, config=config, tool_config=tool_config)
        elif self._tool == "torch_memory":
            self._impl = TorchMemoryProfiler(rank=rank, config=config, tool_config=tool_config)
        else:
            # Fallback to a no-op impl
            self._impl = _NoOpProfiler()

    def check_enable(self) -> bool:
        # Whether profiling is enabled at all for this role.
        return self._enable

    def check_this_rank(self) -> bool:
        # Whether this rank was selected for profiling.
        return self._this_rank

    def check_this_step(self) -> bool:
        # Whether a profiling step is active (start() called, stop() not yet).
        return self._this_step

    def is_discrete_mode(self) -> bool:
        # Discrete mode profiles individual annotated calls instead of whole steps.
        return self._discrete

    def start(self, **kwargs):
        """Begin a profiling step, delegating to the backend on selected ranks only."""
        if self.check_enable() and self.check_this_rank():
            self._this_step = True
            # Backends without a start() method become no-ops.
            return getattr(self._impl, "start", lambda **_: None)(**kwargs)

    def stop(self):
        """End the current profiling step, delegating to the backend on selected ranks only."""
        if self.check_enable() and self.check_this_rank():
            self._this_step = False
            return getattr(self._impl, "stop", lambda: None)()

    @classmethod
    def annotate(
        cls,
        message: Optional[str] = None,
        color: Optional[str] = None,
        domain: Optional[str] = None,
        category: Optional[str] = None,
        **kwargs_outer,
    ) -> Callable:
        """Decorator for Worker methods: applies the backend's annotate() while profiling is live.

        The decorated method's owner must expose a ``profiler`` attribute of
        DistProfiler type; when profiling is disabled, inactive, or this rank
        is not selected, the method runs untouched.
        """

        def decorator(func):
            @functools.wraps(func)
            def wrapper(self_instance, *args, **kwargs_inner):
                profiler = getattr(self_instance, "profiler", None)
                if (
                    not profiler
                    or not profiler.check_enable()
                    or not profiler.check_this_step()
                    or not profiler.check_this_rank()
                ):
                    return func(self_instance, *args, **kwargs_inner)
                impl = profiler._impl
                if hasattr(impl, "annotate"):
                    try:
                        actual_decorator = impl.annotate(
                            message=message, color=color, domain=domain, category=category, **kwargs_outer
                        )
                        return actual_decorator(func)(self_instance, *args, **kwargs_inner)
                    except Exception:
                        # Best-effort: a failing backend must not break the call.
                        return func(self_instance, *args, **kwargs_inner)
                return func(self_instance, *args, **kwargs_inner)

            return wrapper

        return decorator
class _NoOpProfiler:
def start(self, **kwargs):
return
def stop(self):
return
class TorchMemoryProfiler:
    """Profiler that dumps CUDA memory snapshots at step boundaries.

    Behavior:
    - On first construction (per process), enable memory history recording if CUDA is available
    - On start(step=X), remember sub_dir for this step
    - On stop(), dump a memory snapshot into config.save_path under the remembered sub_dir
    """

    # Process-wide flag: memory history recording is attempted at most once.
    _memory_history_enabled: bool = False

    def __init__(
        self, rank: int, config: Optional[ProfilerConfig], tool_config: Optional[TorchMemoryToolConfig] = None
    ):
        # Always respond to explicit start/stop calls for torch_memory tool,
        # regardless of per-role enable flag, to align with global step control.
        self.enable = True
        self.config = config if config else ProfilerConfig(ranks=[])
        self.rank = rank
        self.this_step = False
        self.sub_dir = None
        self.sampler = MemorySnapshotSampler()
        # Pull recording limits from tool_config when given, otherwise defaults.
        if tool_config:
            max_entries = tool_config.trace_alloc_max_entries
            depth = tool_config.stack_depth
        else:
            max_entries = 100_000
            depth = 32
        # Best-effort enable memory history, once per process.
        if not TorchMemoryProfiler._memory_history_enabled:
            try:
                enable_memory_visualize(trace_alloc_max_entries=max_entries, stack_depth=depth)
            except Exception:
                # silently ignore if not supported
                pass
            TorchMemoryProfiler._memory_history_enabled = True

    def start(self, **kwargs):
        """Mark the beginning of a profiled step and remember its output sub-directory."""
        if not (self.enable and self._should_profile_this_rank()):
            return
        step = kwargs.get("profile_step", None)
        # Keep ranks aligned under the same folder name.
        self.sub_dir = None if step is None else f"step{step}"
        self.this_step = True

    def stop(self):
        """Dump a memory snapshot for the step opened by start()."""
        if not self.enable or not self.this_step:
            return
        self.this_step = False
        if not self._should_profile_this_rank():
            return
        target_dir = self.config.save_path or "outputs/profile"
        # Dump snapshot; all ranks write into same sub_dir. Best effort only.
        try:
            self.sampler.dump_memory_snapshot(out_dir=target_dir, tag="torch_memory", sub_dir=self.sub_dir)
        except Exception:
            pass

    def _should_profile_this_rank(self) -> bool:
        # all_ranks wins; otherwise an explicit rank list; otherwise rank 0 only.
        if self.config.all_ranks:
            return True
        if self.config.ranks:
            return self.rank in self.config.ranks
        return self.rank == 0
class DistProfilerExtension:
    """An extension class for DistProfiler that provides distributed profiling capabilities.
    It is intended for workers in verl that single controller invokes.

    This class wraps a DistProfiler instance and provides methods to start/stop profiling
    that can be dispatched across multiple ranks in a distributed training environment.

    Args:
        profiler (DistProfiler): The base distributed profiler instance to extend
    """

    def __init__(self, profiler: DistProfiler):
        self.profiler = profiler

    # Imported at class-body level (not module top) so `register`/`Dispatch`
    # are in scope for the decorators below without creating a module-level
    # circular import with the single-controller package.
    from verl.single_controller.base.decorator import Dispatch, register

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def start_profile(self, **kwargs) -> None:
        """Start profiling for the current rank in the current training step."""
        self.profiler.start(**kwargs)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def stop_profile(self) -> None:
        """Stop profiling for the current rank in the current training step."""
        self.profiler.stop()
| verl__utils__profiler__profile.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
from typing import Callable, Optional
import torch
from .config import ProfilerConfig, TorchProfilerToolConfig
from .profile import DistProfiler
def get_torch_profiler(
    contents: list[str],
    save_path: str,
    role: Optional[str] = None,
    save_file_prefix: Optional[str] = None,
    rank: int = 0,
):
    """Build a torch.profiler.profile that saves a chrome trace when ready.

    Args:
        contents: Selection from {"cpu", "cuda", "stack", "shapes", "memory"};
            an empty/None value records both CPU and CUDA activity.
        save_path: Base directory for the trace file (created if missing).
        role: Optional sub-directory name, e.g. the worker role.
        save_file_prefix: Optional prefix for the trace file name.
        rank: Rank id embedded in the trace file name.

    Returns:
        A configured (not yet started) torch.profiler.profile instance.
    """
    if role:
        save_path = os.path.join(save_path, role)
    os.makedirs(save_path, exist_ok=True)

    file_name = f"prof_rank-{rank}.json.gz"
    if save_file_prefix:
        file_name = f"{save_file_prefix}_{file_name}"
    trace_path = os.path.join(save_path, file_name)

    def _on_trace_ready(prof):
        print(f"[Profiler] Saving trace to {trace_path}")
        prof.export_chrome_trace(trace_path)

    selected = set(contents) if contents else set()
    activities = []
    if not selected or "cpu" in selected:
        activities.append(torch.profiler.ProfilerActivity.CPU)
    if not selected or "cuda" in selected:
        activities.append(torch.profiler.ProfilerActivity.CUDA)

    return torch.profiler.profile(
        activities=activities,
        with_stack="stack" in selected,
        record_shapes="shapes" in selected,
        profile_memory="memory" in selected,
        on_trace_ready=_on_trace_ready,
    )
class Profiler(DistProfiler):
    """A PyTorch profiler wrapper class for collecting performance metrics.

    This profiler provides a convenient interface for profiling PyTorch operations,
    with support for:

    - CPU and CUDA activity profiling
    - Configurable profiling schedule (wait/warmup/active steps)
    - Multi-rank profiling support
    - Chrome trace export

    Args:
        config: Configuration object containing profiling parameters
    """

    # Counts open profiling sessions in this process so the shared torch
    # profiler is created on the first start() and torn down on the matching
    # stop().
    _define_count = 0

    def __init__(
        self,
        rank,
        config: ProfilerConfig,
        tool_config: Optional[TorchProfilerToolConfig] = None,
        save_file_prefix=None,
    ):
        # note : if we do not set use_profile, it will be set as None, so that all function will be skip
        config = config or ProfilerConfig(ranks=[], enable=False)
        self.save_file_prefix = save_file_prefix
        if not tool_config:
            assert not config.enable, "tool_config must be provided when profiler is enabled"
        self.prof = None
        self.rank = rank
        self.config = config
        self.tool_config = tool_config
        # BUGFIX: tool_config is explicitly allowed to be None when profiling
        # is disabled (see the assertion above); reading .contents
        # unconditionally raised AttributeError in that case.
        self.contents = self.tool_config.contents if self.tool_config else None
        self.save_path = self.config.save_path
        # Align with other profilers: read discrete mode, default to False for torch profiler
        self.discrete = getattr(self.tool_config, "discrete", False)

    def check(self):
        """Return True when a torch profiler instance is currently active."""
        return self.prof is not None

    def start(self, **kwargs):
        """Create and start the shared torch profiler (continuous mode only).

        Args:
            role (str, optional): Sub-directory under save_path for this worker role.
        """
        role = kwargs.get("role", None)
        if not self.discrete and Profiler._define_count == 0:
            self.prof = get_torch_profiler(
                contents=self.contents,
                save_path=self.save_path,
                role=role,
                save_file_prefix=self.save_file_prefix,
                rank=self.rank,
            )
            print(f"[Profiler] started for rank {self.rank}")
            self.prof.start()
            Profiler._define_count += 1

    def step(self):
        """Advance the profiler schedule by one step, if a profiler is active."""
        if self.check():
            self.prof.step()

    def stop(self):
        """Stop the shared torch profiler when the outermost session ends."""
        if not self.discrete and Profiler._define_count == 1:
            self.step()
            print(f"[Profiler] stopped for rank {self.rank}")
            self.prof.stop()
            Profiler._define_count -= 1

    def annotate(self, message: Optional[str] = None, role: Optional[str] = None, **kwargs_outer) -> Callable:
        """Decorate a Worker member function to profile the current rank in the current training step.

        Requires the target function to be a member function of a Worker,
        which has a member field `profiler` with Profiler type.

        Args:
            message (str, optional):
                The message to be displayed in the profiler. Defaults to None.
            role (str, optional):
                The role of the current data collection. Defaults to None.
        """

        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs_inner):
                profile_name = message or func.__name__
                if not self.discrete:
                    # In continuous mode, we just record function, profiler started globally
                    with torch.profiler.record_function(profile_name):
                        return func(*args, **kwargs_inner)
                # In discrete mode, we start/stop profiler around the function
                prof = get_torch_profiler(
                    contents=self.contents,
                    save_path=self.save_path,
                    role=role,
                    save_file_prefix=self.save_file_prefix,
                    rank=self.rank,
                )
                prof.start()
                with torch.profiler.record_function(profile_name):
                    result = func(*args, **kwargs_inner)
                prof.stop()
                return result

            return wrapper

        return decorator
| verl__utils__profiler__torch_profile.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contain small python utility functions
"""
import importlib
import multiprocessing
import os
import queue # Import the queue module for exception type hint
import signal
from contextlib import contextmanager
from functools import wraps
from types import SimpleNamespace
from typing import Any, Callable, Iterator, Optional
from verl.utils.metric import Metric
# --- Top-level helper for multiprocessing timeout ---
# This function MUST be defined at the top level to be pickleable
def _mp_target_wrapper(target_func: Callable, mp_queue: multiprocessing.Queue, args: tuple, kwargs: dict[str, Any]):
"""
Internal wrapper function executed in the child process.
Calls the original target function and puts the result or exception into the queue.
"""
try:
result = target_func(*args, **kwargs)
mp_queue.put((True, result)) # Indicate success and put result
except Exception as e:
# Ensure the exception is pickleable for the queue
try:
import pickle
pickle.dumps(e) # Test if the exception is pickleable
mp_queue.put((False, e)) # Indicate failure and put exception
except (pickle.PicklingError, TypeError):
# Fallback if the original exception cannot be pickled
mp_queue.put((False, RuntimeError(f"Original exception type {type(e).__name__} not pickleable: {e}")))
# Renamed the function from timeout to timeout_limit
# Renamed the function from timeout to timeout_limit
def timeout_limit(seconds: float, use_signals: bool = False):
    """
    Decorator to add a timeout to a function.

    Args:
        seconds: The timeout duration in seconds.
        use_signals: (Deprecated) This is deprecated because signals only work reliably in the main thread
                     and can cause issues in multiprocessing or multithreading contexts.
                     Defaults to False, which uses the more robust multiprocessing approach.

    Returns:
        A decorated function with timeout.

    Raises:
        TimeoutError: If the function execution exceeds the specified time.
        RuntimeError: If the child process exits with an error (multiprocessing mode).
        NotImplementedError: If the OS is not POSIX (signals are only supported on POSIX).
    """

    def decorator(func):
        if use_signals:
            # SIGALRM/setitimer are POSIX-only facilities.
            if os.name != "posix":
                raise NotImplementedError(f"Unsupported OS: {os.name}")
            # Issue deprecation warning if use_signals is explicitly True
            print(
                "WARN: The 'use_signals=True' option in the timeout decorator is deprecated. \
                Signals are unreliable outside the main thread. \
                Please use the default multiprocessing-based timeout (use_signals=False)."
            )

            @wraps(func)
            def wrapper_signal(*args, **kwargs):
                def handler(signum, frame):
                    # Update function name in error message if needed (optional but good practice)
                    raise TimeoutError(f"Function {func.__name__} timed out after {seconds} seconds (signal)!")

                # Save the previous handler so nested/other SIGALRM users are restored.
                old_handler = signal.getsignal(signal.SIGALRM)
                signal.signal(signal.SIGALRM, handler)
                # Use setitimer for float seconds support, alarm only supports integers
                signal.setitimer(signal.ITIMER_REAL, seconds)
                try:
                    result = func(*args, **kwargs)
                finally:
                    # Reset timer and handler
                    signal.setitimer(signal.ITIMER_REAL, 0)
                    signal.signal(signal.SIGALRM, old_handler)
                return result

            return wrapper_signal
        else:
            # --- Multiprocessing based timeout (existing logic) ---
            @wraps(func)
            def wrapper_mp(*args, **kwargs):
                # maxsize=1: the child posts exactly one (success, payload) tuple.
                q = multiprocessing.Queue(maxsize=1)
                process = multiprocessing.Process(target=_mp_target_wrapper, args=(func, q, args, kwargs))
                process.start()
                process.join(timeout=seconds)

                if process.is_alive():
                    # Child is still running past the deadline: kill it and report timeout.
                    process.terminate()
                    process.join(timeout=0.5)  # Give it a moment to terminate
                    if process.is_alive():
                        print(f"Warning: Process {process.pid} did not terminate gracefully after timeout.")
                    # Update function name in error message if needed (optional but good practice)
                    raise TimeoutError(f"Function {func.__name__} timed out after {seconds} seconds (multiprocessing)!")

                try:
                    success, result_or_exc = q.get(timeout=0.1)  # Small timeout for queue read
                    if success:
                        return result_or_exc
                    else:
                        raise result_or_exc  # Reraise exception from child
                except queue.Empty as err:
                    # Child exited without posting a result: distinguish a crash
                    # (non-zero exitcode) from an unexpected silent exit.
                    exitcode = process.exitcode
                    if exitcode is not None and exitcode != 0:
                        raise RuntimeError(
                            f"Child process exited with error (exitcode: {exitcode}) before returning result."
                        ) from err
                    else:
                        # Should have timed out if queue is empty after join unless process died unexpectedly
                        # Update function name in error message if needed (optional but good practice)
                        raise TimeoutError(
                            f"Operation timed out or process finished unexpectedly without result "
                            f"(exitcode: {exitcode})."
                        ) from err
                finally:
                    q.close()
                    q.join_thread()

            return wrapper_mp

    return decorator
def union_two_dict(dict1: dict, dict2: dict):
    """Merge ``dict2`` into ``dict1``, requiring agreement on shared keys.

    Args:
        dict1: Destination dictionary (mutated in place).
        dict2: Source dictionary whose entries are copied over.

    Returns:
        dict: ``dict1`` after the union.

    Raises:
        AssertionError: If a key exists in both dicts with differing values.
    """
    for key, value in dict2.items():
        if key in dict1:
            assert dict2[key] == dict1[key], f"{key} in meta_dict1 and meta_dict2 are not the same object"
        dict1[key] = value
    return dict1
def rename_dict(data: dict, prefix: str = "") -> dict:
    """Return a copy of ``data`` whose keys all carry ``prefix``.

    Keys that already start with ``prefix`` are left untouched.

    Args:
        data: A dictionary.
        prefix: Prefix to prepend to each key.

    Returns:
        dict: New dictionary with prefixed keys.
    """
    return {(key if key.startswith(prefix) else f"{prefix}{key}"): value for key, value in data.items()}
def append_to_dict(data: dict, new_data: dict, prefix: str = ""):
    """Append values from ``new_data`` onto lists held in ``data``.

    For each key in ``new_data`` (prefixed with ``prefix`` unless it already
    carries it), the value is appended to the list stored under that key in
    ``data``; list values are extended element-wise. Missing keys get a fresh
    list (``Metric`` values provide their own via ``init_list``).

    Args:
        data (Dict): The target dictionary containing lists as values; modified in place.
        new_data (Dict): The source dictionary with values to append.
        prefix (str): Optional key prefix, consistent with ``rename_dict``.

    Returns:
        None
    """
    for key, value in new_data.items():
        target_key = key if key.startswith(prefix) else f"{prefix}{key}"
        if target_key not in data:
            data[target_key] = value.init_list() if isinstance(value, Metric) else []
        if isinstance(value, list):
            data[target_key].extend(value)
        else:
            data[target_key].append(value)
class NestedNamespace(SimpleNamespace):
    """SimpleNamespace variant that recursively wraps nested dictionaries.

    Allows dot-notation access into arbitrarily nested dict structures.

    Example:
        config_dict = {"a": 1, "b": {"c": 2, "d": 3}}
        config = NestedNamespace(config_dict)
        # Access with: config.a, config.b.c, config.b.d

    Args:
        dictionary: The dictionary to convert to a nested namespace.
        **kwargs: Additional attributes to set on the namespace.
    """

    def __init__(self, dictionary, **kwargs):
        super().__init__(**kwargs)
        for key, value in dictionary.items():
            wrapped = NestedNamespace(value) if isinstance(value, dict) else value
            setattr(self, key, wrapped)
class DynamicEnumMeta(type):
    """Metaclass giving DynamicEnum classes dict-like and iterable behavior."""

    def __iter__(cls) -> Iterator[Any]:
        """Iterate over the registered members."""
        return iter(cls._registry.values())

    def __contains__(cls, item: Any) -> bool:
        # Support both `name in EnumClass` and `member in EnumClass`.
        return item in cls._registry if isinstance(item, str) else item in cls._registry.values()

    def __getitem__(cls, name: str) -> Any:
        """Look up a member by its registered name."""
        return cls._registry[name]

    def __reduce_ex__(cls, protocol):
        # Pickle by re-importing the module and fetching the class by name,
        # so unpickling reuses the existing class (and its registry).
        return getattr, (importlib.import_module(cls.__module__), cls.__name__)

    def names(cls):
        """Registered member names."""
        return list(cls._registry.keys())

    def values(cls):
        """Registered member objects."""
        return list(cls._registry.values())
class DynamicEnum(metaclass=DynamicEnumMeta):
    """Enum-like base whose members are created at runtime via ``register``.

    Each registered member is stored in ``_registry`` and also exposed as a
    class attribute (e.g. ``Dispatch.ONE_TO_ALL``); values auto-increment.
    """

    # name (upper-cased) -> member instance.
    _registry: dict[str, "DynamicEnum"] = {}
    # Integer value assigned to the next registered member.
    _next_value: int = 0

    def __init__(self, name: str, value: int):
        self.name = name
        self.value = value

    def __repr__(self):
        return f"<{self.__class__.__name__}.{self.name}: {self.value}>"

    def __reduce_ex__(self, protocol):
        """
        Unpickle via: getattr(import_module(module).Dispatch, 'ONE_TO_ALL')
        so the existing class is reused instead of re-executed.
        """
        module = importlib.import_module(self.__class__.__module__)
        enum_cls = getattr(module, self.__class__.__name__)
        return getattr, (enum_cls, self.name)

    @classmethod
    def register(cls, name: str) -> "DynamicEnum":
        """Create, store, and expose a new member under ``name.upper()``.

        Raises:
            ValueError: If the (upper-cased) name is already registered.
        """
        key = name.upper()
        if key in cls._registry:
            raise ValueError(f"{key} already registered")
        member = cls(key, cls._next_value)
        cls._registry[key] = member
        # Also expose the member as a class attribute for dotted access.
        setattr(cls, key, member)
        cls._next_value += 1
        return member

    @classmethod
    def remove(cls, name: str):
        """Drop the member registered under ``name`` (case-insensitive) and return it."""
        key = name.upper()
        member = cls._registry.pop(key)
        delattr(cls, key)
        return member

    @classmethod
    def from_name(cls, name: str) -> Optional["DynamicEnum"]:
        """Case-insensitive lookup; returns None when not registered."""
        return cls._registry.get(name.upper())
@contextmanager
def temp_env_var(key: str, value: str):
    """Temporarily set environment variable ``key`` to ``value``.

    The previous value (or absence) is restored on exit, even when the body
    raises.

    Args:
        key: Environment variable name to set
        value: Value to set the environment variable to

    Yields:
        None

    Example:
        >>> with temp_env_var("MY_VAR", "test_value"):
        ...     # MY_VAR is set to "test_value"
        ...     do_something()
        ...     # MY_VAR is restored to its original value or removed if it didn't exist
    """
    previous = os.environ.get(key)
    os.environ[key] = value
    try:
        yield
    finally:
        # Restore: delete if the variable did not exist before, else put it back.
        if previous is None:
            os.environ.pop(key, None)
        else:
            os.environ[key] = previous
def convert_to_regular_types(obj):
    """Convert Hydra configs and other special types to regular Python types."""
    from omegaconf import DictConfig, ListConfig

    if isinstance(obj, DictConfig):
        return {key: convert_to_regular_types(val) for key, val in obj.items()}
    if isinstance(obj, ListConfig):
        # NOTE(review): ListConfig is converted shallowly (matches original
        # behavior) — nested config elements are not recursed into.
        return list(obj)
    if isinstance(obj, (list, tuple)):
        return [convert_to_regular_types(item) for item in obj]
    if isinstance(obj, dict):
        return {key: convert_to_regular_types(val) for key, val in obj.items()}
    return obj
| verl__utils__py_functional.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""QAT (Quantization-Aware Training) utilities for verl FSDP training."""
import json
import logging
import re
from dataclasses import dataclass, field
from typing import Any, Optional
import torch.nn as nn
from verl.base_config import BaseConfig
# Module-level logger for the QAT utilities (handlers/level configured by the application).
logger = logging.getLogger(__name__)
@dataclass
class QATConfig(BaseConfig):
    """Unified configuration for QAT (Quantization-Aware Training)."""

    # Master switch; when False, apply_qat returns the model unchanged.
    enable: bool = False
    # Quantization mode string, e.g. "w4a16" (interpreted by QATMode in verl.utils.qat.linear).
    mode: str = "w4a16"
    # Per-group quantization size along in_features; layers whose in_features
    # is not divisible by it are skipped (see _should_quantize).
    group_size: int = 16
    # Module-name patterns excluded from quantization; a "re:" prefix marks a regex.
    ignore_patterns: list[str] = field(default_factory=lambda: ["lm_head", "embed_tokens", "re:.*mlp.gate$"])
    # Observer strategy name passed through to QATLinear.from_linear.
    activation_observer: str = "static_minmax"
    # Path to a JSON quantization config; required when QAT is enabled.
    quantization_config_path: Optional[str] = None
def load_quantization_config(qat_config: QATConfig) -> dict[str, Any]:
    """Load quantization config JSON file from QATConfig.

    Args:
        qat_config: QAT settings; must carry a ``quantization_config_path``.

    Returns:
        dict: Parsed quantization config, with its "ignore" list replaced by
        ``qat_config.ignore_patterns`` when patterns are configured.

    Raises:
        ValueError: If no quantization_config_path is configured.
    """
    path = qat_config.quantization_config_path
    if not path:
        raise ValueError("quantization_config_path is required when QAT is enabled")

    logger.info(f"Loading QAT quantization config from: {path}")
    with open(path) as fp:
        quant_config = json.load(fp)

    if qat_config.ignore_patterns:
        previous_ignore = quant_config.get("ignore", [])
        quant_config["ignore"] = qat_config.ignore_patterns
        if previous_ignore != qat_config.ignore_patterns:
            logger.info(f"Overriding JSON 'ignore' field: {previous_ignore} -> {qat_config.ignore_patterns}")

    logger.info("Successfully loaded QAT quantization config")
    return quant_config
def _should_quantize(name: str, module: nn.Module, config: QATConfig) -> bool:
    """Decide whether the module at ``name`` should be converted to a QAT linear.

    Only nn.Linear layers qualify; layers matching an ignore pattern or whose
    in_features is not divisible by the group size are rejected.
    """
    if not isinstance(module, nn.Linear):
        return False

    for pattern in config.ignore_patterns:
        if pattern.startswith("re:"):
            # "re:" prefix marks a regex matched against the full module name.
            regex = pattern[3:]
            if re.match(regex, name):
                logger.debug(f"Ignoring {name} due to regex pattern: {regex}")
                return False
        elif pattern in name:
            logger.debug(f"Ignoring {name} due to pattern: {pattern}")
            return False

    if module.in_features % config.group_size != 0:
        logger.warning(
            f"Skipping {name}: in_features={module.in_features} not divisible by group_size={config.group_size}"
        )
        return False

    return True
def apply_qat(
    model: nn.Module,
    config: QATConfig | dict[str, Any],
) -> nn.Module:
    """Apply QAT to a model by replacing nn.Linear with QATLinear.

    Args:
        model: Model whose eligible nn.Linear layers are swapped in place.
        config: QATConfig instance, or a dict of its fields.

    Returns:
        nn.Module: The same model object, with QATLinear layers installed.
    """
    from verl.utils.qat.linear import QATLinear, QATMode

    if not isinstance(config, QATConfig):
        config = QATConfig(**config)

    if not config.enable:
        logger.info("QAT is disabled, returning original model")
        return model

    mode = QATMode(config.mode.lower())
    logger.info(f"Applying QAT with mode={mode.value}, group_size={config.group_size}")

    # Collect targets first: mutating the module tree while iterating
    # named_modules() would be unsafe.
    targets = [(name, module) for name, module in model.named_modules() if _should_quantize(name, module, config)]
    logger.info(f"Found {len(targets)} Linear layers to convert to QAT")

    converted = 0
    for name, module in targets:
        if isinstance(module, QATLinear):
            # Already converted (e.g. apply_qat called twice).
            continue
        replacement = QATLinear.from_linear(
            module,
            mode=mode,
            group_size=config.group_size,
            activation_observer=config.activation_observer,
        )
        _set_module(model, name, replacement)
        converted += 1

    logger.info(f"Successfully applied QAT to {converted} layers")
    return model
def _set_module(model: nn.Module, name: str, new_module: nn.Module):
"""Set a module in the model by its full name."""
parts = name.split(".")
parent = model
for part in parts[:-1]:
parent = getattr(parent, part)
setattr(parent, parts[-1], new_module)
# Suffix groups of sibling projection layers treated as fusion groups;
# consumed by setup_fusion_siblings to wire members of the same parent together.
FUSION_PATTERNS = {
    "qkv": ["q_proj", "k_proj", "v_proj"],
    "gate_up": ["gate_proj", "up_proj"],
}
def setup_fusion_siblings(model: nn.Module):
    """Setup fusion siblings for QKV and GateUp layers.

    For every parent module owning >= 2 QATLinear projections from the same
    FUSION_PATTERNS group, stores weak references to each member's siblings on
    the member itself (``_fusion_siblings_ref``), so scale fusion can reach
    them without creating reference cycles.

    Returns:
        dict: Per-group count of members that were wired up.
    """
    import weakref

    from verl.utils.qat.linear import QATLinear

    # All QAT linears keyed by their fully qualified module name.
    qat_modules = {name: m for name, m in model.named_modules() if isinstance(m, QATLinear)}
    counts = {}
    for group_name, suffixes in FUSION_PATTERNS.items():
        # parent module name -> {suffix: module} for this fusion group.
        groups: dict[str, dict[str, nn.Module]] = {}
        for name, module in qat_modules.items():
            for suffix in suffixes:
                if name.endswith(suffix):
                    # Parent is everything before the last dotted component.
                    parent = name.rsplit(".", 1)[0]
                    groups.setdefault(parent, {})[suffix] = module
        count = 0
        for parent, projs in groups.items():
            if len(projs) >= 2:
                modules = list(projs.values())
                for i, m in enumerate(modules):
                    # Each member weakly references every other member of its group.
                    siblings = modules[:i] + modules[i + 1 :]
                    m._fusion_siblings_ref = [weakref.ref(s) for s in siblings]
                    count += 1
        counts[group_name] = count
    logger.info(f"[QAT Fuse] Setup fusion siblings: {counts}")
    return counts
def enable_qat_fuse(model: nn.Module):
    """Enable QAT fuse mode: sets up fusion siblings for weight scale fusion.

    After this call, fused projection groups (QKV / GateUp) share a single
    weight amax via ``_fusion_siblings_ref`` weakrefs.
    """
    setup_fusion_siblings(model)
    # Flag recorded on the model to indicate fuse mode is active.
    model._qat_fuse_enabled = True
    logger.info("[QAT Fuse] Enabled QAT fuse mode")
def invalidate_all_scales(model: nn.Module):
    """Drop every QATLinear's cached weight-scale state.

    Intended to run after ``optimizer.step()``: once the weights change,
    the cached amax / global / blockwise scales are stale and must be
    recomputed on the next forward.
    """
    from verl.utils.qat.linear import QATLinear

    cleared = 0
    for layer in model.modules():
        if not isinstance(layer, QATLinear):
            continue
        layer._weight_blockwise_scale = None
        layer._weight_global_scale = None
        layer._cached_weight_amax = None
        cleared += 1
    logger.debug(f"[QAT Fuse] Invalidated scales for {cleared} QATLinear layers")
| verl__utils__qat__core.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""QAT FakeQuantized Linear module for NVFP4 (W4A4/W4A16) with FSDP compatibility.
Includes Triton kernels for high-performance FP4 quantization.
"""
from enum import Enum
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ["QATLinear", "QATMode"]
import triton
import triton.language as tl
# Map torch dtypes to Triton dtypes for casting the kernel output tile.
_TORCH_TO_TL_DTYPE = {
    torch.float32: tl.float32,
    torch.float16: tl.float16,
    torch.bfloat16: tl.bfloat16,
}
# Largest representable magnitude of the FP4 E2M1 value grid.
FP4_E2M1_MAX: float = 6.0
# Largest representable magnitude of the FP8 E4M3 blockwise-scale format.
FP8_E4M3_MAX: float = 448.0
@triton.jit
def _fp4_fake_quant_kernel(
    x_ptr,
    y_ptr,
    M,
    N,
    global_scale_ptr,
    stride_xm,
    stride_xn,
    stride_ym,
    stride_yn,
    BLOCK_SIZE: tl.constexpr,
    TILE_M: tl.constexpr,
    TILE_N: tl.constexpr,
    NUM_FP4_BLOCKS: tl.constexpr,
    OUT_DTYPE: tl.constexpr,
    FP4_MAX: tl.constexpr,
    FP8_MAX: tl.constexpr,
):
    """Fake-quantize one (TILE_M, TILE_N) tile of x into y with NVFP4.

    For each contiguous group of BLOCK_SIZE elements along the last
    dimension: derive a per-block scale from the block abs-max (quantized
    to FP8 E4M3 and scaled by the global scale), snap the scaled
    magnitudes onto the FP4 E2M1 value grid {0, 0.5, 1, 1.5, 2, 3, 4, 6},
    then rescale back — i.e. quantize + dequantize in one pass.
    """
    # Each program instance handles one (TILE_M, TILE_N) tile of the matrix.
    pid_m = tl.program_id(axis=0)
    pid_n = tl.program_id(axis=1)
    row_start = pid_m * TILE_M
    col_start = pid_n * TILE_N
    x_block_ptr = tl.make_block_ptr(
        base=x_ptr,
        shape=(M, N),
        strides=(stride_xm, stride_xn),
        offsets=(row_start, col_start),
        block_shape=(TILE_M, TILE_N),
        order=(1, 0),
    )
    y_block_ptr = tl.make_block_ptr(
        base=y_ptr,
        shape=(M, N),
        strides=(stride_ym, stride_yn),
        offsets=(row_start, col_start),
        block_shape=(TILE_M, TILE_N),
        order=(1, 0),
    )
    # Global scale is a single FP32 scalar; guard against 0 to avoid div-by-zero.
    global_scale = tl.load(global_scale_ptr).to(tl.float32)
    global_scale_safe = tl.where(global_scale > 0.0, global_scale, 1e-12)
    # Out-of-bounds rows/cols are zero-padded; padding quantizes to 0 and the
    # store below is boundary-checked, so it never leaks into y.
    tile = tl.load(x_block_ptr, boundary_check=(0, 1), padding_option="zero").to(tl.float32)
    # View the tile as NUM_FP4_BLOCKS groups of BLOCK_SIZE along the columns.
    tile_reshaped = tl.reshape(tile, (TILE_M, NUM_FP4_BLOCKS, BLOCK_SIZE))
    x_abs = tl.abs(tile_reshaped)
    block_max = tl.max(x_abs, axis=2, keep_dims=True)
    # Per-block scale candidate, clamped to FP8 range, then round-tripped
    # through FP8 E4M3 so training sees exactly the deployable scales.
    block_max_scaled = block_max / (FP4_MAX * global_scale_safe)
    block_max_scaled = tl.minimum(block_max_scaled, FP8_MAX)
    block_max_quant = block_max_scaled.to(tl.float8e4nv).to(tl.float32) * global_scale
    # Degenerate (near-zero) scales fall back to 1.0 so the division is safe.
    block_max_quant = tl.where(block_max_quant >= 1e-5, block_max_quant, 1.0)
    block_max_quant_broadcast = tl.broadcast_to(block_max_quant, (TILE_M, NUM_FP4_BLOCKS, BLOCK_SIZE))
    abs_scaled = x_abs / block_max_quant_broadcast
    # Snap magnitudes onto the E2M1 grid; the alternating <= / < thresholds
    # at the midpoints emulate round-half-to-even on that grid.
    q_val = tl.where(
        abs_scaled <= 0.25,
        0.0,
        tl.where(
            abs_scaled < 0.75,
            0.5,
            tl.where(
                abs_scaled <= 1.25,
                1.0,
                tl.where(
                    abs_scaled < 1.75,
                    1.5,
                    tl.where(
                        abs_scaled <= 2.5,
                        2.0,
                        tl.where(abs_scaled < 3.5, 3.0, tl.where(abs_scaled <= 5.0, 4.0, FP4_MAX)),
                    ),
                ),
            ),
        ),
    )
    # Dequantize and restore the original signs.
    x_rescaled = q_val * block_max_quant_broadcast
    x_rescaled = tl.where(tile_reshaped >= 0, x_rescaled, -x_rescaled)
    tile_quant = tl.reshape(x_rescaled, (TILE_M, TILE_N))
    tl.store(y_block_ptr, tile_quant.to(OUT_DTYPE), boundary_check=(0, 1))
def fp4_fake_quant_weight(
    weight: torch.Tensor,
    global_amax: torch.Tensor = None,
    block_size: int = 16,
    tile_rows: int = 16,
    tile_cols: int = 64,
) -> torch.Tensor:
    """Apply FP4 fake quantization using Triton kernel.

    Args:
        weight: tensor to fake-quantize; flattened to 2-D over its last dim.
        global_amax: pre-computed global abs-max (scalar tensor). Computed
            from ``weight`` when omitted — pass it explicitly to share one
            scale across fused sibling layers.
        block_size: number of elements per FP4 scaling block (last dim).
        tile_rows: Triton tile height.
        tile_cols: Triton tile width; rounded up to a multiple of ``block_size``.

    Returns:
        Fake-quantized tensor with the original shape and dtype.
    """
    x_shape = weight.shape
    x_dtype = weight.dtype
    # Kernel operates on a contiguous 2-D view over the last dimension.
    x = weight.reshape(-1, x_shape[-1]).contiguous()
    M, N = x.shape
    y = torch.empty_like(x)
    stride_xm, stride_xn = x.stride()
    stride_ym, stride_yn = y.stride()
    # Tiles must span whole FP4 blocks: widen/align tile_cols to block_size.
    tile_cols = max(tile_cols, block_size)
    tile_cols_aligned = ((tile_cols + block_size - 1) // block_size) * block_size
    num_fp4_blocks = tile_cols_aligned // block_size
    if global_amax is None:
        global_amax = weight.abs().max().to(torch.float32)
    # NVFP4 global scale: amax / (FP4 max * FP8 max).
    global_scale = global_amax.float() / (FP4_E2M1_MAX * FP8_E4M3_MAX)
    grid = (triton.cdiv(M, tile_rows), triton.cdiv(N, tile_cols_aligned))
    _fp4_fake_quant_kernel[grid](
        x,
        y,
        M,
        N,
        global_scale,
        stride_xm,
        stride_xn,
        stride_ym,
        stride_yn,
        BLOCK_SIZE=block_size,
        TILE_M=tile_rows,
        TILE_N=tile_cols_aligned,
        NUM_FP4_BLOCKS=num_fp4_blocks,
        OUT_DTYPE=_TORCH_TO_TL_DTYPE[x_dtype],
        FP4_MAX=FP4_E2M1_MAX,
        FP8_MAX=FP8_E4M3_MAX,
    )
    return y.view(*x_shape)
class STEFP4QuantTriton(torch.autograd.Function):
    """Straight-through estimator around the Triton FP4 fake-quant kernel.

    Forward runs the non-differentiable fake quantization; backward passes
    the incoming gradient through unchanged (``None`` for the amax and
    block-size arguments, which need no gradient).
    """

    @staticmethod
    def forward(ctx, x: torch.Tensor, global_amax: torch.Tensor, block_size: int) -> torch.Tensor:
        quantized = fp4_fake_quant_weight(x, global_amax=global_amax, block_size=block_size)
        return quantized

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> tuple:
        # Identity gradient w.r.t. x only.
        return grad_output, None, None
class QATMode(str, Enum):
    """Supported QAT quantization recipes."""

    # 4-bit weights and 4-bit activations (activation scale tracked dynamically).
    W4A4 = "w4a4"
    # 4-bit weights only; activations stay in the original 16-bit dtype.
    W4A16 = "w4a16"
class QATLinear(nn.Linear):
    """QAT FakeQuantized Linear layer with FSDP compatibility.

    Weights (and, in W4A4 mode, activations) are fake-quantized to NVFP4
    on the forward pass via the Triton STE kernel, so gradients flow
    straight through while forward numerics match FP4 inference. Scale
    state lives in plain attributes / buffers (no extra trainable
    parameters), keeping the module compatible with FSDP sharding.
    """
    # Sentinel stored in the scale/amax buffers until real statistics are
    # observed (or loaded from a PTQ checkpoint).
    _UNINITIALIZED_SCALE = -1.0
    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        mode: QATMode = QATMode.W4A4,
        group_size: int = 16,
        activation_observer: str = "static_minmax",  # Observer strategy for activation global_scale
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        super().__init__(in_features, out_features, bias, device=device, dtype=dtype)
        self.mode = mode
        self.group_size = group_size
        self.activation_observer = activation_observer
        # Lazily-computed weight-scale caches; cleared externally after each
        # optimizer step (see invalidate_all_scales in the core module).
        self._weight_blockwise_scale: Optional[torch.Tensor] = None
        self._weight_global_scale: Optional[torch.Tensor] = None
        self._cached_weight_amax: Optional[torch.Tensor] = None
        # Weakrefs to fused siblings (QKV / GateUp) sharing one weight amax.
        self._fusion_siblings_ref = None
        if mode == QATMode.W4A4:
            # Persistent buffers so activation statistics survive checkpointing.
            self.register_buffer(
                "input_global_scale", torch.tensor([self._UNINITIALIZED_SCALE], dtype=torch.float32), persistent=True
            )
            self.register_buffer(
                "input_amax", torch.tensor([self._UNINITIALIZED_SCALE], dtype=torch.float32), persistent=True
            )
            # EMA coefficient used only by the "minmax" observer.
            self._ema_decay: float = 0.01
        self.fake_quant_enabled = True
    @classmethod
    def from_linear(
        cls,
        linear: nn.Linear,
        mode: QATMode = QATMode.W4A4,
        group_size: int = 16,
        activation_observer: str = "static_minmax",
    ) -> "QATLinear":
        """Create QATLinear from an existing nn.Linear, cloning its weights/bias."""
        has_bias = linear.bias is not None
        new_linear = cls(
            in_features=linear.in_features,
            out_features=linear.out_features,
            bias=has_bias,
            mode=mode,
            group_size=group_size,
            activation_observer=activation_observer,
            device=linear.weight.device,
            dtype=linear.weight.dtype,
        )
        # Meta-device weights carry no data to copy; presumably real weights
        # are materialized later (e.g. by FSDP init / checkpoint load).
        if linear.weight.device != torch.device("meta"):
            new_linear.weight = nn.Parameter(linear.weight.clone())
            if has_bias:
                new_linear.bias = nn.Parameter(linear.bias.clone())
        return new_linear
    def _is_amax_initialized(self) -> bool:
        """Check if input_amax has been initialized (W4A16 has no such buffer)."""
        if not hasattr(self, "input_amax"):
            return False
        # NOTE: .item() forces a host sync when the buffer lives on GPU.
        return self.input_amax.item() != self._UNINITIALIZED_SCALE
    def _update_input_global_scale(self, x: torch.Tensor):
        """Update static input_global_scale based on observer strategy.

        Observers:
        - "memoryless_minmax": scale from the current batch only.
        - "static_minmax": running max of amax over all batches.
        - "minmax": exponential moving average of amax.
        """
        assert self.mode == QATMode.W4A4, "_update_input_global_scale should only be called in W4A4 mode"
        current_amax = torch.amax(torch.abs(x)).detach().to(torch.float32)
        # Keep the activation scale consistent across data-parallel ranks.
        if torch.distributed.is_initialized() and torch.distributed.get_world_size() > 1:
            torch.distributed.all_reduce(current_amax, op=torch.distributed.ReduceOp.MAX)
        scale_factor = FP8_E4M3_MAX * FP4_E2M1_MAX
        if self.activation_observer == "memoryless_minmax":
            new_scale = (scale_factor / (current_amax + 1e-12)).view(1)
            self.input_global_scale.copy_(new_scale.to(self.input_global_scale.device))
        elif self.activation_observer == "static_minmax":
            if not self._is_amax_initialized():
                self.input_amax.copy_(current_amax.view(1).to(self.input_amax.device))
            else:
                new_amax = torch.maximum(self.input_amax, current_amax.view(1).to(self.input_amax.device))
                self.input_amax.copy_(new_amax)
            amax_f32 = self.input_amax.to(torch.float32)
            new_scale = (scale_factor / (amax_f32 + 1e-12)).float().view(1)
            self.input_global_scale.copy_(new_scale.to(self.input_global_scale.device))
        elif self.activation_observer == "minmax":
            if not self._is_amax_initialized():
                self.input_amax.copy_(current_amax.view(1).to(self.input_amax.device))
            else:
                # EMA update: amax <- (1 - d) * amax + d * current.
                new_amax = (1 - self._ema_decay) * self.input_amax + self._ema_decay * current_amax.view(1).to(
                    self.input_amax.device
                )
                self.input_amax.copy_(new_amax)
            amax_f32 = self.input_amax.to(torch.float32)
            new_scale = (scale_factor / (amax_f32 + 1e-12)).float().view(1)
            self.input_global_scale.copy_(new_scale.to(self.input_global_scale.device))
        else:
            raise ValueError(f"Unknown activation_observer: {self.activation_observer}")
    def _fake_quantize_weight(self, weight: torch.Tensor) -> torch.Tensor:
        """Apply fake quantization to weight tensor using Triton kernel.

        The global amax is cached per step; when fusion siblings are linked,
        one shared amax is computed across the whole group and propagated to
        every sibling's cache.
        """
        with torch.no_grad():
            if self._cached_weight_amax is not None:
                global_amax = self._cached_weight_amax
            else:
                siblings_ref = getattr(self, "_fusion_siblings_ref", None)
                if siblings_ref is not None:
                    # Resolve live siblings; skip meta-device (uninitialized) ones.
                    siblings = [ref() for ref in siblings_ref if ref() is not None]
                    siblings = [s for s in siblings if s.weight.device != torch.device("meta")]
                    for sibling in siblings:
                        sibling_amax = getattr(sibling, "_cached_weight_amax", None)
                        if sibling_amax is not None:
                            # A sibling already computed the shared amax — reuse it.
                            global_amax = sibling_amax
                            self._cached_weight_amax = global_amax
                            break
                    else:
                        # for/else: no sibling had a cache — compute the group
                        # max here and share it with every sibling.
                        all_modules = [self] + siblings
                        amaxes = [m.weight.abs().max().to(torch.float32) for m in all_modules]
                        global_amax = torch.max(torch.stack(amaxes))
                        self._cached_weight_amax = global_amax
                        for sibling in siblings:
                            sibling._cached_weight_amax = global_amax
                else:
                    global_amax = weight.abs().max().to(torch.float32)
                    self._cached_weight_amax = global_amax
            if self._weight_global_scale is None:
                self._weight_global_scale = global_amax.float() / (FP4_E2M1_MAX * FP8_E4M3_MAX)
        result = STEFP4QuantTriton.apply(weight, global_amax, self.group_size)
        return result
    def _fake_quantize_activation(self, x: torch.Tensor) -> torch.Tensor:
        """Apply fake quantization to activation tensor (W4A4 mode only)."""
        original_shape = x.shape
        # Kernel expects 2-D input; flatten (batch, seq, dim) -> (batch*seq, dim).
        if x.dim() == 3:
            x_2d = x.view(-1, x.shape[-1])
        else:
            x_2d = x
        # Observers only run in training; eval relies on a previously
        # observed (or PTQ-loaded) scale.
        if self.training:
            self._update_input_global_scale(x_2d)
        if self.input_global_scale.item() == self._UNINITIALIZED_SCALE:
            raise RuntimeError("W4A4 input_global_scale uninitialized. Load PTQ model first.")
        # Invert the stored scale back into an amax for the shared kernel.
        global_amax = (FP4_E2M1_MAX * FP8_E4M3_MAX) / self.input_global_scale.to(x.device)
        result = STEFP4QuantTriton.apply(x_2d, global_amax, self.group_size)
        return result.view(original_shape)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass with fake quantization (plain linear when disabled)."""
        if not self.fake_quant_enabled:
            return F.linear(x, self.weight, self.bias)
        weight_fq = self._fake_quantize_weight(self.weight)
        if self.mode == QATMode.W4A4:
            x_fq = self._fake_quantize_activation(x)
        else:
            x_fq = x
        # Bias is intentionally left unquantized.
        return F.linear(x_fq, weight_fq, self.bias)
    def extra_repr(self) -> str:
        """Human-readable summary used by module printouts."""
        return (
            f"in_features={self.in_features}, out_features={self.out_features}, "
            f"bias={self.bias is not None}, mode={self.mode.value}, "
            f"group_size={self.group_size}, fake_quant_enabled={self.fake_quant_enabled}"
        )
| verl__utils__qat__linear.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fast NVFP4 Quantizer for verl FSDP training.
Directly computes scales and quantizes weights using compressed_tensors APIs.
Includes scale computation utilities for weight quantization.
"""
import logging
import os
import re
from typing import Generator, Iterable, Optional
import torch
from compressed_tensors.compressors.quantized_compressors.fp4_quantized import NVFP4PackedCompressor
from compressed_tensors.quantization.quant_args import (
FP4_E2M1_DATA,
FP8_E4M3_DATA,
QuantizationArgs,
QuantizationStrategy,
QuantizationType,
)
from compressed_tensors.quantization.utils.helpers import generate_gparam
from verl.utils.device import get_device_name, get_torch_device
# Module logger; verbosity controlled via the VERL_LOGGING_LEVEL env var.
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
# Extracts the decoder-layer index from HF-style parameter names,
# e.g. "model.layers.12.self_attn.q_proj.weight" -> "12".
_LAYER_IDX_RE = re.compile(r"layers\.(\d+)\.")
def compute_blockwise_scale(
    weight: torch.Tensor,
    global_scale: torch.Tensor,
    group_size: int = 16,
) -> torch.Tensor:
    """Compute blockwise scale using pre-computed global_scale (for fusion).

    The weight is viewed as groups of ``group_size`` elements along the
    input dimension; each group gets one FP8 E4M3 scale derived from its
    abs-max and the shared (possibly fused) global scale.

    Returns FP8 E4M3 blockwise scale tensor of shape
    (out_features, in_features // group_size).
    """
    rows, cols = weight.shape
    grouped = weight.view(rows, cols // group_size, group_size)
    group_amax = torch.amax(torch.abs(grouped), dim=-1).to(torch.float32)
    per_group_scale = group_amax / FP4_E2M1_DATA.max
    clamped = torch.clamp(
        global_scale * per_group_scale,
        min=-FP8_E4M3_DATA.max,
        max=FP8_E4M3_DATA.max,
    )
    scale_fp8 = clamped.to(torch.float8_e4m3fn)
    # A zero scale would make dequantization degenerate; bump it to FP8 eps.
    eps = torch.finfo(torch.float8_e4m3fn).eps
    return torch.where(
        scale_fp8 == 0,
        torch.tensor(eps, dtype=scale_fp8.dtype, device=weight.device),
        scale_fp8,
    )
# Fusion patterns for transformer models
# Sibling projections fused into one GEMM at inference; every member of a
# complete group must share a single fused global scale.
FUSE_PATTERNS = {
    "qkv": ["q_proj", "k_proj", "v_proj"],
    "gate_up": ["gate_proj", "up_proj"],
}
def fuse_global_scales(
    layer_global_scales: dict[str, torch.Tensor],
    strategy: str = "min",
) -> dict[str, torch.Tensor]:
    """Fuse global scales for QKV/GateUp groups (take min across group).

    Layers forming a complete fusion group under the same parent receive a
    shared scale (each member gets its own clone); all other layers keep
    their original scale tensor.
    """
    if not layer_global_scales:
        return {}

    # Bucket layer names by parent module: parent -> {child attr: full name}.
    by_parent: dict[str, dict[str, str]] = {}
    for full_name in layer_global_scales:
        if "." in full_name:
            parent, child = full_name.rsplit(".", 1)
        else:
            parent, child = "", full_name
        by_parent.setdefault(parent, {})[child] = full_name

    fused: dict[str, torch.Tensor] = {}
    fused_names: set = set()
    for children in by_parent.values():
        for patterns in FUSE_PATTERNS.values():
            members = [children[p] for p in patterns if p in children]
            # Incomplete groups are left unfused.
            if len(members) != len(patterns):
                continue
            if strategy != "min":
                raise ValueError(f"Unknown fuse strategy: {strategy}")
            shared = torch.min(torch.cat([layer_global_scales[n] for n in members])).reshape([1])
            for member in members:
                fused[member] = shared.clone()
                fused_names.add(member)

    # Pass through every layer that was not part of a complete group.
    for full_name, scale in layer_global_scales.items():
        if full_name not in fused_names:
            fused[full_name] = scale
    return fused
class QATQuantizer:
    """Quantizer for QAT-trained weights using compressed_tensors APIs.

    Streams a checkpoint's parameters layer by layer, fuses global scales
    for QKV/GateUp groups, and emits NVFP4-packed weights plus their
    blockwise / global scales (and, in W4A4 mode, activation scales).
    """
    def __init__(
        self,
        mode: str = "w4a16",
        group_size: int = 16,
        ignore_patterns: Optional[list] = None,
        device: Optional[torch.device] = None,
        param_dtype: Optional[torch.dtype] = None,
    ):
        """
        Args:
            mode: "w4a16" (weight-only) or "w4a4" (also exports activation scales).
            group_size: elements per FP4 scaling block along the input dim.
            ignore_patterns: substrings or "re:"-prefixed regexes of module
                names to skip; defaults cover lm_head / embeddings / MoE router.
            device: device used for scale computation and packing.
            param_dtype: dtype weights are cast to before quantization.
        """
        self.mode = mode.lower()
        self._is_w4a4 = self.mode == "w4a4"  # W4A4 needs input_global_scale
        self.group_size = group_size
        self.ignore_patterns = ignore_patterns or ["lm_head", "embed_tokens", "re:.*mlp.gate$"]
        self.device = device or torch.device(get_device_name())
        self.param_dtype = param_dtype
        self._compressor = NVFP4PackedCompressor()
        self._quant_args = QuantizationArgs(
            num_bits=4,
            type=QuantizationType.FLOAT,
            symmetric=True,
            strategy=QuantizationStrategy.TENSOR_GROUP,
            group_size=group_size,
            scale_dtype=FP8_E4M3_DATA.dtype,
        )
    def _should_quantize(self, name: str, tensor: torch.Tensor) -> bool:
        """Check if parameter should be quantized.

        Only 2-D ``.weight`` tensors whose input dim is divisible by the
        group size and whose module name matches no ignore pattern qualify.
        """
        if not name.endswith(".weight"):
            return False
        if tensor.dim() != 2:
            return False
        if tensor.shape[1] % self.group_size != 0:
            return False
        module_name = name.rsplit(".weight", 1)[0]
        for pattern in self.ignore_patterns:
            if pattern.startswith("re:"):
                # Regex pattern - use re.match like vLLM does
                regex = pattern[3:]
                if re.match(regex, module_name):
                    return False
            else:
                if pattern in module_name:
                    return False
        return True
    @staticmethod
    def _extract_layer_idx(name: str) -> Optional[int]:
        """Extract decoder layer index from parameter name (None if absent)."""
        match = _LAYER_IDX_RE.search(name)
        return int(match.group(1)) if match else None
    def _process_layer_group(
        self,
        layer_idx: Optional[int],
        layer_params: dict[str, torch.Tensor],
        input_global_scales: dict[str, torch.Tensor],
        output_device: torch.device,
    ) -> list[tuple[str, torch.Tensor]]:
        """Quantize one decoder layer's buffered params. Returns list of (name, tensor)."""
        layer_weights = {}
        layer_passthrough = {}
        for name, tensor in layer_params.items():
            # Activation-scale buffers are handled separately (collected in
            # quantize_with_fusion); never emitted as-is.
            if "input_global_scale" in name or "input_amax" in name:
                continue
            if self._should_quantize(name, tensor):
                layer_name = name.rsplit(".weight", 1)[0]
                layer_weights[layer_name] = (name, tensor)
            else:
                layer_passthrough[name] = tensor
        if layer_idx is None and layer_weights:
            raise RuntimeError(
                f"[QAT Quantizer] Unexpected quantizable weights outside decoder layers: "
                f"{list(layer_weights.keys())}. These should be in ignore_patterns."
            )
        if not layer_weights:
            return [(name, tensor.to(output_device)) for name, tensor in layer_passthrough.items()]
        # Move weights to GPU, compute global scales
        weights_on_gpu = {}
        layer_global_scales = {}
        for layer_name, (_, tensor) in layer_weights.items():
            weight_gpu = tensor.to(device=self.device, dtype=self.param_dtype)
            weights_on_gpu[layer_name] = weight_gpu
            amax = torch.amax(torch.abs(weight_gpu)).to(torch.float32)
            layer_global_scales[layer_name] = generate_gparam(
                -amax.unsqueeze(0),
                amax.unsqueeze(0),
                scale_data=FP8_E4M3_DATA,
                quant_data=FP4_E2M1_DATA,
                dtype=torch.float32,
            )
        # Share one global scale per complete QKV / GateUp group.
        fused_global_scales = fuse_global_scales(layer_global_scales, strategy="min")
        results = []
        for layer_name, weight_gpu in weights_on_gpu.items():
            fused_global_scale = fused_global_scales[layer_name]
            weight_scale = compute_blockwise_scale(weight_gpu, fused_global_scale, self.group_size)
            weight_packed = self._compressor.compress_weight(
                weight=weight_gpu,
                scale=weight_scale.float(),
                global_scale=fused_global_scale,
                quantization_args=self._quant_args,
            )["weight_packed"]
            results.append((f"{layer_name}.weight_packed", weight_packed.to(output_device)))
            results.append((f"{layer_name}.weight_scale", weight_scale.to(output_device)))
            results.append((f"{layer_name}.weight_global_scale", fused_global_scale.to(output_device)))
            if self._is_w4a4:
                if layer_name in input_global_scales:
                    results.append(
                        (
                            f"{layer_name}.input_global_scale",
                            input_global_scales[layer_name].float().to(output_device),
                        )
                    )
                else:
                    raise ValueError(
                        f"W4A4 mode requires input_global_scale for layer '{layer_name}', "
                        f"but it's not found or uninitialized (-1.0)."
                    )
        # Drop GPU staging tensors before handling passthrough params.
        del weights_on_gpu, layer_global_scales, fused_global_scales
        for name, tensor in layer_passthrough.items():
            results.append((name, tensor.to(output_device)))
        return results
    def quantize_with_fusion(
        self,
        params: dict[str, torch.Tensor] | Iterable[tuple[str, torch.Tensor]],
        target_device: Optional[torch.device] = None,
    ) -> Generator[tuple[str, torch.Tensor], None, None]:
        """Streaming quantize: consume input layer by layer, yield (name, tensor) pairs.

        Assumes the iterator yields each decoder layer's params contiguously;
        layer boundaries are detected by a change in the extracted layer index.
        """
        if isinstance(params, dict):
            params = params.items()
        output_device = target_device or torch.device("cpu")
        # Sentinel distinguishes "nothing seen yet" from a legitimate
        # layer_idx of None (params outside decoder layers).
        _sentinel = object()
        current_layer_idx = _sentinel
        layer_buffer: dict[str, torch.Tensor] = {}
        input_global_scales: dict[str, torch.Tensor] = {}
        for name, tensor in params:
            # Stage on CPU to bound GPU memory while buffering a layer.
            tensor_cpu = tensor.to("cpu") if tensor.is_cuda else tensor
            layer_idx = self._extract_layer_idx(name)
            # Collect input_global_scales for W4A4 as we go
            if self._is_w4a4 and "input_global_scale" in name:
                scale_layer_name = name.replace(".input_global_scale", "")
                # -1.0 is the QATLinear "uninitialized" sentinel.
                if tensor_cpu.numel() == 1 and tensor_cpu.item() == -1.0:
                    logger.warning(f"W4A4: {scale_layer_name} input_global_scale is uninitialized")
                else:
                    input_global_scales[scale_layer_name] = tensor_cpu
            # Layer boundary: flush previous layer
            if layer_idx != current_layer_idx and current_layer_idx is not _sentinel and layer_buffer:
                yield from self._process_layer_group(
                    current_layer_idx, layer_buffer, input_global_scales, output_device
                )
                layer_buffer = {}
            current_layer_idx = layer_idx
            layer_buffer[name] = tensor_cpu
        # Flush last buffered layer
        if layer_buffer:
            yield from self._process_layer_group(current_layer_idx, layer_buffer, input_global_scales, output_device)
        get_torch_device().empty_cache()
# Public entry point; the scale helpers above are internal building blocks
# and intentionally not exported.
__all__ = [
    "QATQuantizer",
]
| verl__utils__qat__quantizer.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
vLLM NVFP4 Patches for Dynamic Weight Updates.
Enables dynamic weight reloading for NVFP4 quantized models in vLLM.
Supported schemes:
- Dense: W4A16-FP4, W4A4-FP4
- MoE: NVFP4-MoE
"""
import logging
import os
from typing import Optional
from unittest.mock import patch
import torch
from torch.nn import Parameter
from verl.utils.device import get_device_name
# Module logger; verbosity controlled via the VERL_LOGGING_LEVEL env var.
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class ParamMetaDict(dict):
    """
    Dict-like class for parameter management with metadata-based rebuild and tensor swap.
    Supports:
    - Rebuild of deleted parameters from saved metadata
    - Tensor Swap for parameters with shape changes (address stability for CUDA Graph)
    """
    def __init__(self, model: torch.nn.Module, device: Optional[torch.device] = None):
        """
        Initialize ParamMetaDict from a model.
        Args:
            model: vLLM model (may be wrapped in ModelRunner)
            device: Device for created parameters
        """
        super().__init__()
        self.device = device
        # Get the actual model (handle vLLM's wrapper structure)
        actual_model = model
        if hasattr(model, "model"):
            actual_model = model.model
        self._model = actual_model
        # Build mappings by scanning all modules
        # NOTE(review): layers expose _hf_param_meta only after the patched
        # process_weights_after_loading ran — presumably this dict is built
        # after model load; confirm against the caller.
        self._layer_meta_cache: dict[str, dict] = {}  # Cache of _hf_param_meta
        self._tensor_swap_layers: dict[str, dict] = {}  # Layers needing tensor swap
        self._build_mappings()
        # Initialize with current parameters
        for name, param in actual_model.named_parameters():
            self[name] = param
    def _build_mappings(self):
        """Build layer metadata cache for rebuild and tensor swap."""
        for layer_name, module in self._model.named_modules():
            # Check for _hf_param_meta which indicates this layer has HF format params
            if hasattr(module, "_hf_param_meta"):
                self._layer_meta_cache[layer_name] = {
                    "module": module,
                    "meta": module._hf_param_meta,
                }
                # Check for tensor swap layers (weight_scale with shape change)
                if "weight_scale" in module._hf_param_meta:
                    marlin_refs = getattr(module, "_marlin_tensor_refs", {})
                    if "weight_scale" in marlin_refs:
                        self._tensor_swap_layers[layer_name] = {
                            "module": module,
                            "marlin_ref": marlin_refs["weight_scale"],
                            "hf_meta": module._hf_param_meta["weight_scale"],
                        }
                # MoE layers (w13_weight_scale, w2_weight_scale)
                # NOTE(review): the w2 check is nested inside the w13 branch,
                # so w2 swap info is only recorded when w13 meta exists —
                # presumably both are always saved together; verify.
                if "w13_weight_scale" in module._hf_param_meta:
                    marlin_refs = getattr(module, "_marlin_tensor_refs", {})
                    if "w13_weight_scale" in marlin_refs:
                        self._tensor_swap_layers[f"{layer_name}.w13"] = {
                            "module": module,
                            "param_name": "w13_weight_scale",
                            "marlin_ref": marlin_refs["w13_weight_scale"],
                            "hf_meta": module._hf_param_meta["w13_weight_scale"],
                        }
                    if "w2_weight_scale" in marlin_refs:
                        self._tensor_swap_layers[f"{layer_name}.w2"] = {
                            "module": module,
                            "param_name": "w2_weight_scale",
                            "marlin_ref": marlin_refs["w2_weight_scale"],
                            "hf_meta": module._hf_param_meta["w2_weight_scale"],
                        }
    def _try_rebuild(self, key: str) -> Optional[Parameter]:
        """
        Try to rebuild a parameter from metadata if it was deleted.
        Args:
            key: Full parameter name
        Returns:
            Rebuilt parameter or None if cannot rebuild
        """
        # Extract layer name and param name
        parts = key.rsplit(".", 1)
        if len(parts) != 2:
            return None
        layer_name, param_name = parts
        # Check if we have metadata for this layer
        if layer_name not in self._layer_meta_cache:
            return None
        cache_entry = self._layer_meta_cache[layer_name]
        module = cache_entry["module"]
        meta = cache_entry["meta"]
        # Check if this param needs rebuild
        if param_name not in meta:
            return None
        # Already exists on module?
        if hasattr(module, param_name):
            param = getattr(module, param_name)
            if param is not None:
                return param
        # Rebuild from metadata
        new_param = _create_param_from_meta(module, param_name, meta[param_name], self.device)
        module.register_parameter(param_name, new_param)
        return new_param
    def prepare_for_reload(self) -> None:
        """Replace Marlin-format tensors with HF-shape tensors for reload."""
        for layer_name, swap_info in self._tensor_swap_layers.items():
            module = swap_info["module"]
            # Dense layers store swap info without "param_name"; default applies.
            param_name = swap_info.get("param_name", "weight_scale")
            hf_meta = swap_info["hf_meta"]
            if hasattr(module, param_name):
                new_param = _create_param_from_meta(module, param_name, hf_meta, self.device)
                setattr(module, param_name, new_param)
    def __getitem__(self, key: str) -> Parameter:
        """Get parameter with rebuild support."""
        # Try standard lookup first
        if key in dict.keys(self):
            return super().__getitem__(key)
        # Try rebuild from metadata
        param = self._try_rebuild(key)
        if param is not None:
            # Cache the rebuilt parameter for subsequent lookups.
            self[key] = param
            return param
        raise KeyError(f"Parameter not found: {key}")
    def __contains__(self, key: str) -> bool:
        """Check if parameter exists (with rebuild check)."""
        if super().__contains__(key):
            return True
        # Check if can rebuild from metadata
        parts = key.rsplit(".", 1)
        if len(parts) == 2:
            layer_name, param_name = parts
            if layer_name in self._layer_meta_cache:
                meta = self._layer_meta_cache[layer_name]["meta"]
                if param_name in meta:
                    return True
        return False
    def get(self, key: str, default=None):
        """Get parameter with default."""
        try:
            return self[key]
        except KeyError:
            return default
def _create_param_from_meta(
module: torch.nn.Module,
param_name: str,
meta: dict,
device: Optional[torch.device] = None,
) -> Parameter:
"""Create a Parameter from saved metadata. Used by rebuild and tensor swap."""
shape = meta["shape"]
dtype = meta["dtype"]
dev = device or meta.get("device", get_device_name())
param_class = meta.get("param_class", Parameter)
weight_loaders = getattr(module, "_weight_loaders", {})
weight_loader = weight_loaders.get(param_name)
data = torch.empty(shape, dtype=dtype, device=dev)
try:
if param_class is not Parameter and weight_loader is not None:
kwargs = {"data": data, "weight_loader": weight_loader}
if "input_dim" in meta:
kwargs["input_dim"] = meta["input_dim"]
if "output_dim" in meta:
kwargs["output_dim"] = meta["output_dim"]
new_param = param_class(**kwargs)
else:
new_param = Parameter(data, requires_grad=False)
if weight_loader is not None:
new_param.weight_loader = weight_loader
except Exception as e:
logger.warning(f"Failed to create param {param_name} with class {param_class}: {e}, using Parameter")
new_param = Parameter(data, requires_grad=False)
if weight_loader is not None:
new_param.weight_loader = weight_loader
if "quant_method" in meta:
new_param.quant_method = meta["quant_method"]
return new_param
def save_param_meta(layer: torch.nn.Module, param_name: str):
    """Record shape/dtype/class metadata of ``layer.<param_name>`` so the
    parameter can be rebuilt later by ``_create_param_from_meta``."""
    if not hasattr(layer, "_hf_param_meta"):
        layer._hf_param_meta = {}
    param = getattr(layer, param_name, None)
    if param is None:
        # Nothing to record; the (possibly fresh) meta dict is kept.
        return
    entry = {
        "shape": tuple(param.shape),
        "dtype": param.dtype,
        "device": str(param.device),
        "param_class": type(param),  # Save the actual parameter class
    }
    # Preserve vLLM-specific sharding attributes when present.
    for attr, key in (("_input_dim", "input_dim"), ("_output_dim", "output_dim")):
        if hasattr(param, attr):
            entry[key] = getattr(param, attr)
    # MoE-specific: quant_method is required by the weight_loader.
    if hasattr(param, "quant_method"):
        entry["quant_method"] = param.quant_method
    layer._hf_param_meta[param_name] = entry
def _check_first_call(layer: torch.nn.Module) -> bool:
    """Return True exactly once: on the first process_weights call.

    Keeps a per-layer call counter so the patched process_weights functions
    can distinguish the initial load from subsequent reloads.
    """
    seen = getattr(layer, "_process_weights_call_count", 0)
    layer._process_weights_call_count = seen + 1
    return seen == 0
# Dense W4A16 Patches
def patched_w4a16_process_weights_after_loading(self, layer: torch.nn.Module) -> None:
    """Patched process_weights_after_loading for W4A16 Dense layer.

    First call: save HF-format metadata/loaders, build the Marlin workspace,
    and create the compute parameters. Later calls (weight reload): copy the
    re-converted Marlin tensors into the existing storages so tensor
    addresses stay stable (required for CUDA Graph reuse).
    """
    import vllm._custom_ops as ops
    from vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 import (
        marlin_make_workspace_new,
        marlin_permute_scales,
        nvfp4_marlin_process_global_scale,
        nvfp4_marlin_process_scales,
    )
    is_first_call = _check_first_call(layer)
    group_size = 16
    part_size_n = layer.output_size_per_partition
    part_size_k = layer.input_size_per_partition
    device = layer.weight_packed.device
    param_dtype = getattr(layer, "params_dtype", torch.float16)
    # Save metadata (first call only)
    if is_first_call:
        save_param_meta(layer, "weight_packed")
        save_param_meta(layer, "weight_global_scale")
        save_param_meta(layer, "weight_scale")
        # Keep the original weight loaders so rebuilt params stay loadable.
        if not hasattr(layer, "_weight_loaders"):
            layer._weight_loaders = {}
        for pname in ["weight_packed", "weight_global_scale", "weight_scale"]:
            param = getattr(layer, pname, None)
            if param is not None and hasattr(param, "weight_loader"):
                layer._weight_loaders[pname] = param.weight_loader
    # Get HF format data
    weight_packed_hf = layer.weight_packed.data
    weight_global_scale_hf = layer.weight_global_scale.data
    weight_scale_hf = layer.weight_scale.data
    # Create workspace (first call only)
    if is_first_call:
        layer.workspace = marlin_make_workspace_new(device)
    # Convert to Marlin format
    perm = torch.empty(0, dtype=torch.int, device=device)
    qweight = weight_packed_hf.view(torch.int32).T.contiguous()
    marlin_weight = ops.gptq_marlin_repack(
        b_q_weight=qweight,
        perm=perm,
        size_k=part_size_k,
        size_n=part_size_n,
        num_bits=4,
        is_a_8bit=False,
    )
    weight_scale = weight_scale_hf.T.contiguous().to(param_dtype)
    weight_scale_permuted = marlin_permute_scales(
        s=weight_scale,
        size_k=part_size_k,
        size_n=part_size_n,
        group_size=group_size,
        is_a_8bit=False,
    )
    marlin_weight_scale = nvfp4_marlin_process_scales(weight_scale_permuted)
    # Marlin global scale is the reciprocal of the max HF global scale.
    weight_scale_2_raw = (1.0 / weight_global_scale_hf.max()).to(param_dtype)
    marlin_weight_scale_2 = nvfp4_marlin_process_global_scale(weight_scale_2_raw)
    # Update compute parameters
    if is_first_call:
        layer.weight = Parameter(marlin_weight, requires_grad=False)
        layer.weight_scale = Parameter(marlin_weight_scale, requires_grad=False)
        layer.weight_scale_2 = Parameter(marlin_weight_scale_2, requires_grad=False)
        # Remember the Marlin-shape scale storage so reloads can copy in place.
        if not hasattr(layer, "_marlin_tensor_refs"):
            layer._marlin_tensor_refs = {}
        layer._marlin_tensor_refs["weight_scale"] = layer.weight_scale.data
    else:
        # Reload path: in-place copies keep tensor addresses stable.
        layer.weight.data.copy_(marlin_weight)
        layer.weight_scale_2.data.copy_(marlin_weight_scale_2)
        marlin_scale_ref = layer._marlin_tensor_refs.get("weight_scale")
        if marlin_scale_ref is not None:
            marlin_scale_ref.copy_(marlin_weight_scale)
            layer.weight_scale = Parameter(marlin_scale_ref, requires_grad=False)
        else:
            logger.warning("W4A16: _marlin_tensor_refs['weight_scale'] not found")
            layer.weight_scale = Parameter(marlin_weight_scale, requires_grad=False)
    # Delete HF parameters
    # (weight_scale itself was replaced above, not deleted; it can be swapped
    # back to HF shape via ParamMetaDict.prepare_for_reload.)
    if hasattr(layer, "weight_packed"):
        delattr(layer, "weight_packed")
    if hasattr(layer, "weight_global_scale"):
        delattr(layer, "weight_global_scale")
def patched_w4a4_process_weights_after_loading(self, layer: torch.nn.Module) -> None:
    """Patched process_weights_after_loading for W4A4 Dense (all backends).

    First call: records HF-format parameter metadata and weight loaders (for
    later rebuild/reload), creates new compute Parameters, and caches their
    backing tensors in ``layer._marlin_tensor_refs``.
    Later calls: converts the freshly loaded HF tensors again and copies the
    results into the cached tensors in place, so existing references to the
    old storages (e.g. captured elsewhere) remain valid.
    """
    from vllm.model_executor.layers.quantization.utils.quant_utils import swizzle_blockscale
    is_first_call = _check_first_call(layer)
    # Parameter names as produced by the HF checkpoint loader.
    _W4A4_HF_PARAMS = ["weight_packed", "weight_scale", "weight_global_scale", "input_global_scale"]
    if is_first_call:
        # Save shape/dtype metadata and the original weight_loader callbacks so
        # the HF-format parameters can be rebuilt before a future weight reload.
        for pname in _W4A4_HF_PARAMS:
            save_param_meta(layer, pname)
        if not hasattr(layer, "_weight_loaders"):
            layer._weight_loaders = {}
        for pname in _W4A4_HF_PARAMS:
            param = getattr(layer, pname, None)
            if param is not None and hasattr(param, "weight_loader"):
                layer._weight_loaders[pname] = param.weight_loader
    weight_packed_data = layer.weight_packed.data
    weight_scale_data = layer.weight_scale.data
    input_global_scale_data = layer.input_global_scale.data
    weight_global_scale_data = layer.weight_global_scale.data
    # Collapse the (possibly per-shard) global scales into single scalars.
    global_input_scale = input_global_scale_data.max().to(torch.float32)
    global_weight_scale = weight_global_scale_data.max().to(torch.float32)
    if self.backend == "flashinfer-trtllm":
        from flashinfer import shuffle_matrix_a, shuffle_matrix_sf_a
        # TRT-LLM kernels expect shuffled weight and scale-factor layouts.
        epilogue_tile_m = 128
        processed_weight = shuffle_matrix_a(weight_packed_data.view(torch.uint8), epilogue_tile_m)
        processed_weight_scale = (
            shuffle_matrix_sf_a(weight_scale_data.view(torch.uint8), epilogue_tile_m)
            .reshape(weight_scale_data.shape)
            .view(torch.float8_e4m3fn)
        )
    elif self.backend == "fbgemm":
        # fbgemm consumes a flat uint8 view of the swizzled block scales.
        processed_weight_scale = swizzle_blockscale(weight_scale_data).view(-1).view(torch.uint8)
        processed_weight = weight_packed_data
    else:
        # cutlass / flashinfer-cutlass
        processed_weight_scale = swizzle_blockscale(weight_scale_data)
        processed_weight = weight_packed_data
    # Combined dequantization factor applied in the matmul epilogue.
    alpha = 1.0 / (global_input_scale * global_weight_scale)
    if is_first_call:
        layer.weight_packed = Parameter(processed_weight, requires_grad=False)
        layer.weight_scale = Parameter(processed_weight_scale, requires_grad=False)
        layer.input_global_scale = Parameter(global_input_scale, requires_grad=False)
        layer.weight_global_scale = Parameter(global_weight_scale, requires_grad=False)
        layer.alpha = Parameter(alpha, requires_grad=False)
        # Cache the compute-format tensors so reloads can update them in place.
        if not hasattr(layer, "_marlin_tensor_refs"):
            layer._marlin_tensor_refs = {}
        layer._marlin_tensor_refs["weight_packed"] = layer.weight_packed.data
        layer._marlin_tensor_refs["weight_scale"] = layer.weight_scale.data
        layer._marlin_tensor_refs["input_global_scale"] = layer.input_global_scale.data
        layer._marlin_tensor_refs["weight_global_scale"] = layer.weight_global_scale.data
        layer._marlin_tensor_refs["alpha"] = layer.alpha.data
    else:
        # Reload path: copy into the cached storages instead of rebinding.
        refs = layer._marlin_tensor_refs
        for ref_name, new_data in [
            ("weight_packed", processed_weight),
            ("weight_scale", processed_weight_scale),
            ("input_global_scale", global_input_scale),
            ("weight_global_scale", global_weight_scale),
            ("alpha", alpha),
        ]:
            ref = refs.get(ref_name)
            if ref is not None:
                ref.copy_(new_data)
                setattr(layer, ref_name, Parameter(ref, requires_grad=False))
            else:
                # Fallback keeps the layer usable, but storage identity is lost.
                logger.warning(f"W4A4: _marlin_tensor_refs['{ref_name}'] not found, creating new Parameter")
                setattr(
                    layer,
                    ref_name,
                    Parameter(
                        new_data.clone() if isinstance(new_data, torch.Tensor) else torch.tensor(new_data),
                        requires_grad=False,
                    ),
                )
def _marlin_repack_experts(packed, perm, size_k, size_n, num_experts):
    """Repack each expert's 4-bit packed weight into Marlin layout and stack the results."""
    import vllm._custom_ops as ops

    repacked = [
        ops.gptq_marlin_repack(
            b_q_weight=packed[expert].view(torch.int32).T.contiguous(),
            perm=perm,
            size_k=size_k,
            size_n=size_n,
            num_bits=4,
            is_a_8bit=False,
        )
        for expert in range(num_experts)
    ]
    return torch.stack(repacked)
def _marlin_process_scales_experts(scale_hf, param_dtype, size_k, size_n, group_size, num_experts):
    """Permute each expert's block scales into Marlin layout, post-process, and stack."""
    from vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 import (
        marlin_permute_scales,
        nvfp4_marlin_process_scales,
    )

    casted = scale_hf.to(param_dtype)
    processed = [
        nvfp4_marlin_process_scales(
            marlin_permute_scales(
                s=casted[expert].T,
                size_k=size_k,
                size_n=size_n,
                group_size=group_size,
                is_a_8bit=False,
            )
        )
        for expert in range(num_experts)
    ]
    return torch.stack(processed)
def _process_nvfp4_moe_marlin(self, layer: torch.nn.Module, is_first_call: bool) -> None:
    """Process MoE layer with MARLIN backend (W4A16).

    Repacks every expert's packed weights and block scales into Marlin layout.
    First call installs new compute Parameters and caches the scale storages in
    ``layer._marlin_tensor_refs``; later calls copy the converted data into the
    cached storages in place so existing references stay valid.
    """
    from vllm.model_executor.layers.fused_moe.oracle.nvfp4 import make_nvfp4_moe_kernel
    from vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 import (
        marlin_make_workspace_new,
        nvfp4_marlin_process_global_scale,
    )
    # Group size forwarded to marlin_permute_scales for the block scales.
    group_size = 16
    e = layer.num_experts
    k = layer.hidden_size
    n = layer.intermediate_size_per_partition
    device = layer.w13_weight_packed.device
    param_dtype = layer.params_dtype
    # Gated activations fuse w1 and w3 into a single w13 tensor (2 shards).
    w13_num_shards = 2 if self.moe.is_act_and_mul else 1
    if is_first_call:
        # NOTE(review): the dense W4A16 path calls marlin_make_workspace_new(device)
        # without the second argument — confirm the difference is intended.
        layer.workspace = marlin_make_workspace_new(device, 4)
    perm = torch.empty(0, dtype=torch.int, device=device)
    # Only column 0 of w13_weight_global_scale is used below, so w1/w3 must agree.
    if self.moe.is_act_and_mul and not torch.allclose(
        layer.w13_weight_global_scale[:, 0], layer.w13_weight_global_scale[:, 1]
    ):
        logger.warning("w1_weight_global_scale must match w3_weight_global_scale. Accuracy may be affected.")
    size_n_w13, size_k_w13 = n * w13_num_shards, k
    size_n_w2, size_k_w2 = k, n
    w13_weight_marlin = _marlin_repack_experts(layer.w13_weight_packed.data, perm, size_k_w13, size_n_w13, e)
    w2_weight_marlin = _marlin_repack_experts(layer.w2_weight_packed.data, perm, size_k_w2, size_n_w2, e)
    w13_weight_scale_marlin = _marlin_process_scales_experts(
        layer.w13_weight_scale.data, param_dtype, size_k_w13, size_n_w13, group_size, e
    )
    w2_weight_scale_marlin = _marlin_process_scales_experts(
        layer.w2_weight_scale.data, param_dtype, size_k_w2, size_n_w2, group_size, e
    )
    # Process global scales (reciprocal of the HF global scale).
    w13_scale_2 = 1.0 / layer.w13_weight_global_scale[:, 0]
    w2_scale_2 = 1.0 / layer.w2_weight_global_scale.data
    w13_scale_2_processed = nvfp4_marlin_process_global_scale(w13_scale_2.to(param_dtype))
    w2_scale_2_processed = nvfp4_marlin_process_global_scale(w2_scale_2.to(param_dtype))
    # Update parameters
    if is_first_call:
        layer.w13_weight = Parameter(w13_weight_marlin, requires_grad=False)
        layer.w2_weight = Parameter(w2_weight_marlin, requires_grad=False)
        layer.w13_weight_scale = Parameter(w13_weight_scale_marlin, requires_grad=False)
        layer.w2_weight_scale = Parameter(w2_weight_scale_marlin, requires_grad=False)
        layer.w13_weight_scale_2 = Parameter(w13_scale_2_processed, requires_grad=False)
        layer.w2_weight_scale_2 = Parameter(w2_scale_2_processed, requires_grad=False)
        # Cache the scale storages so the reload path can update them in place.
        if not hasattr(layer, "_marlin_tensor_refs"):
            layer._marlin_tensor_refs = {}
        layer._marlin_tensor_refs["w13_weight_scale"] = layer.w13_weight_scale.data
        layer._marlin_tensor_refs["w2_weight_scale"] = layer.w2_weight_scale.data
    else:
        # Reload path: in-place copies keep tensor storage identity stable.
        layer.w13_weight.data.copy_(w13_weight_marlin)
        layer.w2_weight.data.copy_(w2_weight_marlin)
        layer.w13_weight_scale_2.data.copy_(w13_scale_2_processed)
        layer.w2_weight_scale_2.data.copy_(w2_scale_2_processed)
        w13_marlin_ref = layer._marlin_tensor_refs.get("w13_weight_scale")
        w2_marlin_ref = layer._marlin_tensor_refs.get("w2_weight_scale")
        if w13_marlin_ref is not None:
            w13_marlin_ref.copy_(w13_weight_scale_marlin)
            layer.w13_weight_scale = Parameter(w13_marlin_ref, requires_grad=False)
        else:
            logger.warning("MoE: _marlin_tensor_refs['w13_weight_scale'] not found")
            layer.w13_weight_scale.data.copy_(w13_weight_scale_marlin)
        if w2_marlin_ref is not None:
            w2_marlin_ref.copy_(w2_weight_scale_marlin)
            layer.w2_weight_scale = Parameter(w2_marlin_ref, requires_grad=False)
        else:
            logger.warning("MoE: _marlin_tensor_refs['w2_weight_scale'] not found")
            layer.w2_weight_scale.data.copy_(w2_weight_scale_marlin)
    # W4A16: activations are not quantized, so no input scales are needed.
    layer.w13_input_scale = None
    layer.w2_input_scale = None
    # Initialize kernel
    self.moe_quant_config = self.get_fused_moe_quant_config(layer)
    if self.moe_quant_config is not None and (
        (not self.moe.moe_parallel_config.use_all2all_kernels) or self.moe.moe_parallel_config.use_naive_all2all_kernels
    ):
        self.kernel = make_nvfp4_moe_kernel(
            moe_quant_config=self.moe_quant_config,
            moe_config=self.moe,
            experts_cls=self.experts_cls,
        )
def _process_nvfp4_moe_flashinfer_cutlass(self, layer: torch.nn.Module, is_first_call: bool) -> None:
    """Process MoE layer with FlashInfer/CUTLASS backend (W4A4).

    Converts HF-format packed weights and scales into the kernel format via
    convert_to_nvfp4_moe_kernel_format. First call installs fresh Parameters
    and caches the scale storages in ``layer._marlin_tensor_refs``; later calls
    copy the converted data into the cached storages in place.
    """
    from vllm.model_executor.layers.fused_moe.oracle.nvfp4 import (
        convert_to_nvfp4_moe_kernel_format,
        make_nvfp4_moe_kernel,
    )
    from vllm.model_executor.utils import replace_parameter
    w13_packed = layer.w13_weight_packed.data
    w2_packed = layer.w2_weight_packed.data
    w13_scale_hf = layer.w13_weight_scale.data
    w2_scale_hf = layer.w2_weight_scale.data
    # Only column 0 of w13_weight_global_scale is used below, so w1/w3 must agree.
    if self.moe.is_act_and_mul and not torch.allclose(
        layer.w13_weight_global_scale[:, 0], layer.w13_weight_global_scale[:, 1]
    ):
        logger.warning("w1_weight_global_scale must match w3_weight_global_scale. Accuracy may be affected.")
    w13_weight_global_scale = layer.w13_weight_global_scale[:, 0].contiguous()
    # Clone the HF buffers — presumably so the conversion output does not alias
    # the w13/w2_weight_packed tensors that are deleted afterwards; confirm.
    w13_temp = Parameter(w13_packed.clone(), requires_grad=False)
    w2_temp = Parameter(w2_packed.clone(), requires_grad=False)
    if is_first_call:
        layer.w13_weight = w13_temp
        layer.w2_weight = w2_temp
    # scale_2 / input-scale arguments are reciprocals of the HF global scales.
    (
        w13,
        w13_scale,
        w13_scale_2,
        a13_scale,
        w2,
        w2_scale,
        w2_scale_2,
        a2_scale,
    ) = convert_to_nvfp4_moe_kernel_format(
        nvfp4_backend=self.nvfp4_backend,
        layer=layer,
        w13=w13_temp,
        w13_scale=w13_scale_hf,
        w13_scale_2=(1.0 / w13_weight_global_scale),
        a13_scale=(1.0 / layer.w13_input_global_scale),
        w2=w2_temp,
        w2_scale=w2_scale_hf,
        w2_scale_2=(1.0 / layer.w2_weight_global_scale),
        a2_scale=(1.0 / layer.w2_input_global_scale),
        is_act_and_mul=self.moe.is_act_and_mul,
    )
    # Update parameters
    if is_first_call:
        replace_parameter(layer, "w13_weight", w13)
        replace_parameter(layer, "w2_weight", w2)
        layer.w13_weight_scale = Parameter(w13_scale, requires_grad=False)
        layer.w2_weight_scale = Parameter(w2_scale, requires_grad=False)
        # Cache the scale storages so the reload path can update them in place.
        if not hasattr(layer, "_marlin_tensor_refs"):
            layer._marlin_tensor_refs = {}
        layer._marlin_tensor_refs["w13_weight_scale"] = layer.w13_weight_scale.data
        layer._marlin_tensor_refs["w2_weight_scale"] = layer.w2_weight_scale.data
    else:
        # Reload path: in-place copies keep tensor storage identity stable.
        layer.w13_weight.data.copy_(w13.data)
        layer.w2_weight.data.copy_(w2.data)
        w13_scale_ref = layer._marlin_tensor_refs.get("w13_weight_scale")
        w2_scale_ref = layer._marlin_tensor_refs.get("w2_weight_scale")
        if w13_scale_ref is not None:
            w13_scale_ref.copy_(w13_scale)
            layer.w13_weight_scale = Parameter(w13_scale_ref, requires_grad=False)
        else:
            logger.warning("MoE W4A4: _marlin_tensor_refs['w13_weight_scale'] not found")
            layer.w13_weight_scale.data.copy_(w13_scale)
        if w2_scale_ref is not None:
            w2_scale_ref.copy_(w2_scale)
            layer.w2_weight_scale = Parameter(w2_scale_ref, requires_grad=False)
        else:
            logger.warning("MoE W4A4: _marlin_tensor_refs['w2_weight_scale'] not found")
            layer.w2_weight_scale.data.copy_(w2_scale)
    layer.w13_weight_scale_2 = w13_scale_2
    layer.w2_weight_scale_2 = w2_scale_2
    layer.w13_input_scale = a13_scale
    layer.w2_input_scale = a2_scale
    # Initialize kernel
    self.moe_quant_config = self.get_fused_moe_quant_config(layer)
    if self.moe_quant_config is not None and (
        (not self.moe.moe_parallel_config.use_all2all_kernels) or self.moe.moe_parallel_config.use_naive_all2all_kernels
    ):
        self.kernel = make_nvfp4_moe_kernel(
            moe_quant_config=self.moe_quant_config,
            moe_config=self.moe,
            experts_cls=self.experts_cls,
        )
# MoE NVFP4 Patches (entry points)
def patched_nvfp4_moe_process_weights_after_loading(self, layer: torch.nn.Module) -> None:
    """Patched process_weights_after_loading for NVFP4 MoE layer.

    On the first call, saves HF-format parameter metadata and weight loaders so
    the parameters can be rebuilt for later reloads; then dispatches to the
    Marlin (W4A16) or FlashInfer/CUTLASS (W4A4) conversion path, and finally
    drops the HF-format packed weights.
    """
    from vllm.model_executor.layers.fused_moe.oracle.nvfp4 import NvFp4MoeBackend
    is_first_call = _check_first_call(layer)
    # Save metadata (first call only)
    if is_first_call:
        save_param_meta(layer, "w13_weight_packed")
        save_param_meta(layer, "w2_weight_packed")
        save_param_meta(layer, "w13_weight_scale")
        save_param_meta(layer, "w2_weight_scale")
        # Keep the original weight_loader callbacks so rebuilt HF-format
        # parameters can be loaded again on subsequent weight syncs.
        if not hasattr(layer, "_weight_loaders"):
            layer._weight_loaders = {}
        for pname in ["w13_weight_packed", "w2_weight_packed", "w13_weight_scale", "w2_weight_scale"]:
            param = getattr(layer, pname, None)
            if param is not None and hasattr(param, "weight_loader"):
                layer._weight_loaders[pname] = param.weight_loader
    is_marlin = self.nvfp4_backend == NvFp4MoeBackend.MARLIN
    if is_marlin:
        _process_nvfp4_moe_marlin(self, layer, is_first_call)
    else:
        _process_nvfp4_moe_flashinfer_cutlass(self, layer, is_first_call)
    # Delete HF parameters (the compute-format copies created above remain).
    if hasattr(layer, "w13_weight_packed"):
        delattr(layer, "w13_weight_packed")
    if hasattr(layer, "w2_weight_packed"):
        delattr(layer, "w2_weight_packed")
# (dotted-path target, replacement function) pairs consumed by apply_qat_patches()
# below via patch() — presumably unittest.mock.patch; confirm the import at the
# top of this module.
_PATCH_TARGETS = [
    # Dense W4A16
    (
        "vllm.model_executor.layers.quantization.compressed_tensors.schemes."
        "compressed_tensors_w4a16_nvfp4.CompressedTensorsW4A16Fp4.process_weights_after_loading",
        patched_w4a16_process_weights_after_loading,
    ),
    # Dense W4A4
    (
        "vllm.model_executor.layers.quantization.compressed_tensors.schemes."
        "compressed_tensors_w4a4_nvfp4.CompressedTensorsW4A4Fp4.process_weights_after_loading",
        patched_w4a4_process_weights_after_loading,
    ),
    # MoE NVFP4
    (
        "vllm.model_executor.layers.quantization.compressed_tensors."
        "compressed_tensors_moe.CompressedTensorsW4A4Nvfp4MoEMethod.process_weights_after_loading",
        patched_nvfp4_moe_process_weights_after_loading,
    ),
]
# Live patcher objects; a non-empty list means the patches are currently applied.
_applied_patches = []
def apply_qat_patches():
    """Apply NVFP4 patches to support dynamic weight updates. Call before model loading."""
    global _applied_patches
    # Idempotent: a non-empty registry means patching already happened.
    if _applied_patches:
        logger.warning("QAT patches already applied, skipping")
        return _applied_patches
    logger.info("Applying NVFP4 patches for dynamic weight loading...")
    for target, replacement in _PATCH_TARGETS:
        patcher = patch(target, replacement)
        # Record before starting, so a failed start still leaves the patcher tracked.
        _applied_patches.append(patcher)
        patcher.start()
    logger.info(f"Applied {len(_applied_patches)} NVFP4 patches for dynamic weight loading")
    return _applied_patches
def prepare_qat_for_load_weights(model, device=None):
    """
    Prepare QAT model for weight loading. Call ONCE before multi-bucket weight loading.

    Rebuilds the HF-format parameters that the patched
    process_weights_after_loading hooks deleted (W4A16) or overwrote (W4A4),
    using the metadata saved on the first load, so load_weights can run again.

    Args:
        model: vLLM model
        device: Device for created parameters
    """
    # vLLM runner wrappers expose the real module as `.model`.
    inner_model = model
    if hasattr(model, "model"):
        inner_model = model.model
    param_meta = ParamMetaDict(inner_model, device=device)
    param_meta.prepare_for_reload()
    logger.info(f"[prepare_qat] Tensor swap prepared for {len(param_meta._tensor_swap_layers)} layers")
    # Rebuild deleted (W4A16) or overwritten (W4A4) params back to HF format
    rebuilt_count = 0
    for layer_name, cache_entry in param_meta._layer_meta_cache.items():
        module = cache_entry["module"]
        for param_name, pm in cache_entry["meta"].items():
            existing = getattr(module, param_name, None)
            if existing is not None:
                hf_shape = tuple(pm["shape"])
                hf_dtype = pm["dtype"]
                # Already in HF format with a live weight_loader: nothing to do.
                if (
                    tuple(existing.shape) == hf_shape
                    and existing.dtype == hf_dtype
                    and hasattr(existing, "weight_loader")
                ):
                    continue
            new_param = _create_param_from_meta(module, param_name, pm, device)
            module.register_parameter(param_name, new_param)
            rebuilt_count += 1
    logger.info(f"[prepare_qat] Rebuilt {rebuilt_count} parameters")
    # NOTE(review): stashed for later restore steps elsewhere in the QAT flow.
    inner_model._param_meta_for_restore = param_meta
    return param_meta
def manual_process_weights_after_loading(model):
    """Trigger weight post-processing for all quantized layers after load_weights.

    Dense layers (with a `scheme`) and MoE-style layers (with a `quant_method`
    but no `scheme`) each get their process_weights_after_loading hook invoked.
    Returns the total number of layers processed.
    """
    dense_count = 0
    moe_count = 0
    actual_model = model.model if hasattr(model, "model") else model
    for submodule in actual_model.modules():
        if hasattr(submodule, "scheme"):
            submodule.scheme.process_weights_after_loading(submodule)
            dense_count += 1
            continue
        quant_method = getattr(submodule, "quant_method", None)
        if quant_method is None or not hasattr(quant_method, "process_weights_after_loading"):
            continue
        # Skip KV cache quantization methods
        if "KVCache" in quant_method.__class__.__name__:
            continue
        quant_method.process_weights_after_loading(submodule)
        moe_count += 1
    logger.debug(f"Processed {dense_count} dense layers, {moe_count} MoE layers")
    return dense_count + moe_count
# Public API: the three entry points callers use for QAT weight reloading.
__all__ = [
    "apply_qat_patches",
    "prepare_qat_for_load_weights",
    "manual_process_weights_after_loading",
]
| verl__utils__qat__vllm_patch.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains commonly used utilities for ray
"""
import asyncio
import concurrent.futures
import functools
import inspect
import os
from typing import Any, Optional
import ray
def ray_noset_visible_devices(env_vars=os.environ):
    """Return True when Ray is configured NOT to set any accelerator visible-devices env var.

    Refer to
    https://github.com/ray-project/ray/blob/161849364a784442cc659fb9780f1a6adee85fce/python/ray/_private/accelerators/nvidia_gpu.py#L95-L96
    https://github.com/ray-project/ray/blob/161849364a784442cc659fb9780f1a6adee85fce/python/ray/_private/accelerators/amd_gpu.py#L102-L103
    https://github.com/ray-project/ray/blob/3b9e729f6a669ffd85190f901f5e262af79771b0/python/ray/_private/accelerators/amd_gpu.py#L114-L115
    https://github.com/ray-project/ray/blob/161849364a784442cc659fb9780f1a6adee85fce/python/ray/_private/accelerators/npu.py#L94-L95
    https://github.com/ray-project/ray/blob/161849364a784442cc659fb9780f1a6adee85fce/python/ray/_private/accelerators/hpu.py#L116-L117
    https://github.com/ray-project/ray/blob/161849364a784442cc659fb9780f1a6adee85fce/python/ray/_private/accelerators/neuron.py#L108-L109
    https://github.com/ray-project/ray/blob/161849364a784442cc659fb9780f1a6adee85fce/python/ray/_private/accelerators/tpu.py#L171-L172
    https://github.com/ray-project/ray/blob/161849364a784442cc659fb9780f1a6adee85fce/python/ray/_private/accelerators/intel_gpu.py#L97-L98
    """
    noset_flags = (
        "RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES",
        "RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES",
        "RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES",
        "RAY_EXPERIMENTAL_NOSET_ASCEND_RT_VISIBLE_DEVICES",
        "RAY_EXPERIMENTAL_NOSET_HABANA_VISIBLE_MODULES",
        "RAY_EXPERIMENTAL_NOSET_NEURON_RT_VISIBLE_CORES",
        "RAY_EXPERIMENTAL_NOSET_TPU_VISIBLE_CHIPS",
        "RAY_EXPERIMENTAL_NOSET_ONEAPI_DEVICE_SELECTOR",
    )
    return any(env_vars.get(flag) for flag in noset_flags)
def parallel_put(data_list: list[Any], max_workers: Optional[int] = None):
    """
    Puts a list of data into the Ray object store in parallel using a thread pool.

    Args:
        data_list (List[Any]): A list of Python objects to be put into the Ray object store.
        max_workers (int, optional): The maximum number of worker threads to use.
            Defaults to min(len(data_list), 16).

    Returns:
        List[ray.ObjectRef]: Object references in the same order as data_list.
    """
    assert len(data_list) > 0, "data_list must not be empty"

    def _put_one(idx, item):
        # Pair each ref with its index so order can be restored afterwards.
        return idx, ray.put(item)

    workers = min(len(data_list), 16) if max_workers is None else max_workers
    refs = [None] * len(data_list)
    with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as pool:
        pending = [pool.submit(_put_one, idx, item) for idx, item in enumerate(data_list)]
        for fut in concurrent.futures.as_completed(pending):
            idx, ref = fut.result()
            refs[idx] = ref
    return refs
def get_event_loop():
    """Return the current thread's event loop, creating and installing one if absent."""
    try:
        return asyncio.get_event_loop()
    except RuntimeError:
        created = asyncio.new_event_loop()
        asyncio.set_event_loop(created)
        return created
def auto_await(func):
    """Auto await a coroutine function.

    When the wrapper is invoked from inside a running event loop, the coroutine
    object is returned for the caller to await. Invoked from synchronous code,
    it blocks and runs the coroutine to completion. Non-coroutine results pass
    through unchanged.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        produced = func(*args, **kwargs)
        if not inspect.iscoroutine(produced):
            return produced
        try:
            active_loop = asyncio.get_running_loop()
        except RuntimeError:
            active_loop = None
        if active_loop is not None and active_loop.is_running():
            return produced
        return asyncio.run(produced)

    return wrapper
| verl__utils__ray_utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import ray
from cupy.cuda.nccl import NcclCommunicator, get_unique_id
from ray.util import list_named_actors
@ray.remote
class NCCLIDStore:
    """Named Ray actor that holds a NCCL unique id for group rendezvous.

    Rank 0 creates it under the group name; other ranks look it up by name
    and fetch the id via ``get``.
    """

    def __init__(self, nccl_id):
        # The unique id produced on rank 0 (via cupy's get_unique_id()).
        self._nccl_id = nccl_id

    def get(self):
        """Return the stored NCCL unique id."""
        return self._nccl_id
def get_nccl_id_store_by_name(name):
    """Return the handle of the named actor registered as `name`, or None.

    Searches every namespace. None is returned both when no actor matches and
    when the name is ambiguous (multiple matches, which is logged as a warning).
    """
    candidates = [info for info in list_named_actors(all_namespaces=True) if info.get("name", None) == name]
    if len(candidates) == 1:
        return ray.get_actor(**candidates[0])
    if len(candidates) > 1:
        logging.warning("multiple actors with same name found: %s", candidates)
    else:
        logging.info("failed to get any actor named %s", name)
    return None
def create_nccl_communicator_in_ray(
    rank: int, world_size: int, group_name: str, max_retries: int = 100, interval_s: int = 5
):
    """Create a cupy NcclCommunicator across Ray workers via a named-actor rendezvous.

    Rank 0 generates the NCCL unique id and publishes it through a NCCLIDStore
    actor named ``group_name``; other ranks poll for that actor up to
    ``max_retries`` times, sleeping ``interval_s`` seconds between attempts,
    then join with the fetched id.

    Returns:
        The constructed NcclCommunicator. NOTE(review): a non-zero rank
        implicitly returns None when the actor never appears within the retry
        budget — callers should treat None as a rendezvous failure.
    """
    if rank == 0:
        nccl_id = get_unique_id()
        # Named actor makes the id discoverable by the other ranks.
        nccl_id_store = NCCLIDStore.options(name=group_name).remote(nccl_id)
        # Round-trip get() confirms the actor is alive before proceeding.
        assert ray.get(nccl_id_store.get.remote()) == nccl_id
        communicator = NcclCommunicator(
            ndev=world_size,
            commId=nccl_id,
            rank=0,
        )
        return communicator
    else:
        for i in range(max_retries):
            nccl_id_store = get_nccl_id_store_by_name(group_name)
            if nccl_id_store is not None:
                logging.info("nccl_id_store %s got", group_name)
                nccl_id = ray.get(nccl_id_store.get.remote())
                logging.info("nccl id for %s got: %s", group_name, nccl_id)
                communicator = NcclCommunicator(
                    ndev=world_size,
                    commId=nccl_id,
                    rank=rank,
                )
                return communicator
            logging.info("failed to get nccl_id for %d time, sleep for %d seconds", i + 1, interval_s)
            time.sleep(interval_s)
| verl__utils__rendezvous__ray_backend.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from mathruler.grader import extract_boxed_content, grade_answer
def format_reward(predict_str: str) -> float:
    """Return 1.0 when the prediction is a <think>...</think> section followed by a \\boxed{} answer, else 0.0."""
    expected_shape = re.compile(r"<think>.*</think>.*\\boxed\{.*\}.*", re.DOTALL)
    return 1.0 if expected_shape.fullmatch(predict_str) else 0.0
def acc_reward(predict_str: str, ground_truth: str, use_boxed: bool = True) -> float:
    """Accuracy reward: 1.0 when the (optionally boxed-extracted) answer matches the ground truth, else 0.0."""
    candidate = extract_boxed_content(predict_str) if use_boxed else predict_str
    return 1.0 if grade_answer(candidate, ground_truth) else 0.0
def compute_score(predict_str: str, ground_truth: str, use_boxed: bool = True, format_score: float = 0.1) -> float:
    """Blend accuracy and format rewards: (1 - format_score) * acc + format_score * fmt."""
    accuracy = acc_reward(predict_str, ground_truth, use_boxed)
    formatting = format_reward(predict_str)
    return (1.0 - format_score) * accuracy + format_score * formatting
| verl__utils__reward_score__geo3k.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
_SOLUTION_CLIP_CHARS = 300


def extract_solution(solution_str, method="strict"):
    """Extract the final numeric answer from a GSM8K solution string.

    Args:
        solution_str: the model's solution text.
        method: "strict" requires the canonical "#### <number>" format;
            "flexible" falls back to the last number-like token.

    Returns:
        The extracted answer string with thousands separators removed, or
        None when no valid answer is found.
    """
    assert method in ["strict", "flexible"]

    # Optimization: Regular expression matching on very long strings can be slow.
    # For math problems, the final answer is usually at the end.
    # We only match on the last 300 characters, which is a safe approximation for 300 tokens.
    if len(solution_str) > _SOLUTION_CLIP_CHARS:
        solution_str = solution_str[-_SOLUTION_CLIP_CHARS:]

    if method == "strict":
        # this also tests the formatting of the model
        solutions = re.findall("#### (\\-?[0-9\\.\\,]+)", solution_str)
        if len(solutions) == 0:
            return None
        # take the last solution
        return solutions[-1].replace(",", "").replace("$", "")

    # flexible: take the last number-like token that is not empty or "."
    answers = re.findall("(\\-?[0-9\\.\\,]+)", solution_str)
    invalid_str = ["", "."]
    for candidate in reversed(answers):
        if candidate not in invalid_str:
            return candidate
    # Bug fix: the original loop leaked its variable, so when every match was
    # invalid it returned the first match (e.g. ".") instead of None.
    return None
def compute_score(solution_str, ground_truth, method="strict", format_score=0.0, score=1.0):
    """The scoring function for GSM8k.

    Reference: Trung, Luong, et al. "Reft: Reasoning with reinforced fine-tuning." Proceedings of the 62nd Annual
    Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 2024.

    Args:
        solution_str: the solution text
        ground_truth: the ground truth
        method: the method to extract the solution, choices are 'strict' and 'flexible'
        format_score: the score awarded for a well-formed but wrong answer
        score: the score awarded for the correct answer
    """
    answer = extract_solution(solution_str=solution_str, method=method)
    if answer is None:
        return 0
    return score if answer == ground_truth else format_score
| verl__utils__reward_score__gsm8k.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/hendrycks_math/utils.py
import re
from typing import Optional
def last_boxed_only_string(string: str) -> Optional[str]:
    """Extract the last LaTeX boxed expression from a string.

    Args:
        string: Input string containing LaTeX code

    Returns:
        The last "\\boxed{...}" expression (with balanced braces), or None
        when no boxed expression exists or its braces never close.
    """
    start = string.rfind("\\boxed{")
    if start < 0:
        return None

    depth = 0
    for pos in range(start, len(string)):
        ch = string[pos]
        if ch == "{":
            depth += 1
        elif ch == "}":
            depth -= 1
            if depth == 0:
                return string[start : pos + 1]
    # Opening brace never balanced.
    return None
def remove_boxed(s: str) -> str:
    """Strip the surrounding "\\boxed{...}" wrapper and return its content.

    Args:
        s: String with format "\\boxed{content}"

    Returns:
        The content inside the boxed command.
    """
    prefix = "\\boxed{"
    assert s[: len(prefix)] == prefix, f"box error: {s}"
    assert s[-1] == "}", f"box error: {s}"
    return s[len(prefix) : -1]
# Constants for normalization
SUBSTITUTIONS = [
    ("an ", ""),
    ("a ", ""),
    (".$", "$"),
    ("\\$", ""),
    (r"\ ", ""),
    (" ", ""),
    ("mbox", "text"),
    (",\\text{and}", ","),
    ("\\text{and}", ","),
    ("\\text{m}", "\\text{}"),
]
REMOVED_EXPRESSIONS = [
    "square",
    "ways",
    "integers",
    "dollars",
    "mph",
    "inches",
    "hours",
    "km",
    "units",
    "\\ldots",
    "sue",
    "points",
    "feet",
    "minutes",
    "digits",
    "cents",
    "degrees",
    "cm",
    "gm",
    "pounds",
    "meters",
    "meals",
    "edges",
    "students",
    "childrentickets",
    "multiples",
    "\\text{s}",
    "\\text{.}",
    "\\text{\ns}",
    "\\text{}^2",
    "\\text{}^3",
    "\\text{\n}",
    "\\text{}",
    r"\mathrm{th}",
    r"^\circ",
    r"^{\circ}",
    r"\;",
    r",\!",
    "{,}",
    '"',
    "\\dots",
]


def normalize_final_answer(final_answer: str) -> str:
    """Normalize a final answer to a quantitative reasoning question.

    Args:
        final_answer: The answer string to normalize

    Returns:
        Normalized answer string
    """
    final_answer = final_answer.split("=")[-1]

    # Apply textual substitutions, then drop unit-like phrases.
    for old, new in SUBSTITUTIONS:
        final_answer = final_answer.replace(old, new)
    for removed in REMOVED_EXPRESSIONS:
        final_answer = final_answer.replace(removed, "")

    # Extract and normalize LaTeX math wrappers.
    for pattern, repl in (
        (r"(.*?)(\$)(.*?)(\$)(.*)", "$\\3$"),
        (r"(\\text\{)(.*?)(\})", "\\2"),
        (r"(\\textbf\{)(.*?)(\})", "\\2"),
        (r"(\\overline\{)(.*?)(\})", "\\2"),
        (r"(\\boxed\{)(.*)(\})", "\\2"),
    ):
        final_answer = re.sub(pattern, repl, final_answer)

    # Normalize shorthand TeX:
    #   \fracab -> \frac{a}{b},  \fracabc -> \frac{a}{b}c
    #   \sqrta  -> \sqrt{a},     \sqrtab  -> sqrt{a}b
    final_answer = re.sub(r"(frac)([^{])(.)", "frac{\\2}{\\3}", final_answer)
    final_answer = re.sub(r"(sqrt)([^{])", "sqrt{\\2}", final_answer)
    final_answer = final_answer.replace("$", "")

    # Strip thousands separators from plain numbers.
    if final_answer.replace(",", "").isdigit():
        final_answer = final_answer.replace(",", "")

    return final_answer.strip()
def is_correct_minerva(
    solution_str: str, gt: str, gt_need_extract: bool = False, answer_pattern: str = r"(?i)Answer\s*:\s*([^\n]+)"
) -> tuple[bool, str]:
    """Check if the solution is correct according to Minerva criteria.

    Pulls the last "Answer: ..." match from the solution, normalizes both it
    and the ground truth, and compares them.

    Args:
        solution_str: The solution string to check
        gt: The ground truth answer
        gt_need_extract: Whether the ground truth needs boxed extraction first
        answer_pattern: Regex pattern used to extract the answer

    Returns:
        Tuple of (is_correct, normalized_prediction)
    """
    found = re.findall(answer_pattern, solution_str)
    pred = normalize_final_answer(found[-1] if found else "[INVALID]")

    # Normalize the ground truth, extracting from a boxed expression if asked.
    gt = normalize_final_answer(remove_boxed(last_boxed_only_string(gt)) if gt_need_extract else gt)

    return (pred == gt), pred
def is_correct_strict_box(
    pred: str, gt: str, pause_tokens_index: Optional[list[int]] = None
) -> tuple[int, Optional[str]]:
    """Check if the prediction is correct using strict boxed answer criteria.

    Args:
        pred: The prediction string
        gt: The ground truth answer
        pause_tokens_index: Indices of pause tokens

    Returns:
        Tuple of (score, extracted_prediction) where score is 1 for an exact
        boxed match and -1 otherwise.
    """
    # Only the 100 characters around/after the last pause token are inspected.
    if pause_tokens_index is not None:
        assert len(pause_tokens_index) == 4
        window = pred[pause_tokens_index[-1] - 100 :]
    else:
        window = pred[-100:]

    boxed = last_boxed_only_string(window)
    extracted_pred = remove_boxed(boxed) if boxed is not None else None
    return (1 if extracted_pred == gt else -1), extracted_pred
def verify(
    solution_str: str, answer: str, strict_box_verify: bool = False, pause_tokens_index: Optional[list[int]] = None
) -> tuple[bool, Optional[str]]:
    """Verify if the solution is correct.

    Args:
        solution_str: The solution string to verify
        answer: The ground truth answer
        strict_box_verify: Whether to use strict box verification
        pause_tokens_index: Indices of pause tokens

    Returns:
        Tuple of (is_correct, extracted_prediction). The prediction may be
        None in strict-box mode when no boxed answer was found.
    """
    if strict_box_verify:
        correct, pred = is_correct_strict_box(solution_str, answer, pause_tokens_index)
        return correct == 1, pred

    correct, pred = is_correct_minerva(solution_str, answer)
    return correct, pred
def compute_score(
    solution_str: str,
    ground_truth: str,
    strict_box_verify: bool = False,
    pause_tokens_index: Optional[list[int]] = None,
) -> dict:
    """Compute the reward score for a solution.

    Args:
        solution_str: The solution string.
        ground_truth: The ground truth answer.
        strict_box_verify: Whether to use strict box verification.
        pause_tokens_index: Indices of pause tokens.

    Returns:
        Dict with keys "score" (1.0 correct / -1.0 incorrect),
        "acc" (boolean correctness) and "pred" (extracted prediction).
    """
    # Fix: the original annotation/docstring said `-> float`, but a dict is returned.
    # Limit solution length for efficiency
    solution_str = solution_str[-300:]  # The longest answer in MATH-500 has 159 characters
    # Verify the solution
    correct, pred = verify(solution_str, ground_truth, strict_box_verify, pause_tokens_index)
    return {
        "score": 1.0 if correct else -1.0,
        "acc": correct,
        "pred": pred,
    }
| verl__utils__reward_score__math_dapo.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/hendrycks_math/utils.py
def compute_score(solution_str, ground_truth) -> float:
    """Return 1.0 when the last boxed answer in *solution_str* matches *ground_truth*, else 0.0."""
    score = 0.0
    try:
        boxed = last_boxed_only_string(solution_str)
        if boxed is not None:
            candidate = remove_boxed(boxed)
            if is_equiv(candidate, ground_truth):
                score = 1.0
    except Exception as e:
        # Best-effort scoring: a malformed answer string yields 0.0, not a crash.
        print(e)
    return score
# string normalization from https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/hendrycks_math.py
def is_equiv(str1, str2, verbose=False):
    """Compare two answer strings after normalization (Hendrycks' MATH convention)."""
    if str1 is None and str2 is None:
        print("WARNING: Both None")
        return True
    if str1 is None or str2 is None:
        return False
    try:
        left = strip_string(str1)
        right = strip_string(str2)
        if verbose:
            print(left, right)
        return left == right
    except Exception:
        # Normalization failed; fall back to raw string equality.
        return str1 == str2
def remove_boxed(s):
    """Strip the surrounding ``\\boxed{...}`` (or space-form ``\\boxed ``) wrapper from *s*."""
    spaced = "\\boxed "
    if spaced in s:
        # Space-delimited form: everything after "\boxed " is the answer.
        assert s.startswith(spaced)
        return s[len(spaced) :]
    braced = "\\boxed{"
    assert s.startswith(braced)
    assert s.endswith("}")
    return s[len(braced) : -1]
def last_boxed_only_string(string):
    """Return the last ``\\boxed{...}`` (or ``\\fbox{...}``) substring of *string*, or None."""
    start = string.rfind("\\boxed")
    # Space-delimited form: take everything after the last "\boxed " up to a "$".
    if "\\boxed " in string:
        return "\\boxed " + string.split("\\boxed ")[-1].split("$")[0]
    if start < 0:
        start = string.rfind("\\fbox")
        if start < 0:
            return None
    # Scan forward tracking brace depth to find the matching closing brace.
    depth = 0
    for pos in range(start, len(string)):
        ch = string[pos]
        if ch == "{":
            depth += 1
        elif ch == "}":
            depth -= 1
            if depth == 0:
                return string[start : pos + 1]
    # Unbalanced braces: no complete boxed expression.
    return None
def fix_fracs(string):
    """Brace single-character ``\\frac`` arguments: ``\\frac12`` -> ``\\frac{1}{2}``.

    Returns the input unchanged when a ``\\frac`` is malformed (fewer than
    two characters follow it).
    """
    substrs = string.split("\\frac")
    if len(substrs) <= 1:
        return string
    new_str = substrs[0]
    for substr in substrs[1:]:
        new_str += "\\frac"
        if substr.startswith("{"):
            # Argument already braced; copy verbatim.
            new_str += substr
            continue
        # Fix: guard the empty segment (e.g. "\\frac\\frac"), which previously
        # raised IndexError on substr[0]; treat it like other malformed input.
        if len(substr) < 2:
            return string
        a, b = substr[0], substr[1]
        rest = substr[2:]
        if b != "{":
            # Both args are bare single chars: \frac12 -> \frac{1}{2}...
            new_str += "{" + a + "}{" + b + "}" + rest
        else:
            # Second arg already braced: \frac1{72} -> \frac{1}{72}...
            new_str += "{" + a + "}" + b + rest
    return new_str
def fix_a_slash_b(string):
    """Rewrite an exact integer quotient "a/b" as "\\frac{a}{b}"; otherwise return unchanged."""
    parts = string.split("/")
    if len(parts) != 2:
        return string
    try:
        numer = int(parts[0])
        denom = int(parts[1])
        # Only rewrite when the string is literally "<int>/<int>" (no spaces, no extras).
        assert string == f"{numer}/{denom}"
        return "\\frac{" + str(numer) + "}{" + str(denom) + "}"
    except Exception:
        return string
def remove_right_units(string):
    """Drop a trailing ``\\text{ ...}`` unit annotation, keeping only what precedes it."""
    if "\\text{ " not in string:
        return string
    splits = string.split("\\text{ ")
    # The marker is expected to occur exactly once (a single unit suffix).
    assert len(splits) == 2
    return splits[0]
def fix_sqrt(string):
    """Brace bare ``\\sqrt`` arguments: ``\\sqrt3`` -> ``\\sqrt{3}``."""
    if "\\sqrt" not in string:
        return string
    chunks = string.split("\\sqrt")
    out = [chunks[0]]
    for chunk in chunks[1:]:
        if chunk[0] != "{":
            # Wrap just the first character after \sqrt in braces.
            out.append("\\sqrt{" + chunk[0] + "}" + chunk[1:])
        else:
            out.append("\\sqrt" + chunk)
    return "".join(out)
def strip_string(string):
    """Normalize a LaTeX answer string for literal comparison (used by is_equiv)."""
    # Ordered literal rewrites; order matters (e.g. "\\\\" -> "\\" before later rules).
    replacements = [
        ("\n", ""),  # linebreaks
        ("\\!", ""),  # inverse spaces
        ("\\\\", "\\"),  # \\ -> \
        ("tfrac", "frac"),
        ("dfrac", "frac"),
        ("\\left", ""),
        ("\\right", ""),
        ("^{\\circ}", ""),  # degrees
        ("^\\circ", ""),
        ("\\$", ""),  # dollar signs
    ]
    for old, new in replacements:
        string = string.replace(old, new)
    # remove units (on the right)
    string = remove_right_units(string)
    # remove percentage
    for old in ("\\\\%", "\\%"):
        string = string.replace(old, "")
    # " .5" -> " 0.5" and "{.5" -> "{0.5"; also prepend "0" when "." starts the string
    string = string.replace(" .", " 0.")
    string = string.replace("{.", "{0.")
    if not string:
        return string
    if string.startswith("."):
        string = "0" + string
    # Drop a short "k = " style prefix, keeping only the right-hand side.
    eq_parts = string.split("=")
    if len(eq_parts) == 2 and len(eq_parts[0]) <= 2:
        string = eq_parts[1]
    # sqrt3 -> sqrt{3}
    string = fix_sqrt(string)
    # remove spaces
    string = string.replace(" ", "")
    # \frac1b -> \frac{1}{b}, etc.
    string = fix_fracs(string)
    # manually change 0.5 -> \frac{1}{2}
    if string == "0.5":
        string = "\\frac{1}{2}"
    # X/Y -> \frac{X}{Y} for simple integer quotients
    string = fix_a_slash_b(string)
    return string
| verl__utils__reward_score__math_reward.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from math_verify.errors import TimeoutException
from math_verify.metric import math_metric
from math_verify.parser import ExprExtractionConfig, LatexExtractionConfig
except ImportError:
print("To use Math-Verify, please install it first by running `pip install math-verify`.")
def compute_score(model_output: str, ground_truth: str, timeout_score: float = 0) -> float:
    """Score *model_output* against *ground_truth* using Math-Verify.

    Args:
        model_output: The model's full output text.
        ground_truth: The reference answer (without a \\boxed{} wrapper).
        timeout_score: Score returned when verification times out.

    Returns:
        1.0 if the answer verifies, ``timeout_score`` on timeout, else 0.0.
    """
    verify_func = math_metric(
        gold_extraction_target=(LatexExtractionConfig(),),
        pred_extraction_target=(ExprExtractionConfig(), LatexExtractionConfig()),
    )
    ret_score = 0.0
    # Wrap the ground truth in \boxed{} format for verification
    ground_truth_boxed = "\\boxed{" + ground_truth + "}"
    try:
        ret_score, _ = verify_func([ground_truth_boxed], [model_output])
    except TimeoutException:
        # Fix: this handler must precede the generic `except Exception` clause;
        # in the original order it was unreachable (TimeoutException is an
        # Exception subclass), so timeouts silently scored 0.0.
        ret_score = timeout_score
    except Exception:
        pass
    return ret_score
| verl__utils__reward_score__math_verify.py |
# Copyright 2024 PRIME team and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Borrowed from: https://huggingface.co/spaces/codeparrot/apps_metric/blob/main/utils.py
import multiprocessing
import os
import sys
import traceback
from typing import Optional
from .testing_util import run_test
def _temp_run(sample, generation, debug, result, metadata_list, timeout):
    """Subprocess target: run `run_test` with stdout/stderr silenced.

    Appends per-test results to *result* and metadata to *metadata_list*
    (multiprocessing.Manager lists shared with the parent). On any failure,
    records -1 for every test case so the parent can tell "ran and failed"
    apart from "never ran".
    """
    original_stdout, original_stderr = sys.stdout, sys.stderr
    with open(os.devnull, "w") as devnull:
        sys.stdout = devnull
        sys.stderr = devnull
        try:
            res, metadata = run_test(in_outs=sample, test=generation, debug=debug, timeout=timeout)
            result.append(res)
            metadata_list.append(metadata)
        except Exception:
            # print(e) # some tracebacks are extremely long.
            traceback.print_exc(10)
            result.append([-1 for i in range(len(sample["inputs"]))])
            metadata_list.append({})
        finally:
            # Fix: restore the real streams. Previously sys.stdout/sys.stderr
            # kept pointing at the devnull handle, which is closed when the
            # with-block exits, so any later output in this process would fail.
            sys.stdout = original_stdout
            sys.stderr = original_stderr
def check_correctness(in_outs: Optional[dict], generation, timeout=10, debug=True):
    """Check correctness of code generation with a global timeout.
    The global timeout is to catch some extreme/rare cases not handled by the timeouts
    inside `run_test`"""
    # Manager-backed lists are shared with the child process, so results
    # written there survive the process boundary.
    manager = multiprocessing.Manager()
    result = manager.list()
    metadata_list = manager.list()
    p = multiprocessing.Process(target=_temp_run, args=(in_outs, generation, debug, result, metadata_list, timeout))
    p.start()
    # Allow one extra second beyond the per-test timeout before giving up.
    p.join(timeout=timeout + 1)
    if p.is_alive():
        # Child exceeded the global timeout; kill it outright.
        p.kill()
        # p.terminate()
    if not result:
        # consider that all tests failed
        result = [[-1 for i in range(len(in_outs["inputs"]))]]
        if debug:
            print("global timeout")
    # result[0] is the per-test-case score list from the single child run.
    return result[0], metadata_list
| verl__utils__reward_score__prime_code__utils.py |
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Microsoft Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
# Copyright (c) 2023 OpenAI
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Copyright (c) 2021 Dan Hendrycks
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Copyright 2024 PRIME team and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This logic is largely copied from the Hendrycks' MATH release (math_equivalence), and borrowed from:
- https://github.com/microsoft/ToRA/blob/main/src/eval/grader.py
- https://github.com/microsoft/ProphetNet/tree/master/CRITIC
- https://github.com/openai/prm800k
"""
import contextlib
import math
import re
from math import isclose
# sympy related
from sympy import N, simplify
from sympy.parsing.latex import parse_latex
from sympy.parsing.sympy_parser import parse_expr
# verl related
from verl.utils.py_functional import timeout_limit
def is_digit(s):
    """Try to parse *s* as a number, tolerating "," or "{,}" thousands separators.

    Returns (True, value) on success, (False, None) otherwise.
    """
    try:
        text = str(s)
        if "{,}" in text:
            return True, float(text.replace("{,}", ""))
        return True, float(text.replace(",", ""))
    except ValueError:
        return False, None
def normalize(answer, pi) -> str:
    """Normalize an answer: strip $/% decoration, then resolve bases and \\pi."""
    if isinstance(answer, str):
        # "$<number>" -> drop the leading dollar sign.
        if re.match(r"\$\d+(\.\d+)?", answer):
            return answer[1:]
        # "<number>%" or "<number>\%" -> drop the percent marker.
        if re.match(r"^\d+(\.\d+)?%$", answer) or re.match(r"^\d+(\.\d+)?\\%$", answer):
            return answer.replace("\\%", "").replace("%", "")
    # handle base
    answer = handle_base(answer)
    # handle pi
    return handle_pi(answer, pi)
def handle_base(x) -> str:
    """Strip a "_<base>" suffix (e.g. "101_2") and return the leading number as an int."""
    if not (isinstance(x, str) and "_" in x):
        return x
    # Keep only the digits before the underscore; the base marker is discarded.
    return int(float(x.split("_")[0]))
def handle_pi(string, pi):
    """Replace each "\\pi" with a numeric multiple of *pi*, then best-effort evaluate."""
    if isinstance(string, str) and "\\pi" in string:
        pos = string.find("\\pi")
        while pos != -1:
            # "2\pi" -> "2*<pi>"; a bare "\pi" -> "1*<pi>".
            if pos > 0 and string[pos - 1].isdigit():
                factor = f"*{pi}"
            else:
                factor = f"1*{pi}"
            string = string[:pos] + factor + string[pos + 3 :]
            pos = string.find("\\pi", pos + 1)
        # Best-effort numeric evaluation of the rewritten expression.
        # NOTE: eval() on a model-derived string (kept from the original code);
        # only reached for strings that contained "\pi".
        try:
            string = eval(string)
        except Exception:
            pass
    return string
def math_equal(
    prediction: bool | float | str,
    reference: float | str,
    include_percentage: bool = True,
    tolerance: float = 1e-4,
    timeout: float = 10.0,
    pi: float = math.pi,
) -> bool:
    """
    Exact match of math if and only if:
    1. numerical equal: both can convert to float and are equal
    2. symbolic equal: both can convert to sympy expression and are equal

    Args:
        prediction: Model answer (string or numeric).
        reference: Ground-truth answer.
        include_percentage: Also accept reference/100 and reference*100 as equal.
        tolerance: Relative tolerance for numeric comparisons.
        timeout: Per-step timeout (seconds) for the sympy fallback.
        pi: Value substituted for "\\pi" during normalization.
    """
    prediction = normalize(prediction, pi)
    reference = normalize(reference, pi)
    if isinstance(prediction, str) and len(prediction) > 1000:  # handling weird corner-cases
        prediction = prediction[:1000]
    # 0. string comparison (case-insensitive, then whitespace-insensitive)
    if isinstance(prediction, str) and isinstance(reference, str):
        if prediction.strip().lower() == reference.strip().lower():
            return True
        if prediction.replace(" ", "") == reference.replace(" ", ""):
            return True
    try:  # 1. numerical equal
        if is_digit(prediction)[0] and is_digit(reference)[0]:
            prediction = is_digit(prediction)[1]
            reference = is_digit(reference)[1]
            # number questions: optionally accept the percentage-scaled variants
            gt_result = [reference / 100, reference, reference * 100] if include_percentage else [reference]
            for item in gt_result:
                try:
                    if isclose(item, prediction, rel_tol=tolerance):
                        return True
                except Exception:
                    continue
            return False
    except Exception:
        pass
    # Empty-ish predictions (None, "", []) cannot match; 0/False are legitimate answers.
    if not prediction and prediction not in [0, False]:
        return False
    # 2. symbolic equal
    reference = str(reference).strip()
    prediction = str(prediction).strip()
    ## deal with [], (), {} — convert sympy Interval reprs to bracket notation first
    prediction = format_intervals(prediction)
    pred_str, ref_str = prediction, reference
    # Strip brackets/braces for a loose textual comparison when the bracket
    # styles are compatible (e.g. "[1,2]" vs "1,2").
    if (prediction.startswith("[") and prediction.endswith("]") and not reference.startswith("(")) or (
        prediction.startswith("(") and prediction.endswith(")") and not reference.startswith("[")
    ):
        pred_str = pred_str.strip("[]()")
        ref_str = ref_str.strip("[]()")
    for s in ["{", "}", "(", ")"]:
        ref_str = ref_str.replace(s, "")
        pred_str = pred_str.replace(s, "")
    if pred_str == ref_str:
        return True
    ## [a, b] vs. [c, d], return a==c and b==d (element-wise recursive comparison)
    if (
        prediction
        and reference
        and prediction[0] in "(["
        and prediction[-1] in ")]"
        and prediction[0] == reference[0]
        and prediction[-1] == reference[-1]
    ):
        pred_parts = prediction[1:-1].split(",")
        ref_parts = reference[1:-1].split(",")
        if len(pred_parts) == len(ref_parts) and all(
            [
                math_equal(pred_pt, ref_pt, include_percentage, tolerance)
                for pred_pt, ref_pt in zip(pred_parts, ref_parts, strict=True)
            ]
        ):
            return True
    # Comma-separated tuples without brackets: compare element-wise.
    if "," in prediction and "," in reference:
        pred_parts = [item.strip() for item in prediction.split(",")]
        ref_parts = [item.strip() for item in reference.split(",")]
        if len(pred_parts) == len(ref_parts):
            return bool(
                all(
                    [
                        math_equal(pred_parts[i], ref_parts[i], include_percentage, tolerance)
                        for i in range(len(pred_parts))
                    ]
                )
            )
    # if we have point == tuple of values, e.g. sympy "Point2D(x, y)" vs "(x, y)"
    if prediction.startswith("Point") and reference[0] == "(" and reference[-1] == ")":
        pred_parts = prediction[prediction.find("(") + 1 : -1].split(",")
        ref_parts = reference[1:-1].split(",")
        if len(pred_parts) == len(ref_parts) and all(
            [
                math_equal(pred_pt, ref_pt, include_percentage, tolerance)
                for pred_pt, ref_pt in zip(pred_parts, ref_parts, strict=False)
            ]
        ):
            return True
    # if reference is a matrix: compare a sympy "Matrix(...)" prediction element-wise
    if r"\begin{pmatrix}" in reference and prediction.startswith("Matrix"):
        try:
            pred_matrix = parse_expr(prediction)
            ref_matrix_items = reference.split()[1:-1:2]
            if len(pred_matrix) == len(ref_matrix_items) and all(
                [
                    math_equal(pred, ref, include_percentage, tolerance)
                    for ref, pred in zip(ref_matrix_items, pred_matrix, strict=False)
                ]
            ):
                return True
        except Exception:
            pass
    elif r"\begin{pmatrix}" in reference and prediction.startswith("[") and prediction.endswith("]"):
        # NOTE(review): eval() of the model-produced list literal runs OUTSIDE
        # the try-block below, so a malformed "[...]" string raises here —
        # confirm this is intended (and note eval on untrusted input).
        if isinstance(eval(prediction), list):
            try:
                pred_matrix = eval(prediction)
                # ref_matrix_items = reference.split()[1:-1:2]
                ref_matrix_items = (
                    reference.removeprefix(r"\\begin{pmatrix}")
                    .removeprefix(r"\begin{pmatrix}")
                    .removesuffix(r"\\end{pmatrix}")
                    .removesuffix(r"\end{pmatrix}")
                )
                ref_matrix_items = ref_matrix_items.split("\\")
                ref_matrix_items = [row.split("&") if "&" in row else row for row in ref_matrix_items]
                if len(pred_matrix) == len(ref_matrix_items) and all(
                    [
                        math_equal(pred, ref, include_percentage, tolerance)
                        for ref, pred in zip(ref_matrix_items, pred_matrix, strict=False)
                    ]
                ):
                    return True
            except Exception:
                pass
    # Last resort: sympy-based symbolic/numeric comparison.
    return symbolic_equal(prediction, reference, tolerance, timeout)
def symbolic_equal(a, b, tolerance, timeout=10.0):
    """Compare two expressions symbolically with sympy, guarding every step with a timeout.

    Tries, in order: parse each side (parse_expr, then parse_latex, falling back
    to the raw string), check ``simplify(a - b) == 0``, then compare numeric
    evaluations ``N(a)``/``N(b)`` within ``rel_tol=tolerance``.
    Returns False when no check succeeds.
    """
    def _parse(s):
        # Try sympy's Python-expression parser first, then its LaTeX parser;
        # fall back to the raw string when both fail or time out.
        for f in [parse_expr, parse_latex]:
            try:
                with timeout_limit(seconds=timeout):
                    return f(s)
            except TimeoutError:
                print(f"Parsing timed out for {s}")
                continue
            except Exception:
                continue
        return s
    a = _parse(a)
    b = _parse(b)
    try:
        # Exact symbolic equality: the difference simplifies to zero.
        with timeout_limit(seconds=timeout):
            if simplify(a - b) == 0:
                return True
    except TimeoutError:
        print(f"Simplification timed out for {a} - {b}")
        pass
    except Exception:
        pass
    try:
        # Approximate numeric equality of the evaluated expressions.
        with timeout_limit(seconds=timeout):
            if isclose(N(a), N(b), rel_tol=tolerance):
                return True
    except TimeoutError:
        print(f"Numerical evaluation timed out for {a}, {b}")
        pass
    except Exception:
        pass
    return False
def format_intervals(prediction):
    """Convert a sympy Interval repr into bracket notation, e.g. Interval.open(a, b) -> (a, b)."""
    # (pattern, opening bracket, closing bracket) for each Interval constructor.
    interval_forms = [
        (r"^Interval\((.*)\)$", "[", "]"),  # closed: Interval(a, b) == [a, b]
        (r"^Interval\.Ropen\((.*)\)$", "[", ")"),  # Interval.Ropen(a, b) == [a, b)
        (r"^Interval\.Lopen\((.*)\)$", "(", "]"),  # Interval.Lopen(a, b) == (a, b]
        (r"^Interval\.open\((.*)\)$", "(", ")"),  # Interval.open(a, b) == (a, b)
    ]
    for pattern, left, right in interval_forms:
        m = re.match(pattern, prediction)
        if m:
            return f"{left}{m.group(1)}{right}"
    return prediction
| verl__utils__reward_score__prime_math__grader.py |
# Copyright 2024 PRIME team and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2021 Dan Hendrycks
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This logic is largely copied from the Hendrycks' MATH release (math_equivalence).
From: https://github.com/openai/prm800k/blob/main/prm800k/grading/math_normalize.py
"""
import re
from typing import Optional
def normalize_answer(answer: Optional[str]) -> Optional[str]:
    """Normalize a final answer for comparison; passes None through unchanged."""
    if answer is None:
        return None
    answer = answer.strip()
    try:
        # Unwrap a full "\text{...}" enclosure before normalizing.
        text_match = re.search(r"^\\text\{(?P<text>.+?)\}$", answer)
        if text_match:
            answer = text_match.group("text").strip()
        return _strip_string(answer)
    except Exception:
        # Best-effort: fall back to the stripped raw answer.
        return answer
def _fix_fracs(string):
substrs = string.split("\\frac")
new_str = substrs[0]
if len(substrs) > 1:
substrs = substrs[1:]
for substr in substrs:
new_str += "\\frac"
if substr[0] == "{":
new_str += substr
else:
try:
assert len(substr) >= 2
except Exception:
return string
a = substr[0]
b = substr[1]
if b != "{":
if len(substr) > 2:
post_substr = substr[2:]
new_str += "{" + a + "}{" + b + "}" + post_substr
else:
new_str += "{" + a + "}{" + b + "}"
else:
if len(substr) > 2:
post_substr = substr[2:]
new_str += "{" + a + "}" + b + post_substr
else:
new_str += "{" + a + "}" + b
string = new_str
return string
def _fix_a_slash_b(string):
if len(string.split("/")) != 2:
return string
a = string.split("/")[0]
b = string.split("/")[1]
try:
a = int(a)
b = int(b)
assert string == "{}/{}".format(a, b)
new_string = "\\frac{" + str(a) + "}{" + str(b) + "}"
return new_string
except Exception:
return string
def _remove_right_units(string):
# "\\text{ " only ever occurs (at least in the val set) when describing units
if "\\text{ " in string:
splits = string.split("\\text{ ")
assert len(splits) == 2
return splits[0]
else:
return string
def _fix_sqrt(string):
if "\\sqrt" not in string:
return string
splits = string.split("\\sqrt")
new_string = splits[0]
for split in splits[1:]:
if split[0] != "{":
a = split[0]
new_substr = "\\sqrt{" + a + "}" + split[1:]
else:
new_substr = "\\sqrt" + split
new_string += new_substr
return new_string
def _strip_string(string):
    """Normalize a LaTeX answer string for literal comparison (used by normalize_answer)."""
    # Ordered literal rewrites; order matters (e.g. "\\\\" -> "\\" before later rules).
    replacements = [
        ("\n", ""),  # linebreaks
        ("\\!", ""),  # inverse spaces
        ("\\\\", "\\"),  # \\ -> \
        ("tfrac", "frac"),
        ("dfrac", "frac"),
        ("\\left", ""),
        ("\\right", ""),
        ("^{\\circ}", ""),  # degrees
        ("^\\circ", ""),
        ("\\$", ""),  # dollar signs
    ]
    for old, new in replacements:
        string = string.replace(old, new)
    # remove units (on the right)
    string = _remove_right_units(string)
    # remove percentage
    for old in ("\\\\%", "\\%"):
        string = string.replace(old, "")
    # " .5" -> " 0.5" and "{.5" -> "{0.5"; also prepend "0" when "." starts the string
    string = string.replace(" .", " 0.")
    string = string.replace("{.", "{0.")
    if not string:
        return string
    if string.startswith("."):
        string = "0" + string
    # Drop a short "k = " style prefix, keeping only the right-hand side.
    eq_parts = string.split("=")
    if len(eq_parts) == 2 and len(eq_parts[0]) <= 2:
        string = eq_parts[1]
    # sqrt3 -> sqrt{3}
    string = _fix_sqrt(string)
    # remove spaces
    string = string.replace(" ", "")
    # \frac1b -> \frac{1}{b}, etc.
    string = _fix_fracs(string)
    # manually change 0.5 -> \frac{1}{2}
    if string == "0.5":
        string = "\\frac{1}{2}"
    # X/Y -> \frac{X}{Y} for simple integer quotients
    string = _fix_a_slash_b(string)
    return string
| verl__utils__reward_score__prime_math__math_normalize.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures # <-- Import concurrent.futures
import json
import logging
import os
import threading
import time
import traceback
import uuid
from typing import Any, Optional
import requests
DEFAULT_TIMEOUT = 10  # Default compile and run timeout (seconds)
MAX_RETRIES = 3  # Max attempts when the API returns Gateway Timeout (504)
INITIAL_RETRY_DELAY = 1  # Base delay (seconds) for the linear retry backoff
API_TIMEOUT = 10  # Extra HTTP buffer added on top of compile+run timeouts
# Module-level logger used by the sandbox call helpers.
logger = logging.getLogger(__name__)
# Define supported languages list (optional, for documentation or validation)
SUPPORTED_LANGUAGES = [
    "python",
    "cpp",
    "nodejs",
    "go",
    "go_test",
    "java",
    "php",
    "csharp",
    "bash",
    "typescript",
    "sql",
    "rust",
    "cuda",
    "lua",
    "R",
    "perl",
    "D_ut",
    "ruby",
    "scala",
    "julia",
    "pytest",
    "junit",
    "kotlin_script",
    "jest",
    "verilog",
    "python_gpu",
    "lean",
    "swift",
    "racket",
]
def call_sandbox_api(
    sandbox_fusion_url: str,
    code: str,
    stdin: Optional[str],
    compile_timeout: int,
    run_timeout: int,
    memory_limit_mb: int,
    language: str = "python",
) -> tuple[Optional[dict[str, Any]], Optional[str]]:
    """
    Calls the remote sandbox API to execute code with retry logic for Gateway Timeout,
    using increasing delay between retries. Logs internal calls with a unique ID.

    Args:
        sandbox_fusion_url: The URL of the sandbox fusion API.
        code: The code string to execute.
        stdin: The standard input string.
        compile_timeout: Compile timeout in seconds.
        run_timeout: Run timeout in seconds.
        memory_limit_mb: Memory limit for the sandboxed run, in megabytes.
        language: The programming language of the code (e.g., "python", "cpp", "java"). Defaults to "python".

    Returns:
        A tuple (response_json, error_message).
        If successful, response_json is the API's returned JSON object, error_message is None.
        If failed after retries, response_json is None, error_message contains the error information.
    """
    request_id = str(uuid.uuid4())  # Correlates all log lines for this single call.
    log_prefix = f"[Request ID: {request_id}] "
    if language not in SUPPORTED_LANGUAGES:
        error_msg = f"{log_prefix}Unsupported language: {language}"
        logger.error(error_msg)
        return None, error_msg
    payload = json.dumps(
        {
            "compile_timeout": compile_timeout,
            "run_timeout": run_timeout,
            "code": code,
            "stdin": stdin,
            "memory_limit_MB": memory_limit_mb,
            "language": language,  # Use the passed language parameter
            "files": {},
            "fetch_files": [],
        }
    )
    headers = {"Content-Type": "application/json", "Accept": "application/json"}
    # Give the HTTP layer enough time for both phases plus an API buffer.
    request_timeout = compile_timeout + run_timeout + API_TIMEOUT
    last_error = None  # Store the last error encountered
    for attempt in range(MAX_RETRIES):
        try:
            logger.info(
                f"{log_prefix}Attempt {attempt + 1}/{MAX_RETRIES}: Calling sandbox API at {sandbox_fusion_url}"
            )
            response = requests.post(
                sandbox_fusion_url,
                headers=headers,
                data=payload,
                timeout=request_timeout,  # Use the calculated timeout
            )
            # Only Gateway Timeout (504) is treated as transient and retried.
            if response.status_code == 504:
                last_error = (
                    f"{log_prefix}API Request Error: Gateway Timeout (504) on attempt "
                    f"{attempt + 1}/{MAX_RETRIES}"
                )
                logger.warning(last_error)
                if attempt < MAX_RETRIES - 1:  # Don't sleep after the last attempt
                    # Linear backoff: 1s, 2s, 3s, ...
                    delay = INITIAL_RETRY_DELAY * (attempt + 1)
                    logger.info(f"{log_prefix}Retrying after {delay} seconds...")
                    time.sleep(delay)
                    continue  # Go to the next retry attempt
            # Check for other HTTP errors (e.g., 4xx, other 5xx)
            response.raise_for_status()
            # If successful (status code 2xx)
            logger.info(f"{log_prefix}Sandbox API call successful on attempt {attempt + 1}")
            return response.json(), None
        except requests.exceptions.RequestException as e:
            last_error = f"{log_prefix}API Request Error: {e}"
            break  # Exit retry loop on non-504 request errors
        except json.JSONDecodeError as e:
            # Fix: the raw response text was previously captured into a local
            # that was never used; include a snippet so the log is actionable.
            raw_response_text = response.text if "response" in locals() else "N/A"
            last_error = (
                f"{log_prefix}API Response JSON Decode Error: {e}. Raw response: {raw_response_text[:500]}"
            )
            break  # Exit retry loop on JSON decode errors
        except Exception as e:
            last_error = f"{log_prefix}Unexpected Error: {e}"
            break  # Exit retry loop on other unexpected errors
    # If loop finishes without returning success, return the last recorded error
    logger.error(f"{log_prefix}Sandbox API call failed. Last error: {last_error}")
    # Strip the internal request-id prefix: the caller does not need it, and a
    # None/empty error still yields a generic failure message (mapped to -1 in
    # check_correctness).
    return None, last_error.replace(log_prefix, "API Call Failed: ") if last_error else "API Call Failed after retries"
def _process_single_case(
    case_index: int,
    stdin_data: Any,
    expected_output: Any,
    sandbox_fusion_url: str,
    generation: str,
    timeout: int,
    memory_limit_mb: int,
    language: str,
    concurrent_semaphore: Optional[threading.Semaphore] = None,
    fn_name: Optional[str] = None,
) -> tuple[int, dict[str, Any]]:
    """Run one test case against the sandbox API and classify the outcome.

    Args:
        case_index: Zero-based index of this test case (used in log messages).
        stdin_data: Raw stdin payload for the case (stringified before sending),
            or None to send no stdin.
        expected_output: Expected stdout; compared after stripping trailing
            newlines. If None, any output counts as correct.
        sandbox_fusion_url: URL of the sandbox fusion API.
        generation: Generated code to execute.
        timeout: Timeout in seconds, applied to both compile and run phases.
        memory_limit_mb: Memory limit for the sandbox in MB.
        language: Programming language of ``generation``.
        concurrent_semaphore: Optional semaphore bounding concurrent API calls.
        fn_name: If set (Python only), wrap ``generation`` so this function (or
            a ``Solution`` method of this name) is called with JSON-decoded
            stdin lines as positional arguments.

    Returns:
        Tuple ``(result_status, metadata)``. ``result_status`` is True/False for
        pass/fail, or a negative code: -1 API/sandbox/unknown error, -2 runtime
        error, -3 runtime timeout, -4 compile error/timeout.
    """
    api_response = None
    error_msg = None
    logger.info(f"Processing test case {case_index + 1}.")
    current_generation_code = generation
    if fn_name and language == "python":
        # Wrapper assumes stdin_data is a JSON string for function arguments.
        # NOTE: this is a runtime string executed inside the sandbox; the
        # wildcard imports deliberately mirror common judge environments.
        wrapper_code = f"""
import traceback
from string import *
from re import *
from datetime import *
from collections import *
from heapq import *
from bisect import *
from copy import *
from math import *
from random import *
from statistics import *
from itertools import *
from functools import *
from operator import *
from io import *
from sys import *
from json import *
from builtins import *
from typing import *
import string
import re
import datetime
import collections
import heapq
import bisect
import copy
import math
import random
import statistics
import itertools
import functools
import operator
import io
import sys
import json

# === User's Original Code START ===
{generation}
# === User's Original Code END ===

_SANDBOX_FN_NAME = "{fn_name}"

def _execute_user_function():
    # --- Input Parsing ---
    _raw_input_str = sys.stdin.read()
    _args = []
    if _raw_input_str.strip():  # If there's input
        try:
            _args = [json.loads(line) for line in _raw_input_str.split('\\n')]
        except json.JSONDecodeError as _je:
            sys.stderr.write(f"WrapperError: Invalid JSON input for '{{_SANDBOX_FN_NAME}}': {{_je}}\\nInput was: "
                             f"{{_raw_input_str[:200]}}\\n")
            return None, True  # result, error_occurred
    # --- Function Location and Execution ---
    try:
        _target_callable = None
        # Try global scope first
        if _SANDBOX_FN_NAME in globals():
            _target_callable = globals()[_SANDBOX_FN_NAME]
        # Else, if 'Solution' class exists, try to get its method
        elif 'Solution' in globals():
            _Solution_class = globals()['Solution']
            # Attempt to instantiate and get method.
            # Errors (e.g., Solution not a class, instantiation fails, method missing)
            # will be caught by the broad except block below.
            _solution_instance = _Solution_class()
            _target_callable = getattr(_solution_instance, _SANDBOX_FN_NAME)
        if not _target_callable:
            sys.stderr.write(f"WrapperError: Function or method '{{_SANDBOX_FN_NAME}}' not found.\\n")
            return None, True  # result, error_occurred
        _fn_result = _target_callable(*_args)
        return _fn_result, False  # result, no_error
    except Exception:  # Catches errors from Solution instantiation, getattr, or function call
        sys.stderr.write(f"Error during setup or execution of '{{_SANDBOX_FN_NAME}}':\\n{{traceback.format_exc()}}\\n")
        return None, True  # result, error_occurred

if __name__ == '__main__':
    _result, _error_occurred = _execute_user_function()
    if not _error_occurred:
        # Serialize result to stdout
        if isinstance(_result, (dict, list, tuple)) or _result is None or isinstance(_result, bool):
            print(json.dumps(_result))
        elif isinstance(_result, (int, float, str)):
            print(str(_result))  # Ensure string conversion for print
        else:
            # For other types, default to string representation.
            print(str(_result))
    # Optional: To explicitly exit with an error code if the sandbox relies on it
    # else:
    #     sys.exit(1)
"""
        current_generation_code = wrapper_code

    stdin = None if stdin_data is None else str(stdin_data)
    try:
        if concurrent_semaphore:
            # Bound concurrent sandbox calls when a semaphore is provided.
            with concurrent_semaphore:
                api_response, error_msg = call_sandbox_api(
                    sandbox_fusion_url=sandbox_fusion_url,
                    code=current_generation_code,
                    stdin=stdin,
                    compile_timeout=timeout,
                    run_timeout=timeout,
                    memory_limit_mb=memory_limit_mb,
                    language=language,
                )
        else:
            api_response, error_msg = call_sandbox_api(
                sandbox_fusion_url=sandbox_fusion_url,
                code=current_generation_code,
                stdin=stdin,
                compile_timeout=timeout,
                run_timeout=timeout,
                memory_limit_mb=memory_limit_mb,
                language=language,
            )
    except Exception as e:
        error_msg = f"API Request Exception during check_correctness for case {case_index + 1}: {e}"
        logger.error(f"Case {case_index + 1}: {error_msg}")
        traceback.print_exc()

    metadata = {
        "case_index": case_index,
        # FIX: use `is not None` so falsy-but-valid expected outputs
        # (e.g. 0 or "") are preserved instead of being recorded as None.
        "input": stdin,
        "expected_output": str(expected_output) if expected_output is not None else None,
        "api_request_error": error_msg,
        "api_response": None,
        "status": "unknown",
        "stdout": None,
        "stderr": None,
        "exit_code": None,
        "duration": None,
        "compile_duration": None,
        "compile_stderr": None,
        "api_status": None,
        "compile_status": None,
        "run_status": None,
    }
    result_status = -1  # Default error: API request error or unknown sandbox error

    if error_msg:
        metadata["status"] = "api_error"
        result_status = -1  # API request itself failed (includes timeout after retries)
        logger.error(f"Case {case_index}: API error occurred: {error_msg}")
        # Log code and input only on error for brevity
        generation_to_log = generation[:200] + "..." if len(generation) > 200 else generation
        logger.error(f"Case {case_index}: code: {generation_to_log}")
        logger.error(f"Case {case_index}: input: {stdin}")
    elif api_response:
        logger.debug(f"Case {case_index}: API Response: {api_response}")
        metadata["api_response"] = api_response
        metadata["api_status"] = api_response.get("status")
        compile_result = api_response.get("compile_result")
        run_result = api_response.get("run_result")
        # Extract compile information
        if compile_result:
            metadata["compile_status"] = compile_result.get("status")
            metadata["compile_duration"] = compile_result.get("execution_time")
            metadata["compile_stderr"] = compile_result.get("stderr")
        # Extract run information
        if run_result:
            metadata["run_status"] = run_result.get("status")
            metadata["stdout"] = run_result.get("stdout")
            metadata["stderr"] = run_result.get("stderr")  # stderr during runtime
            metadata["exit_code"] = run_result.get("return_code")
            metadata["duration"] = run_result.get("execution_time")

        # --- Determine status based on API response ---
        api_status = metadata["api_status"]
        if api_status == "SandboxError":
            metadata["status"] = "sandbox_error"
            result_status = -1  # Internal sandbox error
        elif api_status == "Failed":
            logger.debug(f"API returned Failed status. Response: {api_response}")
            logger.debug(f"Compile Result: {compile_result}")
            logger.debug(f"Run Result: {run_result}")
            # Compile failed or timed out
            is_compile_error = compile_result and (
                metadata["compile_status"] in ["Error", "TimeLimitExceeded"]
                or (metadata["compile_status"] == "Finished" and compile_result.get("return_code") != 0)
            )
            if is_compile_error:
                # Differentiate between compile_error and compile_timeout based on specific status
                if metadata["compile_status"] == "TimeLimitExceeded":
                    metadata["status"] = "compile_timeout"
                else:  # Includes Error and Finished but return_code != 0 cases
                    metadata["status"] = "compile_error"
                result_status = -4
            # Run failed or timed out
            elif run_result:
                # Check for TimeLimitExceeded OR (Finished with non-zero exit code) OR Error status
                is_runtime_error = (
                    metadata["run_status"] == "TimeLimitExceeded"
                    or metadata["run_status"] == "Error"
                    or (metadata["run_status"] == "Finished" and run_result.get("return_code") != 0)
                )
                if is_runtime_error:
                    if metadata["run_status"] == "TimeLimitExceeded":
                        metadata["status"] = "timeout"  # Runtime timeout
                        result_status = -3
                    else:  # Includes Error and Finished with non-zero return_code
                        metadata["status"] = "runtime_error"
                        result_status = -2
                else:
                    # Other Failed status with run_result, classify as unknown failure
                    logger.warning(f"Unknown run_status '{metadata['run_status']}' or state within Failed API status.")
                    metadata["status"] = "unknown_failure"
                    result_status = -1  # Default to -1
            else:
                # Status is Failed but neither a clear compile error nor run_result exists
                logger.warning("API status Failed but cannot determine specific error type (compile/run).")
                metadata["status"] = "unknown_failure_state"
                result_status = -1  # Default to -1
        elif api_status == "Success":
            # Run completed successfully, now check the answer
            if run_result and metadata["run_status"] == "Finished":
                actual_output = metadata["stdout"] if metadata["stdout"] is not None else ""
                # Note: Output might contain trailing newlines, need normalization
                if expected_output is None or str(actual_output).rstrip("\n") == str(expected_output).rstrip("\n"):
                    result_status = True
                    metadata["status"] = "success"
                else:
                    result_status = False
                    metadata["status"] = "wrong_answer"
            else:
                # Status is Success but run_result status is not Finished, this is unexpected
                metadata["status"] = "unexpected_success_state"
                result_status = -1  # Classify as unknown error
        else:
            # API returned an unknown top-level status
            logger.warning(f"Unknown API status received: {api_status}")
            metadata["status"] = f"unknown_api_status_{api_status}"
            result_status = -1  # Default to -1
    else:  # api_response is None and no error_msg (Should not happen with current call_sandbox_api logic)
        metadata["status"] = "unknown_api_state"
        result_status = -1
        logger.error(f"Case {case_index}: Unknown API state (no response and no error message).")

    return result_status, metadata
def check_correctness(
    sandbox_fusion_url: str,
    in_outs: Optional[dict],
    generation: str,
    timeout: int = DEFAULT_TIMEOUT,
    memory_limit_mb: int = 1024,
    language: str = "python",
    concurrent_semaphore: Optional[threading.Semaphore] = None,
) -> tuple[list[Any], list[dict[str, Any]]]:
    """
    Checks the correctness of code generation using the remote sandbox API,
    processing test cases concurrently.

    Args:
        sandbox_fusion_url: The URL of the sandbox fusion API.
        in_outs: Dictionary containing "inputs" and "outputs" lists, and
            optionally "fn_name" and "assert_case" entries.
        generation: The generated code string.
        timeout: Timeout for each test case (compile and run share this timeout).
        memory_limit_mb: Memory limit for the sandbox in MB.
        language: The programming language of the code.
        concurrent_semaphore: Optional semaphore bounding concurrent sandbox calls.

    Returns:
        A tuple (results, metadata_list).
        results: A list containing the test result for each input/output pair
            (True/False/-1 api/sandbox err, -2 runtime err, -3 timeout, -4 compile err).
            Results are ordered corresponding to the inputs.
        metadata_list: A list containing metadata dictionaries for each test case,
            ordered corresponding to the inputs.
    """
    logger.info("Starting correctness check for generation.")
    if not in_outs or "inputs" not in in_outs or "outputs" not in in_outs:
        logger.warning("Invalid in_outs format provided.")
        return [-1], [{"error": "Invalid input/output data"}]

    inputs = in_outs["inputs"]
    expected_outputs = in_outs["outputs"]
    fn_name = in_outs.get("fn_name")
    num_cases = len(inputs)
    assert_cases = in_outs.get("assert_case", [""] * num_cases)  # Default to empty strings if not provided
    results = [None] * num_cases  # Initialize with placeholders
    metadata_list = [None] * num_cases  # Initialize with placeholders

    if num_cases == 0:
        logger.warning("Empty inputs provided.")
        return [], []
    if len(inputs) != len(expected_outputs):
        logger.warning(f"Mismatch between number of inputs ({len(inputs)}) and outputs ({len(expected_outputs)}).")
        # Return error based on the number of inputs provided
        return [-1] * num_cases, [{"error": "Input/output count mismatch", "case_index": i} for i in range(num_cases)]
    # assert_case entries, when provided, are appended to the generation per case
    if len(assert_cases) != num_cases:
        logger.warning(
            f"Mismatch between number of assert cases ({len(assert_cases)}) and inputs/outputs ({num_cases})."
        )
        return [-1] * num_cases, [{"error": "Input/output count mismatch", "case_index": i} for i in range(num_cases)]

    first_compile_error_index = -1
    # max_workers is limited by sandbox_fusion_max_concurrent from concurrent_semaphore.
    # FIX: os.cpu_count() may return None (per its documentation), which would
    # make `os.cpu_count() * 5` raise TypeError; fall back to 1 in that case.
    with concurrent.futures.ThreadPoolExecutor(max_workers=max(32, (os.cpu_count() or 1) * 5)) as executor:
        # Submit all tasks, passing the concurrent_semaphore to _process_single_case
        future_to_index = {
            executor.submit(
                _process_single_case,
                i,
                stdin_data,
                expected_outputs[i],
                sandbox_fusion_url,
                generation + "\n\n" + assert_cases[i],  # Append assert case to generation
                timeout,
                memory_limit_mb,
                language,
                concurrent_semaphore,
                fn_name,
            ): i
            for i, stdin_data in enumerate(inputs)
        }
        # Process results as they complete
        for future in concurrent.futures.as_completed(future_to_index):
            index = future_to_index[future]
            try:
                result_status, metadata = future.result()
                results[index] = result_status
                metadata_list[index] = metadata
                # Track the earliest compile error (-4); later cases are
                # redundant since they share the same code.
                if result_status == -4:
                    if first_compile_error_index == -1 or index < first_compile_error_index:
                        first_compile_error_index = index
                    # Optimization: could potentially cancel futures for index > first_compile_error_index.
                    # However, cancellation is not guaranteed. Post-processing is safer.
            except Exception as exc:
                logger.error(f"Test case {index} generated an exception: {exc}")
                traceback.print_exc()
                results[index] = -1  # Mark as API/internal error
                metadata_list[index] = {
                    "case_index": index,
                    "input": str(inputs[index]),
                    # `is not None` keeps falsy-but-valid expected outputs (0, "")
                    "expected_output": str(expected_outputs[index]) if expected_outputs[index] is not None else None,
                    "api_request_error": f"Internal execution error: {exc}",
                    "status": "internal_error",
                }

    # Post-processing: once any case hit a compile error, mark all later cases
    # as compile errors too (same code cannot compile for them either).
    if first_compile_error_index != -1:
        logger.warning(
            f"Compile error detected in case {first_compile_error_index}. Marking subsequent cases as compile errors."
        )
        for i in range(first_compile_error_index + 1, num_cases):
            if results[i] != -4:  # Avoid overwriting if it somehow already got -4
                results[i] = -4
                # Update or create metadata for skipped cases due to compile error
                if metadata_list[i] is None:  # If future failed before returning metadata
                    metadata_list[i] = {
                        "case_index": i,
                        "input": str(inputs[i]),
                        "expected_output": str(expected_outputs[i]) if expected_outputs[i] is not None else None,
                        "api_request_error": None,
                        "status": "compile_error_skipped",  # Indicate skipped due to prior compile error
                    }
                else:  # If future completed but result is overridden
                    metadata_list[i]["status"] = "compile_error_skipped"

    logger.info(f"Correctness check finished. Results: {results}")
    return results, metadata_list
| verl__utils__reward_score__sandbox_fusion__utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 Search-R1 Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/PeterGriffinJin/Search-R1/blob/main/verl/utils/reward_score/qa_em.py
import random
import re
import string
def normalize_answer(s):
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def em_check(prediction, golden_answers):
    """Exact match: return 1 if the normalized prediction equals any
    normalized golden answer, else 0."""
    candidates = [golden_answers] if isinstance(golden_answers, str) else golden_answers
    pred_norm = normalize_answer(prediction)
    return int(any(normalize_answer(candidate) == pred_norm for candidate in candidates))
def subem_check(prediction, golden_answers):
    """Substring exact match: return 1 if any normalized golden answer occurs
    as a substring of the normalized prediction, else 0."""
    candidates = [golden_answers] if isinstance(golden_answers, str) else golden_answers
    pred_norm = normalize_answer(prediction)
    return int(any(normalize_answer(candidate) in pred_norm for candidate in candidates))
def extract_solution(solution_str):
    """Extract the content of the last <answer>...</answer> tag.

    Returns the stripped inner text of the final tag, or None when the
    string contains no answer tags.
    """
    captured = re.findall(r"<answer>(.*?)</answer>", solution_str, re.DOTALL)
    # With no tag present there is nothing to score.
    if not captured:
        return None
    # Multiple tags: the last one is taken as the model's final answer.
    return captured[-1].strip()
def count_answer_tags(text):
    """Return (opening, closing) counts of answer tags in *text*."""
    return text.count("<answer>"), text.count("</answer>")
def compute_score(solution_str, ground_truth, method="strict", format_score=0.0, score=1.0):
    """The scoring function for exact match (EM).

    Args:
        solution_str: the solution text
        ground_truth: the ground truth dict with a "target" entry
        method: the method to extract the solution, choices are 'strict' and 'flexible'
        format_score: the score for the format
        score: the score for the correct answer
    """
    answer = extract_solution(solution_str=solution_str)
    open_count, close_count = count_answer_tags(solution_str)

    # Print roughly 1-in-64 samples for debugging.
    if random.randint(1, 64) == 1:
        print("--------------------------------")
        print(f"Golden answers: {ground_truth['target']}")
        if answer is not None:
            print(f"Extracted answer is not None: {answer}")
        else:
            print("Extracted answer: None!")
        print(f"Solution string: {solution_str}")

    if answer is None:
        return 0
    if not em_check(answer, ground_truth["target"]):
        return format_score
    # Penalize degenerate outputs that spam answer tags.
    if open_count > 10 or close_count > 10:  # prevent output a lot of </answer>
        return score / 4
    return score
def compute_score_subem(solution_str, ground_truth, method="strict", format_score=0.0, score=1.0):
    """The scoring function for substring exact match (EM).

    Args:
        solution_str: the solution text
        ground_truth: the ground truth dict with a "target" entry
        method: the method to extract the solution, choices are 'strict' and 'flexible'
        format_score: the score for the format
        score: the score for the correct answer
    """
    answer = extract_solution(solution_str=solution_str)

    # Print roughly 1-in-64 samples for debugging.
    if random.randint(1, 64) == 1:
        print("--------------------------------")
        print(f"Golden answers: {ground_truth['target']}")
        print(f"Extracted answer: {answer}")
        print(f"Solution string: {solution_str}")

    if answer is None:
        return 0
    return score if subem_check(answer, ground_truth["target"]) else format_score
| verl__utils__reward_score__search_r1_like_qa_em.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from verl.protocol import DataProto
class RolloutSkip:
    """
    RolloutSkip skips sequence generation during rollout by attempting to load previously dumped data.
    If no dumped data is found, it generates new sequences and saves them to disk.

    Args:
        config: The configuration object containing rollout settings.
        rollout_wg: The worker group that handles the rollout process.

    Note:
        When rollout.n or rollout.gen_batch_size differ from previous runs,
        new sequences will be generated and saved with different filenames.
    """

    # Prefix used in all console messages emitted by this class.
    print_mark = "[RolloutSkip()]"

    def __init__(self, config, rollout_wg):
        self.rollout_config = config.actor_rollout_ref.rollout
        self.exp_name = config.data.get("experiment_name", "")
        self.project_name = config.data.get("project_name", "")
        # n and gen batch size are part of the dump filename so runs with
        # different settings do not collide.
        self.n = int(self.rollout_config.get("n", 0))
        self.gbs = int(config.data.get("gen_batch_size", config.data.get("train_batch_size", 0)))
        self.dumped_dir = Path(self.rollout_config.get("skip_dump_dir", "/tmp/verl/rollout_dump"))
        self.dumped_dir.mkdir(parents=True, exist_ok=True)
        # Check if path is in Ray temporary directory (wiped between sessions)
        if str(self.dumped_dir.absolute()).startswith("/tmp/ray/session"):
            print(
                f"\033[33m{self.print_mark} Warning: \nUsing dump path ",
                f"'{self.dumped_dir.absolute()}' is not recommended ",
                "as it's located in /tmp/ray/session*\033[0m",
                flush=True,
            )
        print(
            f"{self.print_mark} Rollout skip dump path set to: ",
            f"{self.dumped_dir.absolute()}",
            flush=True,
        )
        self._rollout_wg = rollout_wg

    @property
    def curr_path_dump(self):
        # Dump file path is keyed by experiment/project name, gen batch size and n.
        return self.dumped_dir.joinpath(f"{self.exp_name}_{self.project_name}_GBS{self.gbs}__N{self.n}").absolute()

    def wrap_generate_sequences(self):
        """Monkey-patch the worker group's generate_sequences with the skipping wrapper."""
        try:
            self._rollout_wg.generate_sequences = wrap_generate_sequences(self, self._rollout_wg)
            print(
                f"{self.print_mark} Successfully patched `actor_rollout_wg.generate_sequences()`",
                flush=True,
            )
        except Exception as e:
            # FIX: the original passed an unsupported `flush` keyword to
            # RuntimeError (raising TypeError instead) and used a plain string
            # where an f-string was intended.
            raise RuntimeError(
                f"{self.print_mark} Failed to patch `actor_rollout_wg.generate_sequences()`"
            ) from e

    def try_load(self):
        """Return the previously dumped DataProto batch, or None if absent/unreadable."""
        if not self.curr_path_dump.exists():
            print(
                f"{self.print_mark} No data dump found at {self.curr_path_dump}.",
                "The trainer will generate and automatically dump the data for this first run.",
                flush=True,
            )
            return None
        try:
            # * Load
            ret_batch = DataProto.load_from_disk(self.curr_path_dump)
            print(
                f"\033[32m{self.print_mark} Successfully load pre-generated data from {self.curr_path_dump}\033[0m",
                flush=True,
            )
            return ret_batch
        except Exception as e:
            # Best-effort: a corrupt dump falls back to fresh generation.
            print(
                f"\033[31m{self.print_mark} Failed to load pre-generated data from {self.curr_path_dump}",
                f"Error: {str(e)}\033[0m",
                flush=True,
            )
            return None

    def dump(self, outputs: DataProto):
        """Persist generated sequences to disk; failures are logged, not raised."""
        try:
            outputs.save_to_disk(self.curr_path_dump)
            print(
                f"\033[32m{self.print_mark} Successfully dump data in {self.curr_path_dump}\033[0m",
                flush=True,
            )
        except Exception as e:
            print(
                f"\033[31m{self.print_mark} Failed to dump data in {self.curr_path_dump}: {e}\033[0m",
                flush=True,
            )
def wrap_generate_sequences(rolloutskip: RolloutSkip, rollout_wg):
    """Build a drop-in replacement for rollout_wg.generate_sequences that
    returns dumped data when available and dumps fresh generations otherwise."""
    original_generate = rollout_wg.generate_sequences

    def skipping_generate(batch, **kwargs):
        cached = rolloutskip.try_load()
        if cached is not None:
            return cached
        # No dump on disk: generate for real, then persist for future runs.
        fresh = original_generate(batch, **kwargs)
        rolloutskip.dump(fresh)
        return fresh

    return skipping_generate
| verl__utils__rollout_skip.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import functools
import inspect
import os
from contextvars import ContextVar
from typing import Optional
from pydantic import BaseModel
from verl.utils.ray_utils import get_event_loop
_trace_enabled: ContextVar[bool] = ContextVar("_trace_enabled", default=True)
class RolloutTraceConfig:
    """Singleton configuration for rollout tracing backends.

    Holds the chosen backend ('weave', 'mlflow', or None), its client handle,
    and trace options shared across the process.

    Args:
        backend (Optional[str]): Tracing backend to use ('weave', 'mlflow', or None).
        client (Optional[object]): Client instance for the selected backend.
        token2text (bool): Whether to convert tokens to text in traces. Defaults to False.
        project_name (str): Name of the project for tracing.
        experiment_name (str): Name of the experiment for tracing.
        max_samples_per_step_per_worker (Optional[int]): Maximum number of unique samples to trace
            per worker per step. If None, all samples are traced. If set, each worker will randomly
            select up to this many unique samples to trace (including all their rollouts for GRPO).
            Total traces = max_samples_per_step_per_worker * num_workers * n_rollouts_per_sample.
    """

    _instance: Optional["RolloutTraceConfig"] = None
    backend: Optional[str] = None
    client: Optional[object] = None
    token2text: bool = False
    _initialized: bool = False
    project_name: str = None
    experiment_name: str = None
    max_samples_per_step_per_worker: Optional[int] = None

    def __new__(cls, *args, **kwargs):
        # Lazily create the single shared instance.
        if cls._instance is None:
            instance = super().__new__(cls)
            instance._initialized = False
            cls._instance = instance
        return cls._instance

    @classmethod
    def get_instance(cls) -> "RolloutTraceConfig":
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    @classmethod
    def init(
        cls,
        project_name: str,
        experiment_name: str,
        backend: str,
        token2text: bool = False,
        max_samples_per_step_per_worker: Optional[int] = None,
    ):
        cfg = cls.get_instance()
        # Idempotent: only the first init() call takes effect.
        if cfg._initialized:
            return
        cfg.backend = backend
        cfg.token2text = token2text
        cfg.project_name = project_name
        cfg.experiment_name = experiment_name
        cfg.max_samples_per_step_per_worker = max_samples_per_step_per_worker
        if backend == "weave":
            import weave

            cfg.client = weave.init(project_name)
        elif backend == "mlflow":
            import mlflow

            mlflow.config.enable_async_logging()
            cfg.client = mlflow
            MLFLOW_TRACKING_URI = os.environ.get("MLFLOW_TRACKING_URI", "sqlite:////tmp/mlruns.db")
            mlflow.set_tracking_uri(MLFLOW_TRACKING_URI)
            mlflow.set_experiment(project_name)
        else:
            # Unknown/absent backend: tracing is effectively disabled.
            cfg.client = None
        cfg._initialized = True

    @classmethod
    def get_backend(cls) -> Optional[str]:
        return cls.get_instance().backend

    @classmethod
    def get_client(cls) -> Optional[object]:
        return cls.get_instance().client

    @classmethod
    def enable_token2text(cls) -> Optional[bool]:
        return cls.get_instance().token2text

    @classmethod
    def reset(cls):
        # Drop the singleton so the next init() starts fresh (tests/reconfig).
        cls._instance = None
@contextlib.contextmanager
def rollout_trace_attr(
    sample_index=None, step=None, rollout_n=None, name="rollout_trace", validate=False, trace: bool = True
):
    """A context manager to add attributes to a trace for the configured backend.

    Args:
        sample_index: Sample index for the trace.
        step: Training step number.
        rollout_n: Rollout number (for GRPO with multiple rollouts per sample).
        name: Name for the trace span (used by mlflow backend).
        validate: Whether this is a validation run.
        trace: If False, disables tracing for the duration of the context.
    """
    backend = RolloutTraceConfig.get_backend()

    # Tracing explicitly disabled for this context: flip the context-var so
    # any nested traced calls become no-ops, and restore it on exit.
    if backend is not None and not trace:
        token = _trace_enabled.set(False)
        try:
            yield
        finally:
            _trace_enabled.reset(token)
        return

    # Collect the trace attributes (only when a backend is configured).
    attributes = {}
    if backend:
        for key, value in (("sample_index", sample_index), ("step", step), ("rollout_n", rollout_n)):
            if value is not None:
                attributes[key] = value
        attributes["validate"] = validate
        attributes["experiment_name"] = RolloutTraceConfig.get_instance().experiment_name

    # Nothing to attach or no backend: plain passthrough.
    if backend is None or not attributes:
        yield
        return

    if backend == "weave":
        import weave

        with weave.attributes(attributes):
            yield
    elif backend == "mlflow":
        import mlflow

        with mlflow.start_span(name=name) as span:
            for key, value in attributes.items():
                mlflow.set_trace_tag(span.trace_id, str(key), str(value))
            yield
    else:
        yield
def rollout_trace_op(func):
    """Decorator that records a trace span for *func* on the configured backend.

    Wraps both async and sync callables; picks the matching wrapper at
    decoration time. The wrapped function's return value is passed through
    unchanged; exceptions are recorded on the trace (weave) and re-raised.
    """

    @functools.wraps(func)
    async def async_wrapper(self, *args, **kwargs):
        # Tracing can be disabled per-context via rollout_trace_attr(trace=False).
        if not _trace_enabled.get():
            return await func(self, *args, **kwargs)
        backend = RolloutTraceConfig.get_backend()
        enable_token2text = RolloutTraceConfig.enable_token2text()
        if backend is None:
            return await func(self, *args, **kwargs)
        # Capture the call's bound arguments (minus self) as trace inputs.
        sig = inspect.signature(func)
        bound_args = sig.bind(self, *args, **kwargs)
        bound_args.apply_defaults()
        inputs = dict(bound_args.arguments)
        del inputs["self"]

        async def add_token2text(self, result):
            # Attach decoded prompt/response text to the traced output when the
            # result exposes token ids and `self` carries a tokenizer with decode().
            if hasattr(result, "prompt_ids") and hasattr(self, "tokenizer") and hasattr(self.tokenizer, "decode"):
                # Use model_dump() for Pydantic models to get a proper copy,
                # otherwise vars() returns a reference to internal __dict__ which
                # can cause serialization issues with MLflow
                if isinstance(result, BaseModel):
                    _result = result.model_dump()
                else:
                    _result = dict(vars(result))
                loop = get_event_loop()
                # Decoding runs in an executor to avoid blocking the event loop.
                if hasattr(result, "prompt_ids"):
                    prompt_text = await loop.run_in_executor(None, self.tokenizer.decode, result.prompt_ids)
                    _result["prompt_text"] = prompt_text
                if hasattr(result, "response_ids"):
                    response_text = await loop.run_in_executor(None, self.tokenizer.decode, result.response_ids)
                    _result["response_text"] = response_text
                return _result
            return result

        if backend == "weave":
            tracer = RolloutTraceConfig.get_client()
            from weave.trace.context import call_context

            # Inherit attributes set by an enclosing rollout_trace_attr context.
            cur_attributes = {**call_context.call_attributes.get()}

            call = tracer.create_call(op=func.__qualname__, inputs=inputs, attributes=cur_attributes)
            try:
                result = await func(self, *args, **kwargs)
                if enable_token2text:
                    _result = await add_token2text(self, result)
                    tracer.finish_call(call, output=_result)
                else:
                    tracer.finish_call(call, output=result)
                return result
            except Exception as e:
                # Record the failure on the trace, then propagate.
                tracer.finish_call(call, exception=e)
                raise e
        elif backend == "mlflow":
            import mlflow

            with mlflow.start_span(name=func.__qualname__) as span:
                span.set_inputs(inputs)
                result = await func(self, *args, **kwargs)
                if enable_token2text:
                    _result = await add_token2text(self, result)
                    span.set_outputs(_result)
                else:
                    span.set_outputs(result)
                return result
        else:
            # Unknown backend: execute untraced.
            return await func(self, *args, **kwargs)

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        # Sync twin of async_wrapper (no token2text support here).
        if not _trace_enabled.get():
            return func(self, *args, **kwargs)
        backend = RolloutTraceConfig.get_backend()
        if backend is None:
            return func(self, *args, **kwargs)
        sig = inspect.signature(func)
        bound_args = sig.bind(self, *args, **kwargs)
        bound_args.apply_defaults()
        inputs = dict(bound_args.arguments)
        del inputs["self"]

        if backend == "weave":
            tracer = RolloutTraceConfig.get_client()
            from weave.trace.context import call_context

            cur_attributes = {**call_context.call_attributes.get()}

            call = tracer.create_call(op=func.__qualname__, inputs=inputs, attributes=cur_attributes)
            try:
                result = func(self, *args, **kwargs)
                tracer.finish_call(call, output=result)
                return result
            except Exception as e:
                tracer.finish_call(call, exception=e)
                raise e
        elif backend == "mlflow":
            import mlflow

            return mlflow.trace(func)(self, *args, **kwargs)
        else:
            return func(self, *args, **kwargs)

    return async_wrapper if inspect.iscoroutinefunction(func) else wrapper
| verl__utils__rollout_trace.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import heapq
from itertools import chain
import torch
from torch import distributed as dist
from verl.protocol import DataProto
from verl.utils import tensordict_utils as tu
from verl.utils.device import get_device_name
def calculate_workload(seqlen_list: torch.Tensor) -> torch.Tensor:
    """Estimate relative per-sequence compute cost for dense transformer blocks.

    Based on ``FLOPs ≈ 12 * h² * s + 2 * h * s²`` with constants calibrated for
    hidden_size=4096 (a 7B model), which reduces to ``24576 * s + s²``.

    Args:
        seqlen_list: Tensor of sequence lengths.

    Returns:
        torch.Tensor: Relative workload per sequence (proportional to FLOPs,
        not an absolute count); intended for balancing computation across
        data-parallel ranks.
    """
    linear_term = seqlen_list * 24576
    quadratic_term = seqlen_list * seqlen_list
    return linear_term + quadratic_term
def karmarkar_karp(seqlen_list: list[int], k_partitions: int, equal_size: bool) -> list[list[int]]:
    """Partition items into k groups using the Karmarkar-Karp differencing method.

    Implements the Largest Differencing Method (LDM) algorithm for balanced
    multi-way number partitioning. This heuristic produces near-optimal partitions
    by iteratively combining the sets with the largest difference.

    Args:
        seqlen_list: Values to partition (typically sequence lengths or workloads).
        k_partitions: Number of partitions to create.
        equal_size: If True, each partition will have exactly len(seqlen_list) / k_partitions
            items. If False, partitions may have different sizes.

    Returns:
        list[list[int]]: List of k partitions, each containing indices into seqlen_list.

    See Also:
        https://en.wikipedia.org/wiki/Largest_differencing_method

    Note:
        When equal_size=True, len(seqlen_list) must be divisible by k_partitions.
    """

    # see: https://en.wikipedia.org/wiki/Largest_differencing_method
    class Set:
        # One partition-in-progress: the (index, value) pairs assigned to it
        # and their running sum.
        def __init__(self) -> None:
            self.sum = 0
            self.items = []

        def add(self, idx: int, val: int):
            self.items.append((idx, val))
            self.sum += val

        def merge(self, other):
            # Absorb every item of `other` into this set.
            for idx, val in other.items:
                self.items.append((idx, val))
                self.sum += val

        def __lt__(self, other):
            # Deterministic total order: compare by sum, then item count,
            # then the items themselves, so sorting is reproducible.
            if self.sum != other.sum:
                return self.sum < other.sum
            if len(self.items) != len(other.items):
                return len(self.items) < len(other.items)
            return self.items < other.items

    class State:
        # A candidate k-way partition of the items seen so far.
        def __init__(self, items: list[tuple[int, int]], k: int) -> None:
            self.k = k
            # sets should always be decreasing order
            self.sets = [Set() for _ in range(k)]
            assert len(items) in [1, k], f"{len(items)} not in [1, {k}]"
            for i, (idx, seqlen) in enumerate(items):
                self.sets[i].add(idx=idx, val=seqlen)
            self.sets = sorted(self.sets, reverse=True)

        def get_partitions(self):
            # Extract just the item indices from each set.
            partitions = []
            for i in range(len(self.sets)):
                cur_partition = []
                for idx, _ in self.sets[i].items:
                    cur_partition.append(idx)
                partitions.append(cur_partition)
            return partitions

        def merge(self, other):
            # LDM merge: pair this state's largest set with the other state's
            # smallest set (and so on) to cancel out their differences.
            for i in range(self.k):
                self.sets[i].merge(other.sets[self.k - 1 - i])
            self.sets = sorted(self.sets, reverse=True)

        @property
        def spread(self) -> int:
            # Difference between the largest and smallest set sums.
            return self.sets[0].sum - self.sets[-1].sum

        def __lt__(self, other):
            # least heap, let the state with largest spread to be popped first,
            # if the spread is the same, let the state who has the largest set
            # to be popped first.
            if self.spread != other.spread:
                return self.spread > other.spread
            return self.sets[0] > other.sets[0]

        def __repr__(self) -> str:
            repr_str = "["
            for i in range(self.k):
                if i > 0:
                    repr_str += ","
                repr_str += "{"
                for j, (_, seqlen) in enumerate(self.sets[i].items):
                    if j > 0:
                        repr_str += ","
                    repr_str += str(seqlen)
                repr_str += "}"
            repr_str += "]"
            return repr_str

    # Sort values ascending; ties broken by original index for determinism.
    sorted_seqlen_list = sorted([(seqlen, i) for i, seqlen in enumerate(seqlen_list)])
    states_pq = []
    if equal_size:
        assert len(seqlen_list) % k_partitions == 0, f"{len(seqlen_list)} % {k_partitions} != 0"
        # Seed each state with k consecutive items (one per set) so merging
        # always preserves equal per-partition item counts.
        for offset in range(0, len(sorted_seqlen_list), k_partitions):
            items = []
            for i in range(k_partitions):
                seqlen, idx = sorted_seqlen_list[offset + i]
                items.append((idx, seqlen))
            heapq.heappush(states_pq, State(items=items, k=k_partitions))
    else:
        # Each item starts as its own single-set state.
        for seqlen, idx in sorted_seqlen_list:
            heapq.heappush(states_pq, State(items=[(idx, seqlen)], k=k_partitions))
    # Repeatedly merge the two states with the largest spreads until one remains.
    while len(states_pq) > 1:
        state0 = heapq.heappop(states_pq)
        state1 = heapq.heappop(states_pq)
        # merge states
        state0.merge(state1)
        heapq.heappush(states_pq, state0)
    final_state = states_pq[0]
    partitions = final_state.get_partitions()
    if equal_size:
        for i, partition in enumerate(partitions):
            assert len(partition) * k_partitions == len(seqlen_list), (
                f"{len(partition)} * {k_partitions} != {len(seqlen_list)}"
            )
    return partitions
def greedy_partition(seqlen_list: list[int], k_partitions: int, equal_size: bool) -> list[list[int]]:
    """Partition items into k groups by greedy least-loaded assignment.

    Walks the items in their original order and places each one into the
    partition whose current sum is smallest (first such partition on ties).
    Simpler but typically less optimal than Karmarkar-Karp.

    Args:
        seqlen_list: Values to partition (typically sequence lengths or workloads).
        k_partitions: Number of partitions to create.
        equal_size: If True, a large bias (sum of all values + 1) is added to
            every item so item count dominates, forcing equal partition sizes.
            Requires len(seqlen_list) to be divisible by k_partitions.

    Returns:
        list[list[int]]: List of k partitions, each containing indices into seqlen_list.
    """
    # With the bias, any partition holding fewer items always has a smaller
    # sum, so items are spread evenly before values are considered.
    bias = sum(seqlen_list) + 1 if equal_size else 0
    biased_items = [(value + bias, idx) for idx, value in enumerate(seqlen_list)]
    partitions = [[] for _ in range(k_partitions)]
    partition_sums = [0] * k_partitions
    for value, idx in biased_items:
        # min() returns the first index achieving the minimum, matching a
        # strict "<" scan's tie-breaking.
        target = min(range(k_partitions), key=partition_sums.__getitem__)
        partitions[target].append(idx)
        partition_sums[target] += value
    if equal_size:
        for i, partition in enumerate(partitions):
            assert len(partition) * k_partitions == len(seqlen_list), (
                f"{len(partition)} * {k_partitions} != {len(seqlen_list)}"
            )
    return partitions
def get_seqlen_balanced_partitions(seqlen_list: list[int], k_partitions: int, equal_size: bool):
    """
    Compute k partitions of indices whose per-partition sequence-length sums
    are balanced, using the Karmarkar-Karp differencing method.

    Useful for balancing workload across devices or batches when sequence
    lengths vary.

    Args:
        seqlen_list (List[int]): Sequence length of each item.
        k_partitions (int): Desired number of partitions.
        equal_size (bool): If True, every partition holds the same number of
            items (len(seqlen_list) must be divisible by k_partitions). If
            False, only the length sums are balanced.

    Returns:
        List[List[int]]: k_partitions lists of original indices; each inner
        list is sorted ascending.

    Raises:
        AssertionError: If len(seqlen_list) < k_partitions, if equal_size is
            True and the length is not divisible by k_partitions, or if any
            resulting partition is empty.
    """
    assert len(seqlen_list) >= k_partitions, f"number of items:[{len(seqlen_list)}] < k_partitions:[{k_partitions}]"

    partitions = karmarkar_karp(seqlen_list=seqlen_list, k_partitions=k_partitions, equal_size=equal_size)

    # Validate the partitioning and sort indices within each partition.
    assert len(partitions) == k_partitions, f"{len(partitions)} != {k_partitions}"
    seen_idx = set()
    sorted_partitions = []
    for i, partition in enumerate(partitions):
        assert len(partition) > 0, f"the {i}-th partition is empty"
        seen_idx.update(partition)
        sorted_partitions.append(sorted(partition))
    # Every index must be assigned exactly once.
    assert seen_idx == set(range(len(seqlen_list)))
    return sorted_partitions
def log_seqlen_unbalance(seqlen_list: list[int], partitions: list[list[int]], prefix):
    """
    Compute imbalance metrics for sequence lengths before and after partitioning.

    The "naive" metrics chunk seqlen_list contiguously into len(partitions)
    batches; the "balanced" metrics use the provided partitions.

    Args:
        seqlen_list (List[int]): Sequence length of each item.
        partitions (List[List[int]]): Index lists, one per partition.
        prefix (str): Prefix for every metric key.

    Returns:
        dict: min/max/minmax_diff for naive chunking, balanced_min/balanced_max
        for the partitions, and the mean per-partition sum.
    """
    k_partition = len(partitions)
    # assert len(seqlen_list) % k_partition == 0
    batch_size = len(seqlen_list) // k_partition

    # Sums of contiguous chunks — what each batch would be without balancing.
    naive_sums = [
        sum(seqlen_list[offset : offset + batch_size]) for offset in range(0, len(seqlen_list), batch_size)
    ]
    # Sums of the balanced partitions.
    balanced_sums = [sum(seqlen_list[i] for i in partition) for partition in partitions]

    return {
        f"{prefix}/min": min(naive_sums),
        f"{prefix}/max": max(naive_sums),
        f"{prefix}/minmax_diff": max(naive_sums) - min(naive_sums),
        f"{prefix}/balanced_min": min(balanced_sums),
        f"{prefix}/balanced_max": max(balanced_sums),
        f"{prefix}/mean": sum(naive_sums) / len(partitions),
    }
def ceildiv(a: int, b: int) -> int:
    """Return the ceiling of a / b using pure integer arithmetic.

    Relies on the identity ceil(a / b) == -floor(-a / b), which holds for any
    non-zero b (positive or negative) under Python's floor division.

    Args:
        a: Dividend.
        b: Divisor; must be non-zero.

    Returns:
        int: Smallest integer greater than or equal to a / b.

    Example:
        >>> ceildiv(7, 3)
        3
        >>> ceildiv(6, 3)
        2
    """
    return -(-a // b)
def roundup_divisible(a: int, b: int) -> int:
    """Round a up to the nearest multiple of b.

    Args:
        a: Value to round up.
        b: Step to round to (must be positive).

    Returns:
        int: Smallest multiple of b that is >= a.

    Example:
        >>> roundup_divisible(7, 4)
        8
        >>> roundup_divisible(8, 4)
        8
    """
    remainder = a % b
    return a if remainder == 0 else a + b - remainder
def rearrange_micro_batches(
    batch,
    max_token_len,
    dp_group=None,
    num_batches_divided_by=None,
    same_micro_num_in_dp=True,
    min_num_micro_batch=None,
    use_dynamic_bsz_balance=True,
):
    """
    Split a batch into micro-batches by total token count, with optional DP sync and padding.

    Args:
        batch (TensorDict): must include "input_ids" and, for dense layouts,
            "attention_mask" (B*S); other fields are sliced similarly.
        max_token_len (int): max sum of attention_mask per micro-batch.
        dp_group (optional): torch.distributed group for data-parallel sync.
        num_batches_divided_by (optional): virtual pipeline parallel size, for megatron.
        same_micro_num_in_dp (bool): if True and dp_group set, pad all ranks to the same count.
        min_num_micro_batch (int, optional): force at least this many splits (pads empty ones).
        use_dynamic_bsz_balance (bool, optional): balance the computational workload between micro-batches.

    Returns:
        List[TensorDict]: the micro-batches.
        List[List[int]]: index lists mapping each micro-batch back to original positions.
    """
    # this is per local micro_bsz
    input_ids = batch["input_ids"]
    if input_ids.is_nested:
        # Jagged layout: per-row lengths come directly from the nested offsets.
        seq_len_effective: torch.Tensor = input_ids.offsets().diff()
        max_seq_len = max(seq_len_effective)
    else:
        # Dense layout: count attended tokens per row.
        max_seq_len = batch["attention_mask"].shape[-1]
        seq_len_effective: torch.Tensor = batch["attention_mask"].sum(dim=1)
    # A single sequence must fit into one micro-batch.
    assert max_token_len >= max_seq_len, (
        f"max_token_len must be greater than the sequence length. Got {max_token_len=} and {max_seq_len=}"
    )
    total_seqlen = seq_len_effective.sum().item()
    # NOTE: num_microbatches <= batch_size, so take the min of the two.
    num_micro_batches = min(len(seq_len_effective), ceildiv(total_seqlen, max_token_len))
    if min_num_micro_batch is not None:
        # used to support pp
        num_micro_batches = max(min_num_micro_batch, num_micro_batches)
    if dist.is_initialized() and same_micro_num_in_dp:
        # Agree on the maximum micro-batch count across the DP group so every
        # rank executes the same number of forward/backward steps.
        num_micro_batches = torch.tensor([num_micro_batches], device=get_device_name())
        dist.all_reduce(num_micro_batches, op=dist.ReduceOp.MAX, group=dp_group)
        num_micro_batches = num_micro_batches.cpu().item()
    if num_batches_divided_by is not None:
        # e.g. Megatron virtual pipeline parallelism requires the count to be
        # a multiple of the VPP size.
        num_micro_batches = roundup_divisible(num_micro_batches, num_batches_divided_by)
    assert num_micro_batches <= len(seq_len_effective)
    # upcast to int64 to avoid potential overflow in `calculate_workload` computation.
    seq_len_effective = seq_len_effective.long()
    # seq_len_effective may be a GPU tensor; transfer once and work with a
    # host-side list from here on.
    workloads = calculate_workload(seq_len_effective).cpu().tolist()
    micro_bsz_idx = get_seqlen_balanced_partitions(workloads, num_micro_batches, equal_size=False)
    if use_dynamic_bsz_balance:
        # Use the sum of squared sequence lengths to approximate attention computation workload
        micro_bsz_idx.sort(
            key=lambda partition: (
                sum(workloads[idx] for idx in partition),
                partition[0] if partition else 0,
            ),
            reverse=True,
        )
        # Place smaller micro-batches at both ends to reduce the bubbles exposed during the warm-up and cool-down.
        micro_bsz_idx = micro_bsz_idx[::2][::-1] + micro_bsz_idx[1::2]
    micro_batches = []
    for partition in micro_bsz_idx:
        curr_micro_batch = tu.index_select_tensor_dict(batch, partition)
        micro_batches.append(curr_micro_batch)
    return micro_batches, micro_bsz_idx
def get_reverse_idx(idx_map):
    """
    Build the inverse of an index mapping (permutation).

    Args:
        idx_map (Sequence[int]): Permutation of 0..len(idx_map)-1 where
            idx_map[i] = j.

    Returns:
        List[int]: Inverse mapping list such that output[j] = i for each i.
    """
    # Preallocate a fresh list instead of deep-copying the input: avoids an
    # unnecessary copy of the input data and also accepts immutable sequences
    # (e.g. tuples), which the previous deepcopy-then-assign approach could not.
    reverse_idx_map = [0] * len(idx_map)
    for i, idx in enumerate(idx_map):
        reverse_idx_map[idx] = i
    return reverse_idx_map
def prepare_dynamic_batch(
    data: DataProto,
    max_token_len: int,
    dp_group=None,
    num_batches_divided_by=None,
    same_micro_num_in_dp=True,
    min_num_micro_batch=None,
    use_dynamic_bsz_balance=True,
) -> tuple[list[DataProto], list[list[int]]]:
    """
    Split a DataProto into token-balanced micro-batches for dynamic batching.

    Tensor fields are partitioned by rearrange_micro_batches; non-tensor fields
    are sliced with the same index lists, and meta_info is deep-copied into
    every micro-batch.

    Args:
        data (DataProto): The input data.
        max_token_len (int): The maximum token length for dynamic batching.
        dp_group (optional): torch.distributed group for data-parallel sync.
        num_batches_divided_by (optional): virtual pipeline parallel size, for megatron.
        same_micro_num_in_dp (bool): if True and dp_group set, pad all ranks to the same count.
        min_num_micro_batch (int, optional): force at least this many splits.
        use_dynamic_bsz_balance (bool, optional): balance workload between micro-batches.

    Returns:
        Tuple[List[DataProto], List[List[int]]]: the micro-batches and the
        index lists mapping each one back to original positions.
    """
    micro_tds, batch_idx_list = rearrange_micro_batches(
        data.batch,
        max_token_len=max_token_len,
        dp_group=dp_group,
        num_batches_divided_by=num_batches_divided_by,
        same_micro_num_in_dp=same_micro_num_in_dp,
        min_num_micro_batch=min_num_micro_batch,
        use_dynamic_bsz_balance=use_dynamic_bsz_balance,
    )
    micro_batches = []
    for micro_td, batch_idx in zip(micro_tds, batch_idx_list):
        # Slice the non-tensor fields with the same indices as the tensors.
        sliced_non_tensors = {key: value[batch_idx] for key, value in data.non_tensor_batch.items()}
        micro_batches.append(
            DataProto.from_dict(
                dict(micro_td),
                sliced_non_tensors,
                meta_info=copy.deepcopy(data.meta_info),
            )
        )
    return micro_batches, batch_idx_list
def restore_dynamic_batch(data: torch.Tensor, batch_idx_list: list[list[int]]) -> torch.Tensor:
    """
    Undo the row reordering introduced by dynamic batching.

    Args:
        data (torch.Tensor): Rows ordered micro-batch by micro-batch.
        batch_idx_list (List[List[int]]): The index lists produced when the
            batch was split.

    Returns:
        torch.Tensor: The rows restored to their original order.
    """
    flat_indices = list(chain.from_iterable(batch_idx_list))
    assert len(flat_indices) == data.shape[0], f"{len(flat_indices)} vs. {data.shape[0]}"
    revert_indices = torch.tensor(get_reverse_idx(flat_indices), dtype=torch.long)
    if not data.is_nested:
        return data[revert_indices]
    # Nested tensors do not support fancy indexing; reorder row by row.
    rows = data.unbind()
    reordered = [rows[i] for i in revert_indices]
    return torch.nested.as_nested_tensor(reordered, layout=torch.jagged)
def get_group_balanced_partitions(
    seqlen_list: list[int],
    uid_list: list,
    k_partitions: int,
) -> list[list[int]]:
    """
    Partition samples into k groups while keeping samples with the same uid together.

    Args:
        seqlen_list: List of sequence lengths for each sample.
        uid_list: List of uids identifying which samples share the same prefix.
            Samples with the same uid are assumed contiguous and will be kept
            together.
        k_partitions: Number of partitions (typically world_size).

    Returns:
        List of k lists, each containing sorted sample indices assigned to that
        partition. Samples with the same uid are guaranteed to be in the same
        partition.

    Raises:
        AssertionError: If the input lists differ in length, or if there are
            fewer uid groups than partitions.
    """
    assert len(seqlen_list) == len(uid_list), "seqlen_list and uid_list must have same length"
    # Build groups: each group contains indices of samples with the same uid.
    # Assumes samples with same uid are contiguous.
    groups = []  # List of (group_indices, group_total_seqlen)
    current_uid = None
    current_indices = []
    current_seqlen = 0
    for i, (seqlen, uid) in enumerate(zip(seqlen_list, uid_list, strict=False)):
        if uid != current_uid:
            if current_indices:
                groups.append((current_indices, current_seqlen))
            current_uid = uid
            current_indices = [i]
            current_seqlen = seqlen
        else:
            current_indices.append(i)
            current_seqlen += seqlen
    # Don't forget the last group
    if current_indices:
        groups.append((current_indices, current_seqlen))
    num_groups = len(groups)
    assert num_groups >= k_partitions, (
        f"Number of uid groups ({num_groups}) must be >= k_partitions ({k_partitions}). "
        f"Consider reducing world_size or increasing batch_size."
    )
    # Calculate workload for each group (as integers for partitioning).
    # One tensor per group instead of one per sample: calculate_workload is
    # elementwise, so summing its vectorized output is equivalent to summing
    # per-sample calls while avoiding len(seqlen_list) tiny tensor allocations.
    group_workloads = []
    for indices, _total_seqlen in groups:
        group_seqlens = torch.tensor([seqlen_list[i] for i in indices], dtype=torch.int64)
        group_workloads.append(int(calculate_workload(group_seqlens).sum().item()))
    # Use Karmarkar-Karp to partition groups.
    # equal_size=True ensures each partition gets the same number of groups,
    # which is required when each group has the same number of samples (rollout.n)
    group_partitions = get_seqlen_balanced_partitions(
        seqlen_list=group_workloads,
        k_partitions=k_partitions,
        equal_size=True,
    )
    # Convert group partitions to sample partitions
    sample_partitions = []
    for group_partition in group_partitions:
        sample_indices = []
        for group_idx in group_partition:
            sample_indices.extend(groups[group_idx][0])
        sample_partitions.append(sorted(sample_indices))
    return sample_partitions
| verl__utils__seqlen_balancing.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import torch
from verl.utils.kernel.fp8_kernel import scaled_fp8_blockwise
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "INFO"))
def should_quantize_param(param_name: str) -> bool:
    """Decide whether a parameter should be FP8-quantized based on its name.

    A parameter qualifies only when it:
    - ends with ``.weight`` (biases are never quantized),
    - does not belong to an excluded layer type (embeddings, norms, lm_head,
      MoE router), and
    - matches a known Linear-layer projection pattern.

    Args:
        param_name: Fully qualified parameter name.

    Returns:
        bool: True if the parameter should be quantized to FP8.
    """
    # Only weight tensors are candidates.
    if not param_name.endswith(".weight"):
        return False

    lowered = param_name.lower()

    # Layer types that must never be quantized.
    excluded = (
        "embed_tokens",  # Embedding layer
        "lm_head",  # Output layer
        "layernorm",  # LayerNorm
        "norm",  # Various Norm layers
        "ln_",  # LayerNorm variants
        "embeddings",  # Embeddings
        "mlp.gate.weight",  # MoE router
    )
    if any(pattern in lowered for pattern in excluded):
        return False

    # Linear-layer projections that should be quantized.
    included = (
        "q_proj",  # Query projection
        "k_proj",  # Key projection
        "v_proj",  # Value projection
        "o_proj",  # Output projection
        "gate_proj",  # Gate projection (for MLP)
        "up_proj",  # Up projection (for MLP)
        "down_proj",  # Down projection (for MLP)
        "fc1",  # Fully connected 1
        "fc2",  # Fully connected 2
        "mlp",  # MLP layers
    )
    if any(pattern in lowered for pattern in included):
        logger.debug(f"Will quantize FP8: {param_name}")
        return True

    # Anything unrecognized stays unquantized.
    logger.debug(f"Skip quantization: {param_name}")
    return False
def quant_weights_by_name(weights, quant_config, dtype=torch.bfloat16):
    """FP8 quantization based on parameter name using a memory-efficient generator.

    Streams through ``weights`` and yields each parameter either unchanged
    (when ``should_quantize_param`` rejects it) or as two entries: the
    blockwise-quantized FP8 weight under the original name, followed by its
    inverse scale under ``<name>_scale_inv``.

    Args:
        weights: Generator or iterable of (name, tensor) pairs.
        quant_config: Quantization configuration; must expose
            ``weight_block_size`` as a dict key or an attribute.
        dtype: Data type the weight is cast to before quantization.

    Yields:
        Tuples of (name, tensor) for each weight and its scale.

    Raises:
        ValueError: If ``weight_block_size`` cannot be found in quant_config.
    """
    # quant_config may be a plain dict or an object with attributes.
    if isinstance(quant_config, dict):
        weight_block_size = quant_config.get("weight_block_size")
    else:
        weight_block_size = getattr(quant_config, "weight_block_size", None)
    if weight_block_size is None:
        raise ValueError("weight_block_size not found in quant_config")
    for k, v in weights:
        # Check if quantization is needed
        if not should_quantize_param(k):
            yield (k, v)
            continue
        # Quantize to FP8
        try:
            if torch.distributed.get_rank() == 0:
                # Log only on rank 0 to avoid duplicate messages across ranks.
                logger.debug(f"Quantizing to FP8 blockwise: {k}")
            param_lp, param_scale = scaled_fp8_blockwise(
                v.to(dtype),
                weight_block_size=weight_block_size,
            )
            param_scale = param_scale.squeeze(-1)
            # Yield the quantized weight and scale
            yield (k, param_lp)
            yield (k + "_scale_inv", param_scale)
            # Explicitly delete to help GC
            del param_lp, param_scale
        except Exception as e:
            # Deliberate best-effort: on any quantization failure, fall back
            # to the original weight instead of aborting the whole stream.
            logger.error(f"Failed to quantize {k}: {e}")
            # If quantization fails, use original weights
            yield (k, v)
| verl__utils__sglang__sglang_fp8_utils.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Any, Iterable
import torch
from tensordict import TensorDict
from tensordict.tensorclass import NonTensorData, NonTensorStack
def assign_non_tensor_data(tensor_dict: TensorDict, key, val):
    """Store a single non-tensor value in a TensorDict.

    The value is wrapped in NonTensorData so it can live alongside tensors.
    Intended for scalar metadata or other simple non-tensor values.

    Args:
        tensor_dict: Target TensorDict.
        key: Key under which the value is stored.
        val: Any non-tensor value (string, int, dict, ...).

    Raises:
        AssertionError: If tensor_dict is not a TensorDict.

    Example:
        >>> td = TensorDict({"obs": torch.randn(3, 4)}, batch_size=[3])
        >>> assign_non_tensor_data(td, "experiment_name", "run_001")
    """
    assert isinstance(tensor_dict, TensorDict), "input dict must be a TensorDict"
    wrapped = NonTensorData(val)
    tensor_dict[key] = wrapped
def assign_non_tensor_stack(tensor_dict: TensorDict, key, val: list):
    """Store a list with potentially nested structure in a TensorDict.

    Handles complex nested payloads such as:
    - lists of lists: [[], [0.5, 0.8], [0.9]]
    - lists of dicts: [{"acc": 1.0}, {"acc": 0.0}]
    - lists of lists of dicts: [[{"content": "...", "role": "user"}]]

    Each element is wrapped in NonTensorData and the whole list is stored as a
    NonTensorStack so TensorDict treats it as per-row metadata.

    Args:
        tensor_dict: Target TensorDict.
        key: Key under which the list is stored.
        val: List containing potentially nested structures.

    Example:
        >>> td = TensorDict({}, batch_size=[])
        >>> assign_non_tensor_stack(td, "turn_scores", [[], [0.5, 0.8], [0.9]])
    """
    assert isinstance(tensor_dict, TensorDict), "input dict must be a TensorDict"
    # TODO(petersh6): can convert back to val directly if we are not accessing
    # .data from the NonTensorStack.
    wrapped_items = [NonTensorData(item) for item in val]
    tensor_dict[key] = NonTensorStack.from_list(wrapped_items)
def assign_non_tensor(tensor_dict: TensorDict, **kwargs):
    """Store several non-tensor values in a TensorDict at once.

    Chooses the storage form per value: already-wrapped NonTensorData /
    NonTensorStack values are stored as-is, lists go through
    assign_non_tensor_stack (handles nested structures), and everything else
    through assign_non_tensor_data.

    Args:
        tensor_dict: Target TensorDict.
        **kwargs: Key-value pairs to store.

    Returns:
        The same TensorDict, for chaining.

    Example:
        >>> td = TensorDict({"obs": torch.randn(3, 4)}, batch_size=[3])
        >>> assign_non_tensor(
        ...     tensor_dict=td,
        ...     metadata="experiment_1",            # simple value
        ...     turn_scores=[[], [0.5, 0.8], [0.9]]  # nested list
        ... )
    """
    assert isinstance(tensor_dict, TensorDict), "input dict must be a TensorDict"
    for key, val in kwargs.items():
        if isinstance(val, (NonTensorData, NonTensorStack)):
            # Pre-wrapped values are stored directly.
            tensor_dict[key] = val
        elif isinstance(val, list):
            assign_non_tensor_stack(tensor_dict=tensor_dict, key=key, val=val)
        else:
            assign_non_tensor_data(tensor_dict=tensor_dict, key=key, val=val)
    return tensor_dict
def unwrap_non_tensor_data(data):
    """Extract the underlying value from a NonTensorData wrapper.

    Non-wrapped inputs are returned unchanged.

    Args:
        data: A NonTensorData instance or any other value.

    Returns:
        The wrapped value if data is NonTensorData, otherwise data itself.

    Example:
        >>> unwrap_non_tensor_data(NonTensorData("hello"))
        'hello'
        >>> unwrap_non_tensor_data(42)
        42
    """
    return data.data if isinstance(data, NonTensorData) else data
def get_non_tensor_data(data: TensorDict, key: str, default):
    """Fetch a value from a TensorDict, unwrapping NonTensorData if needed.

    Args:
        data: The TensorDict to read from.
        key: Key to look up.
        default: Returned when the key is absent.

    Returns:
        The unwrapped value for the key, the raw value if it was not wrapped,
        or the default when the key does not exist.

    Example:
        >>> td = TensorDict({}, batch_size=[])
        >>> assign_non_tensor_data(td, "config", {"lr": 0.01})
        >>> get_non_tensor_data(td, "config", None)
        {'lr': 0.01}
        >>> get_non_tensor_data(td, "missing", "default_value")
        'default_value'
    """
    raw = data.get(key, default)
    return unwrap_non_tensor_data(raw)
def concat_nested_tensors(tensors: list[torch.Tensor]) -> torch.Tensor:
"""Concatenate multiple nested tensors along the batch dimension.
Takes a list of nested tensors with jagged layout and concatenates them
into a single nested tensor. Each input tensor must have 2 or more dimensions and be contiguous.
Args:
tensors: List of nested tensors to concatenate. All tensors must
be nested, contiguous, and have 2 or more dimensions.
Returns:
A new nested tensor with jagged layout containing all rows from
the input tensors concatenated along dimension 0.
Raises:
AssertionError: If any tensor is not nested, not contiguous, or
doesn't have 2 or more dimensions.
Example:
>>> t1 = torch.nested.as_nested_tensor([torch.randn(3), torch.randn(5)], layout=torch.jagged)
>>> t2 = torch.nested.as_nested_tensor([torch.randn(2), torch.randn(4)], layout=torch.jagged)
>>> result = concat_nested_tensors([t1, t2])
>>> # result contains 4 rows: lengths [3, 5, 2, 4]
"""
for tensor in tensors:
assert tensor.is_nested and tensor.is_contiguous()
unbind_tensors = []
for tensor in tensors:
assert len(tensor.shape) >= 2, f"nested tensor must have 2 or more dimensions. Got {tensor.shape}"
unbind_tensor = tensor.unbind(0)
unbind_tensors.extend(list(unbind_tensor))
tensor = torch.nested.as_nested_tensor(unbind_tensors, layout=torch.jagged)
return tensor
def concat_tensordict_with_none_bsz(data: list[TensorDict]):
    """Concatenate metadata-only TensorDicts (empty batch size).

    TensorDicts that hold only NonTensorData have no batch dimension to
    concatenate along; "concatenation" simply keeps the first instance.

    Args:
        data: TensorDicts, each with batch_size == [].

    Returns:
        The first TensorDict in the list.

    Raises:
        AssertionError: If any TensorDict has a non-empty batch_size.

    Note:
        Used internally by concat_tensordict for metadata-only inputs.
    """
    for td in data:
        assert len(td.batch_size) == 0
    # Metadata is assumed identical across inputs; keep the first.
    return data[0]
def concat_tensordict(data: list[TensorDict]) -> TensorDict:
    """Concatenate TensorDicts along dimension zero.

    Regular tensors are concatenated with TensorDict.cat; nested (jagged)
    tensors cannot go through cat, so they are handled separately via
    concat_nested_tensors. Metadata-only inputs (empty batch size) are
    delegated to concat_tensordict_with_none_bsz.

    Args:
        data: TensorDicts with matching keys; all must agree on which keys
            hold nested tensors.

    Returns:
        A TensorDict with every field concatenated along dim 0.

    Raises:
        AssertionError: If data is empty or the nested-tensor key sets differ.
    """
    assert len(data) > 0, "Must have at least one tensordict"

    nested_keys = {k for k, v in data[0].items() if isinstance(v, torch.Tensor) and v.is_nested}

    if not nested_keys:
        # Metadata-only tensordicts (batch_size == []) cannot be cat'ed.
        if len(data[0].batch_size) == 0:
            return concat_tensordict_with_none_bsz(data)
        return TensorDict.cat(data, dim=0)

    # Strip nested tensors so the remainder can go through TensorDict.cat;
    # the originals are left untouched.
    stripped = []
    for td in data:
        td_nested = {k for k, v in td.items() if isinstance(v, torch.Tensor) and v.is_nested}
        assert td_nested == nested_keys, "All tensordicts must have the same set of nested tensors."
        plain = {k: v for k, v in td.items() if k not in nested_keys}
        stripped.append(TensorDict(plain, batch_size=td.batch_size, device=td.device))

    result = TensorDict.cat(stripped, dim=0)

    # Re-attach the nested fields, concatenated row-wise.
    for key in nested_keys:
        result[key] = concat_nested_tensors([td[key] for td in data])
    return result
def chunk_tensordict(td: TensorDict, chunks: int) -> list[TensorDict]:
    """Split a TensorDict into equal-sized chunks, handling 3D+ nested tensors.

    torch.chunk() does not support jagged tensors with 3 or more dimensions
    (e.g. MRoPE position_ids), so those entries are unbound into rows and
    re-assembled per chunk by hand. All other entries go through the regular
    TensorDict.chunk path.

    Args:
        td: The TensorDict to split.
        chunks: Number of chunks to create. Must evenly divide len(td).

    Returns:
        List of TensorDicts, each holding one slice of the original data.

    Raises:
        AssertionError: If td is not a TensorDict or its length is not evenly
            divisible by chunks.

    Note:
        Workaround for https://github.com/pytorch/pytorch/issues/153238
    """
    assert isinstance(td, TensorDict) and len(td) % chunks == 0, (
        f"expecting td with length divisible by chunks, but got {len(td)} and {chunks}"
    )
    rows_per_chunk = len(td) // chunks
    # Entries that torch.chunk cannot handle: jagged nested tensors of rank >= 3.
    jagged_keys = {
        name
        for name, value in td.items()
        if isinstance(value, torch.Tensor) and value.is_nested and value.dim() >= 3
    }
    # Chunk everything else through the normal TensorDict machinery.
    plain_td = TensorDict(
        {name: value for name, value in td.items() if name not in jagged_keys},
        batch_size=td.batch_size,
        device=td.device,
    )
    pieces = plain_td.chunk(chunks=chunks)
    # Rebuild each problematic nested tensor chunk-by-chunk from its rows.
    for name in jagged_keys:
        rows = td[name].unbind(dim=0)
        for idx, piece in enumerate(pieces):
            start = idx * rows_per_chunk
            piece[name] = torch.nested.as_nested_tensor(rows[start : start + rows_per_chunk], layout=torch.jagged)
    return pieces
def get_tensordict(tensor_dict: dict[str, torch.Tensor | list], non_tensor_dict: dict = None) -> TensorDict:
    """Create a TensorDict from tensors and non-tensor data.
    Automatically handles nested structures in lists by converting them to NonTensorStack.
    This enables support for:
    - Lists of lists: [[], [0.5, 0.8], [0.9]]
    - Lists of dicts: [{"acc": 1.0}, {"acc": 0.0}]
    - Lists of lists of dicts: [[{"content": "...", "role": "user"}]]
    Args:
        tensor_dict: Dictionary of tensors and lists to include in the TensorDict
        non_tensor_dict: Dictionary of metadata to store as NonTensorData
    Returns:
        TensorDict with proper handling of nested structures
    Raises:
        AssertionError: If nested tensors are not contiguous/jagged, if a list
            contains a torch.Tensor, or if entries disagree on batch size.
    Example:
        >>> td = get_tensordict(
        ...     tensor_dict={
        ...         "obs": torch.randn(3, 4),
        ...         "turn_scores": [[], [0.5, 0.8], [0.9]]  # Nested list
        ...     },
        ...     non_tensor_dict={"experiment": "test"}
        ... )
    """
    # Shallow copy: the list -> NonTensorStack conversion below rebinds keys,
    # and we must not mutate the caller's dictionary.
    tensor_dict = tensor_dict.copy()
    if non_tensor_dict is None:
        non_tensor_dict = {}
    # Leading-dimension size inferred from the first entry; all entries must agree.
    batch_size = None
    for key, val in tensor_dict.items():
        if isinstance(val, torch.Tensor) and val.is_nested:
            # Only contiguous, jagged-layout nested tensors are accepted here.
            assert val.is_contiguous(), "Nested tensors must be contiguous. Try setting layout=torch.jagged"
            assert val.layout == torch.jagged, "Nested tensors must be jagged."
        # Skip validation for NonTensorStack as it's already properly formatted
        if isinstance(val, NonTensorStack):
            if batch_size is None:
                batch_size = len(val)
            else:
                assert len(val) == batch_size, (
                    f"Batch size of NonTensorStack {key} is not consistent with other tensors. "
                    f"Expected {batch_size}, got {len(val)}"
                )
            continue
        if isinstance(val, list):
            for v in val:
                assert not isinstance(v, torch.Tensor), (
                    "Passing a list makes the data NonTensorStack, "
                    "which doesn't support torch.Tensor. Please convert to numpy first"
                )
            # Convert to NonTensorStack to handle nested structures
            tensor_dict[key] = NonTensorStack.from_list([NonTensorData(item) for item in val])
        assert isinstance(val, torch.Tensor | list)
        # NOTE: `val` still refers to the original list here, so len(val) below
        # measures the pre-conversion list, which has the same length.
        if batch_size is None:
            batch_size = val.size(0) if isinstance(val, torch.Tensor) else len(val)
        else:
            val_batch_size = val.size(0) if isinstance(val, torch.Tensor) else len(val)
            assert val_batch_size == batch_size, (
                f"Batch size of tensor {key} is not consistent with other tensors. "
                f"Expected {batch_size}, got {val_batch_size}"
            )
    # Empty tensor_dict -> a batch-size-less TensorDict (only metadata).
    if batch_size is None:
        batch_size = []
    else:
        batch_size = [batch_size]
    for key, val in non_tensor_dict.items():
        assert key not in tensor_dict
        tensor_dict[key] = NonTensorData(val)
    return TensorDict(source=tensor_dict, batch_size=batch_size)
def index_select_tensor_dict(batch: TensorDict, indices: torch.Tensor | list[int]) -> TensorDict:
    """Select rows of a TensorDict by index.

    Builds a new TensorDict holding only the rows named by ``indices``.
    Regular tensors are indexed directly; nested tensors are unbound, the
    chosen rows collected, and rebound as a jagged nested tensor;
    NonTensorStack entries are indexed along the batch dimension, while
    scalar NonTensorData metadata is carried over unchanged.

    Args:
        batch: The TensorDict to index into. May be None.
        indices: 1D tensor or list of integers naming the rows to keep.

    Returns:
        A new TensorDict with the selected rows, or None if batch was None.

    Raises:
        AssertionError: If indices is not 1-dimensional.
    """
    if isinstance(indices, list):
        indices = torch.tensor(indices)
    assert indices.dim() == 1, "indices must be a 1D tensor"
    if batch is None:
        return None
    selected = {}
    for name, value in batch.items():
        if isinstance(value, torch.Tensor):
            if value.is_nested:
                rows = value.unbind()  # unbind once up front for performance
                selected[name] = torch.nested.as_nested_tensor([rows[i] for i in indices], layout=torch.jagged)
            else:
                selected[name] = value[indices]
        elif value.shape:
            # NonTensorStack: indexable along the batch dimension.
            selected[name] = value[indices]
        else:
            # NonTensorData: scalar metadata, preserved as-is.
            selected[name] = value
    return TensorDict(source=selected, batch_size=indices.shape[0])
def union_tensor_dict(tensor_dict1: TensorDict, tensor_dict2: TensorDict) -> TensorDict:
    """Merge two TensorDicts, adding keys from the second to the first.

    Performs an in-place union of two TensorDicts. Keys from tensor_dict2
    that don't exist in tensor_dict1 are added. Keys that exist in both
    must hold equal values (tensors are compared element-wise).

    Args:
        tensor_dict1: The base TensorDict to merge into (modified in-place).
        tensor_dict2: The TensorDict whose keys will be added to tensor_dict1.

    Returns:
        The modified tensor_dict1 containing the union of both TensorDicts.

    Raises:
        AssertionError: If batch sizes don't match, or if a key exists in
            both TensorDicts with different values.

    Example:
        >>> td1 = TensorDict({"a": torch.tensor([1, 2])}, batch_size=[2])
        >>> td2 = TensorDict({"b": torch.tensor([3, 4])}, batch_size=[2])
        >>> result = union_tensor_dict(td1, td2)
        >>> list(result.keys())
        ['a', 'b']
    """
    assert tensor_dict1.batch_size == tensor_dict2.batch_size, (
        f"Two tensor dict must have identical batch size. Got {tensor_dict1.batch_size} and {tensor_dict2.batch_size}"
    )
    for key in tensor_dict2.keys():
        if key not in tensor_dict1.keys():
            # Note that there is a difference between tensor_dict2[key] and tensor_dict2.get(key)
            tensor_dict1[key] = tensor_dict2.get(key)
        else:
            if isinstance(tensor_dict2[key], torch.Tensor):
                # Fix: the previous message claimed an identity check ("not the
                # same object"), but .equal() compares values element-wise.
                assert tensor_dict1[key].equal(tensor_dict2[key]), (
                    f"{key} in tensor_dict1 and tensor_dict2 contain different values"
                )
            else:
                # non-tensor entries are compared with Python equality
                assert tensor_dict1[key] == tensor_dict2[key], (
                    f"{key} in tensor_dict1 and tensor_dict2 contain different values"
                )
    return tensor_dict1
def make_iterator(tensordict: TensorDict, mini_batch_size, epochs, seed=None, dataloader_kwargs=None):
    """Yield mini-batches of a TensorDict for a number of epochs.

    A DataLoader over row indices drives the iteration; each index batch is
    turned into a TensorDict slice via index_select_tensor_dict. Useful for
    training loops.

    Args:
        tensordict: The TensorDict to iterate over.
        mini_batch_size: Size of each mini-batch. Must evenly divide the
            TensorDict's batch size.
        epochs: Number of passes over the full dataset.
        seed: Optional random seed for reproducible shuffling.
        dataloader_kwargs: Optional dict of extra kwargs forwarded to the
            underlying DataLoader (e.g. shuffle=True, num_workers=4).

    Returns:
        An iterator yielding TensorDict mini-batches.

    Raises:
        AssertionError: If batch size is not divisible by mini_batch_size.

    Example:
        >>> td = TensorDict({"obs": torch.randn(100, 4)}, batch_size=[100])
        >>> for batch in make_iterator(td, mini_batch_size=10, epochs=2):
        ...     pass  # batch is a TensorDict with batch_size=[10]
    """
    from torch.utils.data import DataLoader

    assert tensordict.batch_size[0] % mini_batch_size == 0, f"{tensordict.batch_size[0]} % {mini_batch_size} != 0"
    if dataloader_kwargs is None:
        dataloader_kwargs = {}
    assert isinstance(dataloader_kwargs, dict)
    # Seeded generator gives reproducible shuffling when the caller asks for it.
    generator = None
    if seed is not None:
        generator = torch.Generator()
        generator.manual_seed(seed)
    # Iterate indices rather than rows: slicing a TensorDict by index batch is cheap.
    row_indices = torch.arange(tensordict.shape[0])
    index_loader = DataLoader(
        dataset=row_indices,
        batch_size=mini_batch_size,
        collate_fn=lambda batch: batch,
        generator=generator,
        **dataloader_kwargs,
    )

    def _minibatches():
        for _ in range(epochs):
            for index_batch in index_loader:
                yield index_select_tensor_dict(tensordict, index_batch)

    return _minibatches()
def assert_tensordict_eq(tensordict1: TensorDict, tensordict2: TensorDict):
    """Assert that two TensorDicts are equal.
    Performs a deep equality check between two TensorDicts, verifying that
    they have the same keys with identical values. Handles nested tensors
    by comparing their unbound components.
    Args:
        tensordict1: First TensorDict to compare.
        tensordict2: Second TensorDict to compare.
    Raises:
        AssertionError: If the TensorDicts differ in keys, value types, or
        value contents. The error message indicates what differs.
    Note:
        - Regular tensors are compared element-wise
        - Nested tensors are unbound and compared component by component
        - Non-tensor values are compared with standard equality
    """
    tensordict1_key_set = set(tensordict1.keys())
    tensordict2_key_set = set(tensordict2.keys())
    assert tensordict1_key_set == tensordict2_key_set, (
        f"key set diffs. Got {tensordict2_key_set=} vs {tensordict1_key_set=}"
    )
    for key in tensordict1.keys():
        val = tensordict1[key]
        val2 = tensordict2[key]
        # Exact type match required; isinstance would accept subclasses.
        assert type(val) is type(val2), f"The type of {key} must be the same. Got {type(val)} vs {type(val2)}"
        if isinstance(val, torch.Tensor):
            if val.is_nested:
                assert val.is_nested and val2.is_nested, (
                    f"Both tensors must be nested tensors. {val.is_nested=}, {val2.is_nested=}"
                )
                # Jagged tensors are compared row by row after unbinding.
                t1, t2 = val.unbind(), val2.unbind()
                assert len(t1) == len(t2), f"Nested tensor should have the same lengths. {len(t1)=} vs {len(t2)=}"
                for c1, c2 in zip(t1, t2, strict=True):
                    assert torch.equal(c1, c2), f"Nested tensor components have different values. {c1=} vs {c2=}"
            else:
                # Dense tensors: element-wise equality.
                assert torch.all(torch.eq(val, val2)).item()
        else:
            # Non-tensor values fall back to Python equality.
            assert val == val2
def get(tensordict: TensorDict, key: str, default=None) -> Any:
    """Fetch a value from a TensorDict, unwrapping it to a native form.

    Tensors come back untouched, a NonTensorStack becomes a Python list,
    and a NonTensorData wrapper is replaced by its underlying value.

    Args:
        tensordict: The TensorDict to retrieve from.
        key: The key to look up.
        default: Returned when the key is absent. Defaults to None.

    Returns:
        The unwrapped value for the key, or default when it is missing.

    Example:
        >>> td = get_tensordict({"obs": torch.randn(3, 4), "labels": ["a", "b", "c"]})
        >>> get(td, "obs")  # Returns torch.Tensor
        >>> get(td, "labels")  # Returns ["a", "b", "c"] as a list
        >>> get(td, "missing", "default")  # Returns "default"
    """
    if key not in tensordict:
        return default
    value = tensordict.get(key)
    if isinstance(value, NonTensorStack):
        return value.tolist()
    if isinstance(value, NonTensorData):
        return value.data
    assert isinstance(value, torch.Tensor)
    return value
def get_keys(tensordict: TensorDict, keys: Iterable[str]) -> TensorDict:
    """Build a new TensorDict from a subset of keys.

    Each requested entry is routed to the right bucket: tensors and
    NonTensorStack values (converted to lists) become tensor-side data,
    while scalar NonTensorData entries are passed through as metadata.

    Args:
        tensordict: The source TensorDict.
        keys: Iterable of key names to extract.

    Returns:
        A new TensorDict containing only the requested keys.

    Raises:
        KeyError: If a requested key is missing from the tensordict.

    Example:
        >>> td = get_tensordict({"a": torch.randn(3), "b": torch.randn(3), "c": torch.randn(3)})
        >>> subset = get_keys(td, ["a", "c"])
        >>> list(subset.keys())
        ['a', 'c']
    """
    tensors = {}
    non_tensors = {}
    for name in keys:
        if name not in tensordict.keys():
            raise KeyError(f"key {name} not in tensordict")
        value = tensordict.get(name)
        if isinstance(value, NonTensorData):
            non_tensors[name] = value.data
        elif isinstance(value, NonTensorStack):
            tensors[name] = value.tolist()
        else:
            assert isinstance(value, torch.Tensor)
            tensors[name] = value
    return get_tensordict(tensors, non_tensors)
def pop(tensordict: TensorDict, key: str, default=None) -> Any:
    """Remove a key from a TensorDict and return its unwrapped value.

    Works like get() but also deletes the entry: tensors are returned
    as-is, NonTensorStack becomes a list, NonTensorData is unwrapped.

    Args:
        tensordict: The TensorDict to pop from.
        key: The key to remove and return.
        default: Returned when the key is absent. Defaults to None.

    Returns:
        The unwrapped value, or default if the key did not exist. On
        success the key is no longer present in the TensorDict.

    Example:
        >>> td = get_tensordict({"obs": torch.randn(3, 4), "labels": ["a", "b", "c"]})
        >>> labels = pop(td, "labels")  # Returns ["a", "b", "c"], removes from td
        >>> "labels" in td.keys()
        False
    """
    # Sentinel distinguishes "missing key" from a stored None-like value.
    _missing = object()
    value = tensordict.pop(key, _missing)
    if value is _missing:
        return default
    if isinstance(value, NonTensorStack):
        return value.tolist()
    if isinstance(value, NonTensorData):
        return value.data
    assert isinstance(value, torch.Tensor)
    return value
def pop_keys(tensordict: TensorDict, keys: Iterable[str]) -> TensorDict:
    """Remove several keys from a TensorDict and return them as a new one.

    The source TensorDict is modified in place; the removed entries are
    re-packed into a fresh TensorDict via get_tensordict.

    Args:
        tensordict: The source TensorDict to pop from (modified in-place).
        keys: Iterable of key names to remove and return.

    Returns:
        A new TensorDict containing the popped keys and their values.

    Raises:
        KeyError: If any requested key is missing from the tensordict.

    Example:
        >>> td = get_tensordict({"a": torch.randn(3), "b": torch.randn(3), "c": torch.randn(3)})
        >>> popped = pop_keys(td, ["a", "c"])
        >>> list(td.keys())  # Only 'b' remains
        ['b']
        >>> list(popped.keys())
        ['a', 'c']
    """
    tensors = {}
    non_tensors = {}
    for name in keys:
        if name not in tensordict.keys():
            raise KeyError(f"key {name} not in tensordict")
        # Peek with .get() first: it keeps the wrapper type, which tells us
        # which bucket the popped value belongs in.
        peeked = tensordict.get(name)
        popped = tensordict.pop(name)
        if isinstance(peeked, torch.Tensor):
            tensors[name] = popped
        elif isinstance(peeked, NonTensorStack):
            tensors[name] = popped.tolist()
        else:
            assert isinstance(peeked, NonTensorData)
            non_tensors[name] = popped
    return get_tensordict(tensors, non_tensors)
def pad_to_divisor(data: TensorDict, size_divisor: int):
    """Pad a TensorDict's batch dimension to be divisible by a given divisor.

    If the TensorDict's length is not evenly divisible by size_divisor,
    pads the batch dimension by repeating elements from the beginning.
    Useful for ensuring even distribution across workers in distributed training.

    Args:
        data: The TensorDict to pad.
        size_divisor: The divisor that the padded length must be divisible by.

    Returns:
        tuple: A tuple containing:
            - data (TensorDict): The padded TensorDict (or original if no padding needed)
            - pad_size (int): Number of elements added as padding (0 if none)

    Raises:
        AssertionError: If data is not a TensorDict.

    Example:
        >>> td = TensorDict({"obs": torch.randn(10, 4)}, batch_size=[10])
        >>> padded, pad_size = pad_to_divisor(td, 4)
        >>> len(padded)  # 12 (next multiple of 4 after 10)
        12
        >>> pad_size
        2
    """
    assert isinstance(data, TensorDict), "data must be a TensorDict"
    if len(data) % size_divisor != 0:
        pad_size = size_divisor - len(data) % size_divisor
        padding_protos = []
        remaining_pad = pad_size
        # pad_size may exceed len(data) when the divisor is larger than the
        # batch, so keep slicing from the front until enough rows are collected.
        while remaining_pad > 0:
            take_size = min(remaining_pad, len(data))
            padding_protos.append(data[:take_size])
            remaining_pad -= take_size
        data_padded = torch.cat([data] + padding_protos)
    else:
        if len(data) == 0:
            # Fix: the message previously referenced the legacy DataProto type
            # and contained a typo ("no changed made").
            logging.warning("padding a TensorDict with no item, no change made")
        pad_size = 0
        data_padded = data
    return data_padded, pad_size
def unpad(data: TensorDict, pad_size):
    """Strip padding previously added by pad_to_divisor.

    Drops the last ``pad_size`` rows; with pad_size == 0 the input is
    returned untouched.

    Args:
        data: The padded TensorDict.
        pad_size: Number of padding elements to remove.

    Returns:
        The TensorDict without its padding, i.e. data[:-pad_size].

    Example:
        >>> td = TensorDict({"obs": torch.randn(12, 4)}, batch_size=[12])
        >>> unpadded = unpad(td, pad_size=2)
        >>> len(unpadded)
        10
    """
    return data if pad_size == 0 else data[:-pad_size]
def contiguous(data: TensorDict) -> TensorDict:
    """Call contiguous on every tensor of a tensordict.

    Unlike TensorDict's own contiguous (which the original note says would
    produce NonTensorStack), this rebuilds the result through get_tensordict
    and therefore always returns a new tensordict.

    Args:
        data: The input tensordict

    Returns:
        a tensordict that is contiguous
    """
    tensors = {}
    metadata = {}
    for name in data.keys():
        value = data.get(name)
        if isinstance(value, NonTensorData):
            # Scalar metadata travels through the non-tensor channel untouched.
            metadata[name] = value
        elif isinstance(value, NonTensorStack):
            tensors[name] = value
        else:
            assert isinstance(value, torch.Tensor), f"Expect val to be a torch.Tensor. Got {type(value)}"
            tensors[name] = value.contiguous()
    return get_tensordict(tensor_dict=tensors, non_tensor_dict=metadata)
def maybe_fix_3d_position_ids(data: TensorDict):
    # note for tensordict with pickle/unpickle. nested tensor in tensordict after consolidate and pickle/unpickle
    # will incur indexing error for ragged tensor. This only happens when using 3D position ids in VLMs.
    # This is likely a bug in tensordict. As a workaround, we manually set _ragged_index.
    if "position_ids" not in data.keys():
        return
    position_ids = data["position_ids"]
    if position_ids.dim() == 3 and position_ids.is_nested:
        # Mutate the private ragged-dimension marker on the stored tensor.
        data["position_ids"]._ragged_idx = 2
| verl__utils__tensordict_utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for tokenization."""
import types
import warnings
__all__ = ["hf_tokenizer", "hf_processor"]
def set_pad_token_id(tokenizer):
    """Fall back to the EOS token wherever the pad token is unset.

    Args:
        tokenizer (transformers.PreTrainedTokenizer): The tokenizer to be set.
    """
    # Both the id and the string form may be missing independently; fix each
    # from its EOS counterpart and warn so the substitution is visible in logs.
    for pad_attr, eos_attr in (("pad_token_id", "eos_token_id"), ("pad_token", "eos_token")):
        if getattr(tokenizer, pad_attr) is None:
            fallback = getattr(tokenizer, eos_attr)
            setattr(tokenizer, pad_attr, fallback)
            warnings.warn(f"tokenizer.{pad_attr} is None. Now set to {fallback}", stacklevel=1)
def hf_tokenizer(name_or_path, correct_pad_token=True, correct_gemma2=True, **kwargs):
    """Create a huggingface pretrained tokenizer which correctly handles eos and pad tokens.
    Args:
        name_or_path (str): The name or local path of the tokenizer.
        correct_pad_token (bool): Whether to correct the pad token id.
        correct_gemma2 (bool): Whether to correct the gemma2 tokenizer.
        **kwargs: Extra keyword arguments forwarded to ``AutoTokenizer.from_pretrained``.
    Returns:
        transformers.PreTrainedTokenizer: The pretrained tokenizer.
    """
    from transformers import AutoTokenizer
    if correct_gemma2 and isinstance(name_or_path, str) and "gemma-2-2b-it" in name_or_path:
        # the EOS token in gemma2 is ambiguous, which may worsen RL performance.
        # https://huggingface.co/google/gemma-2-2b-it/commit/17a01657f5c87135bcdd0ec7abb4b2dece04408a
        warnings.warn(
            "Found gemma-2-2b-it tokenizer. Set eos_token and eos_token_id to <end_of_turn> and 107.", stacklevel=1
        )
        kwargs["eos_token"] = "<end_of_turn>"
        kwargs["eos_token_id"] = 107
    tokenizer = AutoTokenizer.from_pretrained(name_or_path, **kwargs)
    if correct_pad_token:
        # Fall back to the EOS token as pad token when none is configured.
        set_pad_token_id(tokenizer)
    return tokenizer
def hf_processor(name_or_path, **kwargs):
    """Create a huggingface processor to process multimodal data.

    Args:
        name_or_path (str): The name of the processor.

    Returns:
        transformers.ProcessorMixin: The pretrained processor, or ``None`` when
        creation fails or the loaded object is not actually a processor.
    """
    from transformers import AutoConfig, AutoProcessor

    try:
        processor = AutoProcessor.from_pretrained(name_or_path, **kwargs)
        # Attach the model config so the bound get_rope_index below can read it.
        processor.config = AutoConfig.from_pretrained(name_or_path, **kwargs)
        # Bind the matching VLM model's get_rope_index method onto the processor.
        processor_cls_name = processor.__class__.__name__
        if processor_cls_name == "Qwen2VLProcessor":
            from transformers.models.qwen2_vl import Qwen2VLModel

            processor.get_rope_index = types.MethodType(Qwen2VLModel.get_rope_index, processor)
        elif processor_cls_name == "Qwen2_5_VLProcessor":
            from transformers.models.qwen2_5_vl import Qwen2_5_VLModel

            processor.get_rope_index = types.MethodType(Qwen2_5_VLModel.get_rope_index, processor)
        elif processor_cls_name == "Qwen3VLProcessor":
            from transformers.models.qwen3_vl import Qwen3VLModel

            processor.get_rope_index = types.MethodType(Qwen3VLModel.get_rope_index, processor)
        elif processor_cls_name == "Glm4vImageProcessor":
            from transformers.models.glm4v import Glm4vModel

            processor.get_rope_index = types.MethodType(Glm4vModel.get_rope_index, processor)
        elif processor_cls_name == "MllamaProcessor":
            pass  # MllamaProcessor and MllamaModel doesn't have get_rope_index property
        else:
            raise ValueError(f"Unsupported processor type: {processor.__class__.__name__}")
    except Exception as e:
        processor = None
        # TODO(haibin.lin): try-catch should be removed after adding transformer version req to setup.py to avoid
        # silent failure
        warnings.warn(f"Failed to create processor: {e}. This may affect multimodal processing", stacklevel=1)
    # Avoid load tokenizer, see:
    # https://github.com/huggingface/transformers/blob/v4.49.0/src/transformers/models/auto/processing_auto.py#L344
    if processor is not None and "Processor" not in processor.__class__.__name__:
        processor = None
    return processor
| verl__utils__tokenizer.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adapted from Cruise.
"""
import torch
HALF_LIST = [16, "16", "fp16", "float16", torch.float16]
FLOAT_LIST = [32, "32", "fp32", "float32", torch.float32]
BFLOAT_LIST = ["bf16", "bfloat16", torch.bfloat16]
class PrecisionType:
"""Type of precision used.
>>> PrecisionType.HALF == 16
True
>>> PrecisionType.HALF in (16, "16")
True
"""
HALF = "16"
FLOAT = "32"
FULL = "64"
BFLOAT = "bf16"
MIXED = "mixed"
@staticmethod
def supported_type(precision: str | int) -> bool:
return any(x == precision for x in PrecisionType)
@staticmethod
def supported_types() -> list[str]:
return [x.value for x in PrecisionType]
@staticmethod
def is_fp16(precision):
return precision in HALF_LIST
@staticmethod
def is_fp32(precision):
return precision in FLOAT_LIST
@staticmethod
def is_bf16(precision):
return precision in BFLOAT_LIST
@staticmethod
def to_dtype(precision):
if precision in HALF_LIST:
return torch.float16
elif precision in FLOAT_LIST:
return torch.float32
elif precision in BFLOAT_LIST:
return torch.bfloat16
else:
raise RuntimeError(f"unexpected precision: {precision}")
@staticmethod
def to_str(precision):
if precision == torch.float16:
return "fp16"
elif precision == torch.float32:
return "fp32"
elif precision == torch.bfloat16:
return "bf16"
else:
raise RuntimeError(f"unexpected precision: {precision}")
| verl__utils__torch_dtypes.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contain small torch utilities
"""
import math
from contextlib import contextmanager
from typing import Optional
import torch
import torch.distributed
import torch.nn.functional as F
from tensordict import TensorDict
from torch import nn
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from transformers import PreTrainedTokenizer
from verl.utils.device import get_device_name, get_torch_device
try:
from flash_attn.ops.triton.cross_entropy import cross_entropy_loss
FLAH_ATTN_CROSS_ENTROPY_LOSS_AVAILABLE = True
except ImportError:
FLAH_ATTN_CROSS_ENTROPY_LOSS_AVAILABLE = False
try:
import torch_npu
NPU_CROSS_ENTROPY_LOSS_AVAILABLE = hasattr(torch_npu, "npu_cross_entropy_loss")
except ImportError:
NPU_CROSS_ENTROPY_LOSS_AVAILABLE = False
def gather_from_labels(data: torch.Tensor, label: torch.Tensor) -> torch.Tensor:
    """Pick one value per position from the last dimension of ``data``.

    Commonly used to extract the log-probability of each target token from a
    full vocabulary distribution.

    Args:
        data: Tensor of shape (..., vocab_size) holding the values.
        label: Index tensor of shape (...,) with entries in [0, vocab_size).

    Returns:
        torch.Tensor: The selected values, with the same shape as ``label``.

    Example:
        >>> logits = torch.randn(2, 3, 100)  # [batch, seq, vocab]
        >>> labels = torch.randint(0, 100, (2, 3))  # [batch, seq]
        >>> gathered = gather_from_labels(logits, labels)  # [batch, seq]
    """
    index = label.unsqueeze(-1)
    return torch.gather(data, -1, index).squeeze(-1)
def logprobs_from_logits(logits, labels, inplace_backward=True):
    """
    Compute per-token log-probabilities for the given labels.

    Dispatches to the fastest available backend: the Flash-Attention
    Triton cross-entropy when installed, the NPU kernel on Ascend devices,
    and otherwise a memory-friendly log-softmax/gather fallback.
    See: https://github.com/pytorch/pytorch/issues/563#issuecomment-330103591

    Args:
        logits (Tensor): Model outputs of shape (..., vocab_size).
        labels (LongTensor): True class indices of shape matching logits[..., :-1].
        inplace_backward (bool): If True and Flash-Attn is available, perform backward in-place.

    Returns:
        Tensor: Log-probabilities of the target labels, shape logits.shape[:-1].
    """
    if FLAH_ATTN_CROSS_ENTROPY_LOSS_AVAILABLE:
        # The Triton kernel wants 2D input; flatten, compute, then restore shape.
        lead_shape = logits.shape[:-1]
        vocab_size = logits.shape[-1]
        flat_logits = logits.reshape(-1, vocab_size)
        flat_labels = labels.reshape(-1)
        token_logprobs = logprobs_from_logits_flash_attn(flat_logits, flat_labels, inplace_backward=inplace_backward)
        return token_logprobs.view(*lead_shape)
    if NPU_CROSS_ENTROPY_LOSS_AVAILABLE:
        return logprobs_from_logits_torch_npu(logits, labels)
    return logprobs_from_logits_v2(logits, labels)
def logprobs_from_logits_flash_attn(
    logits: torch.Tensor, labels: torch.Tensor, inplace_backward: bool = True
) -> torch.Tensor:
    """Compute log-probabilities via Flash Attention's Triton cross-entropy.

    The kernel returns per-token losses; the target log-probability is the
    negated loss.

    Args:
        logits: Model output logits of shape (batch_size, vocab_size).
        labels: Target token indices of shape (batch_size,).
        inplace_backward: If True, perform backward pass in-place for memory efficiency.

    Returns:
        torch.Tensor: Log-probabilities for target labels, shape (batch_size,).

    Raises:
        AssertionError: If flash-attn < 2.4.3 (older versions return a bare tensor).
    """
    losses = cross_entropy_loss(logits, labels, inplace_backward=inplace_backward)
    assert isinstance(losses, tuple), (
        "please make sure flash-attn>=2.4.3 where cross_entropy_loss returns Tuple[losses, z_losses]."
    )
    # First element holds the per-token losses; the z-loss component is unused.
    return -losses[0]
def logprobs_from_logits_torch_npu(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    """Compute log-probabilities with torch_npu's fused cross-entropy kernel.

    Args:
        logits: Model output logits of shape (..., vocab_size).
        labels: Target token indices of shape (...,).

    Returns:
        torch.Tensor: Log-probabilities for target labels, same shape as labels.
    """
    # The NPU kernel expects 2D input; flatten, compute, then restore the shape.
    lead_shape = logits.shape[:-1]
    flat_logits = logits.reshape(-1, logits.shape[-1])
    flat_labels = labels.reshape(-1)
    loss, _, _, _ = torch_npu.npu_cross_entropy_loss(flat_logits, flat_labels, reduction="none")
    return -loss.view(*lead_shape)
def logprobs_from_logits_naive(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    """Reference log-probability computation via log_softmax + gather.

    Straightforward and device-agnostic, but less memory-efficient than the
    specialized kernels.

    Args:
        logits: Model output logits of shape (..., vocab_size).
        labels: Target token indices of shape (...,).

    Returns:
        torch.Tensor: Log-probabilities for target labels, same shape as labels.
    """
    log_probs = F.log_softmax(logits, dim=-1)
    return torch.gather(log_probs, -1, labels.unsqueeze(-1)).squeeze(-1)
def logprobs_from_logits_v2(logits: torch.FloatTensor, labels: torch.Tensor) -> torch.Tensor:
"""Memory-efficient log-probability computation using row-wise processing.
Computes log-probabilities by processing one row at a time to reduce peak
memory consumption. Uses logsumexp for float32/float64, falls back to
log_softmax for bfloat16 due to numerical stability concerns.
The mathematical identity used is: log_softmax(x_i) = x_i - logsumexp(x)
Args:
logits: Model output logits of shape (batch_size, seq_len, vocab_size)
or (batch_size, vocab_size).
labels: Target token indices matching logits shape without vocab dimension.
Returns:
torch.Tensor: Log-probabilities for target labels.
Note:
This implementation trades compute for memory by iterating over batch
dimension, making it suitable for large vocabulary sizes.
"""
if logits.dtype in [torch.float32, torch.float64]:
logits_labels = torch.gather(logits, dim=-1, index=labels.unsqueeze(-1)).squeeze(-1)
# loop to reduce peak mem consumption
logsumexp_values = torch.stack([torch.logsumexp(logit, dim=-1) for logit in logits])
logprobs_labels = logits_labels - logsumexp_values # log_softmax(x_i) = x_i - logsumexp(x)
else:
# logsumexp approach is unstable with bfloat16, fall back to slightly less efficent approach
logprobs_labels = []
for row_logits, row_labels in zip(logits, labels, strict=True): # loop to reduce peak mem consumption
row_logprobs = F.log_softmax(row_logits, dim=-1)
row_logprobs_labels = row_logprobs.gather(dim=-1, index=row_labels.unsqueeze(-1)).squeeze(-1)
logprobs_labels.append(row_logprobs_labels)
logprobs_labels = torch.stack(logprobs_labels)
return logprobs_labels
def clip_by_value(x: torch.Tensor, tensor_min: torch.Tensor, tensor_max: torch.Tensor) -> torch.Tensor:
    """Clamp ``x`` element-wise between tensor-valued bounds.

    Like torch.clamp but the min/max bounds may themselves be tensors
    (broadcastable to x).

    Args:
        x: Input tensor to clip.
        tensor_min: Minimum bound tensor (broadcastable to x).
        tensor_max: Maximum bound tensor (broadcastable to x).

    Returns:
        torch.Tensor: Clipped tensor with values in [tensor_min, tensor_max].

    See Also:
        https://github.com/pytorch/pytorch/issues/2793#issuecomment-428784713
    """
    # Apply the upper bound first, then the lower bound (same order as before).
    capped = torch.min(x, tensor_max)
    return torch.max(capped, tensor_min)
def entropy_from_logits(logits: torch.Tensor) -> torch.Tensor:
    """Shannon entropy of the softmax distribution defined by ``logits``.

    Uses the numerically stable identity
    ``H = logsumexp(logits) - sum(softmax(logits) * logits)``.

    Args:
        logits: Unnormalized log-probabilities of shape (..., vocab_size).

    Returns:
        torch.Tensor: Entropy per distribution, shape (...,).
    """
    probs = torch.nn.functional.softmax(logits, dim=-1)
    return torch.logsumexp(logits, dim=-1) - (probs * logits).sum(dim=-1)
def entropy_from_logits_with_chunking(logits: torch.Tensor, chunk_size: int = 2048) -> torch.Tensor:
    """Per-row entropy computed in chunks to bound peak memory.

    Rows are processed ``chunk_size`` at a time; each chunk is promoted to
    float32 before the softmax/logsumexp computation for numerical stability.

    Args:
        logits: Unnormalized log-probabilities of shape (batch_size, vocab_size).
        chunk_size: Rows handled per iteration. Defaults to 2048.

    Returns:
        torch.Tensor: Entropy values of shape (batch_size,).
    """
    total_rows = logits.shape[0]
    out = torch.zeros(total_rows, device=logits.device)
    for start in range(0, total_rows, chunk_size):
        stop = start + chunk_size
        block = logits[start:stop].float()  # float32 for stability
        probs = torch.nn.functional.softmax(block, dim=-1)
        out[start:stop] = torch.logsumexp(block, dim=-1) - (probs * block).sum(dim=-1)
    return out
def masked_sum(values: torch.Tensor, mask: torch.Tensor, axis: int | tuple[int, ...] | None = None) -> torch.Tensor:
"""Compute sum of tensor values where mask is True.
NaN values outside the mask are replaced with zeros to prevent
contaminating the sum.
Args:
values: Input tensor containing values to sum.
mask: Boolean or numeric mask tensor (same shape as values).
Non-zero values indicate elements to include.
axis: Dimension(s) along which to sum. None sums all elements.
Returns:
torch.Tensor: Sum of masked values, reduced along specified axis.
"""
# If NaNs exist out of mask, replace NaNs in values with a value that
# won't affect the sum (e.g., 0 for masked regions)
valid_values = torch.where(mask.bool(), values, 0.0)
return (valid_values * mask).sum(axis=axis)
def masked_mean(values, mask, axis=None):
    """Mean of ``values`` over the positions selected by ``mask``.

    Args:
        values (Tensor): Input tensor.
        mask (Tensor): Boolean or numeric mask of the same shape as ``values``.
        axis (int or tuple of int, optional): Reduction dimension(s);
            ``None`` averages over every element.

    Returns:
        Tensor: Masked mean reduced over ``axis``.
    """
    total = masked_sum(values, mask, axis)
    # The epsilon keeps the division finite when the mask selects nothing.
    count = mask.sum(axis=axis) + 1e-8
    return total / count
def masked_var(values, mask, unbiased=True):
    """Variance of ``values`` over masked positions.

    Args:
        values: Input tensor.
        mask: Selector tensor, same shape as ``values``.
        unbiased: Apply Bessel's correction (n / (n - 1)) when True.

    Returns:
        Tensor: Masked (optionally unbiased) variance.

    Raises:
        ValueError: If ``unbiased`` is requested and the mask selects zero
            elements, or exactly one (Bessel's factor would divide by zero).
    """
    centered = values - masked_mean(values, mask)
    variance = masked_mean(centered**2, mask)
    if not unbiased:
        return variance

    num_selected = mask.sum()
    if num_selected == 0:
        raise ValueError("At least one element in the mask has to be 1.")
    # A single selected element makes the Bessel factor n / (n - 1) blow up;
    # using a larger minibatch_size avoids this situation.
    if num_selected == 1:
        raise ValueError("The sum of the mask is one, which can cause a division by zero.")
    return variance * (num_selected / (num_selected - 1))
def masked_whiten(values, mask, shift_mean=True):
    """Normalize ``values`` with mean/variance computed over ``mask``.

    Args:
        values (torch.Tensor): Input tensor.
        mask (torch.Tensor): Boolean tensor of the same shape, selects the
            elements used for the statistics.
        shift_mean (bool): When True (default) the result is zero-mean;
            otherwise the masked mean is added back after scaling.

    Returns:
        torch.Tensor: Whitened tensor of the same shape as ``values``.
    """
    mean = masked_mean(values, mask)
    var = masked_var(values, mask)
    # rsqrt(var + eps) scales to unit variance without an explicit divide.
    normalized = (values - mean) * torch.rsqrt(var + 1e-8)
    if shift_mean:
        return normalized
    return normalized + mean
def get_response_mask(response_id: torch.Tensor, eos_token: int | list[int] = 2, dtype=torch.int64):
"""
end of sentence token can be int or list: 1 or [1, 2]
e.g.
response_id = torch.tensor([[20, 10, 34, 1, 0, 0, 0],
[78, 0, 76, 2, 1, 0, 0],
[23, 98, 1, 0, 0, 0, 0],
[33, 3, 98, 45, 1, 0, 0]])
#eos_token=1
response_mask: tensor([[1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0]])
#eos_token=[1,2]
response_mask: tensor([[1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0]])
"""
eos_mask = torch.isin(response_id, torch.tensor(eos_token, device=response_id.device)).int()
return (eos_mask.cumsum(dim=1) - eos_mask).eq(0).to(dtype)
def compute_grad_norm(model: nn.Module) -> float:
    """Return the sum of squared gradient entries over all model parameters.

    Parameters without gradients are skipped.

    Args:
        model: PyTorch model whose ``.grad`` tensors have been populated.

    Returns:
        float: Sum of squared gradient values (not the square root).

    Note:
        This is the *squared* L2 norm; take the square root of the result to
        obtain the actual gradient norm.
    """
    total = 0
    for param in model.parameters():
        grad = param.grad
        if grad is None:
            continue
        total += grad.detach().pow(2).sum().item()
    return total
def broadcast_dict_tensor(tensors: dict[str, torch.Tensor] | TensorDict, src: int, group) -> None:
    """Broadcast every tensor in ``tensors`` from rank ``src`` to the group.

    Keys are visited in sorted order so that all ranks issue the collective
    calls in the same sequence.

    Args:
        tensors: Dictionary or TensorDict containing tensors to broadcast in place.
        src: Source rank from which to broadcast.
        group: Process group for the broadcast operation.

    Note:
        Tensors are broadcast one at a time. Could be optimized to use a
        single broadcast with packed tensors.
    """
    # Fix: plain dicts have no `.sorted_keys` (a TensorDict property), so the
    # annotated dict[str, Tensor] input previously raised AttributeError.
    keys = tensors.sorted_keys if hasattr(tensors, "sorted_keys") else sorted(tensors.keys())
    for key in keys:
        torch.distributed.broadcast(tensors[key], src=src, group=group, async_op=False)
def allgather_dict_tensors(
    tensors: dict[str, torch.Tensor] | TensorDict, size: int, group, dim: int = 0
) -> dict[str, torch.Tensor] | TensorDict:
    """Gather tensors from all ranks and concatenate them.

    Performs all_gather on each tensor in the dictionary and concatenates
    the results along the specified dimension.

    Args:
        tensors: Dictionary or TensorDict containing tensors to gather.
        size: Number of ranks in the process group.
        group: Process group for the all_gather operation.
        dim: Dimension along which to concatenate gathered tensors. Defaults to 0.

    Returns:
        Dictionary or TensorDict (matching input type) with gathered and
        concatenated tensors. Each tensor's size along `dim` is multiplied by `size`.

    Note:
        This implementation gathers tensors one at a time synchronously.
        Could be optimized using async ops or packed all_gather.
    """
    # Unwrap a TensorDict into a plain dict; remember the input type so the
    # result can be wrapped back at the end.
    if isinstance(tensors, TensorDict):
        is_tensor_dict = True
        tensors_as_dict = tensors.to_dict()
    else:
        tensors_as_dict = tensors
        is_tensor_dict = False

    output = {}
    # Sorted keys guarantee every rank issues the collectives in the same order.
    sorted_keys = sorted(tensors_as_dict.keys())
    for key in sorted_keys:
        val = tensors_as_dict[key]
        # One receive buffer per rank, then concatenate along `dim`.
        output[key] = [torch.empty_like(val) for _ in range(size)]
        torch.distributed.all_gather(output[key], val, group=group, async_op=False)
        output[key] = torch.cat(output[key], dim=dim)

    if is_tensor_dict:
        # The gathered batch dimension grows by a factor of `size`.
        output = TensorDict(source=output, batch_size=tensors.batch_size[0] * size)
    return output
def allgather_dict_into_dict(data: dict, group=None) -> dict:
    """All-gather a picklable dict from every rank and merge per-key lists.

    Args:
        data: Local dictionary to contribute.
        group: Process group to gather across (default group when None).

    Returns:
        dict: Maps each key to the list of values collected from all ranks,
        in rank order.
    """
    assert isinstance(data, dict), f"Expect data to be a dictionary, Got {type(data)}"
    world_size = torch.distributed.get_world_size(group=group)
    gathered = [None for _ in range(world_size)]
    torch.distributed.all_gather_object(gathered, data, group=group)

    merged = {}
    for rank_data in gathered:
        for key, value in rank_data.items():
            merged.setdefault(key, []).append(value)
    return merged
def split_dict_tensor_into_batches(tensors: TensorDict, batch_size) -> list[TensorDict]:
    """Split a TensorDict along its leading batch dimension into equal chunks.

    Args:
        tensors: TensorDict whose batch size must be divisible by ``batch_size``.
        batch_size: Number of rows per resulting chunk.

    Returns:
        list[TensorDict]: Chunks of ``batch_size`` rows each, in order.
    """
    assert tensors.batch_size[0] % batch_size == 0, (
        f"input data batch size: {tensors.batch_size[0]}, split batch size: {batch_size}"
    )
    return tensors.split(batch_size)
def pad_2d_list_to_length(response, pad_token_id, max_length=None):
    """
    Right-pad a ragged 2D python list (e.g. responses, logprobs) into a 2D tensor.

    Rows are padded with ``pad_token_id`` up to ``max_length`` when that exceeds
    the longest row; otherwise up to the longest row's length.
    """
    longest = max(len(row) for row in response)
    target_length = longest if max_length is None or max_length <= longest else max_length
    padded_rows = [list(row) + [pad_token_id] * (target_length - len(row)) for row in response]
    return torch.tensor(padded_rows)
def pad_sequence_to_length(tensors, max_seq_len, pad_token_id, left_pad=False):
    """
    Pad a 2D tensor (e.g. responses, logprobs) along its last dim to ``max_seq_len``.

    Input shape [bs, seq_length] becomes [bs, max_seq_len]; tensors already at
    least that long are returned unchanged (no truncation is performed).
    """
    current_len = tensors.shape[-1]
    if current_len >= max_seq_len:
        return tensors
    pad_amount = max_seq_len - current_len
    # F.pad's last-dim spec is (left, right).
    pad_spec = (pad_amount, 0) if left_pad else (0, pad_amount)
    return F.pad(tensors, pad_spec, "constant", pad_token_id)
def postprocess_data(
    input_ids: torch.Tensor,
    attention_mask: torch.Tensor,
    max_length: int,
    pad_token_id: int,
    left_pad=True,
    truncation="error",
):
    """Pad or truncate tokenizer outputs to exactly ``max_length`` tokens.

    Args:
        input_ids: Token indices [batch_size, seq_len]
        attention_mask: Mask [batch_size, seq_len]
        max_length: Target sequence length
        pad_token_id: Padding token ID
        left_pad: Pad on the left side if True
        truncation: "left", "right", "middle" or "error"

    Returns:
        (input_ids, attention_mask) padded/truncated to max_length

    Raises:
        NotImplementedError: When the sequence exceeds ``max_length`` and
            ``truncation="error"``, or for an unknown truncation mode.
    """
    assert truncation in ["left", "right", "middle", "error"]
    assert input_ids.ndim == 2

    sequence_length = input_ids.shape[-1]
    if sequence_length == max_length:
        return input_ids, attention_mask

    if sequence_length < max_length:
        input_ids = pad_sequence_to_length(
            input_ids, max_seq_len=max_length, pad_token_id=pad_token_id, left_pad=left_pad
        )
        attention_mask = pad_sequence_to_length(
            attention_mask, max_seq_len=max_length, pad_token_id=0, left_pad=left_pad
        )
        return input_ids, attention_mask

    # sequence_length > max_length: truncate per the requested strategy.
    if truncation == "left":
        # keep the trailing tokens; dropping the head is rarely ideal
        return input_ids[:, -max_length:], attention_mask[:, -max_length:]
    if truncation == "right":
        return input_ids[:, :max_length], attention_mask[:, :max_length]
    if truncation == "middle":
        # Keep both ends and drop tokens from the middle.
        left_half = max_length // 2
        right_half = max_length - left_half
        input_ids = torch.cat([input_ids[:, :left_half], input_ids[:, -right_half:]], dim=-1)
        attention_mask = torch.cat([attention_mask[:, :left_half], attention_mask[:, -right_half:]], dim=-1)
        return input_ids, attention_mask
    if truncation == "error":
        raise NotImplementedError(f"{sequence_length=} is larger than {max_length=}")
    raise NotImplementedError(f"Unknown truncation method {truncation}")
def tokenize_and_postprocess_data(
    prompt: str, tokenizer: PreTrainedTokenizer, max_length: int, pad_token_id: int, left_pad=True, truncation="error"
):
    """Tokenize ``prompt`` and normalize the result to ``max_length`` tokens.

    Args:
        prompt: Input text to tokenize.
        tokenizer: HuggingFace tokenizer instance.
        max_length: Target sequence length.
        pad_token_id: Padding token ID.
        left_pad: Pad on the left side if True.
        truncation: Truncation strategy ("left"/"right"/"error").

    Returns:
        Tuple of (input_ids, attention_mask) from ``postprocess_data``.
    """
    encoded = tokenizer(prompt, return_tensors="pt", add_special_tokens=False)
    return postprocess_data(
        encoded["input_ids"], encoded["attention_mask"], max_length, pad_token_id, left_pad, truncation
    )
def remove_pad_token(input_ids: torch.Tensor, attention_mask: torch.Tensor):
"""Remove the pad token.
Args:
input_ids shape: [bs, seq_length]
attention_mask shape: [bs, seq_length]
Returns:
no_padding_batch(List[List[int]]): contains the rmpad token ids per query.
"""
no_padding_batch = []
for ids, mask in zip(input_ids, attention_mask, strict=True):
no_padding_batch.append((ids[len(ids) - mask.sum() :]).cpu().numpy().tolist())
return no_padding_batch
def log_probs_from_logits_response(input_ids, logits, response_length):
    """Extract response-token log-probs from full-sequence logits.

    Note that logits = model(input_ids): the logits at position t predict
    token t+1, so the logits window is shifted one step left relative to the
    response tokens.

    Args:
        input_ids: [batch_size, seqlen]
        logits: [batch_size, seqlen, vocab_size]
        response_length: number of trailing tokens forming the response

    Returns:
        Log-probabilities of shape [batch_size, response_length].
    """
    shifted_logits = logits[:, -response_length - 1 : -1]
    response_tokens = input_ids[:, -response_length:]
    return logprobs_from_logits(logits=shifted_logits, labels=response_tokens)
def log_probs_from_logits_response_rmpad(input_ids, attention_mask, logits_rmpad, response_length):
    """Compute the log_probs from logits with rmpad logits and pad input. Note that
    logits_rmpad = model(input_ids_rmpad). For each sentences, there is a shift between
    logits and input_ids.
    The reason for this function to is to compute logprobs_from_logits in rmpad mode because it is memory-intensive
    for large vocab_size

    Args:
        input_ids: [batch_size, seqlen]
        attention_mask: [batch_size, seqlen]
        logits_rmpad: [total_nnz, vocab_size]
        response_length: int

    Returns:
        Log-probabilities for the response tokens, [batch_size, response_length].
    """
    from flash_attn.bert_padding import pad_input, unpad_input

    batch_size, seqlen = input_ids.shape
    # Remove padding so labels line up with the already-unpadded logits.
    input_ids_rmpad, indices, *_ = unpad_input(input_ids.unsqueeze(-1), attention_mask=attention_mask)
    input_ids_rmpad = input_ids_rmpad.squeeze(-1)
    # Shift labels left by one: logits at position t predict token t+1.
    # NOTE(review): the roll wraps across sequence boundaries in the packed
    # layout — presumably the affected positions fall outside the response
    # slice taken below; verify.
    input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=0)
    full_log_probs_rmpad = logprobs_from_logits(logits=logits_rmpad, labels=input_ids_rmpad_rolled)  # (total_nnz,)
    # Re-pad to [batch_size, seqlen], then keep only the response window.
    full_output = pad_input(
        hidden_states=full_log_probs_rmpad.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen
    )
    output = full_output.squeeze(-1)[:, -response_length - 1 : -1]  # [batch_size, response_length]
    return output
def log_probs_from_logits_all_rmpad(input_ids_rmpad, logits_rmpad, indices, batch_size, seqlen, response_length):
    """Compute the log_probs from logits with rmpad input_ids and logits. Note that
    logits_rmpad = model(input_ids_rmpad). For each sentences, there is a shift between
    logits and input_ids.
    The reason for this function to is to compute logprobs_from_logits in rmpad mode because it is memory-intensive
    for large vocab_size

    Args:
        input_ids_rmpad: [1, total_nnz]
        logits_rmpad: [total_nnz, vocab_size]
        indices: [total_nnz]
        batch_size: int
        seqlen: int
        response_length: int

    Returns:
        Log-probabilities for the response tokens, [batch_size, response_length].
    """
    # pad_input comes from flash_attn on CUDA and an NPU-compatible shim on NPU.
    # NOTE(review): there is no fallback for other device types — pad_input
    # would be unbound below; confirm only cuda/npu reach this path.
    if get_device_name() == "cuda":
        from flash_attn.bert_padding import pad_input
    elif get_device_name() == "npu":
        from verl.utils.attention_utils import pad_input
    input_ids_rmpad = input_ids_rmpad.transpose(0, 1)  # transpose back to [total_nnz, 1]
    input_ids_rmpad = input_ids_rmpad.squeeze(-1)
    # Shift labels left by one: logits at position t predict token t+1.
    input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=0)
    full_log_probs_rmpad = logprobs_from_logits(logits=logits_rmpad, labels=input_ids_rmpad_rolled)  # (total_nnz,)
    # Re-pad to [batch_size, seqlen], then keep only the response window.
    full_output = pad_input(
        hidden_states=full_log_probs_rmpad.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen
    )
    output = full_output.squeeze(-1)[:, -response_length - 1 : -1]  # [batch_size, response_length]
    return output
def post_process_logits(input_ids, logits, temperature, top_k, top_p):
    """Apply sampling transforms to raw logits (currently temperature only).

    ``top_k``/``top_p`` are accepted for interface compatibility but are not
    applied yet (see the TODO below).
    """
    if temperature != 1.0:
        # In-place division avoids allocating a second full logits tensor (OOM risk).
        logits = logits.div_(temperature)
    # TODO: add them back
    # if top_k is not None and top_k > 0:
    #     logits = TopKLogitsWarper(top_k=top_k)(input_ids, logits)
    # if top_p is not None and top_p < 1.0 and top_p > 0.0:
    #     logits = TopPLogitsWarper(top_p=top_p)(input_ids, logits)
    return logits
def calculate_sum_pi_squared_from_logits(logits: torch.Tensor):
    """
    Compute the exact sum of squared softmax probabilities from logits.

    Uses the log-domain identity Σπ² = exp(logsumexp(2*logits) - 2*logsumexp(logits))
    so the probabilities never need to be materialized.

    Used for optimal baseline variance reduction as described in
    "What Matters for Model Merging at Scale?" (arXiv:2410.03617)

    Args:
        logits: Logits tensor (..., vocab_size).

    Returns:
        Sum of squared probabilities tensor (...).
    """
    log_numerator = torch.logsumexp(2.0 * logits, dim=-1)
    log_denominator = 2.0 * torch.logsumexp(logits, dim=-1)
    return torch.exp(log_numerator - log_denominator)
"""
Optimizer related
"""
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    min_lr_ratio: float = 0.0,
    num_cycles: float = 0.5,
    last_epoch: int = -1,
    init_lr_ratio: float = None,
):
    """Cosine-decay LR schedule preceded by a linear warmup.

    During warmup the multiplier rises linearly from ``init_lr_ratio`` (0 by
    default) to 1; afterwards it follows a cosine curve bounded below by
    ``min_lr_ratio``.

    Args:
        optimizer (:class:`~torch.optim.Optimizer`):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (:obj:`int`):
            The number of steps for the warmup phase.
        num_training_steps (:obj:`int`):
            The total number of training steps.
        min_lr_ratio (:obj:`float`, `optional`, defaults to 0.0):
            The minimum lr ratio w.r.t the maximum.
        num_cycles (:obj:`float`, `optional`, defaults to 0.5):
            The number of waves in the cosine schedule (the default decays
            from the max value to the minimum following a half-cosine).
        last_epoch (:obj:`int`, `optional`, defaults to -1):
            The index of the last epoch when resuming training.
        init_lr_ratio (:obj:`float`, `optional`, defaults to None):
            The initial lr ratio w.r.t the maximum.

    Return:
        :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    min_lr_ratio = 0.0 if min_lr_ratio is None else min_lr_ratio
    assert 0 <= min_lr_ratio <= 1.0
    init_lr_ratio = 0.0 if init_lr_ratio is None else init_lr_ratio
    assert 0 <= init_lr_ratio <= 1.0

    # Affine map sending cos(...) in [-1, 1] onto [min_lr_ratio, 1].
    cosine_scale = (1 - min_lr_ratio) * 0.5
    cosine_offset = (1 + min_lr_ratio) * 0.5

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            warmup_frac = float(current_step) / float(max(1, num_warmup_steps))
            return init_lr_ratio + (1.0 - init_lr_ratio) * warmup_frac
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        cosine_value = math.cos(math.pi * float(num_cycles) * 2.0 * progress)
        return max(min_lr_ratio, cosine_value * cosine_scale + cosine_offset)

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_constant_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    last_epoch: int = -1,
):
    """
    Constant LR schedule preceded by a linear warmup phase.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        num_warmup_steps (int): Number of steps to ramp up the LR from 0 to initial value.
        last_epoch (int, optional): The index of the last epoch when resuming training. Defaults to -1.

    Returns:
        LambdaLR: Scheduler that increases LR linearly during warmup, then holds it constant.
    """

    def lr_lambda(current_step):
        if current_step >= num_warmup_steps:
            return 1.0
        return float(current_step) / float(max(1.0, num_warmup_steps))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def prepare_decoder_attention_mask(attention_mask, input_shape, inputs_embeds):
    """Combine a causal mask with an optional padding mask for decoding.

    Expands ``attention_mask`` from [bsz, seq_len] to
    [bsz, 1, tgt_seq_len, src_seq_len] and adds it to the causal mask.
    Returns None when neither mask applies (single-token input, no padding mask).
    """
    causal_mask = None
    if input_shape[-1] > 1:
        causal_mask = _make_causal_mask(
            input_shape,
            inputs_embeds.dtype,
            device=inputs_embeds.device,
        )

    if attention_mask is None:
        return causal_mask

    # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
    padding_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
        inputs_embeds.device
    )
    return padding_mask if causal_mask is None else padding_mask + causal_mask
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len)
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
def get_unpad_data(attention_mask):
    """Derive flash-attention unpadding metadata from an attention mask.

    Returns:
        tuple: (flat indices of non-pad tokens across the batch,
                cumulative sequence lengths ``cu_seqlens`` (int32, leading 0),
                length of the longest sequence in the batch).
    """
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = attention_mask.flatten().nonzero(as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    # Prepend 0 so cu_seqlens[i]..cu_seqlens[i+1] brackets sequence i.
    cu_seqlens = F.pad(seqlens_in_batch.cumsum(dim=0, dtype=torch.int32), (1, 0))
    return indices, cu_seqlens, max_seqlen_in_batch
def get_wsd_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    min_lr_ratio: float = 0.0,
    num_cycles: float = 0.5,
    last_epoch: int = -1,
    stable_ratio: float = 0.9,
):
    """
    Create a Warmup-Stable-Decay learning rate scheduler.

    The schedule follows three phases:
    1. Warmup: Learning rate increases linearly from 0 to the initial LR
    2. Stable: Learning rate remains constant at the initial LR
    3. Decay: Learning rate decreases following a cosine curve to min_lr_ratio * initial LR

    Args:
        optimizer (:class:`~torch.optim.Optimizer`):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (:obj:`int`):
            The number of steps for the warmup phase.
        num_training_steps (:obj:`int`):
            The total number of training steps.
        min_lr_ratio (:obj:`float`, `optional`, defaults to 0.0):
            The minimum learning rate ratio w.r.t the initial learning rate.
        num_cycles (:obj:`float`, `optional`, defaults to 0.5):
            The number of waves in the cosine schedule during decay phase.
        last_epoch (:obj:`int`, `optional`, defaults to -1):
            The index of the last epoch when resuming training.
        stable_ratio (:obj:`float`, `optional`, defaults to 0.9):
            The ratio of non-warmup steps that should maintain a constant learning rate.
            Set to 0.0 to behave exactly like cosine schedule.

    Return:
        :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    # Partition the post-warmup steps into a constant phase and a decay phase.
    remaining_steps = max(0, num_training_steps - num_warmup_steps)
    num_stable_steps = int(remaining_steps * stable_ratio)
    num_decay_steps = remaining_steps - num_stable_steps

    def lr_lambda(current_step):
        # Phase 1: linear warmup from 0 to 1.
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Phase 2: hold the multiplier at 1.
        if current_step < num_warmup_steps + num_stable_steps:
            return 1.0
        # Phase 3: cosine decay toward min_lr_ratio.
        if current_step < num_training_steps:
            progress = float(current_step - num_warmup_steps - num_stable_steps) / float(max(1, num_decay_steps))
            value = max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
            return (1.0 - min_lr_ratio) * value + min_lr_ratio
        # Past the end of training: stay at the floor.
        return min_lr_ratio

    return LambdaLR(optimizer, lr_lambda, last_epoch)
@contextmanager
def check_device_is_available():
    """
    Some modules must be imported after CUDA is initialized. Such as sglang's sharding manager.
    This context manager checks if CUDA is available and raises an error if it is not.

    Raises:
        RuntimeError: If the current accelerator device is not available.
    """
    # Fail fast before the caller performs any device-dependent imports.
    if not get_torch_device().is_available():
        raise RuntimeError("Device {} must be initialized before importing this module.".format(get_device_name()))
    yield
def distributed_mean_max_min_std(local_tensor, compute_max=True, compute_min=True, compute_std=True):
    """Compute mean/max/min/std of ``local_tensor`` across all ranks.

    Args:
        local_tensor: Tensor containing this rank's values.
        compute_max: Also compute the global maximum.
        compute_min: Also compute the global minimum.
        compute_std: Also compute the (unbiased) global standard deviation.

    Returns:
        Tuple (mean, max, min, std) in this order; disabled metrics are None.
    """
    # Global mean: all-reduce the value sum and element count separately.
    value_sum = torch.sum(local_tensor)
    value_count = torch.tensor(torch.numel(local_tensor), device=get_device_name())
    torch.distributed.all_reduce(value_sum, op=torch.distributed.ReduceOp.SUM)
    torch.distributed.all_reduce(value_count, op=torch.distributed.ReduceOp.SUM)
    global_mean = value_sum / value_count

    global_max = None
    if compute_max:
        global_max = torch.max(local_tensor)
        torch.distributed.all_reduce(global_max, op=torch.distributed.ReduceOp.MAX)

    global_min = None
    if compute_min:
        global_min = torch.min(local_tensor)
        torch.distributed.all_reduce(global_min, op=torch.distributed.ReduceOp.MIN)

    global_std = None
    if compute_std:
        # Unbiased estimate using squared deviations from the *global* mean.
        squared_dev = torch.sum(torch.pow(local_tensor - global_mean, 2))
        torch.distributed.all_reduce(squared_dev, op=torch.distributed.ReduceOp.SUM)
        global_std = torch.sqrt(squared_dev / (value_count - 1))

    return global_mean, global_max, global_min, global_std
def distributed_masked_mean(local_tensor, local_mask):
    """Global mean of mask-selected elements across all distributed ranks.

    Args:
        local_tensor (torch.Tensor): This rank's values.
        local_mask (torch.Tensor): Binary mask (1=valid, 0=ignore), same shape
            as ``local_tensor``.

    Returns:
        torch.Tensor: Mean over every valid element on every rank.
    """
    masked_sum_local = torch.sum(local_tensor * local_mask)
    valid_count_local = torch.sum(local_mask)
    torch.distributed.all_reduce(masked_sum_local, op=torch.distributed.ReduceOp.SUM)
    torch.distributed.all_reduce(valid_count_local, op=torch.distributed.ReduceOp.SUM)
    return masked_sum_local / valid_count_local
def expand_as_nested(tensor: torch.Tensor, nested_tensor: torch.Tensor) -> torch.Tensor:
"""
Args:
tensor: a tensor with shape (bsz,)
nested_tensor: a nested tensor with shape (bsz, xxx)
Returns:
a tensor with the same shape as nested_tensor
"""
assert nested_tensor.is_nested, "nested_tensor must be nested"
assert tensor.shape[0] == nested_tensor.shape[0], (
f"The batch shape must be the same. Got {tensor.shape[0]} vs {nested_tensor.shape[0]}"
)
assert len(tensor.shape) == 1, "The ndim of tensor must be 1"
assert len(nested_tensor.shape) == 2, "The ndim of nested_tensor must be 2"
offsets = nested_tensor.offsets()
seqlens = offsets.diff()
output = torch.repeat_interleave(tensor, seqlens, dim=0)
output = torch.nested.nested_tensor_from_jagged(values=output, offsets=offsets)
return output
@contextmanager
def use_original_torch_compile():
    """torch.compile might be replaced by mindspeed on NPU, this contextmanager
    can revert torch.compile temporarily.

    Searches MindSpeed's patch registry for an applied patch on
    ``torch.compile``; if one is found it is removed for the duration of the
    block and re-applied afterwards. When mindspeed is absent (ImportError)
    the context is a no-op.
    """
    try:
        from mindspeed.patch_utils import MindSpeedPatchesManager

        # Locate the currently-applied patch targeting torch.compile, if any.
        compile_patch = None
        for patch in MindSpeedPatchesManager.patches_info.values():
            if patch.orig_module_name == "torch" and patch.orig_func_name == "compile":
                if patch.is_applied():
                    compile_patch = patch
                    break
        if compile_patch is not None:
            compile_patch.remove_patch()
            yield
            # NOTE(review): if the with-body raises, this re-apply is skipped
            # and the broad `except` below yields a second time, which
            # @contextmanager rejects — consider try/finally here.
            compile_patch.apply_patch()
        else:
            yield
    except Exception:
        yield
| verl__utils__torch_functional.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A unified tracking interface that supports logging data to different backend
"""
import dataclasses
import json
import os
from enum import Enum
from functools import partial
from pathlib import Path
from typing import Any
import orjson
class Tracking:
"""A unified tracking interface for logging experiment data to multiple backends.
This class provides a centralized way to log experiment metrics, parameters, and artifacts
to various tracking backends including WandB, MLflow, SwanLab, TensorBoard, and console.
Attributes:
supported_backend: List of supported tracking backends.
logger: Dictionary of initialized logger instances for each backend.
"""
supported_backend = [
"wandb",
"mlflow",
"swanlab",
"vemlp_wandb",
"tensorboard",
"console",
"clearml",
"trackio",
"file",
]
def __init__(self, project_name, experiment_name, default_backend: str | list[str] = "console", config=None):
if isinstance(default_backend, str):
default_backend = [default_backend]
for backend in default_backend:
if backend == "tracking":
import warnings
warnings.warn("`tracking` logger is deprecated. use `wandb` instead.", DeprecationWarning, stacklevel=2)
else:
assert backend in self.supported_backend, f"{backend} is not supported"
self.logger = {}
if "tracking" in default_backend or "wandb" in default_backend:
import os
import wandb
settings = None
if config and config["trainer"].get("wandb_proxy", None):
settings = wandb.Settings(https_proxy=config["trainer"]["wandb_proxy"])
entity = os.environ.get("WANDB_ENTITY", None)
wandb.init(project=project_name, name=experiment_name, entity=entity, config=config, settings=settings)
self.logger["wandb"] = wandb
if "trackio" in default_backend:
import trackio
trackio.init(project=project_name, name=experiment_name, config=config)
self.logger["trackio"] = trackio
if "mlflow" in default_backend:
import os
import mlflow
MLFLOW_TRACKING_URI = os.environ.get("MLFLOW_TRACKING_URI", "sqlite:////tmp/mlruns.db")
mlflow.set_tracking_uri(MLFLOW_TRACKING_URI)
# Some cloud providers like Azure ML or Databricks automatically set MLFLOW_RUN_ID
# If set, attach to the existing run instead of creating a new one
run_id = os.environ.get("MLFLOW_RUN_ID")
if run_id:
mlflow.start_run(run_id=run_id)
else:
# Project_name is actually experiment_name in MLFlow
# If experiment does not exist, will create a new experiment
experiment = mlflow.set_experiment(project_name)
mlflow.start_run(experiment_id=experiment.experiment_id, run_name=experiment_name)
mlflow.log_params(_compute_mlflow_params_from_objects(config))
self.logger["mlflow"] = _MlflowLoggingAdapter()
if "swanlab" in default_backend:
import os
import swanlab
SWANLAB_API_KEY = os.environ.get("SWANLAB_API_KEY", None)
SWANLAB_LOG_DIR = os.environ.get("SWANLAB_LOG_DIR", "swanlog")
SWANLAB_MODE = os.environ.get("SWANLAB_MODE", "cloud")
if SWANLAB_API_KEY:
swanlab.login(SWANLAB_API_KEY) # NOTE: previous login information will be overwritten
if config is None:
config = {} # make sure config is not None, otherwise **config will raise error
swanlab.init(
project=project_name,
experiment_name=experiment_name,
config={"FRAMEWORK": "verl", **config},
logdir=SWANLAB_LOG_DIR,
mode=SWANLAB_MODE,
)
self.logger["swanlab"] = swanlab
if "vemlp_wandb" in default_backend:
import os
import volcengine_ml_platform
from volcengine_ml_platform import wandb as vemlp_wandb
volcengine_ml_platform.init(
ak=os.environ["VOLC_ACCESS_KEY_ID"],
sk=os.environ["VOLC_SECRET_ACCESS_KEY"],
region=os.environ["MLP_TRACKING_REGION"],
)
vemlp_wandb.init(
project=project_name,
name=experiment_name,
config=config,
sync_tensorboard=True,
)
self.logger["vemlp_wandb"] = vemlp_wandb
if "tensorboard" in default_backend:
self.logger["tensorboard"] = _TensorboardAdapter(project_name, experiment_name)
if "console" in default_backend:
from verl.utils.logger import LocalLogger
self.console_logger = LocalLogger(print_to_console=True)
self.logger["console"] = self.console_logger
if "clearml" in default_backend:
self.logger["clearml"] = ClearMLLogger(project_name, experiment_name, config)
if "file" in default_backend:
self.logger["file"] = FileLogger(project_name, experiment_name)
def log(self, data, step, backend=None):
    """Dispatch a metrics dict to the configured tracking backends.

    Args:
        data: mapping of metric name -> value to record.
        step: global step to associate with these metrics.
        backend: optional collection of backend names; when given, only
            backends whose name appears in it receive the data.
    """
    for name, adapter in self.logger.items():
        if backend is not None and name not in backend:
            continue
        adapter.log(data=data, step=step)
def __del__(self):
if "wandb" in self.logger:
self.logger["wandb"].finish(exit_code=0)
if "swanlab" in self.logger:
self.logger["swanlab"].finish()
if "vemlp_wandb" in self.logger:
self.logger["vemlp_wandb"].finish(exit_code=0)
if "tensorboard" in self.logger:
self.logger["tensorboard"].finish()
if "clearml" in self.logger:
self.logger["clearml"].finish()
if "trackio" in self.logger:
self.logger["trackio"].finish()
if "file" in self.logger:
self.logger["file"].finish()
class ClearMLLogger:
def __init__(self, project_name: str, experiment_name: str, config):
self.project_name = project_name
self.experiment_name = experiment_name
import clearml
self._task: clearml.Task = clearml.Task.init(
task_name=experiment_name,
project_name=project_name,
continue_last_task=True,
output_uri=False,
)
self._task.connect_configuration(config, name="Hyperparameters")
def _get_logger(self):
return self._task.get_logger()
def log(self, data, step):
import numpy as np
import pandas as pd
# logs = self._rewrite_logs(data)
logger = self._get_logger()
for k, v in data.items():
title, series = k.split("/", 1)
if isinstance(v, int | float | np.floating | np.integer):
logger.report_scalar(
title=title,
series=series,
value=v,
iteration=step,
)
elif isinstance(v, pd.DataFrame):
logger.report_table(
title=title,
series=series,
table_plot=v,
iteration=step,
)
else:
logger.warning(
f'Trainer is attempting to log a value of "{v}" of type {type(v)} for key "{k}". This '
f"invocation of ClearML logger's function is incorrect so this attribute was dropped. "
)
def finish(self):
self._task.close()
class FileLogger:
    """Append-only JSONL metrics logger: one record per log() call."""

    def __init__(self, project_name: str, experiment_name: str):
        self.project_name = project_name
        self.experiment_name = experiment_name

        # An explicit path wins; otherwise derive <root>/<project>/<experiment>.jsonl.
        self.filepath = os.getenv("VERL_FILE_LOGGER_PATH", None)
        if self.filepath is None:
            root_path = os.path.expanduser(os.getenv("VERL_FILE_LOGGER_ROOT", "."))
            directory = os.path.join(root_path, self.project_name)
            os.makedirs(directory, exist_ok=True)
            self.filepath = os.path.join(directory, f"{self.experiment_name}.jsonl")
        print(f"Creating file logger at {self.filepath}")
        # Unbuffered binary handle: orjson emits bytes, and buffering=0 makes
        # every record visible on disk immediately.
        self.fp = open(self.filepath, "wb", buffering=0)

    def log(self, data, step):
        record = {"step": step, "data": data}
        self.fp.write(orjson.dumps(record, option=orjson.OPT_SERIALIZE_NUMPY) + b"\n")

    def finish(self):
        self.fp.close()
class _TensorboardAdapter:
def __init__(self, project_name, experiment_name):
import os
from torch.utils.tensorboard import SummaryWriter
tensorboard_dir = os.environ.get("TENSORBOARD_DIR", f"tensorboard_log/{project_name}/{experiment_name}")
os.makedirs(tensorboard_dir, exist_ok=True)
print(f"Saving tensorboard log to {tensorboard_dir}.")
self.writer = SummaryWriter(tensorboard_dir)
def log(self, data, step):
for key in data:
self.writer.add_scalar(key, data[key], step)
def finish(self):
self.writer.close()
class _MlflowLoggingAdapter:
def __init__(self):
import logging
import re
self.logger = logging.getLogger(__name__)
# MLflow metric key validation logic:
# https://github.com/mlflow/mlflow/blob/master/mlflow/utils/validation.py#L157C12-L157C44
# Only characters allowed: slashes, alphanumerics, underscores, periods, dashes, colons,
# and spaces.
self._invalid_chars_pattern = re.compile(
r"[^/\w.\- :]"
) # Allowed: slashes, alphanumerics, underscores, periods, dashes, colons, and spaces.
self._consecutive_slashes_pattern = re.compile(r"/+")
def log(self, data, step):
import mlflow
def sanitize_key(key):
# First replace @ with _at_ for backward compatibility
sanitized = key.replace("@", "_at_")
# Replace consecutive slashes with a single slash (MLflow treats them as file paths)
sanitized = self._consecutive_slashes_pattern.sub("/", sanitized)
# Then replace any other invalid characters with _
sanitized = self._invalid_chars_pattern.sub("_", sanitized)
if sanitized != key:
self.logger.warning(
"[MLflow] Metric key '%s' sanitized to '%s' due to invalid characters.", key, sanitized
)
return sanitized
results = {sanitize_key(k): v for k, v in data.items()}
mlflow.log_metrics(metrics=results, step=step)
def _compute_mlflow_params_from_objects(params) -> dict[str, Any]:
    """Convert an arbitrary config object into a flat, slash-separated MLflow params dict."""
    if params is None:
        return {}
    serializable = _transform_params_to_json_serializable(params, convert_list_to_dict=True)
    return _flatten_dict(serializable, sep="/")
def _transform_params_to_json_serializable(x, convert_list_to_dict: bool):
_transform = partial(_transform_params_to_json_serializable, convert_list_to_dict=convert_list_to_dict)
if dataclasses.is_dataclass(x):
return _transform(dataclasses.asdict(x))
if isinstance(x, dict):
return {k: _transform(v) for k, v in x.items()}
if isinstance(x, list):
if convert_list_to_dict:
return {"list_len": len(x)} | {f"{i}": _transform(v) for i, v in enumerate(x)}
else:
return [_transform(v) for v in x]
if isinstance(x, Path):
return str(x)
if isinstance(x, Enum):
return x.value
return x
def _flatten_dict(raw: dict[str, Any], *, sep: str) -> dict[str, Any]:
import pandas as pd
ans = pd.json_normalize(raw, sep=sep).to_dict(orient="records")[0]
assert isinstance(ans, dict)
return ans
@dataclasses.dataclass
class ValidationGenerationsLogger:
    """Logs validation generations — (input, output, score) triples — to the
    active tracking backends.

    project_name / experiment_name are only used to derive the default
    TensorBoard log directory.
    """

    # Both optional; only the tensorboard path builder reads them.
    project_name: str = None
    experiment_name: str = None

    def log(self, loggers, samples, step):
        """Dispatch *samples* to every backend whose name appears in *loggers*."""
        if "wandb" in loggers:
            self.log_generations_to_wandb(samples, step)
        if "swanlab" in loggers:
            self.log_generations_to_swanlab(samples, step)
        if "mlflow" in loggers:
            self.log_generations_to_mlflow(samples, step)
        if "clearml" in loggers:
            self.log_generations_to_clearml(samples, step)
        if "tensorboard" in loggers:
            self.log_generations_to_tensorboard(samples, step)
        if "vemlp_wandb" in loggers:
            self.log_generations_to_vemlp_wandb(samples, step)

    def log_generations_to_vemlp_wandb(self, samples, step):
        """Log samples via Volcengine ML platform's wandb wrapper."""
        from volcengine_ml_platform import wandb as vemlp_wandb

        self._log_generations_to_wandb(samples, step, vemlp_wandb)

    def log_generations_to_wandb(self, samples, step):
        """Log samples to plain wandb."""
        import wandb

        self._log_generations_to_wandb(samples, step, wandb)

    def _log_generations_to_wandb(self, samples, step, wandb):
        """Log samples to wandb as a table"""
        # Create column names for all samples
        columns = ["step"] + sum(
            [[f"input_{i + 1}", f"output_{i + 1}", f"score_{i + 1}"] for i in range(len(samples))], []
        )

        if not hasattr(self, "validation_table"):
            # Initialize the table on first call
            self.validation_table = wandb.Table(columns=columns)

        # Create a new table with same columns and existing data
        # Workaround for https://github.com/wandb/wandb/issues/2981#issuecomment-1997445737
        new_table = wandb.Table(columns=columns, data=self.validation_table.data)

        # Add new row with all data: the step followed by each sample's
        # (input, output, score) triple flattened in order.
        row_data = []
        row_data.append(step)
        for sample in samples:
            row_data.extend(sample)
        new_table.add_data(*row_data)

        # Update reference and log (only log when a run is active).
        if wandb.run is not None:
            wandb.log({"val/generations": new_table}, step=step)
        self.validation_table = new_table

    def log_generations_to_swanlab(self, samples, step):
        """Log samples to swanlab as text"""
        import swanlab

        swanlab_table = swanlab.echarts.Table()

        # Create column names
        headers = ["step", "input", "output", "score"]
        # One row per sample, each prefixed with the step.
        swanlab_row_list = [[step, *sample] for sample in samples]
        swanlab_table.add(headers=headers, rows=swanlab_row_list)

        # Log to swanlab
        swanlab.log({"val/generations": swanlab_table}, step=step)

    def log_generations_to_mlflow(self, samples, step):
        """Log validation generation to mlflow as artifacts"""
        # https://mlflow.org/docs/latest/api_reference/python_api/mlflow.html?highlight=log_artifact#mlflow.log_artifact
        import tempfile

        import mlflow

        try:
            with tempfile.TemporaryDirectory() as tmp_dir:
                validation_gen_step_file = Path(tmp_dir, f"val_step{step}.json")
                row_data = []
                for sample in samples:
                    data = {"input": sample[0], "output": sample[1], "score": sample[2]}
                    row_data.append(data)
                with open(validation_gen_step_file, "w") as file:
                    json.dump(row_data, file)
                mlflow.log_artifact(validation_gen_step_file)
        except Exception as e:
            # Best-effort: an artifact-upload failure must not kill training.
            print(f"WARNING: save validation generation file to mlflow failed with error {e}")

    def log_generations_to_clearml(self, samples, step):
        """Log validation generation to clearml as table"""
        import clearml
        import pandas as pd

        # Only log when running inside an active ClearML task.
        task: clearml.Task | None = clearml.Task.current_task()
        if task is None:
            return

        table = [
            {
                "step": step,
                "input": sample[0],
                "output": sample[1],
                "score": sample[2],
            }
            for sample in samples
        ]

        logger = task.get_logger()
        logger.report_table(
            series="Validation generations",
            title="Validation",
            table_plot=pd.DataFrame.from_records(table),
            iteration=step,
        )

    def log_generations_to_tensorboard(self, samples, step):
        """Log samples to tensorboard as text"""
        # Initialize tensorboard writer if not exists (created lazily so this
        # dataclass stays cheap when tensorboard is not among the backends).
        if not hasattr(self, "writer"):
            from torch.utils.tensorboard import SummaryWriter

            # Use the same directory structure as _TensorboardAdapter
            if self.project_name and self.experiment_name:
                default_dir = os.path.join("tensorboard_log", self.project_name, self.experiment_name)
            else:
                default_dir = "tensorboard_log"
            tensorboard_dir = os.environ.get("TENSORBOARD_DIR", default_dir)
            os.makedirs(tensorboard_dir, exist_ok=True)
            self.writer = SummaryWriter(log_dir=tensorboard_dir)

        # Format the samples data into readable markdown-style text
        text_content = f"**Generation Results - Step {step}**\n\n"
        for i, sample in enumerate(samples):
            text_content += f"### Sample {i + 1}\n"
            # Assuming sample contains [input, output, score]
            if len(sample) >= 3:
                input_text, output_text, score = sample[0], sample[1], sample[2]
                text_content += f"**Input:** {input_text}\n\n"
                text_content += f"**Output:** {output_text}\n\n"
                text_content += f"**Score:** {score}\n\n"
            else:
                # Handle cases where sample format might be different
                text_content += f"**Data:** {sample}\n\n"
            text_content += "---\n\n"

        # Log to tensorboard as text
        self.writer.add_text("val/generations", text_content, step)
        # Flush to ensure data is written
        self.writer.flush()
| verl__utils__tracking.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import functools
import inspect
import logging
import os
import threading
from functools import wraps
from typing import TYPE_CHECKING, Any, Callable
if TYPE_CHECKING:
from verl.single_controller.base.decorator import Dispatch
from tensordict import TensorDict
try:
from transfer_queue import (
AsyncTransferQueueClient,
BatchMeta,
TransferQueueClient,
)
except ImportError:
# TODO: Use a hacky workaround for ImportError since
# transfer_queue isn't a default verl dependency.
class BatchMeta:
pass
from verl.protocol import DataProto
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
_TRANSFER_QUEUE_CLIENT = None
is_transferqueue_enabled = os.environ.get("TRANSFER_QUEUE_ENABLE", False)
def create_transferqueue_client(
    client_id: str,
    config,
    sync: bool = False,
) -> "AsyncTransferQueueClient | TransferQueueClient":
    """Create (once) and return the process-wide TransferQueue client.

    Args:
        client_id: identifier registered with the TransferQueue controller.
        config: config object exposing ``controller_info`` and ``storage_backend``.
        sync: build the synchronous client when True, the async one otherwise.

    Returns:
        The singleton client; subsequent calls return the first instance.
    """
    global _TRANSFER_QUEUE_CLIENT
    if _TRANSFER_QUEUE_CLIENT is None:
        client_cls = TransferQueueClient if sync else AsyncTransferQueueClient
        _TRANSFER_QUEUE_CLIENT = client_cls(client_id, config.controller_info)
        _TRANSFER_QUEUE_CLIENT.initialize_storage_manager(manager_type=config.storage_backend, config=config)
    return _TRANSFER_QUEUE_CLIENT
def get_transferqueue_client() -> "AsyncTransferQueueClient | TransferQueueClient":
    """Return the singleton created by create_transferqueue_client (None before creation)."""
    return _TRANSFER_QUEUE_CLIENT
# TODO (TQ): verl will make all actor async, so this can be cleanup later.
def _run_async_in_temp_loop(async_func: Callable[..., Any], *args, **kwargs) -> Any:
    """Run *async_func* to completion on a dedicated event loop in a helper thread.

    Needed because the calling thread may already be inside a running event
    loop (server mode), where blocking on the current loop is not possible.

    Args:
        async_func: coroutine function to execute.
        *args, **kwargs: forwarded to ``async_func``.

    Returns:
        Whatever the coroutine returns.
    """
    # Use a temporary event loop in a new thread because event
    # loop may already exist in server mode
    tmp_event_loop = asyncio.new_event_loop()
    thread = threading.Thread(
        target=tmp_event_loop.run_forever,
        name="batchmeta dataproto converter",
        daemon=True,
    )

    def run_coroutine(coroutine):
        # Start the loop thread lazily, then block until the coroutine finishes.
        if not thread.is_alive():
            thread.start()
        future = asyncio.run_coroutine_threadsafe(coroutine, tmp_event_loop)
        return future.result()

    async def stop_loop():
        # Runs on the temp loop so run_forever() returns and the thread exits.
        tmp_event_loop.stop()

    try:
        return run_coroutine(async_func(*args, **kwargs))
    finally:
        # Always tear the helper thread down, even if the coroutine raised.
        if thread.is_alive():
            asyncio.run_coroutine_threadsafe(stop_loop(), tmp_event_loop)
            thread.join()
def _find_batchmeta(*args, **kwargs):
    """Return the first BatchMeta among args (then kwargs values), or None."""
    for candidate in (*args, *kwargs.values()):
        if isinstance(candidate, BatchMeta):
            return candidate
    return None
async def _async_batchmeta_to_dataproto(batchmeta: "BatchMeta") -> DataProto:
    """Fetch the data referenced by *batchmeta* from the TransferQueue as a DataProto."""
    if batchmeta.samples is None or batchmeta.samples == []:
        # Nothing to fetch: produce an empty DataProto carrying only extra_info.
        return DataProto(
            batch=TensorDict({}, batch_size=(0,)),
            non_tensor_batch={},
            meta_info=batchmeta.extra_info.copy(),
        )
    fetched = await _TRANSFER_QUEUE_CLIENT.async_get_data(batchmeta)
    return DataProto.from_tensordict(fetched, meta_info=batchmeta.extra_info.copy())
def _batchmeta_to_dataproto(batchmeta: "BatchMeta") -> DataProto:
    """Synchronous wrapper: run the async conversion on a temporary event loop."""
    return _run_async_in_temp_loop(_async_batchmeta_to_dataproto, batchmeta)
async def _async_update_batchmeta_with_output(output: DataProto, batchmeta: "BatchMeta", func_name=None) -> "BatchMeta":
    """Write a task's DataProto output back to the TransferQueue; return the updated BatchMeta."""
    pid = os.getpid()

    # Propagate meta_info entries onto the BatchMeta regardless of batch size.
    for key, value in output.meta_info.items():
        batchmeta.set_extra_info(key, value)

    if len(output) == 0:
        return batchmeta

    tensordict = output.to_tensordict()
    # pop meta_info entries that to_tensordict carried over, keeping only batch data
    for key in output.meta_info.keys():
        tensordict.pop(key)
    logger.info(
        f"Task {func_name} (pid={pid}) putting output data to TransferQueue with "
        f"batch_size={tensordict.batch_size},\n"
        f"tensordict keys={list(tensordict.keys())}"
    )
    return await _TRANSFER_QUEUE_CLIENT.async_put(data=tensordict, metadata=batchmeta)
def _update_batchmeta_with_output(output: DataProto, batchmeta: "BatchMeta", func_name=None) -> "BatchMeta":
    """Synchronous wrapper around _async_update_batchmeta_with_output (temporary event loop)."""
    updated_batch_meta = _run_async_in_temp_loop(_async_update_batchmeta_with_output, output, batchmeta, func_name)
    return updated_batch_meta
def _compute_need_collect(dispatch_mode: "dict | Dispatch", args: list) -> bool:
    """Decide whether the current worker should collect data.

    With a None or Dispatch dispatch_mode the answer is always True. With a
    dict dispatch_mode, a ``functools.partial`` collect_fn named
    ``collect_lazy_compute_data_proto`` whose first bound argument names a mesh
    is consulted against the Worker passed as ``args[0]``; every other shape
    falls back to True (collect).

    Args:
        dispatch_mode: None, a Dispatch instance, or a dict with a "collect_fn" key.
        args: positional arguments of the wrapped call; args[0] may be a Worker.

    Returns:
        bool: True if data collection is needed, False otherwise.
    """
    from verl.single_controller.base.decorator import Dispatch
    from verl.single_controller.base.worker import Worker

    if dispatch_mode is None or isinstance(dispatch_mode, Dispatch):
        return True

    assert "collect_fn" in dispatch_mode.keys(), "collect_fn should be in dispatch_mode."
    collect_fn = dispatch_mode["collect_fn"]

    # Without a functools.partial we cannot recover the mesh name — default to collecting.
    if not isinstance(collect_fn, functools.partial):
        return True
    if collect_fn.func.__name__ != "collect_lazy_compute_data_proto":
        return True
    if len(args) < 1 or not isinstance(args[0], Worker):
        return True

    mesh_name = collect_fn.args[0] if collect_fn.args else None
    if mesh_name is None:
        return True
    # Ask the worker whether its dp rank for this mesh should collect.
    return args[0].query_collect_info(mesh_name)
def _postprocess_common(output, put_data, need_collect):
    """Normalize a wrapped function's return value for the TransferQueue bridge.

    Args:
        output: original output of the decorated function.
        put_data: whether the output was destined for TransferQueue storage.
        need_collect: whether this process needs to collect data.

    Returns:
        BatchMeta.empty() when put_data is set but collection is not needed;
        an empty DataProto when neither applies and *output* is a DataProto;
        otherwise *output* unchanged.
    """
    if not need_collect:
        if put_data:
            # Data is stored by the collecting rank; hand back a placeholder.
            return BatchMeta.empty()
        if isinstance(output, DataProto):
            # Avoid redundant communication: replace with an empty DataProto.
            return DataProto()
    return output
def tqbridge(dispatch_mode: "dict | Dispatch" = None, put_data: bool = True):
    """Creates a decorator for bridging BatchMeta and DataProto.

    This decorator automatically handles conversions between `BatchMeta` and
    `DataProto` in function parameters, and decides whether to sync function
    output back to `BatchMeta` based on configuration(`put_data`). It supports
    both synchronous and asynchronous functions (async def); the enhanced logic
    is active only when the module-level `is_transferqueue_enabled` flag is set
    (when disabled, simply calls the original function as-is).

    Args:
        dispatch_mode: Controls data collection behavior for the current worker. Passed to
            _compute_need_collect to determine if current worker should collect data.
            If None, _compute_need_collect will return True to fallback default logics.
        put_data: Whether put the DataProto into Storage after func return.
            If True, after function execution, the output result will be
            updated to `BatchMeta` and `BatchMeta` will be returned;
            If False, the function output result will be returned directly.
            Defaults to True.

    Returns:
        A decorator function used to decorate target functions (synchronous or asynchronous).
    """

    def decorator(func):
        # Captured once at decoration time; used only in log messages.
        pid = os.getpid()

        @wraps(func)
        def inner(*args, **kwargs):
            # Sync path: convert BatchMeta args to DataProto, call, optionally put back.
            batchmeta = _find_batchmeta(*args, **kwargs)
            if batchmeta is None:
                return func(*args, **kwargs)
            else:
                logger.info(
                    f"Task {func.__name__} (pid={pid}) is getting len_samples={batchmeta.size}, "
                    f"global_idx={batchmeta.global_indexes}"
                )
                args = [_batchmeta_to_dataproto(arg) if isinstance(arg, BatchMeta) else arg for arg in args]
                kwargs = {k: _batchmeta_to_dataproto(v) if isinstance(v, BatchMeta) else v for k, v in kwargs.items()}
                output = func(*args, **kwargs)
                need_collect = _compute_need_collect(dispatch_mode, args)
                if put_data and need_collect:
                    updated_batch_meta = _update_batchmeta_with_output(output, batchmeta, func.__name__)
                    return updated_batch_meta
                return _postprocess_common(output, put_data, need_collect)

        @wraps(func)
        async def async_inner(*args, **kwargs):
            # Async path: identical logic using the await-based converters.
            batchmeta = _find_batchmeta(*args, **kwargs)
            if batchmeta is None:
                return await func(*args, **kwargs)
            else:
                logger.info(
                    f"Task {func.__name__} (pid={pid}) is getting len_samples={batchmeta.size}, "
                    f"global_idx={batchmeta.global_indexes}"
                )
                args = [await _async_batchmeta_to_dataproto(arg) if isinstance(arg, BatchMeta) else arg for arg in args]
                kwargs = {
                    k: await _async_batchmeta_to_dataproto(v) if isinstance(v, BatchMeta) else v
                    for k, v in kwargs.items()
                }
                output = await func(*args, **kwargs)
                need_collect = _compute_need_collect(dispatch_mode, args)
                if put_data and need_collect:
                    updated_batchmeta = await _async_update_batchmeta_with_output(output, batchmeta, func.__name__)
                    return updated_batchmeta
                return _postprocess_common(output, put_data, need_collect)

        @wraps(func)
        def dummy_inner(*args, **kwargs):
            # TransferQueue disabled: plain passthrough.
            output = func(*args, **kwargs)
            return output

        @wraps(func)
        async def dummy_async_inner(*args, **kwargs):
            # TransferQueue disabled: async passthrough.
            output = await func(*args, **kwargs)
            return output

        wrapper_inner = inner if is_transferqueue_enabled else dummy_inner
        wrapper_async_inner = async_inner if is_transferqueue_enabled else dummy_async_inner
        # Pick the sync or async wrapper to match the wrapped function.
        wrapper = wrapper_async_inner if inspect.iscoroutinefunction(func) else wrapper_inner
        return wrapper

    return decorator
| verl__utils__transferqueue_utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Compatibility utilities for different versions of transformers library.
"""
import importlib.metadata
from functools import lru_cache
from typing import Optional
from packaging import version
# Handle version compatibility for flash_attn_supports_top_left_mask
# This function was added in newer versions of transformers
try:
    from transformers.modeling_flash_attention_utils import flash_attn_supports_top_left_mask
except ImportError:
    # For older versions of transformers that don't have this function
    # Default to False as a safe fallback for older versions
    # NOTE: the fallback mirrors the real function's call signature so call
    # sites need no version checks of their own.
    def flash_attn_supports_top_left_mask():
        """Fallback implementation for older transformers versions.

        Returns False to disable features that require this function.
        """
        return False
@lru_cache
def is_transformers_version_in_range(min_version: Optional[str] = None, max_version: Optional[str] = None) -> bool:
    """Return True iff the installed transformers version lies in [min_version, max_version].

    Either bound may be None, meaning unbounded on that side. Results are
    cached because the installed version cannot change within a process.

    Raises:
        ModuleNotFoundError: if transformers is not installed.
    """
    try:
        # Get the installed version of the transformers library
        installed_str = importlib.metadata.version("transformers")
    except importlib.metadata.PackageNotFoundError as e:
        raise ModuleNotFoundError("The `transformers` package is not installed.") from e

    installed = version.parse(installed_str)
    above_min = min_version is None or version.parse(min_version) <= installed
    below_max = max_version is None or installed <= version.parse(max_version)
    return above_min and below_max
| verl__utils__transformers_compat.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for DeepSpeed Ulysses Sequence Parallelism.
DeepSpeed Ulysses Paper: https://arxiv.org/abs/2309.14509
Inspired from: https://github.com/deepspeedai/DeepSpeed/blob/master/deepspeed/sequence/layer.py
"""
from typing import Any, Optional
import torch
import torch.distributed as dist
from torch import Tensor
from torch.distributed import ProcessGroup
# Process-global Ulysses sequence-parallel group; None until explicitly set.
_ULYSSES_SEQUENCE_PARALLEL_GROUP = None


def set_ulysses_sequence_parallel_group(group: dist.ProcessGroup):
    """Install *group* as the process-global Ulysses sequence-parallel group."""
    global _ULYSSES_SEQUENCE_PARALLEL_GROUP
    _ULYSSES_SEQUENCE_PARALLEL_GROUP = group


def get_ulysses_sequence_parallel_group() -> Optional[dist.ProcessGroup]:
    """Return the process-global Ulysses sequence-parallel group (or None)."""
    return _ULYSSES_SEQUENCE_PARALLEL_GROUP


def get_ulysses_sequence_parallel_world_size(group: ProcessGroup = None) -> int:
    """World size of *group* (or the global group); 1 when no group is set."""
    sp_group = group if group is not None else get_ulysses_sequence_parallel_group()
    return dist.get_world_size(sp_group) if sp_group else 1


def get_ulysses_sequence_parallel_rank(group: ProcessGroup = None) -> int:
    """Rank within *group* (or the global group); 0 when no group is set."""
    sp_group = group if group is not None else get_ulysses_sequence_parallel_group()
    return dist.get_rank(sp_group) if sp_group else 0
def gather_seq_scatter_heads(
    x: Tensor,
    seq_dim: int,
    head_dim: int,
    unpadded_dim_size: int = 0,
    group: ProcessGroup = None,
) -> Tensor:
    """All-to-all that gathers the sequence dim and scatters the head dim.

    e.g. with seq_dim=1, head_dim=2: [bsz, seq/n, h, ...] -> [bsz, seq, h/n, ...]

    When *unpadded_dim_size* is given and not divisible by the sp world size,
    the divisibility pad added earlier is stripped from the gathered result.
    """
    if group is None:
        group = get_ulysses_sequence_parallel_group()
    if not group:
        return x
    x = SeqAllToAll.apply(group, x, head_dim, seq_dim)
    sp_world = get_ulysses_sequence_parallel_world_size(group)
    if unpadded_dim_size and unpadded_dim_size % sp_world != 0:
        pad = x.size(seq_dim) - unpadded_dim_size
        x = _unpad_tensor(x, seq_dim, pad)
    return x
def gather_heads_scatter_seq(x: Tensor, head_dim: int, seq_dim: int, group: ProcessGroup = None) -> Tensor:
    """All-to-all that gathers the head dim and scatters the sequence dim.

    e.g. with seq_dim=1, head_dim=2: [bsz, seq, h/n, ...] -> [bsz, seq/n, h, ...]

    The sequence dim is zero-padded first when not divisible by the sp world size.
    """
    if group is None:
        group = get_ulysses_sequence_parallel_group()
    if not group:
        return x
    sp_world = get_ulysses_sequence_parallel_world_size(group)
    remainder = x.size(seq_dim) % sp_world
    if remainder != 0:
        x = _pad_tensor(x, seq_dim, sp_world - remainder)
    return SeqAllToAll.apply(group, x, seq_dim, head_dim, False)
def _pad_tensor(x: Tensor, dim: int, padding_size: int) -> Tensor:
shape = list(x.shape)
shape[dim] = padding_size
pad = torch.zeros(shape, dtype=x.dtype, device=x.device)
return torch.cat([x, pad], dim=dim)
def _unpad_tensor(x: Tensor, dim: int, padding_size: int) -> Tensor:
slc = [slice(None)] * len(x.shape)
slc[dim] = slice(0, -padding_size)
return x[tuple(slc)]
def slice_input_tensor(x: Tensor, dim: int, padding: bool = True, group: ProcessGroup = None) -> Tensor:
    """Return this rank's slice of *x* along *dim* for Ulysses sequence parallelism.

    Args:
        x: input tensor to shard across the sequence-parallel group.
        dim: dimension to shard.
        padding: when True, zero-pad *dim* up to a multiple of the sp world size first.
        group: process group; falls back to the global Ulysses group. When no
            group exists at all, the (world=1, rank=0) degenerate case returns
            the whole tensor.

    Returns:
        The contiguous slice owned by the current rank.
    """
    group = get_ulysses_sequence_parallel_group() if group is None else group
    # Bug fix: derive BOTH world size and rank from the same resolved group.
    # Previously the world size came from `group` while the rank was always
    # read from the global group, so passing an explicit group could shard
    # with a mismatched (world_size, rank) pair.
    sp_world_size = get_ulysses_sequence_parallel_world_size(group)
    sp_rank = get_ulysses_sequence_parallel_rank(group)
    dim_size = x.size(dim)
    # pad before slice so every rank receives an equal-sized part
    if padding and dim_size % sp_world_size:
        padding_size = sp_world_size - (dim_size % sp_world_size)
        x = _pad_tensor(x, dim, padding_size)
    # slice the input tensor
    parts = x.size(dim) // sp_world_size
    slc = [slice(None)] * len(x.shape)
    slc[dim] = slice(sp_rank * parts, (sp_rank + 1) * parts)
    return x[tuple(slc)].contiguous()
def all_to_all_tensor(
    local_input: Tensor,
    scatter_dim: int,
    gather_dim: int,
    group: Optional[dist.ProcessGroup] = None,
    async_op: bool = False,
):
    """Split *local_input* along scatter_dim, exchange via all-to-all, concat along gather_dim.

    Returns the concatenated tensor, or — when async_op — a ``wait()`` callable
    that blocks on the collective and then returns the concatenated tensor.
    """
    if group is None:
        group = get_ulysses_sequence_parallel_group()
    world = dist.get_world_size(group)
    inputs = [chunk.contiguous() for chunk in torch.tensor_split(local_input, world, scatter_dim)]
    outputs = [torch.empty_like(inputs[0]) for _ in range(world)]
    handle = dist.all_to_all(outputs, inputs, group=group, async_op=async_op)
    if not async_op:
        return torch.cat(outputs, dim=gather_dim).contiguous()

    def wait():
        handle.wait()
        return torch.cat(outputs, dim=gather_dim).contiguous()

    return wait
def all_gather_tensor(local_tensor: Tensor, group: Optional[dist.ProcessGroup] = None, async_op: bool = False):
    """All-gather *local_tensor* across the group, stacked along dim 0.

    NOTE(review): with async_op=True the output tensor is returned without
    waiting on the collective's work handle (it is discarded here) — confirm
    callers synchronize before reading the result.
    """
    if group is None:
        group = get_ulysses_sequence_parallel_group()
    world = dist.get_world_size(group=group)
    gathered_shape = list(local_tensor.shape)
    gathered_shape[0] *= world
    gathered = torch.empty(gathered_shape, dtype=local_tensor.dtype, device=local_tensor.device)
    dist.all_gather_into_tensor(gathered, local_tensor, group=group, async_op=async_op)
    return gathered
class SeqAllToAll(torch.autograd.Function):
    """Autograd-aware all-to-all used to swap sequence and head sharding.

    Backward performs the transposed all-to-all (scatter/gather dims swapped).
    """

    @staticmethod
    def forward(
        ctx: Any,
        group: dist.ProcessGroup,
        local_input: Tensor,
        scatter_dim: int,
        gather_dim: int,
        async_op: bool = False,
    ) -> Tensor:
        # Stash the communication parameters for the backward pass.
        ctx.group = group
        ctx.scatter_dim = scatter_dim
        ctx.gather_dim = gather_dim
        ctx.async_op = async_op
        return all_to_all_tensor(local_input, scatter_dim, gather_dim, group, async_op)

    @staticmethod
    def backward(ctx: Any, *grad_output: Tensor) -> tuple[None, Tensor, None, None]:
        # NOTE(review): in the async case the upstream grads appear to arrive as
        # several chunks concatenated along the forward gather dim — confirm.
        input_t = torch.cat(grad_output[1:], dim=ctx.gather_dim).contiguous() if ctx.async_op else grad_output[0]
        # Gradient of an all-to-all is the reverse all-to-all (dims swapped).
        # Extra trailing None gradients beyond forward's input count are
        # tolerated by autograd.
        return (
            None,
            all_to_all_tensor(input_t, ctx.gather_dim, ctx.scatter_dim, ctx.group, False),
            None,
            None,
            None,
            None,
        )
class Gather(torch.autograd.Function):
    """Autograd-aware all-gather along an arbitrary dim.

    Backward slices out this rank's shard of the incoming gradient, optionally
    scaling it by the sequence-parallel world size.
    """

    @staticmethod
    def forward(
        ctx: Any,
        group: dist.ProcessGroup,
        local_tensor: Tensor,
        gather_dim: int,
        grad_scaler: bool = True,
        async_op=False,
    ) -> Tensor:
        ctx.group = group
        ctx.gather_dim = gather_dim
        ctx.grad_scaler = grad_scaler
        ctx.async_op = async_op

        sp_world_size = dist.get_world_size(group=group)
        ctx.sp_world_size = sp_world_size

        sp_rank = dist.get_rank(group=group)
        ctx.sp_rank = sp_rank

        # all_gather_tensor stacks rank shards along dim 0; remember the local
        # sizes needed to re-split and concatenate along the requested dim.
        local_shape = list(local_tensor.size())
        split_size = local_shape[0]

        part_size = local_shape[gather_dim]  # store original size
        ctx.part_size = part_size

        # NOTE(review): async_op=True returns before the collective completes
        # (all_gather_tensor discards its work handle) — confirm callers only
        # pass async_op=False.
        output = all_gather_tensor(local_tensor, group, async_op)
        return torch.cat(output.split(split_size, dim=0), dim=gather_dim)

    @staticmethod
    def backward(ctx: Any, grad_output: Tensor) -> Any:
        if ctx.grad_scaler:
            grad_output = grad_output * ctx.sp_world_size
        # Return this rank's slice of the gradient; None for the non-tensor
        # forward inputs (extra trailing Nones are tolerated by autograd).
        return (
            None,
            grad_output.split(ctx.part_size, dim=ctx.gather_dim)[ctx.sp_rank].contiguous(),
            None,
            None,
            None,
            None,
        )
def gather_outpus_and_unpad(*args, **kwargs):
    """Deprecated misspelled alias: always raises, pointing at the correct name."""
    raise RuntimeError(
        "please use verl.utils.ulysses.gather_outputs_and_unpad instead of verl.utils.ulysses.gather_outpus_and_unpad"
    )
def gather_outputs_and_unpad(
    x: Tensor,
    gather_dim: int,
    unpad_dim: int = None,
    padding_size: int = 0,
    grad_scaler: bool = True,
    group: Optional[dist.ProcessGroup] = None,
):
    """Gather *x* across the sequence-parallel group and optionally strip padding.

    Args:
        x (Tensor): Input tensor to gather.
        gather_dim (int): Dimension along which to gather across ranks.
        unpad_dim (int, optional): Dimension from which to remove padding. If None, no unpadding.
        padding_size (int): Number of padding elements to remove on `unpad_dim`. Defaults to 0.
        grad_scaler (bool): Whether to apply gradient scaling during gather. Defaults to True.
        group (ProcessGroup, optional): Process group for gathering. If None, uses
            `get_ulysses_sequence_parallel_group()`. If still None, returns `x` unchanged.

    Returns:
        Tensor: The gathered tensor, with padding removed if requested.
    """
    sp_group = group if group is not None else get_ulysses_sequence_parallel_group()
    if sp_group is None:
        return x
    gathered = Gather.apply(sp_group, x, gather_dim, grad_scaler)
    if unpad_dim is None:
        return gathered
    assert isinstance(padding_size, int), "padding size is not given or is not an integer"
    if padding_size == 0:
        return gathered
    return _unpad_tensor(gathered, unpad_dim, padding_size)
def ulysses_pad(
    input_ids_rmpad: torch.Tensor, position_ids_rmpad: Optional[torch.Tensor] = None, sp_size: int = 1, pad_value=0
):
    """Right-pad packed ids so the total sequence length is divisible by sp_size.

    Args:
        input_ids_rmpad: token ids of shape [1, total_seqlen].
        position_ids_rmpad: optional position ids of shape [..., 1, total_seqlen];
            padded positions restart from 0.
        sp_size: ulysses sequence parallel size; <= 1 means no padding is needed.
        pad_value: fill value for the appended token ids.

    Returns:
        (padded input_ids, padded position_ids or None, pad_size)
    """
    if position_ids_rmpad is not None:
        assert position_ids_rmpad.size(-2) == 1
        assert input_ids_rmpad.size(-1) == position_ids_rmpad.size(-1)
    if sp_size <= 1:
        return input_ids_rmpad, position_ids_rmpad, 0
    _, total_seq_len = input_ids_rmpad.shape
    # Smallest non-negative pad that makes total_seq_len a multiple of sp_size.
    pad_size = -total_seq_len % sp_size
    if pad_size > 0:
        input_ids_rmpad = torch.nn.functional.pad(input_ids_rmpad, (0, pad_size), value=pad_value)
        if position_ids_rmpad is not None:
            tail_positions = torch.arange(pad_size, device=position_ids_rmpad.device).unsqueeze(0)
            if position_ids_rmpad.dim() == 3:
                # 3-D (e.g. mrope-style) ids: replicate the pad across the leading dim.
                tail_positions = tail_positions.unsqueeze(0).expand(position_ids_rmpad.size(0), -1, -1)
            position_ids_rmpad = torch.cat((position_ids_rmpad, tail_positions), dim=-1)
    return input_ids_rmpad, position_ids_rmpad, pad_size
def ulysses_pad_and_slice_inputs(
    input_ids_rmpad: torch.Tensor,
    position_ids_rmpad: Optional[torch.Tensor] = None,
    sp_size: int = 1,
    skip_position_ids_rmpad: bool = False,
    pad_value=0,
):
    """Pad inputs to a multiple of sp_size, then slice out the local SP shard.

    This is the pre-forward utility for ulysses sequence parallelism: both
    input_ids and (unless skipped) position_ids are padded and sliced.

    Args:
        input_ids_rmpad: shape [bsz, seqlen].
        position_ids_rmpad: shape [bsz, seqlen], where bsz must be 1.
        sp_size (int): ulysses sequence parallelism size.
        skip_position_ids_rmpad: whether to skip slicing position_ids_rmpad (VeOmniEngine).
        pad_value: fill value for padded token ids.

    Returns:
        (sliced input_ids, sliced position_ids or None, pad size)
    """
    padded_ids, padded_pos, pad_size = ulysses_pad(input_ids_rmpad, position_ids_rmpad, sp_size, pad_value=pad_value)
    sliced_ids = slice_input_tensor(padded_ids, dim=1, padding=False)
    sliced_pos = padded_pos
    if padded_pos is not None and not skip_position_ids_rmpad:
        sliced_pos = slice_input_tensor(padded_pos, dim=1, padding=False)
    return sliced_ids, sliced_pos, pad_size
def validate_ulysses_config(num_heads, ulysses_sequence_size):
    """Assert the attention-head divisibility required by ulysses SP; no-op when size <= 1."""
    if ulysses_sequence_size <= 1:
        return
    assert num_heads % ulysses_sequence_size == 0, (
        f"num_heads ({num_heads}) must be divisible by ulysses sequence size({ulysses_sequence_size})"
    )
| verl__utils__ulysses.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To support different vLLM versions, we add the model into SUPPORTED_MOE_MODELS separately to avoid triggering
# unsupported issues.
import importlib

# (module path, class names) candidates. Each entry is resolved independently so
# that a vLLM build lacking one architecture does not prevent registering the
# others; classes sharing an entry are registered all-or-nothing, matching the
# original grouped `from ... import A, B` behavior.
_MOE_MODEL_CANDIDATES = [
    ("vllm.model_executor.models.deepseek_v2", ("DeepseekV2ForCausalLM", "DeepseekV3ForCausalLM")),
    ("vllm.model_executor.models.mixtral", ("MixtralForCausalLM",)),
    ("vllm.model_executor.models.qwen2_moe", ("Qwen2MoeForCausalLM",)),
    ("vllm.model_executor.models.qwen3_moe", ("Qwen3MoeForCausalLM",)),
    ("vllm.model_executor.models.qwen3_vl_moe", ("Qwen3MoeLLMForCausalLM",)),
    ("vllm.model_executor.models.qwen3_next", ("Qwen3NextForCausalLM",)),
    ("vllm.model_executor.models.kimi_vl", ("KimiVLForConditionalGeneration",)),
]

SUPPORTED_MOE_MODELS = []
for _module_path, _class_names in _MOE_MODEL_CANDIDATES:
    try:
        _module = importlib.import_module(_module_path)
        _classes = [getattr(_module, _name) for _name in _class_names]
    except (ImportError, AttributeError):
        # This vLLM version does not ship the architecture; skip it.
        continue
    SUPPORTED_MOE_MODELS.extend(_classes)
def patch_vllm_moe_model_weight_loader(model):
    """Attach a ``weight_loader`` to vLLM fused-MoE expert weights.

    This is a workaround for a bug in vLLM 0.8.2: all weights are supposed to
    carry a ``weight_loader``, but the fused MoE expert weights (``w13_weight``,
    ``w2_weight``) do not, so we copy ``mlp.experts.weight_loader`` onto them.
    No-op for models that are not in ``SUPPORTED_MOE_MODELS``.
    """
    # Which params carry their own loader (True) vs need patching (False):
    # (True, 'model.embed_tokens.weight')
    # (True, 'model.layers.0.self_attn.qkv_proj.weight')
    # (True, 'model.layers.0.self_attn.qkv_proj.bias')
    # (True, 'model.layers.0.self_attn.o_proj.weight')
    # (True, 'model.layers.0.mlp.gate.weight')
    # (True, 'model.layers.0.mlp.shared_expert.gate_up_proj.weight')
    # (True, 'model.layers.0.mlp.shared_expert.down_proj.weight')
    # (False, 'model.layers.0.mlp.shared_expert_gate.weight') use default
    # (False, 'model.layers.0.input_layernorm.weight') use default
    # (False, 'model.layers.0.post_attention_layernorm.weight') use default
    # (False, 'model.layers.0.mlp.experts.w13_weight') use mlp.experts.weight_loader
    # (False, 'model.layers.0.mlp.experts.w2_weight') use mlp.experts.weight_loader
    # Early return if no MOE models are supported
    if not SUPPORTED_MOE_MODELS:
        return
    original_model_type = type(model)
    # A graph wrapper (e.g. ACLGraphWrapper) hides the real model behind `.runnable`.
    if hasattr(model, "runnable") and "ACLGraphWrapper" in str(original_model_type):
        model = model.runnable
        original_model_type = type(model)
    # Define MLP attribute mapping for model types whose MoE block is not named "mlp".
    MLP_ATTR_MAPPING = {}
    try:
        from vllm.model_executor.models.mixtral import MixtralForCausalLM

        MLP_ATTR_MAPPING[MixtralForCausalLM] = "block_sparse_moe"
    except ImportError:
        pass
    DEFAULT_MLP_ATTR = "mlp"
    # Get inner model (either model.model or model.language_model)
    inner_model = getattr(model, "model", None) or getattr(model, "language_model", None)
    if inner_model is None:
        raise ValueError("The provided model does not have a valid 'model' or 'language_model' attribute.")
    if not isinstance(model, tuple(SUPPORTED_MOE_MODELS)) and not isinstance(inner_model, tuple(SUPPORTED_MOE_MODELS)):
        return
    # TODO(@leisuzz): class Qwen3MoeLLMForCausalLM is not available if VLLM version < 0.11.0,
    # will update the 'if statement' with 'isinstance' when verl commonly use VLLM version >= 0.11.0
    if type(inner_model).__name__ == "Qwen3MoeLLMForCausalLM":
        inner_model = inner_model.model  # Reassign inner_model in Qwen3-vl
    # The attribute name is per-model-type, so resolve it once outside the loop.
    mlp_attr = MLP_ATTR_MAPPING.get(original_model_type, DEFAULT_MLP_ATTR)
    for layer in inner_model.layers:
        mlp = getattr(layer, mlp_attr, None)
        if not mlp:
            continue
        experts = getattr(mlp, "experts", None)
        if not experts or not hasattr(experts, "weight_loader"):
            continue
        # Patch the weight loaders
        for name, param in mlp.named_parameters():
            if "w13_weight" in name or "w2_weight" in name:
                param.weight_loader = experts.weight_loader
| verl__utils__vllm__patch.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from msgspec import field
from packaging import version as vs
try:
from vllm.lora.lora_model import LoRAModel
except ImportError:
from vllm.lora.models import LoRAModel
from vllm.lora.request import LoRARequest
from vllm.lora.utils import get_adapter_absolute_path
from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager
from verl.third_party.vllm import get_version
class TensorLoRARequest(LoRARequest):
    """LoRARequest variant that carries the adapter in memory instead of on disk.

    Used by the hijacked ``_load_adapter`` below to load LoRA weights directly
    from tensors (vLLM natively supports only path-based adapters).
    ``msgspec.field`` is used here — presumably because the LoRARequest base is
    a msgspec struct; confirm against the installed vLLM version.
    """

    # PEFT adapter config as a plain dict (fed to PEFTHelper.from_dict).
    peft_config: dict = field(default=None)
    # Mapping of LoRA weight names to tensors (fed to from_lora_tensors).
    lora_tensors: dict = field(default=None)
class VLLMHijack:
    """Namespace for monkey-patching vLLM's worker LoRA manager.

    ``hijack()`` replaces ``LRUCacheWorkerLoRAManager._load_adapter`` so that a
    ``TensorLoRARequest`` (in-memory LoRA tensors) can be loaded in addition to
    the path-based requests vLLM supports natively.
    """

    @staticmethod
    def hijack():
        def hijack__load_adapter(self, lora_request: TensorLoRARequest) -> LoRAModel:
            """
            based on vllm.lora.worker_manager.WorkerLoRAManager._load_adapter, support load adapter with lora tensors
            Reason:
                VLLM does not support adding LoRA from tensors directly. It only supports adding LoRA via file paths.
                To synchronize the LoRA tensors of the actor model, we need to find a workaround to enable VLLM to
                load memory-based LoRA tensors.
            """
            try:
                # Expand packed modules (e.g. qkv_proj) into the per-module names
                # that LoRA weights are keyed by.
                supported_lora_modules = self._adapter_manager.supported_lora_modules
                packed_modules_mapping = self._adapter_manager.packed_modules_mapping
                expected_lora_modules: list[str] = []
                for module in supported_lora_modules:
                    if module in packed_modules_mapping:
                        expected_lora_modules.extend(packed_modules_mapping[module])
                    else:
                        expected_lora_modules.append(module)
                expected_lora_modules = list(set(expected_lora_modules))
                lora_tensors = None
                from vllm.lora.peft_helper import PEFTHelper

                if isinstance(lora_request, TensorLoRARequest):
                    # In-memory path: config and tensors travel on the request itself.
                    peft_config = lora_request.peft_config
                    lora_tensors = lora_request.lora_tensors
                    peft_helper = PEFTHelper.from_dict(peft_config)
                else:
                    # Original path-based flow.
                    lora_path = get_adapter_absolute_path(lora_request.lora_path)
                    peft_helper = PEFTHelper.from_local_dir(lora_path, self.max_position_embeddings)
                # Validates the LoRA configuration against requirements before
                # loading weights, throwing an exception if validation fails.
                peft_helper.validate_legal(self.lora_config)
                # For some models like Qwen2VL, we need to use hf_to_vllm_mapper
                # to ensure correct loading of lora weights.
                model = self._adapter_manager.model
                hf_to_vllm_mapper = None
                if hasattr(model, "hf_to_vllm_mapper") and model.hf_to_vllm_mapper is not None:
                    hf_to_vllm_mapper = model.hf_to_vllm_mapper
                lora_request_kwargs = {
                    "peft_helper": peft_helper,
                    "lora_model_id": lora_request.lora_int_id,
                    "device": "cpu",
                    "dtype": self.lora_config.lora_dtype,
                    "weights_mapper": hf_to_vllm_mapper,
                }
                # The accepted keyword set differs across vLLM versions; detect by attribute.
                if hasattr(self, "embedding_padding_modules"):
                    lora_request_kwargs["embedding_modules"] = self.embedding_modules
                    lora_request_kwargs["embedding_padding_modules"] = self.embedding_padding_modules
                else:
                    lora_request_kwargs["model_vocab_size"] = self.vocab_size
                if hasattr(self.lora_config, "lora_extra_vocab_size"):
                    lora_request_kwargs["target_embedding_padding"] = (
                        self.vocab_size + self.lora_config.lora_extra_vocab_size
                    )
                if isinstance(lora_request, TensorLoRARequest):
                    lora = self._lora_model_cls.from_lora_tensors(
                        tensors=lora_tensors,
                        **lora_request_kwargs,
                    )
                else:
                    lora = self._lora_model_cls.from_local_checkpoint(
                        lora_path,
                        expected_lora_modules,
                        **lora_request_kwargs,
                    )
            except Exception:
                # NOTE(review): this re-raises unchanged, so the try/except is a
                # no-op kept for parity with upstream; consider removing.
                raise
            if getattr(lora, "extra_vocab_size", 0) > getattr(self.lora_config, "lora_extra_vocab_size", 0):
                raise ValueError(
                    f"LoRA added vocab size {lora.extra_vocab_size} is greater than lora_extra_vocab_size "
                    f"{self.lora_config.lora_extra_vocab_size}."
                )
            return lora

        def do_hijack(target_cls, target_method_name, hooking_method):
            # Replace the method on the class so all existing instances pick it up.
            setattr(target_cls, target_method_name, hooking_method)

        do_hijack(LRUCacheWorkerLoRAManager, "_load_adapter", hijack__load_adapter)
def is_version_ge(pkg: str = "vllm", minver: str = "0.7.3"):
    """Return True when the installed version of ``pkg`` is >= ``minver``."""
    installed = vs.parse(get_version(pkg))
    required = vs.parse(minver)
    return installed >= required
| verl__utils__vllm__utils.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from dataclasses import dataclass, field
from unittest.mock import patch
import torch
import vllm
from packaging import version
try:
from vllm.model_executor.layers.fused_moe.layer import FusedMoE
from vllm.model_executor.layers.linear import LinearBase
except ImportError as e:
raise ImportError("FP8 quantization not available") from e
from verl.utils.kernel.fp8_kernel import scaled_fp8_blockwise
logger = logging.getLogger(__name__)


# Ref: https://github.com/NVIDIA-NeMo/RL/commit/bc24887c72a6e1b2699a228bc87c588546dfe6b7
@dataclass
class FP8State:
    """Process-wide bookkeeping for the FP8 weight-refit path."""

    # Parameter names already classified; checked before re-deriving fp8-ness.
    seen_params: set = field(default_factory=set)
    # Subset of seen_params whose weights are stored as FP8.
    fp8_param_names: set = field(default_factory=set)
    # Container for vLLM patch objects.
    vllm_patches: list = field(default_factory=list)


fp8_state: FP8State = FP8State()
def is_fp8_model(vllm_config):
    """Return True when the vLLM config carries an ``Fp8Config`` quantization setup."""
    from vllm.model_executor.layers.quantization.fp8 import Fp8Config

    quant_config = getattr(vllm_config, "quant_config", None)
    return isinstance(quant_config, Fp8Config)
def get_module_from_param_name(model, name: str):
    """Resolve the submodule that owns parameter ``name``.

    Handles two vLLM-specific wrinkles:
      * fused modules: the last path component is translated through the model's
        ``packed_modules_mapping`` (e.g. ``q_proj`` -> ``qkv_proj``);
      * FusedMoE: traversal stops at the FusedMoE module itself, since the
        expert weights live directly on it.

    On a failed lookup a warning is logged and the deepest module reached is
    returned (best-effort).
    """
    # Split the name into parts (e.g., 'layers', '0', 'self_attn', 'q_proj', 'weight')
    # The module path is all but the last part (the parameter's own name)
    path_parts = name.split(".")
    module_path = path_parts[:-1]
    # Replace with the fused module name.
    packed_modules_mapping = model.packed_modules_mapping
    reversed_mapping = {
        original_name: fused_name
        for fused_name, original_names_list in packed_modules_mapping.items()
        for original_name in original_names_list
    }
    # Guard against a bare parameter name ("weight") whose module path is empty.
    if module_path and module_path[-1] in reversed_mapping:
        module_path[-1] = reversed_mapping[module_path[-1]]
    current_module = model
    try:
        # Traverse the model hierarchy
        for part in module_path:
            if isinstance(current_module, FusedMoE):
                return current_module
            elif isinstance(current_module, torch.nn.ModuleList):
                current_module = current_module[int(part)]
            else:
                current_module = getattr(current_module, part)
    except (AttributeError, IndexError, ValueError) as e:
        # Best-effort: log (consistent with the rest of this module) and fall
        # through to return the deepest module reached.
        logger.warning("Could not find module for parameter '%s'. Error: %s", name, e)
    return current_module
def is_fp8_weight(name, model):
    """Return True when parameter ``name`` refers to an FP8-stored weight.

    Classification is memoized in the module-level ``fp8_state`` cache so each
    parameter name is resolved against the model at most once.
    """
    if name not in fp8_state.seen_params:
        fp8_state.seen_params.add(name)
        # Only ".weight"-suffixed params are candidates (filters out bias params).
        if name.endswith("weight"):
            module = get_module_from_param_name(model, name)
            # We currently only quantize linear layers (dense or fused MoE).
            fp8_linear = isinstance(module, LinearBase) and module.weight.dtype == torch.float8_e4m3fn
            fp8_moe = (
                isinstance(module, FusedMoE)
                and module.w13_weight.dtype == torch.float8_e4m3fn
                and module.w2_weight.dtype == torch.float8_e4m3fn
            )
            if fp8_linear or fp8_moe:
                fp8_state.fp8_param_names.add(name)
    return name in fp8_state.fp8_param_names
def quant_weights(weights, model, quant_config, dtype=torch.bfloat16):
    """Quantize weights to FP8 format using a memory-efficient generator.

    Args:
        weights: Generator or iterable of (name, tensor) pairs
        model: The model to check for FP8 weight names
        quant_config: Quantization configuration with weight_block_size
        dtype: Data type for intermediate computation (default: bfloat16)

    Yields:
        Tuples of (name, tensor) for each weight and its scale

    Raises:
        ValueError: if ``quant_config.weight_block_size`` is unset — only
            blockwise quantization is supported.
    """
    if quant_config.weight_block_size is None:
        raise ValueError("Currently only support blockwise quantization, please set weight_block_size in quant_config")
    is_vllm_11_or_later = version.parse(vllm.__version__) >= version.parse("0.11.0")
    for k, v in weights:
        # Non-FP8 params (biases, norms, non-linear layers) pass through untouched.
        if not is_fp8_weight(k, model):
            yield (k, v)
            continue
        # Cast the weight into fp8 and its scale factor
        # NOTE(review): get_rank() assumes an initialized default process group.
        if torch.distributed.get_rank() == 0:
            logger.debug(f"Quantizing to FP8 blockwise: {k}")
        param_lp, param_scale = scaled_fp8_blockwise(
            v.to(dtype),
            weight_block_size=quant_config.weight_block_size,
        )
        param_scale = param_scale.squeeze(-1)
        # Yield the quantized weight
        yield (k, param_lp)
        # Yield the scale with appropriate naming based on vLLM version:
        # vLLM >= 0.11 expects "..._scale" for non-expert weights and
        # "..._scale_inv" for expert weights; older versions always use "_scale_inv".
        if is_vllm_11_or_later:
            if "expert" in k:
                yield (k + "_scale_inv", param_scale)
            else:
                yield (k + "_scale", param_scale)
        else:
            yield (k + "_scale_inv", param_scale)
        # Explicitly delete original tensor reference to help GC
        del v, param_lp, param_scale
def load_quanted_weights(weights, model_runner):
    """Quantize a weight stream to FP8 and load it into the vLLM model.

    Args:
        weights: iterable of (name, tensor) pairs with the source weights.
        model_runner: vLLM model runner holding the target model and quant config.

    Returns:
        Whatever ``model.load_weights`` reports as loaded parameter names.
    """
    model = model_runner.model
    quant_config = model_runner.vllm_config.quant_config
    vllm_dtype = model_runner.vllm_config.model_config.dtype
    weights_quantized = quant_weights(weights, model, quant_config, dtype=vllm_dtype)
    # Monkey patch the param class to their subclass, as certain models
    # will check the param type to call the proper weightloader.
    for param in model.parameters():
        if hasattr(param, "subclass_type"):
            param.orig_type = param.__class__
            param.__class__ = param.subclass_type
    try:
        # Finally load the weights into vllm.
        loaded_params = model.load_weights(weights_quantized)
    finally:
        # Undo the type change above even if loading raised, so the model is
        # never left with swapped param classes.
        for param in model.parameters():
            if hasattr(param, "subclass_type"):
                param.__class__ = param.orig_type
    return loaded_params
def process_weights_after_loading_for_vllm10(self, layer) -> None:
    """This function is used to process the weights after loading for a Linear layer, it is used for vllm v0.10

    Compared to the original process_weights_after_loading in vllm, we just avoid creation of
    new torch.nn.Parameter objects, because that removes the weight_loader attribute which we need for refit.
    The param's subclass type is remembered on ``subclass_type`` so refit can restore it.
    """
    logger.debug("Applying patch process_weights_after_loading")
    try:
        from vllm.model_executor.parameter import (
            BlockQuantScaleParameter,
            ModelWeightParameter,
        )
    except ImportError as err:
        # Previously this was swallowed with a bare print, which produced a
        # confusing NameError further down; fail loudly at the real cause.
        raise ImportError("vllm.model_executor.parameter is required for the FP8 weight patch") from err
    from torch.nn import Parameter

    def _create_param_from_subclass_attributes(custom_param):
        # Rewrap as a plain Parameter, copying over the attributes unique to the
        # custom parameter (e.g. weight_loader) and remembering its type.
        param = Parameter(custom_param.data, requires_grad=False)
        base_param_dir = dir(torch.nn.Parameter)
        custom_param_dir = dir(custom_param)
        # Find the attributes that are unique to the custom parameter
        custom_attributes = [
            attr for attr in custom_param_dir if attr not in base_param_dir and not attr.startswith("__")
        ]
        # Set the custom attributes into the base parameter object
        for attr in custom_attributes:
            setattr(param, attr, getattr(custom_param, attr))
        param.subclass_type = type(custom_param)
        return param

    assert self.block_quant and self.quant_config.is_checkpoint_fp8_serialized
    assert self.quant_config.activation_scheme == "dynamic"
    weight = layer.weight.data
    weight_scale_inv = layer.weight_scale_inv.data
    weight = self._maybe_pad_weight(weight)
    layer.weight = _create_param_from_subclass_attributes(
        ModelWeightParameter(
            data=weight,
            output_dim=0,
            input_dim=1,
            weight_loader=layer.weight.weight_loader,
        )
    )
    layer.weight_scale_inv = _create_param_from_subclass_attributes(
        BlockQuantScaleParameter(
            data=weight_scale_inv,
            output_dim=0,
            input_dim=1,
            weight_loader=layer.weight_scale_inv.weight_loader,
        )
    )
def process_weights_after_loading_for_vllm11(self, layer) -> None:
    """This function is used to process the weights after loading for a Linear layer, it is used for vllm 0.11

    Compared to the original process_weights_after_loading in vllm, we just avoid creation of
    new torch.nn.Parameter objects, because that removes the weight_loader attribute which we need for refit.
    The layer may expose its scale as either ``weight_scale_inv`` or ``weight_scale``;
    both spellings are handled.
    """
    from torch.nn import Parameter
    from vllm.model_executor.layers.quantization.utils.fp8_utils import (
        maybe_post_process_fp8_weight_block,
        process_fp8_weight_block_strategy,
    )
    from vllm.model_executor.parameter import (
        BlockQuantScaleParameter,
        ModelWeightParameter,
    )

    assert self.block_quant and self.quant_config.is_checkpoint_fp8_serialized
    assert self.quant_config.activation_scheme == "dynamic"

    def _create_param_from_subclass_attributes(custom_param):
        # Rewrap as a plain Parameter, copying over the attributes unique to the
        # custom parameter (e.g. weight_loader) and remembering its type.
        param = Parameter(custom_param.data, requires_grad=False)
        base_param_dir = dir(torch.nn.Parameter)
        custom_param_dir = dir(custom_param)
        # Find the attributes that are unique to the custom parameter
        custom_attributes = [
            attr for attr in custom_param_dir if attr not in base_param_dir and not attr.startswith("__")
        ]
        # Set the custom attributes into the base parameter object
        for attr in custom_attributes:
            setattr(param, attr, getattr(custom_param, attr))
        param.subclass_type = type(custom_param)
        return param

    # BUGFIX: remember which scale attribute the layer actually has; the code
    # below previously accessed layer.weight_scale_inv unconditionally and
    # raised AttributeError for layers exposing only weight_scale.
    has_scale_inv = hasattr(layer, "weight_scale_inv")
    scale_source = layer.weight_scale_inv if has_scale_inv else layer.weight_scale
    weight, weight_scale = process_fp8_weight_block_strategy(layer.weight, scale_source)
    layer.weight = _create_param_from_subclass_attributes(
        ModelWeightParameter(
            data=weight.data,
            output_dim=0,
            input_dim=1,
            weight_loader=layer.weight.weight_loader,
        )
    )
    layer.weight_scale = _create_param_from_subclass_attributes(
        BlockQuantScaleParameter(
            data=weight_scale.data,
            output_dim=0,
            input_dim=1,
            weight_loader=scale_source.weight_loader,
        )
    )
    if has_scale_inv:
        del layer.weight_scale_inv
    if version.parse(vllm.__version__) == version.parse("0.11.0"):
        # 0.11.0 still takes the cutlass support flag; later versions dropped it.
        maybe_post_process_fp8_weight_block(layer, self.cutlass_block_fp8_supported)
    else:
        maybe_post_process_fp8_weight_block(layer)
def process_weights_after_loading_moe_for_vllm10(self, layer) -> None:
    """This function is used to process the weights after loading for a FusedMoE layer, it is used for vllm v0.10

    Like the linear-layer patch above, expert weights are rewrapped as plain
    ``torch.nn.Parameter`` objects (copying custom attributes such as
    ``weight_loader``) instead of being recreated, so the loaders survive refit.
    """
    from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import is_rocm_aiter_moe_enabled
    from vllm.model_executor.layers.quantization.fp8 import _is_col_major, _swap_w13_to_w31
    from vllm.model_executor.layers.quantization.utils.fp8_utils import (
        get_col_major_tma_aligned_tensor,
        requant_weight_ue8m0_inplace,
    )
    from vllm.utils.deep_gemm import is_blackwell_deep_gemm_used

    self.rocm_aiter_moe_enabled = is_rocm_aiter_moe_enabled()
    assert self.quant_config.activation_scheme == "dynamic"
    if self.flashinfer_moe_enabled:
        # FlashInfer path: swap the w1/w3 halves of the fused weight (w13 -> w31).
        w13_weight = _swap_w13_to_w31(layer.w13_weight.data)
        w13_weight_scale_inv = _swap_w13_to_w31(layer.w13_weight_scale_inv.data)
        w2_weight = layer.w2_weight.data
        w2_weight_scale_inv = layer.w2_weight_scale_inv.data
    else:
        w13_weight = layer.w13_weight.data
        w13_weight_scale_inv = layer.w13_weight_scale_inv.data
        # NOTE(review): these two keep the Parameter itself (no .data), unlike
        # the branch above — looks inconsistent, though Parameter is a Tensor so
        # the rewrap below still works; confirm intent.
        w2_weight = layer.w2_weight
        w2_weight_scale_inv = layer.w2_weight_scale_inv
    from torch.nn import Parameter

    def _create_param_from_subclass_attributes(custom_data, custom_weight):
        # Wrap custom_data in a plain Parameter and copy over the attributes
        # unique to the original custom parameter (e.g. weight_loader).
        param = Parameter(custom_data, requires_grad=False)
        base_param_dir = dir(torch.nn.Parameter)
        custom_weight_dir = dir(custom_weight)
        # Find the attributes that are unique to the custom parameter
        custom_attributes = [
            attr for attr in custom_weight_dir if attr not in base_param_dir and not attr.startswith("__")
        ]
        # Set the custom attributes into the base parameter object
        for attr in custom_attributes:
            setattr(param, attr, getattr(custom_weight, attr))
        return param

    layer.w13_weight = _create_param_from_subclass_attributes(w13_weight, layer.w13_weight)
    layer.w13_weight_scale_inv = _create_param_from_subclass_attributes(
        w13_weight_scale_inv, layer.w13_weight_scale_inv
    )
    layer.w2_weight = _create_param_from_subclass_attributes(w2_weight, layer.w2_weight)
    layer.w2_weight_scale_inv = _create_param_from_subclass_attributes(w2_weight_scale_inv, layer.w2_weight_scale_inv)
    # DeepGemm scales need to be transposed and aligned. We try to do
    # it ahead of time for performance reasons.
    if self.allow_deep_gemm and not is_blackwell_deep_gemm_used():
        # Lazy import to avoid CUDA initialization problems.
        if _is_col_major(layer.w13_weight_scale_inv):
            layer.w13_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w13_weight_scale_inv).contiguous()
        if _is_col_major(layer.w2_weight_scale_inv):
            layer.w2_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w2_weight_scale_inv).contiguous()
    if is_blackwell_deep_gemm_used():
        assert layer.weight_block_size is not None
        # Re-quantise the expert weights so their scales are UE8M0.
        block_sz = tuple(layer.weight_block_size)
        requant_weight_ue8m0_inplace(
            layer.w13_weight.data,
            layer.w13_weight_scale_inv.data,
            block_sz,
        )
        requant_weight_ue8m0_inplace(
            layer.w2_weight.data,
            layer.w2_weight_scale_inv.data,
            block_sz,
        )
        if _is_col_major(layer.w13_weight_scale_inv):
            layer.w13_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w13_weight_scale_inv).contiguous()
        if _is_col_major(layer.w2_weight_scale_inv):
            layer.w2_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w2_weight_scale_inv).contiguous()
def process_weights_after_loading_moe_for_vllm11(self, layer) -> None:
    """This function is used to process the weights after loading for a FusedMoE layer, it is used for vllm 0.11

    Unlike the v0.10 variant, no params are rewrapped here; the patch only
    performs layout fixups (w13 swap for FlashInfer, DeepGEMM scale alignment,
    optional UE8M0 requantization) in place.
    """
    from vllm.model_executor.layers.quantization.utils.flashinfer_utils import (
        swap_w13_to_w31,
    )
    from vllm.model_executor.layers.quantization.utils.fp8_utils import (
        expert_weight_is_col_major,
        requant_weight_ue8m0_inplace,
    )
    from vllm.utils.deep_gemm import (
        get_col_major_tma_aligned_tensor,
        is_deep_gemm_e8m0_used,
    )

    try:
        # Older import location; newer vLLM moved this into _aiter_ops.
        from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import is_rocm_aiter_moe_enabled

        self.rocm_aiter_moe_enabled = is_rocm_aiter_moe_enabled()
    except ImportError:
        from vllm._aiter_ops import rocm_aiter_ops

        self.rocm_aiter_moe_enabled = rocm_aiter_ops.is_fused_moe_enabled()
    assert self.block_quant and self.quant_config.is_checkpoint_fp8_serialized
    assert self.quant_config.activation_scheme == "dynamic"
    if self.flashinfer_moe_backend is not None:
        # FlashInfer path: swap the w1/w3 halves of the fused weight (w13 -> w31).
        layer.w13_weight.data = swap_w13_to_w31(layer.w13_weight.data)
        layer.w13_weight_scale_inv.data = swap_w13_to_w31(layer.w13_weight_scale_inv.data)
    if self.allow_deep_gemm and not is_deep_gemm_e8m0_used():
        if expert_weight_is_col_major(layer.w13_weight_scale_inv):
            layer.w13_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w13_weight_scale_inv)
        if expert_weight_is_col_major(layer.w2_weight_scale_inv):
            layer.w2_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w2_weight_scale_inv)
    if is_deep_gemm_e8m0_used():
        assert layer.weight_block_size is not None
        # Re-quantise the expert weights so their scales are UE8M0.
        block_sz = tuple(layer.weight_block_size)
        requant_weight_ue8m0_inplace(
            layer.w13_weight.data,
            layer.w13_weight_scale_inv.data,
            block_sz,
        )
        requant_weight_ue8m0_inplace(
            layer.w2_weight.data,
            layer.w2_weight_scale_inv.data,
            block_sz,
        )
        # Ensure column-major TMA alignment expected by DeepGEMM.
        if expert_weight_is_col_major(layer.w13_weight_scale_inv):
            layer.w13_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w13_weight_scale_inv)
        if expert_weight_is_col_major(layer.w2_weight_scale_inv):
            layer.w2_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w2_weight_scale_inv)
def apply_vllm_fp8_patches():
    """Patch vLLM's FP8 linear / MoE ``process_weights_after_loading`` methods.

    Picks the v0.10 or v0.11 implementation based on the installed vLLM version
    and records the started patchers in ``fp8_state.vllm_patches`` so they can
    be stopped later (previously they were started and dropped).
    """
    logger.info("Applying vllm fp8 patches for blockwise quantization")
    is_vllm_11_or_later = version.parse(vllm.__version__) >= version.parse("0.11.0")
    linear_target = "vllm.model_executor.layers.quantization.fp8.Fp8LinearMethod.process_weights_after_loading"
    linear_patcher = patch(
        linear_target,
        process_weights_after_loading_for_vllm11 if is_vllm_11_or_later else process_weights_after_loading_for_vllm10,
    )
    linear_patcher.start()
    fp8_state.vllm_patches.append(linear_patcher)
    moe_target = "vllm.model_executor.layers.quantization.fp8.Fp8MoEMethod.process_weights_after_loading"
    moe_patcher = patch(
        moe_target,
        process_weights_after_loading_moe_for_vllm11
        if is_vllm_11_or_later
        else process_weights_after_loading_moe_for_vllm10,
    )
    moe_patcher.start()
    fp8_state.vllm_patches.append(moe_patcher)
| verl__utils__vllm__vllm_fp8_utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The base class for Actor
"""
from abc import ABC, abstractmethod
import torch
from verl import DataProto
__all__ = ["BasePPOActor"]
class BasePPOActor(ABC):
    """Abstract interface shared by PPO actor and reference-policy workers."""

    def __init__(self, config):
        """The base class for PPO actor

        Args:
            config (DictConfig): a config passed to the PPOActor. We expect the type to be
                DictConfig (https://omegaconf.readthedocs.io/), but it can be any namedtuple in general.
        """
        super().__init__()
        self.config = config

    @abstractmethod
    def compute_log_prob(self, data: DataProto) -> torch.Tensor:
        """Compute logits given a batch of data.

        Args:
            data (DataProto): a batch of data represented by DataProto. It must contain key ```input_ids```,
                ```attention_mask``` and ```position_ids```.

        Returns:
            DataProto: a DataProto containing the key ```log_probs```

        NOTE(review): the annotation says ``torch.Tensor`` while this docstring
        says ``DataProto`` — confirm which one implementations actually return.
        """
        pass

    @abstractmethod
    def update_policy(self, data: DataProto) -> dict:
        """Update the policy with an iterator of DataProto

        Args:
            data (DataProto): an iterator over the DataProto that returns by
                ```make_minibatch_iterator```

        Returns:
            Dict: a dictionary contains anything. Typically, it contains the statistics during updating the model
                such as ```loss```, ```grad_norm```, etc,.
        """
        pass
| verl__workers__actor__base.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Single Process Actor
"""
import logging
import os
import torch
from torch import nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.tensor import DTensor
import verl.utils.torch_functional as verl_F
from verl import DataProto
from verl.trainer.ppo.core_algos import agg_loss, get_policy_loss_fn, kl_penalty
from verl.utils.attention_utils import index_first_axis, pad_input, rearrange, unpad_input
from verl.utils.device import get_device_id, get_device_name
from verl.utils.fsdp_utils import FSDPModule, fsdp2_clip_grad_norm_
from verl.utils.profiler import GPUMemoryLogger
from verl.utils.py_functional import append_to_dict
from verl.utils.seqlen_balancing import prepare_dynamic_batch, restore_dynamic_batch
from verl.utils.torch_dtypes import PrecisionType
from verl.utils.torch_functional import logprobs_from_logits
from verl.utils.ulysses import gather_outputs_and_unpad, ulysses_pad, ulysses_pad_and_slice_inputs
from verl.workers.actor import BasePPOActor
from verl.workers.config import ActorConfig
__all__ = ["DataParallelPPOActor"]
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class DataParallelPPOActor(BasePPOActor):
"""FSDP DataParallel PPO Actor or Ref worker
Args:
config (ActorConfig): Actor config
actor_module (nn.Module): Actor or ref module
actor_optimizer (torch.optim.Optimizer, optional): Actor optimizer. Defaults to None.
"""
def __init__(self, config: ActorConfig, actor_module: nn.Module, actor_optimizer: torch.optim.Optimizer = None):
"""When optimizer is None, it is Reference Policy"""
super().__init__(config)
self.actor_module = actor_module
self.actor_optimizer = actor_optimizer
role = "Ref" if actor_optimizer is None else "Actor"
self.use_remove_padding = self.config.get("use_remove_padding", False)
if torch.distributed.get_rank() == 0:
print(f"{role} use_remove_padding={self.use_remove_padding}")
self.use_fused_kernels = self.config.get("use_fused_kernels", False)
if torch.distributed.get_rank() == 0:
print(f"{role} use_fused_kernels={self.use_fused_kernels}")
self.ulysses_sequence_parallel_size = self.config.ulysses_sequence_parallel_size
self.use_ulysses_sp = self.ulysses_sequence_parallel_size > 1
self.use_dynamic_bsz = self.config.get("use_dynamic_bsz", False)
self.use_prefix_grouper = self.config.get("use_prefix_grouper", False)
if torch.distributed.get_rank() == 0:
print(f"{role} use_prefix_grouper={self.use_prefix_grouper}")
if self.config.entropy_from_logits_with_chunking:
entropy_from_logits = verl_F.entropy_from_logits_with_chunking
else:
entropy_from_logits = verl_F.entropy_from_logits
self.compute_entropy_from_logits = (
torch.compile(entropy_from_logits, dynamic=True)
if self.config.get("use_torch_compile", True) # use torch compile by default
else entropy_from_logits
)
self.device_name = get_device_name()
self.param_dtype = PrecisionType.to_dtype(self.config.fsdp_config.get("dtype", "bfloat16"))
if self.param_dtype == torch.float16:
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
self.scaler = ShardedGradScaler(growth_interval=400)
else:
self.scaler = None
# Sum of squared probabilities computation (for optimal_token_baseline)
# Only initialize if calculate_sum_pi_squared config is enabled
if self.config.get("calculate_sum_pi_squared", False):
self.calculate_sum_pi_squared_from_logits = (
torch.compile(verl_F.calculate_sum_pi_squared_from_logits, dynamic=True)
if self.config.get("use_torch_compile", True)
else verl_F.calculate_sum_pi_squared_from_logits
)
assert not (self.use_fused_kernels or self.use_prefix_grouper), (
"calculate_sum_pi_squared is not supported with "
f"{self.use_fused_kernels=} or {self.use_prefix_grouper=} for now."
)
    def _forward_micro_batch(
        self, micro_batch: dict[str, torch.Tensor], temperature: float, calculate_entropy: bool = False
    ) -> dict[str, torch.Tensor]:
        """Run one forward pass over a micro batch and compute per-token response statistics.

        Args:
            micro_batch: model inputs; must contain ``input_ids``, ``attention_mask``,
                ``position_ids`` and ``responses`` (optionally ``multi_modal_inputs``,
                and ``response_mask``/``uid`` for the PrefixGrouper path).
            temperature: softmax temperature the logits are divided by before
                computing log-probs / entropy.
            calculate_entropy: whether to also compute per-token entropy.

        Returns:
            dict[str, torch.Tensor]:
                log_probs: (bs, response_len)
                if calculate_entropy is True:
                    entropys: (bs, response_len)
                if the calculate_sum_pi_squared config flag is True:
                    sum_pi_squared: (bs, response_len)
        """
        calculate_sum_pi_squared = self.config.get("calculate_sum_pi_squared", False)
        sum_pi_squared_checkpointing = self.config.get("sum_pi_squared_checkpointing", False)
        # PrefixGrouper path for shared-prefix optimization
        if self.use_prefix_grouper:
            # PrefixGrouper is only taken when none of the incompatible features are on.
            can_use_pg = (
                not self.use_remove_padding
                and not self.use_ulysses_sp
                and not self.use_fused_kernels
                and not self.use_dynamic_bsz
            )
            if can_use_pg and "response_mask" in micro_batch and "uid" in micro_batch:
                from verl.trainer.ppo.prefix_grouper_utils import forward_micro_batch_with_prefix_grouper

                return forward_micro_batch_with_prefix_grouper(
                    micro_batch=micro_batch,
                    model=self.actor_module,
                    temperature=temperature,
                    calculate_entropy=calculate_entropy,
                    device_name=self.device_name,
                    param_dtype=self.param_dtype,
                    use_chunking_entropy=self.config.get("entropy_from_logits_with_chunking", False),
                )
        response_length = micro_batch["responses"].size(-1)
        multi_modal_inputs = {}
        if "multi_modal_inputs" in micro_batch.keys():
            from verl.utils.model import extract_multi_modal_inputs

            multi_modal_inputs = extract_multi_modal_inputs(micro_batch["multi_modal_inputs"])
        with torch.autocast(device_type=self.device_name, dtype=self.param_dtype):
            input_ids = micro_batch["input_ids"]
            batch_size, seqlen = input_ids.shape
            attention_mask = micro_batch["attention_mask"]
            position_ids = micro_batch["position_ids"]
            entropy = None
            if position_ids.dim() == 3:  # qwen2vl mrope
                position_ids = position_ids.transpose(0, 1)  # (bsz, 4, seqlen) -> (4, bsz, seqlen)
            if self.use_remove_padding:
                # Pack the batch: drop every padding token so the model only sees valid tokens.
                input_ids_rmpad, indices, cu_seqlens, *_ = unpad_input(
                    input_ids.unsqueeze(-1), attention_mask
                )  # input_ids_rmpad (total_nnz, ...)
                input_ids_rmpad = input_ids_rmpad.transpose(0, 1)  # (1, total_nnz)
                # unpad the position_ids to align the rotary
                if position_ids.dim() == 3:
                    position_ids_rmpad = (
                        index_first_axis(rearrange(position_ids, "c b s ... -> (b s) c ..."), indices)
                        .transpose(0, 1)
                        .unsqueeze(1)
                    )  # (4, bsz, seqlen) -> (4, 1, bsz * seqlen)
                else:
                    position_ids_rmpad = index_first_axis(
                        rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."), indices
                    ).transpose(0, 1)
                # An all-zero attention mask would make the packed batch empty; feed a
                # dummy sp_size-length input instead (so collectives still participate),
                # then truncate the outputs to length 0 further below.
                is_mask_all_zero = attention_mask.sum() == 0
                if is_mask_all_zero:
                    input_ids_rmpad = torch.zeros(
                        (1, self.ulysses_sequence_parallel_size),
                        device=input_ids.device,
                        dtype=input_ids.dtype,
                    )
                    if position_ids.dim() == 3:
                        position_ids_rmpad = torch.zeros(
                            (position_ids.shape[0], 1, self.ulysses_sequence_parallel_size),
                            device=position_ids.device,
                            dtype=position_ids.dtype,
                        )
                    else:
                        position_ids_rmpad = torch.zeros(
                            (1, self.ulysses_sequence_parallel_size),
                            device=position_ids.device,
                            dtype=position_ids.dtype,
                        )
                if "image_bound" in multi_modal_inputs:
                    from verl.utils.dataset.vision_utils import process_multi_modal_inputs_for_minicpmo

                    multi_modal_inputs = process_multi_modal_inputs_for_minicpmo(
                        input_ids, attention_mask, position_ids, cu_seqlens, multi_modal_inputs
                    )
                # for compute the log_prob: labels are the inputs shifted left by one token
                input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=1)  # (1, total_nnz)
                # pad and slice the inputs if sp > 1
                if self.use_ulysses_sp:
                    is_vlm_model = hasattr(
                        getattr(self.actor_module, "module", self.actor_module).config, "vision_config"
                    )
                    if is_vlm_model:
                        # vlm model's inputs will be sliced after embedding
                        input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad(
                            input_ids_rmpad,
                            position_ids_rmpad=position_ids_rmpad,
                            sp_size=self.ulysses_sequence_parallel_size,
                        )
                    else:
                        input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs(
                            input_ids_rmpad,
                            position_ids_rmpad=position_ids_rmpad,
                            sp_size=self.ulysses_sequence_parallel_size,
                        )
                    # Labels are sliced the same way so they line up with this rank's shard.
                    input_ids_rmpad_rolled, _, _ = ulysses_pad_and_slice_inputs(
                        input_ids_rmpad_rolled,
                        position_ids_rmpad=None,
                        sp_size=self.ulysses_sequence_parallel_size,
                    )
                input_ids_rmpad_rolled = input_ids_rmpad_rolled.squeeze(0)  # ((total_nnz / sp) + pad)
                # only pass input_ids and position_ids to enable flash_attn_varlen
                extra_args = {}
                if self.use_fused_kernels:
                    extra_args["temperature"] = temperature
                    extra_args["return_dict"] = True
                output = self.actor_module(
                    input_ids=input_ids_rmpad,
                    attention_mask=None,
                    position_ids=position_ids_rmpad,
                    **multi_modal_inputs,
                    use_cache=False,
                    **extra_args,
                )  # prevent model thinks we are generating
                if self.use_fused_kernels:
                    log_probs = output.log_probs.squeeze(0)  # (total_nnz,)
                    entropy_rmpad = output.entropy.squeeze(0)  # (total_nnz,)
                else:
                    logits_rmpad = output.logits.squeeze(0)  # (total_nnz, vocab_size)
                    logits_rmpad.div_(temperature)
                    # if use_sp: ((total_nnz / sp) + pad) ; if not use_sp: (batch, seqlen)
                    # inplace_backward saves memory but clobbers the logits values that the
                    # entropy computation below still needs, so disable it in that case.
                    inplace_backward = True
                    if calculate_entropy:
                        inplace_backward = False
                    log_probs = logprobs_from_logits(
                        logits=logits_rmpad,
                        labels=input_ids_rmpad_rolled,
                        inplace_backward=inplace_backward,
                    )
                    # compute entropy
                    if calculate_entropy:
                        # ((total_nnz / sp) + pad)
                        entropy_rmpad = (
                            self.compute_entropy_from_logits(logits_rmpad)
                            if not self.config.entropy_checkpointing
                            else torch.utils.checkpoint.checkpoint(self.compute_entropy_from_logits, logits_rmpad)
                        )
                    # Compute sum_pi_squared if requested (for optimal_token_baseline)
                    if calculate_sum_pi_squared:
                        sum_pi_squared_rmpad = (
                            self.calculate_sum_pi_squared_from_logits(logits_rmpad)
                            if not sum_pi_squared_checkpointing
                            else torch.utils.checkpoint.checkpoint(
                                self.calculate_sum_pi_squared_from_logits, logits_rmpad
                            )
                        )
                # gather log_prob if sp > 1
                if self.use_ulysses_sp:
                    # gather and unpad for the ulysses sp
                    log_probs = gather_outputs_and_unpad(
                        log_probs,
                        gather_dim=0,
                        unpad_dim=0,
                        padding_size=pad_size,
                    )
                    if calculate_entropy:
                        entropy_rmpad = gather_outputs_and_unpad(
                            entropy_rmpad,
                            gather_dim=0,
                            unpad_dim=0,
                            padding_size=pad_size,
                        )
                    if calculate_sum_pi_squared:
                        sum_pi_squared_rmpad = gather_outputs_and_unpad(
                            sum_pi_squared_rmpad, gather_dim=0, unpad_dim=0, padding_size=pad_size
                        )
                if is_mask_all_zero:
                    # Dummy input produced dummy outputs; truncate to length 0 so pad_input
                    # scatters nothing.
                    # NOTE(review): sum_pi_squared_rmpad is not truncated here — confirm the
                    # all-zero-mask + calculate_sum_pi_squared combination cannot occur or is
                    # handled upstream.
                    log_probs = log_probs[:0]
                    if calculate_entropy:
                        entropy_rmpad = entropy_rmpad[:0]
                # pad back to (bsz, seqlen)
                if calculate_entropy:
                    full_entropy = pad_input(
                        hidden_states=entropy_rmpad.unsqueeze(-1),
                        indices=indices,
                        batch=batch_size,
                        seqlen=seqlen,
                    )
                if calculate_sum_pi_squared:
                    full_sum_pi_squared = pad_input(
                        hidden_states=sum_pi_squared_rmpad.unsqueeze(-1),
                        indices=indices,
                        batch=batch_size,
                        seqlen=seqlen,
                    )
                full_log_probs = pad_input(
                    hidden_states=log_probs.unsqueeze(-1),
                    indices=indices,
                    batch=batch_size,
                    seqlen=seqlen,
                )
                # only return response part: position t holds the statistic for the token
                # generated at t+1, hence the one-token shift in the slice below.
                if calculate_entropy:
                    entropy = full_entropy.squeeze(-1)[:, -response_length - 1 : -1]  # (bsz, response_length)
                if calculate_sum_pi_squared:
                    # (bsz, response_length)
                    sum_pi_squared = full_sum_pi_squared.squeeze(-1)[:, -response_length - 1 : -1]
                log_probs = full_log_probs.squeeze(-1)[:, -response_length - 1 : -1]  # (bsz, response_length)
            else:  # not using rmpad and no ulysses sp
                extra_args = {}
                if self.use_fused_kernels:
                    extra_args["temperature"] = temperature
                    extra_args["return_dict"] = True
                output = self.actor_module(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    **multi_modal_inputs,
                    use_cache=False,
                    **extra_args,
                )  # prevent model thinks we are generating
                if self.use_fused_kernels:
                    log_probs = output.log_probs[:, -response_length - 1 : -1]
                    entropy = output.entropy[:, -response_length - 1 : -1]  # (bsz, response_length)
                else:
                    logits = output.logits
                    logits.div_(temperature)
                    logits = logits[:, -response_length - 1 : -1, :]  # (bsz, response_length, vocab_size)
                    log_probs = logprobs_from_logits(logits, micro_batch["responses"])
                    if calculate_entropy:
                        if not self.config.entropy_checkpointing:
                            entropy = verl_F.entropy_from_logits(logits)  # (bsz, response_length)
                        else:
                            entropy = torch.utils.checkpoint.checkpoint(verl_F.entropy_from_logits, logits)
                    # Compute sum_pi_squared if requested (for optimal_token_baseline)
                    if calculate_sum_pi_squared:
                        sum_pi_squared = (
                            self.calculate_sum_pi_squared_from_logits(logits)
                            if not sum_pi_squared_checkpointing
                            else torch.utils.checkpoint.checkpoint(self.calculate_sum_pi_squared_from_logits, logits)
                        )
        outputs = {"log_probs": log_probs}
        if calculate_entropy:
            outputs["entropys"] = entropy
        if calculate_sum_pi_squared:
            outputs["sum_pi_squared"] = sum_pi_squared
        return outputs
def _optimizer_step(self):
assert self.config.grad_clip is not None
if self.scaler is not None:
self.scaler.unscale_(self.actor_optimizer)
if isinstance(self.actor_module, FSDP):
grad_norm = self.actor_module.clip_grad_norm_(max_norm=self.config.grad_clip)
elif isinstance(self.actor_module, FSDPModule):
grad_norm = fsdp2_clip_grad_norm_(self.actor_module.parameters(), max_norm=self.config.grad_clip)
else:
grad_norm = torch.nn.utils.clip_grad_norm_(self.actor_module.parameters(), max_norm=self.config.grad_clip)
if isinstance(grad_norm, DTensor):
grad_norm = grad_norm.full_tensor()
# if grad_norm is not finite, skip the update
if self.scaler is not None:
self.scaler.step(self.actor_optimizer)
self.scaler.update()
else:
if not torch.isfinite(grad_norm):
print(f"WARN: rank {torch.distributed.get_rank()} grad_norm is not finite: {grad_norm}")
self.actor_optimizer.zero_grad()
else:
self.actor_optimizer.step()
# Clear cached weight scales for QAT (weights changed)
if getattr(self.actor_module, "_qat_fuse_enabled", False):
from verl.utils.qat import invalidate_all_scales
invalidate_all_scales(self.actor_module)
return grad_norm
@GPUMemoryLogger(role="dp actor", logger=logger)
def compute_log_prob(self, data: DataProto, calculate_entropy: bool = False) -> dict[str, torch.Tensor]:
"""Compute the log probability of the responses given input_ids, attention_mask and position_ids
Args:
data (DataProto): a DataProto containing keys
``input_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. Note that input_ids is the
concatenation of prompt and response. Note that ``sequence_length = prompt_length + response_length``.
``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64.
``position_ids``: tensor of shape [batch_size, sequence_length]. torch.int64.
``responses``: tensor of shape [batch_size, response_length]. torch.int64.
Returns:
dict[str, torch.Tensor]: a dict containing keys
- ``log_probs``: tensor of shape [batch_size, response_length]. torch.float32.
- ``entropys``: tensor of shape [batch_size, response_length]. torch.float32.
- ``sum_pi_squared``: tensor of shape [batch_size, response_length]. torch.float32.
"""
calculate_sum_pi_squared = self.config.get("calculate_sum_pi_squared", False)
# set to eval
self.actor_module.eval()
micro_batch_size = data.meta_info["micro_batch_size"]
temperature = data.meta_info["temperature"] # temperature must be in the data.meta_info to avoid silent error
use_dynamic_bsz = data.meta_info["use_dynamic_bsz"]
pad_token_id = data.meta_info.get("pad_token_id", 0)
has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys()
select_keys = ["responses", "input_ids", "attention_mask", "position_ids"]
non_tensor_select_keys = ["multi_modal_inputs"] if has_multi_modal_inputs else []
if self.use_prefix_grouper:
select_keys += [k for k in ["prompts", "response_mask"] if k in data.batch]
if "uid" in data.non_tensor_batch:
non_tensor_select_keys.append("uid")
data = data.select(batch_keys=select_keys, non_tensor_batch_keys=non_tensor_select_keys)
if use_dynamic_bsz:
max_token_len = data.meta_info["max_token_len"] * self.ulysses_sequence_parallel_size
micro_batches, batch_idx_list = prepare_dynamic_batch(data, max_token_len=max_token_len)
else:
micro_batches = data.split(micro_batch_size)
log_probs_lst = []
entropy_lst = []
sum_pi_squared_lst = []
for micro_batch in micro_batches:
micro_batch = micro_batch.to(get_device_id())
model_inputs = {**micro_batch.batch, **micro_batch.non_tensor_batch, "pad_token_id": pad_token_id}
with torch.no_grad():
outputs = self._forward_micro_batch(
model_inputs, temperature=temperature, calculate_entropy=calculate_entropy
)
log_probs_lst.append(outputs["log_probs"])
if calculate_entropy:
entropy_lst.append(outputs["entropys"])
if calculate_sum_pi_squared:
sum_pi_squared_lst.append(outputs["sum_pi_squared"])
log_probs = torch.concat(log_probs_lst, dim=0)
if calculate_entropy:
entropys = torch.concat(entropy_lst, dim=0)
if calculate_sum_pi_squared:
sum_pi_squared = torch.concat(sum_pi_squared_lst, dim=0)
if use_dynamic_bsz:
log_probs = restore_dynamic_batch(log_probs, batch_idx_list)
if calculate_entropy:
entropys = restore_dynamic_batch(entropys, batch_idx_list)
if calculate_sum_pi_squared:
sum_pi_squared = restore_dynamic_batch(sum_pi_squared, batch_idx_list)
outputs = {"log_probs": log_probs}
if calculate_entropy:
outputs["entropys"] = entropys
if calculate_sum_pi_squared:
outputs["sum_pi_squared"] = sum_pi_squared
return outputs
@GPUMemoryLogger(role="dp actor", logger=logger)
def update_policy(self, data: DataProto):
# make sure we are in training mode
self.actor_module.train()
temperature = data.meta_info["temperature"] # temperature must be in the data.meta_info to avoid silent error
pad_token_id = data.meta_info.get("pad_token_id", 0)
select_keys = [
"responses",
"response_mask",
"input_ids",
"attention_mask",
"position_ids",
"old_log_probs",
"advantages",
]
if self.use_prefix_grouper and "prompts" in data.batch.keys():
select_keys.append("prompts")
if self.config.use_kl_loss:
select_keys.append("ref_log_prob")
# Include pre-computed IS weights if present in batch
# Weights are computed centrally in trainer and added to batch when algorithm.rollout_is=True
if "rollout_is_weights" in data.batch.keys():
select_keys.append("rollout_is_weights")
# Include rollout_log_probs for computing rollout_corr metrics in bypass mode
if "rollout_log_probs" in data.batch.keys():
select_keys.append("rollout_log_probs")
has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys()
non_tensor_select_keys = []
if has_multi_modal_inputs:
non_tensor_select_keys.append("multi_modal_inputs")
if self.use_prefix_grouper and "uid" in data.non_tensor_batch.keys():
non_tensor_select_keys.append("uid")
data = data.select(batch_keys=select_keys, non_tensor_batch_keys=non_tensor_select_keys)
# Split to make minibatch iterator for updating the actor
# See PPO paper for details. https://arxiv.org/abs/1707.06347
mini_batches = data.split(self.config.ppo_mini_batch_size)
on_policy = len(mini_batches) == 1 and self.config.ppo_epochs == 1
metrics = {
"actor/pg_loss": 0.0,
"actor/kl_loss": 0.0,
}
for _ in range(self.config.ppo_epochs):
for batch_idx, mini_batch in enumerate(mini_batches):
if self.config.use_dynamic_bsz:
max_token_len = self.config.ppo_max_token_len_per_gpu * self.ulysses_sequence_parallel_size
micro_batches, _ = prepare_dynamic_batch(mini_batch, max_token_len=max_token_len)
else:
self.gradient_accumulation = (
self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu
)
micro_batches = mini_batch.split(self.config.ppo_micro_batch_size_per_gpu)
self.actor_optimizer.zero_grad()
for micro_batch in micro_batches:
micro_batch = micro_batch.to(get_device_id())
micro_batch_metrics = {}
model_inputs = {**micro_batch.batch, **micro_batch.non_tensor_batch, "pad_token_id": pad_token_id}
response_mask = model_inputs["response_mask"]
old_log_prob = model_inputs["old_log_probs"]
advantages = model_inputs["advantages"]
entropy_coeff = self.config.entropy_coeff
loss_agg_mode = self.config.loss_agg_mode
calculate_entropy = self.config.calculate_entropy or (entropy_coeff != 0)
if self.config.use_dynamic_bsz:
loss_scale_factor = response_mask.shape[0] / self.config.ppo_mini_batch_size
else:
loss_scale_factor = 1 / self.gradient_accumulation
# all return: (bsz, response_length)
outputs = self._forward_micro_batch(
model_inputs, temperature=temperature, calculate_entropy=calculate_entropy
)
log_prob = outputs["log_probs"]
entropy = outputs["entropys"] if calculate_entropy else None
# for fully_async_policy
if hasattr(self.config, "use_rollout_log_probs") and self.config.use_rollout_log_probs:
old_log_prob = model_inputs["old_log_probs"]
else:
if on_policy:
old_log_prob = log_prob.detach()
else:
old_log_prob = model_inputs["old_log_probs"]
loss_mode = self.config.policy_loss.get("loss_mode", "vanilla")
# vanilla -> verl.trainer.ppo.core_algos.compute_policy_loss_vanilla
# Extract pre-computed rollout correction weights if present
# Weights are computed centrally in trainer and added when algorithm.rollout_is=True
rollout_is_weights = model_inputs.get("rollout_is_weights", None)
# gpg -> verl.trainer.ppo.core_algos.compute_policy_loss_gpg
# clip_cov -> verl.trainer.ppo.core_algos.compute_policy_loss_clip_cov
policy_loss_fn = get_policy_loss_fn(loss_mode)
# Compute policy loss (any function is expected to return 2 values)
pg_loss, pg_metrics = policy_loss_fn(
old_log_prob=old_log_prob,
log_prob=log_prob,
advantages=advantages,
response_mask=response_mask,
loss_agg_mode=loss_agg_mode,
config=self.config,
rollout_is_weights=rollout_is_weights,
)
micro_batch_metrics.update(pg_metrics)
# Skip if using bypass_mode loss (metrics already computed in pg_metrics)
rollout_log_prob = model_inputs.get("rollout_log_probs", None)
if loss_mode != "bypass_mode" and rollout_log_prob is not None:
# Compute metrics using CURRENT policy π_θ vs π_rollout
# Tracks evolving off-policy gap as π_θ updates during mini-batch training
from verl.trainer.ppo.rollout_corr_helper import compute_rollout_corr_metrics_from_logprobs
rollout_corr_metrics = compute_rollout_corr_metrics_from_logprobs(
log_prob=log_prob,
rollout_log_prob=rollout_log_prob,
response_mask=response_mask,
)
micro_batch_metrics.update(rollout_corr_metrics)
policy_loss = pg_loss
if calculate_entropy and entropy is not None:
entropy_agg = agg_loss(loss_mat=entropy, loss_mask=response_mask, loss_agg_mode=loss_agg_mode)
micro_batch_metrics["actor/entropy"] = entropy_agg.detach().item()
if entropy_coeff != 0:
policy_loss -= entropy_agg * entropy_coeff
if self.config.use_kl_loss:
ref_log_prob = model_inputs["ref_log_prob"]
# compute kl loss
kld = kl_penalty(
logprob=log_prob, ref_logprob=ref_log_prob, kl_penalty=self.config.kl_loss_type
)
kl_loss = agg_loss(loss_mat=kld, loss_mask=response_mask, loss_agg_mode=loss_agg_mode)
policy_loss = policy_loss + kl_loss * self.config.kl_loss_coef
metrics["actor/kl_loss"] += kl_loss.detach().item() * loss_scale_factor
micro_batch_metrics["actor/kl_coef"] = self.config.kl_loss_coef
if self.config.use_dynamic_bsz:
# relative to the dynamic bsz
loss = policy_loss * loss_scale_factor
else:
loss = policy_loss * loss_scale_factor
if self.scaler is not None:
self.scaler.scale(loss).backward()
else:
loss.backward()
metrics["actor/pg_loss"] += pg_loss.detach().item() * loss_scale_factor
append_to_dict(metrics, micro_batch_metrics)
grad_norm = self._optimizer_step()
mini_batch_metrics = {"actor/grad_norm": grad_norm.detach().item()}
append_to_dict(metrics, mini_batch_metrics)
self.actor_optimizer.zero_grad()
return metrics
| verl__workers__actor__dp_actor.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Megatron Actor.
In megatron actor, the differences are:
1. We only make minibatch
Note that our model doesn't have to be `MegatronModule` because we don't share embedding in the last layer
"""
import itertools
import logging
import os
from functools import partial
from typing import Iterable
import torch
import torch.distributed
from megatron.core import parallel_state as mpu
from megatron.core.distributed import finalize_model_grads
# from megatron.core.optimizer import DistributedOptimizer
from megatron.core.optimizer import DistributedOptimizer
from megatron.core.pipeline_parallel import get_forward_backward_func
from omegaconf import OmegaConf
from torch import nn
from verl import DataProto
from verl.trainer.ppo.core_algos import agg_loss, get_policy_loss_fn, kl_penalty
from verl.utils.device import get_device_id, get_torch_device
from verl.utils.megatron.pipeline_parallel import make_batch_generator
from verl.utils.megatron.router_replay_patch import RouterReplay, RouterReplayAction
from verl.utils.megatron.router_replay_utils import (
RouterReplayHelper,
merge_router_topk_indices,
pp_gather,
reorder_and_merge_vpp_layers,
set_router_replay_data,
)
from verl.utils.megatron.tensor_parallel import vocab_parallel_entropy, vocab_parallel_log_probs_from_logits
from verl.utils.megatron_utils import get_megatron_mtp_loss, get_model_config, unwrap_model
from verl.utils.profiler import GPUMemoryLogger
from verl.utils.py_functional import append_to_dict
from verl.utils.seqlen_balancing import get_reverse_idx, rearrange_micro_batches
from verl.utils.torch_functional import broadcast_dict_tensor
from verl.workers.actor import BasePPOActor
from verl.workers.config import MtpConfig
# Public API of this module.
__all__ = ["MegatronPPOActor"]
# NOTE(review): getLogger(__file__) keys the logger by file path (repo-wide verl
# convention); standard logging practice is getLogger(__name__) — confirm before changing.
logger = logging.getLogger(__file__)
# Log level is controlled by the VERL_LOGGING_LEVEL env var (default WARN).
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class MegatronPPOActor(BasePPOActor):
    """PPO actor/ref worker for models built with Megatron-Core (pp/tp/vpp parallelism)."""
    def __init__(
        self,
        config,
        model_config,
        hf_config,
        tf_config,
        actor_module: nn.ModuleList,
        actor_optimizer: DistributedOptimizer,
        mtp_config: MtpConfig = None,
    ):
        """MegatronPPOActor class. This class implements the simple PPO logics when the model is built with Megatron.

        Args:
            config (OmegaConf): the basic config that contains the hyper-parameters of PPO Actor. It must contain

                ``ppo_micro_batch_size_per_gpu``: micro batch size when updating ppo.

                ``ppo_mini_batch_size``: minibatch size when updating ppo using the batch data.

                ``ppo_epochs``: number of epochs to update the actor using the batch data.

                ``shuffle``: whether to shuffle the data after each ppo epoch.

                ``clip_ratio``: clip ratio of the ppo algorithm. See https://arxiv.org/abs/1707.06347.

                ``entropy_coeff``: entropy coefficient of the PPO loss. See https://arxiv.org/abs/1707.06347.
            model_config (OmegaConf): model configuration. It must contains ``model_config.vocab_size`` and
                ``model_config.hidden_size``
            hf_config (PretrainedConfig): huggingface config
            tf_config (TransformerConfig): mcore transformer config
            mtp_config (MtpConfig): mtp config, default None
            actor_module (nn.ModuleList): actor module is a ModuleList that contains a list of nn.Module in this
                pp stage.
                each nn.Module in this rank holds a vpp module chunk. See https://arxiv.org/pdf/2104.04473.pdf for
                more details.
                The actor module has some constraints to follow in order to use the updating logics implemented here

                1. It must implement unpad_input before any computation and pad_input after all the computation.
                Remove padding is an
                optimization that removes the padding tokens. See unpad_input and pad_input function in flash-attn
                (https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/bert_padding.py).

                2. Each pp stage must return the hidden state with the same shape [total_nnz, 1, hidden_size],
                where total_nnz is the number of valid tokens in this batch. If sequence parallel is enabled, the size
                of the hidden state is [total_nnz // tp, 1, hidden_size].
            actor_optimizer (DistributedOptimizer): currently, we only support DistributedOptimizer in Megatron.
                It implements
                zero1 optimizer that shards the optimizer state across dp ranks.

        >>> from megatron.training import get_model
        >>> from megatron.optimizer import get_megatron_optimizer
        >>> actor_module = get_model(megatron_actor_model_provider, wrap_with_ddp=True)
        >>> actor_module = nn.ModuleList(actor_module)
        >>> actor_optimizer = get_megatron_optimizer(actor_module)
        >>> actor = MegatronPPOActor(config=config,
        >>>                          model_config=actor_model_config,
        >>>                          hf_config=hf_config,
        >>>                          tf_config=tf_config,
        >>>                          actor_module=actor_module,
        >>>                          actor_optimizer=actor_optimizer)
        """
        super().__init__(config)
        self._validate_config(config)
        self.model_config = model_config
        self.hf_config = hf_config
        self.tf_config = tf_config
        self.mtp_config = mtp_config
        self.actor_module = actor_module
        self.actor_optimizer: DistributedOptimizer = actor_optimizer
        if self.mtp_config:
            assert self.mtp_config.enable, "MTP requires mtp_config.enable to be True"
        self.use_fused_kernels = self.config.get("use_fused_kernels", False)
        if self.use_fused_kernels and not getattr(self.config, "overlap_moe_expert_parallel_comm", False):
            # do not patch if overlap_moe_expert_parallel_comm is enabled
            # NOTE(review): logging.Logger has no ``warning_once`` method — this call
            # raises AttributeError unless the logger class is patched elsewhere; confirm.
            logger.warning_once(
                "Recommend to disable use_fused_kernels since the fused kernel's performance is broken for triton>=3.3"
                "Unless you are using a very old version of triton < 3.3"
            )
            from verl.models.mcore.model_forward_fused import patch_fused_forward

            for model in self.actor_module:
                patch_fused_forward(model)
        else:
            from verl.models.mcore.mtp_patch import patch_postprocess

            # MTP path: patch post-processing (and optionally detach the encoder
            # embeddings) on every vpp chunk.
            for model in self.actor_module:
                if self.mtp_config:
                    from verl.models.mcore.mtp_patch import patch_mtp_layer_get_embeddings

                    patch_postprocess(model)
                    if self.mtp_config.detach_encoder:
                        patch_mtp_layer_get_embeddings(model)
        # Static arguments consumed by Megatron's optimizer step.
        self.optimizer_step_args = OmegaConf.create(
            {
                "skip_grad": None,
                "overlap_dp_param_comm": False,
                "overlap_dp_grad_comm": False,
                "gradient_accumulation_steps": 1,
                "sequence_parallel": self.tf_config.sequence_parallel,
                "DDP_impl": "local",
                "layernorm_allreduce_bucket_threshold": 0,
                "reduce_grads_use_alltoall": False,
            }
        )
        self.router_replay = self.config.router_replay
        self.enable_routing_replay = self.router_replay.mode != "disabled"
        if self.enable_routing_replay:
            self.mini_layer_topk_idx_list = []
        # NOTE: rebinds the local name `config` to the Megatron model config of the
        # first vpp chunk (no longer the actor config from here on).
        config = get_model_config(self.actor_module[0])
        # NOTE(review): prints the full model config on every rank — looks like
        # leftover debug output; confirm whether it should be rank-0 only / removed.
        print(config)
        config.finalize_model_grads_func = finalize_model_grads
def _validate_config(self, config) -> None:
"""Validate config options not implemented for Megatron backend"""
assert config.get("ulysses_sequence_parallel_size", 1) == 1
if config.get("shuffle", False):
assert config.data_loader_seed is not None, "If shuffle dataloader, seed must be manually set"
if config.megatron.tensor_model_parallel_size == 1:
print("[Warining] Because actor tp size == 1, set sp to False")
config.megatron.sequence_parallel = False
self.config = config
    @GPUMemoryLogger(role="megatron actor", logger=logger)
    def compute_log_prob(
        self, data: DataProto, calculate_entropy: bool = False
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor | None]:
        """Compute the log probability of the responses given input_ids, attention_mask and position_ids

        Args:
            data (DataProto): a DataProto containing keys

                ``input_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. Note that input_ids is the
                concatenation of prompt and response. Note that ``sequence_length = prompt_length + response_length``.

                ``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64.

                ``position_ids``: tensor of shape [batch_size, sequence_length]. torch.int64.

                ``responses``: tensor of shape [batch_size, response_length]. torch.int64.
            calculate_entropy (bool): also compute and return per-token entropy.

        Returns:
            tuple: ``(log_probs, entropys, layers_topk_idx)`` — response log-probs on
            CPU, entropy (empty tensor unless requested), and router top-k indices
            (None unless R2 router-replay recording is active).
        """
        # Remember train/eval mode of every vpp chunk so it can be restored on exit.
        prev_modes = [m.training for m in self.actor_module]
        for module in self.actor_module:
            module.eval()
        use_dynamic_bsz = data.meta_info.get("use_dynamic_bsz", False)
        micro_batch_size = data.meta_info.get("micro_batch_size", None)
        max_token_len = data.meta_info.get("max_token_len", None)
        if use_dynamic_bsz:
            assert max_token_len is not None, "max_token_len must be set when use_dynamic_bsz is True"
            max_token_len = max_token_len * self.config.megatron.context_parallel_size
        else:
            assert micro_batch_size is not None, (
                "micro batch size is needed for forward compute when use_dynamic_bsz is False"
            )

        def compute_logprobs_fn(output, data, use_dynamic_bsz=False, indices=None):
            # Slice out the response positions (one-token shift: position t predicts t+1).
            response = data["responses"]
            response_length = response.size(1)
            log_probs = output["log_probs"][:, -response_length - 1 : -1].contiguous()
            return {"log_probs": log_probs}

        # We make recompute_old_log_prob by default here.
        # TODO (zhangchi.usc1992): actually, this function should only return log_prob and this logic should be
        # handled by user outside
        recompute_old_log_prob = self.config.get("recompute_old_log_prob", True)
        # NOTE(review): when recompute_old_log_prob is False, `log_probs` (and `output`,
        # read in the router-replay branch below) are never assigned, so the final
        # return raises NameError — confirm whether that configuration is reachable.
        entropys = torch.Tensor()
        if recompute_old_log_prob:
            select_keys = ["responses", "input_ids", "attention_mask", "position_ids"]
            if self.enable_routing_replay and self.config.router_replay.mode == "R3":
                assert "routed_experts" in data.batch.keys(), "routed_experts must be in data.batch.keys()"
                select_keys.append("routed_experts")
            batch = data.select(batch_keys=select_keys).batch
            input_ids = batch["input_ids"]
            batch_size = input_ids.size(0)
            response = batch["responses"]
            response_length = response.size(1)
            with torch.no_grad():
                output = self.forward_backward_batch(
                    data,
                    forward_only=True,
                    post_process_fn=compute_logprobs_fn,
                    calculate_entropy=calculate_entropy,
                    use_dynamic_bsz=use_dynamic_bsz,
                    micro_batch_size=micro_batch_size,
                    max_token_len=max_token_len,
                )
                if mpu.is_pipeline_last_stage(ignore_virtual=True):
                    # only on last rank. It should be on every tp rank
                    if calculate_entropy:
                        log_probs = [o[0]["log_probs"] for o in output["output"]]  # (bs, seq_size)
                    else:
                        log_probs = [o["log_probs"] for o in output["output"]]  # (bs, seq_size)
                    log_probs = torch.cat(log_probs, dim=0).to(torch.float32)
                    if use_dynamic_bsz:
                        # dynamic batching permuted the samples; undo that permutation
                        indices = output["indices"]
                        indices = list(itertools.chain.from_iterable(indices))
                        assert len(indices) == log_probs.size(0), f"{len(indices)} vs. {log_probs.size()}"
                        revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long)
                        log_probs = log_probs[revert_indices]
                else:
                    # non-last pp stages allocate an empty buffer to receive the broadcast
                    log_probs = torch.empty(
                        size=(batch_size, response_length), dtype=torch.float32, device=input_ids.device
                    )
                log_probs = log_probs.to(get_device_id())
                # broadcast across pp ranks
                torch.distributed.broadcast(
                    tensor=log_probs,
                    src=mpu.get_pipeline_model_parallel_last_rank(),
                    group=mpu.get_pipeline_model_parallel_group(),
                    async_op=False,
                )
                log_probs = log_probs.to("cpu")
                if calculate_entropy:
                    # Note that o[0] is metrics, o[1] is entropy
                    if mpu.is_pipeline_last_stage(ignore_virtual=True):
                        entropys = torch.cat([o[1] for o in output["output"]], dim=0)
                        entropys = entropys.to(torch.float32)
                        if use_dynamic_bsz:
                            indices = output["indices"]
                            indices = list(itertools.chain.from_iterable(indices))
                            assert len(indices) == entropys.size(0), f"{len(indices)} vs. {entropys.size()}"
                            revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long)
                            entropys = entropys[revert_indices]
                    else:
                        entropys = torch.empty(
                            size=(batch_size, response_length), dtype=torch.float32, device=input_ids.device
                        )
                    # broadcast across pp ranks
                    entropys = entropys.to(get_device_id())
                    torch.distributed.broadcast(
                        tensor=entropys,
                        src=mpu.get_pipeline_model_parallel_last_rank(),
                        group=mpu.get_pipeline_model_parallel_group(),
                        async_op=False,
                    )
                    entropys = entropys.to("cpu")
        layers_topk_idx = None
        if RouterReplayHelper.is_r2_record_action(self.tf_config):
            # (bs, max_seq_len/response_len,local_layer_num,topk)
            layers_topk_idx = output["mini_layer_topk_idx_tensor"].to(torch.uint8)
            if use_dynamic_bsz:
                indices = output["indices"]
                indices = list(itertools.chain.from_iterable(indices))
                assert len(indices) == layers_topk_idx.size(0), f"{len(indices)} vs. {layers_topk_idx.size()}"
                revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long)
                layers_topk_idx = layers_topk_idx[revert_indices]
            layers_topk_idx = pp_gather(layers_topk_idx, self.tf_config)
        # add empty cache after each compute
        get_torch_device().empty_cache()
        # restore the original train/eval mode of every chunk
        for module, mode in zip(self.actor_module, prev_modes, strict=False):
            module.train(mode)
        return log_probs, entropys, layers_topk_idx
def make_minibatch_iterator(self, data: DataProto) -> Iterable[DataProto]:
"""Make minibatch iterator for updating the actor
Args:
data (DataProto): a DataProto containing keys
``input_ids``: tensor of shape [batch_size, sequence_length]. torch.int64, where
``sequence_length = prompt_length + response_length``
``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64
``position_ids``: tensor of shape [batch_size, sequence_length]. torch.int64
``responses``: tensor of shape [batch_size, response_length]. torch.int64. Note that
responses = input_ids[:, -response_length:]
``old_log_probs``: tensor of shape [batch_size, response_length]. torch.float32. The log probability
of responses.
``advantages``: tensor of shape [batch_size, response_length]. torch.float32. The advantages of
responses.
See PPO paper for details. https://arxiv.org/abs/1707.06347
Returns:
"""
select_keys = [
"responses",
"input_ids",
"attention_mask",
"response_mask",
"position_ids",
"old_log_probs",
"advantages",
]
if self.config.use_kl_loss:
select_keys.append("ref_log_prob")
# Include pre-computed IS weights if present in batch
# Weights are computed centrally in trainer and added to batch when algorithm.rollout_is=True
if "rollout_is_weights" in data.batch.keys():
select_keys.append("rollout_is_weights")
# Include rollout_log_probs for computing rollout_corr metrics in bypass mode
if "rollout_log_probs" in data.batch.keys():
select_keys.append("rollout_log_probs")
self.has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys()
# router replay
if self.enable_routing_replay:
select_keys.append("routed_experts")
if self.has_multi_modal_inputs:
data = data.select(select_keys, ["multi_modal_inputs"])
else:
data = data.select(batch_keys=select_keys)
return data.make_iterator(
mini_batch_size=self.config.ppo_mini_batch_size,
epochs=self.config.ppo_epochs,
seed=self.config.data_loader_seed,
dataloader_kwargs={"shuffle": self.config.shuffle},
)
    def forward_backward_batch(
        self,
        data: DataProto,
        forward_only=False,
        post_process_fn=None,
        calculate_entropy=False,
        use_dynamic_bsz=False,
        micro_batch_size=None,
        max_token_len=None,
        mini_batch_size=None,
    ):
        """Run Megatron's pipelined forward (and backward, unless ``forward_only``) over one mini-batch.

        We assume:
        - The model takes input: (input_ids, attention_mask, position_ids). No rmpad for the input
        - The communication shape is (total_nnz_pad_to_sp // tp_size, 1, hidden_size) if sequence parallel is enabled

        Args:
            data (DataProto): mini-batch with ``input_ids``/``attention_mask``/``position_ids``/
                ``responses`` plus training-only keys (``old_log_probs``, ``advantages``, ...);
                ``data.meta_info["temperature"]`` must be set.
            forward_only (bool): when True, skip the backward pass; ``loss_func`` returns a dummy loss.
            post_process_fn (callable): optional hook applied to the model output in ``forward_only`` mode.
            calculate_entropy (bool): also compute per-token entropy from the logits.
            use_dynamic_bsz (bool): pack micro-batches by token budget (``max_token_len``) instead of
                a fixed ``micro_batch_size``.
            micro_batch_size (int): fixed micro-batch size; required when ``use_dynamic_bsz`` is False.
            max_token_len (int): per-GPU token budget; required when ``use_dynamic_bsz`` is True.
            mini_batch_size (int): accepted for interface compatibility; not used in this body.

        Returns:
            dict: ``{"output": per-micro-batch loss_func results}`` plus, when applicable,
            ``indices`` (dynamic-bsz permutation), ``mini_layer_topk_idx_tensor`` (router replay R2)
            and ``mtp_losses``.
        """
        # broadcast from last pp rank to all other pp ranks
        # TODO: actually, we just need to control the sampling order.
        data.to(get_device_id())
        data.batch = data.batch.contiguous()
        mini_batch = data
        broadcast_dict_tensor(
            mini_batch.batch,
            src=mpu.get_pipeline_model_parallel_last_rank(),
            group=mpu.get_pipeline_model_parallel_group(),
        )
        mini_batch.to("cpu")
        # split into micro-batches
        mini_batch.batch["attention_mask"] = mini_batch.batch["attention_mask"].to(bool)
        self.has_multi_modal_inputs = "multi_modal_inputs" in mini_batch.non_tensor_batch.keys()
        if self.has_multi_modal_inputs:
            # Mirror the non-tensor multi-modal inputs into the tensor batch (plus an index
            # tensor) so they survive the micro-batch split below; removed again at the end.
            mini_batch.batch["multi_modal_inputs"] = mini_batch.non_tensor_batch["multi_modal_inputs"]
            mini_batch.batch["multi_modal_inputs_idx"] = torch.Tensor(
                list(range(len(mini_batch.non_tensor_batch["multi_modal_inputs"])))
            ).to(torch.int64)
        if mini_batch.batch["position_ids"].dim() == 3:  # qwen2vl mrope [bs, 3, seq_len]
            mini_batch.batch["position_ids"] = mini_batch.batch["position_ids"][
                :, 0
            ]  # mcore patch recompute qwen2vl's pos ids during forward
        indices = None
        temperature = data.meta_info["temperature"]
        if use_dynamic_bsz:
            assert max_token_len is not None, "max_token_len must be set when use_dynamic_bsz is True"
            vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size()
            if vpp_size is not None and vpp_size > 1:
                # With virtual pipeline parallelism the number of micro-batches must be a
                # multiple of the VPP scheduling group size.
                microbatch_group_size_per_vp_stage = self.tf_config.microbatch_group_size_per_vp_stage
                micro_batches, indices = rearrange_micro_batches(
                    batch=mini_batch.batch,
                    num_batches_divided_by=microbatch_group_size_per_vp_stage,
                    max_token_len=max_token_len,
                )
                assert len(micro_batches) % self.tf_config.microbatch_group_size_per_vp_stage == 0, (
                    f"micro_batches {micro_batches} must be divisible by microbatch_group_size_per_vp_stage "
                    f"{microbatch_group_size_per_vp_stage} for megatron backend"
                )
            else:
                micro_batches, indices = rearrange_micro_batches(batch=mini_batch.batch, max_token_len=max_token_len)
            total_seqlen = max_token_len
        else:
            assert micro_batch_size is not None, (
                "micro_batch_size is needed to be passed in when not using dynamic batch size"
            )
            micro_batches = mini_batch.batch.split(micro_batch_size)
            seq_len = micro_batches[0]["input_ids"].shape[1]
            total_seqlen = micro_batch_size * seq_len
        # compute input shapes for pp stages
        n_micro_batch = len(micro_batches)
        forward_backward_func = get_forward_backward_func()

        def loss_func(output, data, meta_info):
            """Per-micro-batch loss: policy gradient + optional entropy bonus and KL penalty.

            Returns ``(loss, [metrics, entropy])``; in ``forward_only`` mode the loss is a dummy 1.0.
            """
            # For memory efficiency
            # We move calculation of entropy to compute_log_probs, forward_only == True
            log_probs = None
            entropy = None
            if isinstance(output, dict):
                log_probs = output["log_probs"]
                if "entropy" in output:
                    entropy = output["entropy"]
            else:
                assert isinstance(output, torch.Tensor)
                log_probs = output
            device = log_probs.device
            metrics = {}
            if forward_only:
                if post_process_fn is None:
                    pass
                    # metrics["logits"] = output
                else:
                    stats = post_process_fn(output, data)
                    metrics.update(stats)
                if not calculate_entropy:
                    # Nothing more to compute; return a dummy loss for the schedule.
                    return torch.tensor(1.0, device=device), metrics
            responses = data["responses"]
            response_length = responses.size(1)
            response_mask = data["response_mask"].to(bool)
            loss_agg_mode = self.config.loss_agg_mode
            # compute policy loss
            # Shift by one: log_probs[:, t] scores token t+1, so the response scores live here.
            log_prob = log_probs[:, -response_length - 1 : -1].contiguous()
            ret_entropy = None
            stats = {}
            if not forward_only:
                old_log_prob = data["old_log_probs"]
                advantages = data["advantages"]
                entropy_coeff = self.config.entropy_coeff
                loss_agg_mode = self.config.loss_agg_mode
                loss_mode = self.config.policy_loss.get("loss_mode", "vanilla")
                policy_loss_fn = get_policy_loss_fn(loss_mode)
                # Extract pre-computed rollout correction weights if present
                # Weights are computed centrally in trainer and added when algorithm.rollout_is=True
                rollout_is_weights = data.get("rollout_is_weights", None)
                pg_loss, pg_metrics = policy_loss_fn(
                    old_log_prob=old_log_prob,
                    log_prob=log_prob,
                    advantages=advantages,
                    response_mask=response_mask,
                    loss_agg_mode=loss_agg_mode,
                    config=self.config,
                    rollout_is_weights=rollout_is_weights,
                )
                stats.update(pg_metrics)
                # Skip if using bypass_mode loss (metrics already computed in pg_metrics)
                rollout_log_prob = data.get("rollout_log_probs", None)
                if loss_mode != "bypass_mode" and rollout_log_prob is not None:
                    # Compute metrics using CURRENT policy π_θ vs π_rollout
                    # Tracks evolving off-policy gap as π_θ updates during mini-batch training
                    from verl.trainer.ppo.rollout_corr_helper import compute_rollout_corr_metrics_from_logprobs

                    rollout_corr_metrics = compute_rollout_corr_metrics_from_logprobs(
                        log_prob=log_prob,
                        rollout_log_prob=rollout_log_prob,
                        response_mask=response_mask,
                    )
                    stats.update(rollout_corr_metrics)
                stats["actor/pg_loss"] = pg_loss.detach().item()
                policy_loss = pg_loss
            if calculate_entropy:
                entropy = output["entropy"][:, -response_length - 1 : -1].contiguous()
                if not forward_only:
                    entropy_loss = agg_loss(loss_mat=entropy, loss_mask=response_mask, loss_agg_mode=loss_agg_mode)
                    entropy_coeff = meta_info["entropy_coeff"]
                    # Entropy bonus is subtracted: higher entropy lowers the loss.
                    policy_loss = pg_loss - entropy_coeff * entropy_loss
                else:
                    ret_entropy = entropy
            if forward_only:
                policy_loss = torch.tensor(1.0, device=device)
            else:
                if self.config.use_kl_loss:
                    ref_log_prob = data["ref_log_prob"]
                    # compute kl loss
                    kld = kl_penalty(logprob=log_prob, ref_logprob=ref_log_prob, kl_penalty=self.config.kl_loss_type)
                    kl_loss = agg_loss(loss_mat=kld, loss_mask=response_mask, loss_agg_mode=self.config.loss_agg_mode)
                    policy_loss = policy_loss + kl_loss * self.config.kl_loss_coef
                    metrics["actor/kl_loss"] = kl_loss.detach().item()
                    metrics["actor/kl_coef"] = self.config.kl_loss_coef
            # return loss and stats
            append_to_dict(metrics, stats)
            return policy_loss, [metrics, ret_entropy]

        def forward_step(batch_iter, model, return_schedule_plan: bool = False):
            """
            Args:
                batch_iter: the batch iterator
                model: the model
                return_schedule_plan: whether to return the schedule plan, for 1f1b overlap
            """
            if return_schedule_plan:
                # 1F1B-overlap scheduling is only supported for fused-kernel MoE GPTModel.
                assert self.tf_config.overlap_moe_expert_parallel_comm, (
                    "overlap_moe_expert_parallel_comm must be enabled to return the schedule plan"
                )
                # TODO: Fix this
                assert not calculate_entropy, "calculate_entropy must be disabled to return the schedule plan"
                from megatron.core.models.gpt.gpt_model import GPTModel

                assert isinstance(model, GPTModel), "model must be a GPTModel"
                assert self.use_fused_kernels, "use_fused_kernels must be enabled to return the schedule plan"
                # TODO: support VLM with MoE
                from verl.models.mcore.model_forward_1f1b_overlap import gptmodel_forward_1f1b_overlap
            batch = next(batch_iter)
            batch = batch.to(get_device_id())
            batch = batch.contiguous()
            input_ids = batch["input_ids"]
            attention_mask = batch["attention_mask"].to(bool)
            position_ids = batch["position_ids"]
            unwrapped_model = unwrap_model(model)
            # vp_stage identifies this module's virtual-pipeline chunk; 0 when VPP is off.
            if hasattr(unwrapped_model, "vp_stage"):
                vp_rank = unwrapped_model.vp_stage
            else:
                vp_rank = 0
            multi_modal_inputs = {}
            if "multi_modal_inputs" in batch:
                from verl.utils.model import extract_multi_modal_inputs

                indices = batch.get("multi_modal_inputs_idx", None)
                multi_modal_inputs = extract_multi_modal_inputs(batch["multi_modal_inputs"], indices)
            responses = batch["responses"]
            response_length = responses.size(1)
            # Labels: response tokens placed one position earlier than their input slot;
            # everything outside the response window and the final position is masked out.
            label = position_ids.clone()
            label[:, -response_length - 1 : -1] = responses
            label_mask = attention_mask.clone()
            label_mask[:, : -response_length - 1] = False
            label_mask[:, -1] = False
            if RouterReplayHelper.is_replay_backward_action(self.tf_config, vp_rank):
                router_instance_list = RouterReplayHelper.get_micro_batch_router_list(self.tf_config, vp_rank)
                for router in router_instance_list:
                    router.set_router_replay_action(RouterReplayAction.REPLAY_FORWARD)
            if RouterReplayHelper.is_replay_forward_action(self.tf_config, vp_rank):
                # Feed the recorded expert routing so this forward replays the rollout's routing.
                layers_topk_idx = batch["routed_experts"]
                set_router_replay_data(layers_topk_idx, attention_mask, self.tf_config, vp_rank)
            from verl.models.mcore import get_mcore_forward_fn, get_mcore_forward_fused_fn

            if self.use_fused_kernels:
                forward_fn = get_mcore_forward_fused_fn(self.hf_config)
                if return_schedule_plan:
                    forward_fn = gptmodel_forward_1f1b_overlap
                # return dict of [logits, entropy]
                output = forward_fn(
                    model=model,
                    input_ids=input_ids,
                    position_ids=position_ids,
                    attention_mask=attention_mask,
                    labels=label,
                    labels_mask=label_mask,
                    temperature=temperature,
                    multi_modal_inputs=multi_modal_inputs,
                )
            else:
                forward_fn = get_mcore_forward_fn(self.hf_config)

                def logits_processor(logits, label, label_mask):
                    """Scale logits by temperature (in place) and return masked log-probs
                    (plus entropy when requested)."""
                    assert logits.shape[:2] == label.shape[:2]
                    assert label.shape == label_mask.shape
                    logits.div_(temperature)
                    ret = {}
                    if calculate_entropy:
                        # clone ensures correctness at extra memory cost (see disabled hint below)
                        logits_bak = logits.clone()
                        # # disable the hint until the fused_kernel is optimized for triton>=3.3
                        # logger.warning_once(
                        #     "For memory-efficient computation, enable fused kernels via "
                        #     "`actor_rollout_ref.model.use_fused_kernels=True`. "
                        #     "The current `clone()` operation ensures correctness but increases memory usage."
                        # )
                        entropy = vocab_parallel_entropy(logits)
                        ret["entropy"] = entropy
                    else:
                        logits_bak = logits
                    log_probs = vocab_parallel_log_probs_from_logits(logits_bak, label)
                    log_probs = log_probs.masked_fill(~label_mask, 0.0)
                    ret["log_probs"] = log_probs
                    return ret

                logits_processor_args = {"label": label, "label_mask": label_mask}
                output = forward_fn(
                    model=model,
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    multi_modal_inputs=multi_modal_inputs,
                    logits_processor=logits_processor,
                    logits_processor_args=logits_processor_args,
                    data_format="thd" if self.config.megatron.use_remove_padding else "bshd",
                    mtp_config=None if forward_only else self.mtp_config,
                )
            if forward_only:
                meta_info = None
            else:
                clip_ratio_c = self.config.get("clip_ratio_c", 3.0)
                meta_info = {
                    "clip_ratio": self.config.clip_ratio,
                    "entropy_coeff": self.config.entropy_coeff,
                    "clip_ratio_c": clip_ratio_c,
                }
            if RouterReplayHelper.is_r2_record_action(self.tf_config, vp_rank):
                merge_router_topk_indices(
                    attention_mask, input_ids, self.mini_layer_topk_idx_list, self.tf_config, vp_rank
                )
            if RouterReplayHelper.is_replay_forward_action(self.tf_config, vp_rank):
                router_instance_list = RouterReplayHelper.get_micro_batch_router_list(self.tf_config, vp_rank)
                for router in router_instance_list:
                    router.set_router_replay_action(RouterReplayAction.REPLAY_BACKWARD)
            return output, partial(loss_func, data=batch, meta_info=meta_info)

        # batch should be a list of batches inside micro-batches
        batch_generator = make_batch_generator(micro_batches, vpp_size=len(self.actor_module))
        # TODO: we may use the new schedule instead
        # for flash-attn: (seq_len, batch_size, hidden_size) = (mbs*seq_len, 1, hidden_size)
        if mpu.get_pipeline_model_parallel_world_size() > 1:
            losses_reduced = forward_backward_func(
                forward_step_func=forward_step,
                data_iterator=batch_generator,
                model=self.actor_module,
                num_microbatches=n_micro_batch,
                seq_length=total_seqlen,  # no use when input_shapes was set
                micro_batch_size=1,  # no use when input_shapes was set
                forward_only=forward_only,
            )
        else:
            losses_reduced = forward_backward_func(
                forward_step_func=forward_step,
                data_iterator=batch_generator,
                model=self.actor_module,
                num_microbatches=n_micro_batch,
                seq_length=total_seqlen,  # in use for pp = 1
                micro_batch_size=1,  # in use for pp = 1
                forward_only=forward_only,
            )
        # loss_reduces contains the stats returned from loss_func
        if self.has_multi_modal_inputs:
            # Drop the temporary mirrors added before the micro-batch split.
            data.batch.pop("multi_modal_inputs")
            data.batch.pop("multi_modal_inputs_idx")
            data.non_tensor_batch.pop("multi_modal_inputs")
        losses_reduced = {"output": losses_reduced}
        if use_dynamic_bsz:
            # Callers need the permutation indices to restore the original sample order.
            losses_reduced["indices"] = indices
        if RouterReplayHelper.is_r2_record_action(self.tf_config):
            if self.tf_config.virtual_pipeline_model_parallel_size is not None:
                # config = self.actor_module[0].module.module.config
                vp_size = len(self.actor_module)
                microbatch_group_size_per_vp_stage = self.tf_config.microbatch_group_size_per_vp_stage
                bs = n_micro_batch
                losses_reduced["mini_layer_topk_idx_tensor"] = reorder_and_merge_vpp_layers(
                    self.mini_layer_topk_idx_list, bs, vp_size, microbatch_group_size_per_vp_stage
                )
            else:
                losses_reduced["mini_layer_topk_idx_tensor"] = torch.cat(self.mini_layer_topk_idx_list, dim=0)
            # Reset the accumulator for the next mini-batch.
            self.mini_layer_topk_idx_list = []
        # Collect and pass MTP metrics to losses_reduced
        if not forward_only and self.mtp_config and self.mtp_config.enable_train:
            metrics = get_megatron_mtp_loss(n_micro_batch)
            losses_reduced["mtp_losses"] = [metrics]
        return losses_reduced
    @GPUMemoryLogger(role="megatron actor", logger=logger)
    def update_policy(self, dataloader: Iterable[DataProto], enable_mtp: bool = False) -> dict:
        """Update the policy with an iterator of DataProto

        Args:
            dataloader (Iterable[DataProto]): an iterator over the DataProto that returns by
                ``make_minibatch_iterator``.
                The keys of each data batch is described in the make_minibatch_iterator.
            enable_mtp (bool, optional): whether to enable MTP communication

        Returns:
            Dict: a dictionary containing the statistics. Note that the statistics are only valid in the last pp stage
            and users have to combine the output in each dp rank manually.
        """
        metrics = {}
        for data in dataloader:
            if self.config.router_replay.mode in ["R2", "R3"]:
                RouterReplay.set_global_router_replay_action(RouterReplayAction.REPLAY_FORWARD)
            self.actor_optimizer.zero_grad()
            # use use_contiguous_buffers_in_local_ddp and no overlap_dp_param_comm
            for chunk in self.actor_module:
                # if use distributed optimizer, zero grad buffer will be handled by optimizer
                chunk.zero_grad_buffer()
            # Entropy is only worth computing when it contributes to the loss.
            calculate_entropy = self.config.entropy_coeff != 0
            if data.meta_info.get("micro_batch_size", None) is not None:
                # A per-batch override takes precedence over the static config value.
                micro_batch_size = data.meta_info["micro_batch_size"]
            else:
                micro_batch_size = self.config.ppo_micro_batch_size_per_gpu
            max_token_len = None
            if self.config.use_dynamic_bsz:
                # Token budget is scaled by the context-parallel size (each CP rank holds a slice).
                max_token_len = self.config.ppo_max_token_len_per_gpu * self.config.megatron.context_parallel_size
            metric_micro_batch = self.forward_backward_batch(
                data,
                calculate_entropy=calculate_entropy,
                use_dynamic_bsz=self.config.use_dynamic_bsz,
                micro_batch_size=micro_batch_size,
                max_token_len=max_token_len,
                mini_batch_size=self.config.ppo_mini_batch_size,
            )
            mtp_losses = metric_micro_batch.get("mtp_losses", None)
            if mtp_losses is not None:
                # mtp_losses is now in format: [{"mtp_losses/mtp_1_loss": [value1], "mtp_losses/mtp_2_loss": [value2]}]
                for mtp_metrics_dict in mtp_losses:
                    append_to_dict(metrics, mtp_metrics_dict)
            metric_micro_batch = metric_micro_batch["output"]
            for metric in metric_micro_batch:
                # Note that o[0] is metrics, o[1] is entropy, o[2] is response_mask
                append_to_dict(metrics, metric[0])  # append the metric from this micro-batch to global metrics.
            update_successful, grad_norm, num_zeros_in_grad = self.actor_optimizer.step()
            # NOTE(review): `data` (the loop variable) is rebound here; it is only used for the
            # metric append below and re-assigned at the next iteration.
            data = {"actor/grad_norm": grad_norm}
            append_to_dict(metrics, data)
            if update_successful:
                # allgather already execute in optimizer.step in new megatron
                pass
            else:
                raise NotImplementedError
        if self.config.router_replay.mode in ["R2", "R3"]:
            RouterReplay.clear_global_router_replay_action()
            RouterReplay.clear_global_indices()
        # Release grads and cached blocks before returning to keep peak memory down.
        self.actor_optimizer.zero_grad()
        get_torch_device().empty_cache()
        return metrics
| verl__workers__actor__megatron_actor.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Optional
from omegaconf import MISSING
from verl.base_config import BaseConfig
from verl.trainer.config import CheckpointConfig
from verl.utils.profiler.config import ProfilerConfig
from verl.utils.qat import QATConfig
from .engine import FSDPEngineConfig, McoreEngineConfig, VeOmniEngineConfig
from .model import HFModelConfig
from .optimizer import OptimizerConfig
__all__ = [
"PolicyLossConfig",
"RouterReplayConfig",
"ActorConfig",
"FSDPActorConfig",
"McoreActorConfig",
"VeOmniActorConfig",
"QATConfig",
]
@dataclass
class RouterReplayConfig(BaseConfig):
    """Configuration for router replay in MoE models.

    This configuration controls the routing behavior for Mixture of Experts (MoE) models,
    allowing for deterministic training through route recording and replay.

    Args:
        mode (str): Router replay mode. Options: 'disabled', 'R2', 'R3'.
            - 'disabled': No router replay functionality
            - 'R2': Use Router Replay routing strategy
            - 'R3': Use Rollout Router Replay routing strategy
        record_file (Optional[str]): File path to save recorded routing decisions.
            Presumably consumed by the 'R2'/'R3' strategies; unused when mode is 'disabled'.
        replay_file (Optional[str]): File path to load recorded routing decisions for replay.
            Presumably consumed by the 'R2'/'R3' strategies; unused when mode is 'disabled'.
    """

    # Validated in __post_init__ against {'disabled', 'R2', 'R3'}.
    mode: str = "disabled"
    record_file: Optional[str] = None
    replay_file: Optional[str] = None

    def __post_init__(self):
        """Validate router replay configuration."""
        valid_modes = ["disabled", "R2", "R3"]
        if self.mode not in valid_modes:
            raise ValueError(f"Invalid router_replay mode: {self.mode}. Must be one of {valid_modes}")
@dataclass
class PolicyLossConfig(BaseConfig):
    """Configuration for policy loss computation.

    The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.

    Args:
        loss_mode (str): Loss function mode. Options: 'vanilla', 'clip-cov', 'kl-cov', 'gpg'.
        clip_cov_ratio (float): Ratio of tokens to be clipped for clip-cov loss.
        clip_cov_lb (float): Lower bound for clip-cov loss.
        clip_cov_ub (float): Upper bound for clip-cov loss.
        kl_cov_ratio (float): Ratio of tokens to be applied KL penalty for kl-cov loss.
        ppo_kl_coef (float): KL divergence penalty coefficient.
    """

    loss_mode: str = "vanilla"
    # clip-cov / kl-cov parameters; only read by the corresponding loss modes.
    clip_cov_ratio: float = 0.0002
    clip_cov_lb: float = 1.0
    clip_cov_ub: float = 5.0
    kl_cov_ratio: float = 0.0002
    ppo_kl_coef: float = 0.1
@dataclass
class ActorConfig(BaseConfig):
    """Configuration for actor model training.

    The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.

    Args:
        strategy (str): Training strategy. Must be specified.
        ppo_mini_batch_size (int): Mini-batch size for PPO training.
        ppo_micro_batch_size (Optional[int]): Micro-batch size for PPO training.
            If None, uses ppo_micro_batch_size_per_gpu.
        ppo_micro_batch_size_per_gpu (Optional[int]): Micro-batch size per GPU for PPO training.
        use_dynamic_bsz (bool): Whether to use dynamic batch sizing.
        ppo_max_token_len_per_gpu (int): Maximum token length per GPU for PPO training.
        clip_ratio (float): PPO clipping ratio for policy loss.
        clip_ratio_low (float): Lower bound for PPO clipping ratio.
        clip_ratio_high (float): Upper bound for PPO clipping ratio.
        policy_loss (PolicyLossConfig): Configuration for policy loss computation.
        clip_ratio_c (float): Clipping ratio for critic loss.
        loss_agg_mode (str): Loss aggregation mode. Options: 'token-mean', 'seq-mean-token-sum',
            'seq-mean-token-mean', 'seq-mean-token-sum-norm'.
        loss_scale_factor (Optional[int]): Scale factor for 'seq-mean-token-sum-norm' loss aggregation mode.
            If None, uses response_length. Set to a constant to ensure consistent normalization.
        entropy_coeff (float): Entropy coefficient for regularization.
        tau_pos (float): Positive tau for SAPO smoothing (>= 1.0 keeps rewards stable).
        tau_neg (float): Negative tau for SAPO smoothing (> tau_pos for asymmetry).
        use_kl_loss (bool): Whether to use KL divergence loss.
        use_torch_compile (bool): Whether to use torch.compile for optimization.
        kl_loss_coef (float): KL divergence loss coefficient.
        kl_loss_type (str): Type of KL loss to use.
        ppo_epochs (int): Number of PPO epochs per training step.
        shuffle (bool): Whether to shuffle data during training.
        checkpoint (CheckpointConfig): Configuration for checkpointing.
        optim (OptimizerConfig): Configuration for optimizer.
        use_fused_kernels (bool): Whether to use custom fused kernels (e.g., FlashAttention, fused MLP).
        data_loader_seed (int): Seed for data loader. If None, uses global seed.
        router_replay (RouterReplayConfig): Configuration for router replay in MoE models.
    """

    # Fields that may legitimately be mutated after construction (e.g. batch-size
    # normalization and engine wiring in subclasses' __post_init__).
    _mutable_fields = BaseConfig._mutable_fields | {
        "ppo_mini_batch_size",
        "ppo_micro_batch_size",
        "ppo_micro_batch_size_per_gpu",
        "ppo_infer_micro_batch_size_per_gpu",
        "engine",
        "model_config",
    }

    strategy: str = MISSING
    ppo_mini_batch_size: int = 256
    ppo_micro_batch_size: Optional[int] = None  # deprecate
    ppo_micro_batch_size_per_gpu: Optional[int] = None
    ppo_infer_micro_batch_size_per_gpu: Optional[int] = None
    use_dynamic_bsz: bool = False
    ppo_max_token_len_per_gpu: int = 16384
    ppo_infer_max_token_len_per_gpu: int = 16384
    clip_ratio: float = 0.2
    clip_ratio_low: float = 0.2
    clip_ratio_high: float = 0.2
    freeze_vision_tower: bool = False
    policy_loss: PolicyLossConfig = field(default_factory=PolicyLossConfig)
    clip_ratio_c: float = 3.0
    loss_agg_mode: str = "token-mean"
    loss_scale_factor: Optional[int] = None
    entropy_coeff: float = 0
    tau_pos: float = 1.0
    tau_neg: float = 1.05
    calculate_entropy: bool = False
    use_kl_loss: bool = False
    # Whether to enable PrefixGrouper-based shared-prefix forward
    use_prefix_grouper: bool = False
    use_torch_compile: bool = True
    kl_loss_coef: float = 0.001
    kl_loss_type: str = "low_var_kl"
    ppo_epochs: int = 1
    shuffle: bool = False
    data_loader_seed: int = 1
    checkpoint: CheckpointConfig = field(default_factory=CheckpointConfig)
    optim: OptimizerConfig = field(default_factory=OptimizerConfig)
    use_fused_kernels: bool = False
    profiler: ProfilerConfig = field(default_factory=ProfilerConfig)
    # Overwritten with the backend-specific engine config in subclasses' __post_init__.
    engine: BaseConfig = field(default_factory=BaseConfig)
    rollout_n: int = MISSING  # must be override by sampling config
    # NOTE(review): annotated HFModelConfig but default_factory is BaseConfig —
    # presumably replaced with a real HFModelConfig by the worker; confirm.
    model_config: HFModelConfig = field(default_factory=BaseConfig)
    router_replay: RouterReplayConfig = field(default_factory=RouterReplayConfig)
    # Store global batch info for loss aggregation:
    # dp_size: data parallel size
    # batch_num_tokens: number of valid tokens in global batch
    # global_batch_size: global batch size
    global_batch_info: dict = field(default_factory=dict)

    def __post_init__(self):
        """Validate actor configuration parameters."""
        assert self.strategy != MISSING
        assert self.rollout_n != MISSING
        if not self.use_dynamic_bsz:
            # With a static batch size exactly one of the two micro-batch-size knobs must be set.
            if self.ppo_micro_batch_size is not None and self.ppo_micro_batch_size_per_gpu is not None:
                raise ValueError(
                    "[actor] You have set both 'actor.ppo_micro_batch_size' AND 'actor.ppo_micro_batch_size_per_gpu'. "
                    "Please remove 'actor.ppo_micro_batch_size' because only '*_ppo_micro_batch_size_per_gpu' is "
                    "supported (the former is deprecated)."
                )
            else:
                assert not (self.ppo_micro_batch_size is None and self.ppo_micro_batch_size_per_gpu is None), (
                    "[actor] Please set at least one of 'actor.ppo_micro_batch_size' or "
                    "'actor.ppo_micro_batch_size_per_gpu' if use_dynamic_bsz is not enabled."
                )
        valid_loss_agg_modes = [
            "token-mean",
            "seq-mean-token-sum",
            "seq-mean-token-mean",
            "seq-mean-token-sum-norm",
        ]
        if self.loss_agg_mode not in valid_loss_agg_modes:
            raise ValueError(f"Invalid loss_agg_mode: {self.loss_agg_mode}")

    def validate(self, n_gpus: int, train_batch_size: int, model_config: dict = None):
        """Validate actor configuration with runtime parameters."""
        if not self.use_dynamic_bsz:
            if train_batch_size < self.ppo_mini_batch_size:
                raise ValueError(
                    f"train_batch_size ({train_batch_size}) must be >= "
                    f"actor.ppo_mini_batch_size ({self.ppo_mini_batch_size})"
                )
            # Only FSDP subclasses define ulysses_sequence_parallel_size; default to 1 otherwise.
            sp_size = getattr(self, "ulysses_sequence_parallel_size", 1)
            if self.ppo_micro_batch_size is not None:
                if self.ppo_mini_batch_size % self.ppo_micro_batch_size != 0:
                    raise ValueError(
                        f"ppo_mini_batch_size ({self.ppo_mini_batch_size}) must be divisible by "
                        f"ppo_micro_batch_size ({self.ppo_micro_batch_size})"
                    )
                if self.ppo_micro_batch_size * sp_size < n_gpus:
                    raise ValueError(
                        f"ppo_micro_batch_size ({self.ppo_micro_batch_size}) * "
                        f"ulysses_sequence_parallel_size ({sp_size}) must be >= n_gpus ({n_gpus})"
                    )

    @staticmethod
    def _check_mutually_exclusive(mbs, mbs_per_gpu, name: str):
        """Validate mutually exclusive micro batch size configuration options."""
        param = "ppo_micro_batch_size"
        param_per_gpu = f"{param}_per_gpu"
        if mbs is None and mbs_per_gpu is None:
            raise ValueError(f"[{name}] Please set at least one of '{name}.{param}' or '{name}.{param_per_gpu}'.")
        if mbs is not None and mbs_per_gpu is not None:
            raise ValueError(
                f"[{name}] You have set both '{name}.{param}' AND '{name}.{param_per_gpu}'. Please remove "
                f"'{name}.{param}' because only '*_{param_per_gpu}' is supported (the former is deprecated)."
            )
@dataclass
class McoreActorConfig(ActorConfig):
    """Configuration for Megatron actor models.

    The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.

    Args:
        strategy (str): Training strategy set to 'megatron' for Megatron parallelism.
        load_weight (bool): Whether to load model weights from checkpoint.
        megatron (McoreEngineConfig): Configuration for Megatron parallelism settings.
        profile (dict[str, Any]): Configuration for profiling settings.
        use_rollout_log_probs (bool): Whether to use log-probs produced by the rollout engine.
    """

    strategy: str = "megatron"
    load_weight: bool = True
    megatron: McoreEngineConfig = field(default_factory=McoreEngineConfig)
    profile: dict[str, Any] = field(default_factory=dict)
    use_rollout_log_probs: bool = False

    def __post_init__(self):
        """Validate Megatron actor configuration and wire the generic engine alias."""
        super().__post_init__()
        # Expose the Megatron config under the backend-agnostic ``engine`` field.
        self.engine = self.megatron
@dataclass
class FSDPActorConfig(ActorConfig):
    """Configuration for FSDP actor models.

    The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.

    Args:
        strategy (str): Training strategy set to 'fsdp' for Fully Sharded Data Parallel.
        grad_clip (float): Gradient clipping threshold.
        ulysses_sequence_parallel_size (int): [DEPRECATED] Ulysses sequence parallel size for long sequences.
        entropy_from_logits_with_chunking (bool): Whether to compute entropy from logits
            with chunking for memory efficiency.
        entropy_checkpointing (bool): Whether to use gradient checkpointing for entropy computation.
        fsdp_config (FSDPEngineConfig): Configuration for FSDP settings.
        use_remove_padding (bool): Whether to remove padding tokens in inputs during training
    """

    strategy: str = "fsdp"
    grad_clip: float = 1.0
    ulysses_sequence_parallel_size: int = 1
    entropy_from_logits_with_chunking: bool = False
    entropy_checkpointing: bool = False
    fsdp_config: FSDPEngineConfig = field(default_factory=FSDPEngineConfig)
    use_remove_padding: bool = False
    use_rollout_log_probs: bool = False
    calculate_sum_pi_squared: bool = False
    sum_pi_squared_checkpointing: bool = False
    qat: QATConfig = field(default_factory=QATConfig)

    def __post_init__(self):
        """Validate FSDP actor configuration parameters."""
        super().__post_init__()
        # Expose the FSDP config under the backend-agnostic ``engine`` field.
        self.engine = self.fsdp_config
        # backward compatibility
        if self.ulysses_sequence_parallel_size > 1:
            self.fsdp_config.ulysses_sequence_parallel_size = self.ulysses_sequence_parallel_size

    def validate(self, n_gpus: int, train_batch_size: int, model_config: dict = None):
        """Validate FSDP actor configuration with runtime parameters."""
        super().validate(n_gpus, train_batch_size, model_config)
        # Ulysses sequence parallelism requires padding removal in the model forward.
        if self.strategy in {"fsdp", "fsdp2"} and self.ulysses_sequence_parallel_size > 1:
            if model_config and not model_config.get("use_remove_padding", False):
                raise ValueError(
                    "When using sequence parallelism for actor/ref policy, you must enable `use_remove_padding`."
                )
@dataclass
class VeOmniActorConfig(ActorConfig):
    """Configuration for VeOmni actor models.

    The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.

    Args:
        strategy (str): Training strategy set to 'veomni' for VeOmni parallelism.
        veomni (VeOmniEngineConfig): Configuration for VeOmni settings.
        use_remove_padding (bool): Whether to remove padding tokens in inputs during training
        use_rollout_log_probs (bool): Whether to use log-probs produced by the rollout engine.
    """

    strategy: str = "veomni"
    veomni: VeOmniEngineConfig = field(default_factory=VeOmniEngineConfig)
    use_remove_padding: bool = False
    use_rollout_log_probs: bool = False

    def __post_init__(self):
        """Validate VeOmni actor configuration parameters."""
        super().__post_init__()
        # Expose the VeOmni config under the backend-agnostic ``engine`` field.
        self.engine = self.veomni
| verl__workers__config__actor.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from dataclasses import dataclass, field
from typing import Optional
from omegaconf import MISSING
from verl.base_config import BaseConfig
from verl.trainer.config import BaseModelConfig, CheckpointConfig
from verl.utils.profiler import ProfilerConfig
from .engine import FSDPEngineConfig, McoreEngineConfig
from .model import HFModelConfig
from .optimizer import OptimizerConfig
__all__ = ["CriticConfig", "FSDPCriticConfig", "McoreCriticConfig", "FSDPCriticModelCfg"]
@dataclass
class CriticConfig(BaseConfig):
    """Configuration for critic model training.
    The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
    Args:
        strategy (str): Strategy used for critic model training (fsdp, fsdp2, megatron).
        ppo_micro_batch_size_per_gpu (Optional[int]): Local per-GPU micro batch size.
        rollout_n (int): Number of rollouts per update (mirrors actor rollout_n).
        optim (OptimizerConfig): Optimizer configuration including lr, weight_decay, etc.
        model (BaseModelConfig): Deprecated model configuration; prefer ``model_config``.
        model_config (Optional[HFModelConfig]): HF model configuration; derived from ``model``
            in ``__post_init__`` when left unset.
        ppo_mini_batch_size (int): PPO mini-batch size per update.
        ppo_micro_batch_size (Optional[int]): Global micro batch size (deprecated).
        use_dynamic_bsz (bool): Whether to automatically adjust batch size at runtime.
        ppo_max_token_len_per_gpu (int): Max tokens per GPU in one PPO batch.
        forward_max_token_len_per_gpu (int): Max token length per GPU in forward pass.
        ppo_epochs (int): Number of PPO epochs per batch.
        shuffle (bool): Shuffle training data across PPO epochs.
        cliprange_value (float): PPO value function clipping range.
        loss_agg_mode (str): Loss aggregation mode.
        checkpoint (CheckpointConfig): Checkpoint configuration.
        profiler (ProfilerConfig): Profiler configuration.
        enable (Optional[bool]): Whether to enable the critic.
    """
    # Fields that may be rewritten after construction (batch-size normalization,
    # deprecated-model fallback in __post_init__).
    _mutable_fields = BaseConfig._mutable_fields | {
        "ppo_micro_batch_size_per_gpu",
        "ppo_mini_batch_size",
        "ppo_micro_batch_size",
        "model_config",
    }
    strategy: str = MISSING
    ppo_micro_batch_size_per_gpu: Optional[int] = None
    enable: Optional[bool] = None
    rollout_n: int = 1
    ppo_mini_batch_size: int = 1
    use_dynamic_bsz: bool = False
    ppo_max_token_len_per_gpu: int = 32768
    # deprecate this
    forward_max_token_len_per_gpu: int = 32768
    ppo_infer_micro_batch_size_per_gpu: Optional[int] = None
    ppo_infer_max_token_len_per_gpu: int = 32768
    ppo_epochs: int = 1
    data_loader_seed: int = 1
    shuffle: bool = True
    cliprange_value: float = 0.5
    loss_agg_mode: str = "token-mean"
    ppo_micro_batch_size: Optional[int] = None
    engine: BaseConfig = field(default_factory=BaseConfig)
    optim: OptimizerConfig = field(default_factory=OptimizerConfig)
    # deprecate model to favor model_config
    model: BaseModelConfig = field(default_factory=BaseModelConfig)
    model_config: Optional[HFModelConfig] = None
    checkpoint: CheckpointConfig = field(default_factory=CheckpointConfig)
    profiler: ProfilerConfig = field(default_factory=ProfilerConfig)
    def __post_init__(self):
        """Validate critic configuration parameters."""
        assert self.strategy != MISSING
        # Backward compatibility: build model_config from the deprecated `model` field.
        if self.model_config is None:
            warnings.warn("using model in Critic Config is deprecated, please use model_config instead", stacklevel=2)
            self.model_config = HFModelConfig(
                path=self.model.path,
                tokenizer_path=self.model.tokenizer_path,
                override_config=self.model.override_config,
                external_lib=self.model.external_lib,
                trust_remote_code=self.model.trust_remote_code,
            )
        if not self.use_dynamic_bsz:
            self._check_mutually_exclusive(self.ppo_micro_batch_size, self.ppo_micro_batch_size_per_gpu, "critic")
            if self.ppo_micro_batch_size is not None:
                if self.ppo_mini_batch_size % self.ppo_micro_batch_size != 0:
                    raise ValueError(
                        f"[critic] ppo_mini_batch_size ({self.ppo_mini_batch_size}) must be divisible by "
                        f"ppo_micro_batch_size ({self.ppo_micro_batch_size})"
                    )
    def validate(self, n_gpus: int, train_batch_size: int):
        """Validate critic configuration with runtime parameters.
        Args:
            n_gpus: Total number of GPUs available
            train_batch_size: Training batch size from data config
        Raises:
            ValueError: If train_batch_size is smaller than ppo_mini_batch_size.
        """
        if not self.use_dynamic_bsz:
            if train_batch_size < self.ppo_mini_batch_size:
                raise ValueError(
                    f"train_batch_size ({train_batch_size}) must be >= "
                    f"critic.ppo_mini_batch_size ({self.ppo_mini_batch_size})"
                )
    @staticmethod
    def _check_mutually_exclusive(mbs, mbs_per_gpu, name: str):
        """Validate mutually exclusive micro batch size configuration options.
        Ensures that users don't set both deprecated micro_batch_size and
        the new micro_batch_size_per_gpu parameters simultaneously.
        Args:
            mbs: Deprecated micro batch size parameter value.
            mbs_per_gpu: New micro batch size per GPU parameter value.
            name (str): Configuration section name for error messages.
        Raises:
            ValueError: If both parameters are set or neither is set.
        """
        param = "micro_batch_size"
        param_per_gpu = f"{param}_per_gpu"
        if mbs is None and mbs_per_gpu is None:
            raise ValueError(f"[{name}] Please set at least one of '{name}.{param}' or '{name}.{param_per_gpu}'.")
        if mbs is not None and mbs_per_gpu is not None:
            raise ValueError(
                f"[{name}] You have set both '{name}.{param}' AND '{name}.{param_per_gpu}'. Please remove "
                f"'{name}.{param}' because only '*_{param_per_gpu}' is supported (the former is deprecated)."
            )
@dataclass
class McoreCriticConfig(CriticConfig):
    """Configuration for Megatron-based critic model training.
    The inheritance from CriticConfig provides all base critic configuration plus Megatron-specific settings.
    Args:
        nccl_timeout (int): NCCL timeout in seconds for distributed operations.
        megatron (McoreEngineConfig): Megatron-specific parallelism settings.
        load_weight (bool): Whether to load initial weights.
    """
    strategy: str = "megatron"
    nccl_timeout: int = 600
    megatron: McoreEngineConfig = field(default_factory=McoreEngineConfig)
    load_weight: bool = True
    def validate(self, n_gpus: int, train_batch_size: int):
        """Validate Megatron critic configuration with runtime parameters."""
        # No Megatron-specific runtime checks yet; kept as an explicit hook that
        # delegates to CriticConfig.validate.
        super().validate(n_gpus, train_batch_size)
@dataclass
class FSDPCriticConfig(CriticConfig):
    """Configuration for FSDP-based critic model training.
    The inheritance from CriticConfig provides all base critic configuration plus FSDP-specific settings.
    Args:
        forward_micro_batch_size (int): Forward-only batch size during inference (global).
        forward_micro_batch_size_per_gpu (int): Forward-only batch size during inference (per GPU).
        ulysses_sequence_parallel_size (int): [DEPRECATED] Ulysses sequence parallel size for long sequences.
        grad_clip (float): Gradient clipping for critic updates.
    """
    # Forward batch sizes may be normalized at runtime, hence mutable.
    _mutable_fields = CriticConfig._mutable_fields | {
        "forward_micro_batch_size",
        "forward_micro_batch_size_per_gpu",
    }
    strategy: str = "fsdp"
    forward_micro_batch_size: int = 1
    forward_micro_batch_size_per_gpu: int = 1
    ulysses_sequence_parallel_size: int = 1
    grad_clip: float = 1.0
    def __post_init__(self):
        """Validate FSDP critic configuration parameters."""
        super().__post_init__()
        if self.strategy in {"fsdp", "fsdp2"}:
            if self.ulysses_sequence_parallel_size > 1:
                # NOTE(review): this reads the deprecated `model` field, not
                # `model_config` — confirm which one carries `use_remove_padding`.
                if not self.model.get("use_remove_padding", False):
                    raise ValueError(
                        "When using sequence parallelism for critic, you must enable `use_remove_padding`."
                    )
    def validate(self, n_gpus: int, train_batch_size: int):
        """Validate FSDP critic configuration with runtime parameters."""
        super().validate(n_gpus, train_batch_size)
        if not self.use_dynamic_bsz:
            sp_size = self.ulysses_sequence_parallel_size
            if self.ppo_micro_batch_size is not None:
                # Every GPU must receive at least one sample after sequence-parallel sharding.
                if self.ppo_micro_batch_size * sp_size < n_gpus:
                    raise ValueError(
                        f"critic.ppo_micro_batch_size ({self.ppo_micro_batch_size}) * "
                        f"ulysses_sequence_parallel_size ({sp_size}) must be >= n_gpus ({n_gpus})"
                    )
@dataclass
class FSDPCriticModelCfg(BaseModelConfig):
    """FSDP-enabled critic model configuration.
    Inherits base critic settings and adds distributed-memory and LoRA options.
    Args:
        use_shm (bool): Whether to use shared memory for loading the model.
        enable_activation_offload (bool): Offload activations to CPU to reduce GPU memory usage.
        use_remove_padding (bool): Use remove-padding optimization (saves compute).
        enable_gradient_checkpointing (bool): Enable gradient checkpointing for memory efficiency.
        fsdp_config (FSDPEngineConfig): FSDP-specific configuration block.
        lora_rank (int): Set to positive value to enable LoRA (e.g., 32).
        lora_alpha (int): LoRA scaling factor.
        target_modules (Union[str, List[str]]): LoRA target modules: "all-linear" or list of layer names.
        tiled_mlp (dict): TiledMLP settings ("enabled" flag and "num_shards") for
            memory-efficient MLP computation.
    """
    use_shm: bool = False
    enable_activation_offload: bool = False
    use_remove_padding: bool = False
    enable_gradient_checkpointing: bool = True
    fsdp_config: FSDPEngineConfig = field(default_factory=FSDPEngineConfig)
    lora_rank: int = 0
    lora_alpha: int = 16
    target_modules: str | list[str] = "all-linear"
    # TiledMLP configuration for memory-efficient MLP computation
    tiled_mlp: dict = field(default_factory=lambda: {"enabled": False, "num_shards": 4})
| verl__workers__config__critic.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from dataclasses import dataclass, field
from typing import Any, Callable, Literal, Optional
from verl.base_config import BaseConfig
from verl.trainer.config import CheckpointConfig
from ...utils.profiler import ProfilerConfig
from .model import HFModelConfig
from .optimizer import OptimizerConfig
# Public API of this module.
__all__ = [
    "FSDPEngineConfig",
    "McoreEngineConfig",
    "TrainingWorkerConfig",
    "VeOmniEngineConfig",
    "EngineConfig",
    "EngineRouterReplayConfig",
]
# TODO: rename to RouterReplayConfig after removing the legacy implementation
@dataclass
class EngineRouterReplayConfig(BaseConfig):
    """Configuration for router replay in MoE models.
    This configuration controls the routing behavior for Mixture of Experts (MoE) models,
    allowing for deterministic training through route recording and replay.
    Args:
        mode (str): Router replay mode. Options: 'disabled', 'R2', 'R3'.
            - 'disabled': No router replay functionality
            - 'R2': Use Router Replay routing strategy
            - 'R3': Use Rollout Router Replay routing strategy
        record_file (Optional[str]): File path to save recorded routing decisions.
            Presumably consumed by the 'R2'/'R3' strategies — confirm against the engine implementation.
        replay_file (Optional[str]): File path to load recorded routing decisions for replay.
            Presumably consumed by the 'R2'/'R3' strategies — confirm against the engine implementation.
    """
    mode: str = "disabled"
    record_file: Optional[str] = None
    replay_file: Optional[str] = None
    def __post_init__(self):
        """Validate router replay configuration."""
        valid_modes = ["disabled", "R2", "R3"]
        if self.mode not in valid_modes:
            raise ValueError(f"Invalid router_replay mode: {self.mode}. Must be one of {valid_modes}")
@dataclass
class EngineConfig(BaseConfig):
    """Base configuration shared by all training-engine backends (FSDP, Megatron, VeOmni).
    The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
    """
    # Fields that trainer code is allowed to rewrite after construction
    # (e.g. batch-size normalization).
    _mutable_fields = BaseConfig._mutable_fields | {
        "use_dynamic_bsz",
        "max_token_len_per_gpu",
        "micro_batch_size_per_gpu",
        "infer_max_token_len_per_gpu",
        "infer_micro_batch_size_per_gpu",
        "use_fused_kernels",
        "use_remove_padding",
    }
    # whether to offload param
    param_offload: bool = False
    # whether to offload optimizer
    optimizer_offload: bool = False
    # whether to offload grad
    grad_offload: bool = False
    # whether the engine is forward only (e.g., ref policy)
    forward_only: bool = False
    # the strategy (backend); set by subclasses
    strategy: Optional[str] = None
    # model dtype
    dtype: str = "bfloat16"  # ["bfloat16", "float16"]
    # whether to use dynamic bsz
    use_dynamic_bsz: bool = True
    # for training
    max_token_len_per_gpu: Optional[int] = None
    micro_batch_size_per_gpu: Optional[int] = None
    # for inference
    infer_max_token_len_per_gpu: Optional[int] = None
    infer_micro_batch_size_per_gpu: Optional[int] = None
    # whether use fuse lm head kernel
    use_fused_kernels: bool = False
    # TODO (this may conflict with the one in model config)
    use_remove_padding: bool = True
    # random seed for reproducibility
    seed: int = 42
    # full determinism trades performance for reproducibility
    full_determinism: bool = False
    # MoE router record/replay settings
    router_replay: EngineRouterReplayConfig = field(default_factory=EngineRouterReplayConfig)
    def __post_init__(self):
        """No validation yet; see the TODO below."""
        pass
        # TODO: turn on this check after we reorg config
        # if self.use_dynamic_bsz:
        #     assert self.max_token_len_per_gpu is not None
        # else:
        #     assert self.micro_batch_size_per_gpu is not None
@dataclass
class McoreEngineConfig(EngineConfig):
    """Configuration for Megatron parallelism.
    The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
    Args:
        param_offload (bool): Whether to offload parameters to CPU.
        grad_offload (bool): Whether to offload gradients to CPU.
        optimizer_offload (bool): Whether to offload optimizer states to CPU.
        tensor_model_parallel_size (int): Tensor model parallel size.
        expert_model_parallel_size (int): Expert model parallel size for MoE models.
        expert_tensor_parallel_size (Optional[int]): Expert tensor parallel size for MoE models.
        pipeline_model_parallel_size (int): Pipeline model parallel size.
        virtual_pipeline_model_parallel_size (Optional[int]): Virtual pipeline model parallel size
            for interleaved scheduling.
        context_parallel_size (int): Context parallel size for long sequences.
        sequence_parallel (bool): Whether to enable sequence parallelism.
        use_distributed_optimizer (bool): Whether to use distributed optimizer.
        use_dist_checkpointing (bool): Whether to use distributed checkpointing.
        dist_checkpointing_path (Optional[str]): Path for distributed checkpointing.
        dist_checkpointing_prefix (str): Prefix used for distributed checkpoint artifacts.
        dist_ckpt_optim_fully_reshardable (bool): Use fully reshardable optimizer checkpoints.
        distrib_optim_fully_reshardable_mem_efficient (bool): Use memory-efficient fully reshardable format.
        seed (int): Random seed for reproducibility.
        override_ddp_config (dict[str, Any]): Override configuration for DDP.
        override_transformer_config (dict[str, Any]): Override configuration for transformer.
        override_mcore_model_config (dict[str, Any]): Override configuration for the mcore model.
        use_mbridge (bool): Whether to use MBridge for communication.
        dtype (str): Mixed precision training param dtype, default "bfloat16"
    """
    # sequence_parallel is not listed as a frozen field for auto-correction purpose
    _mutable_fields = EngineConfig._mutable_fields | {"sequence_parallel"}
    # mcore parallelism
    tensor_model_parallel_size: int = 1
    expert_model_parallel_size: int = 1
    expert_tensor_parallel_size: Optional[int] = None
    pipeline_model_parallel_size: int = 1
    virtual_pipeline_model_parallel_size: Optional[int] = None
    context_parallel_size: int = 1
    sequence_parallel: bool = True
    use_distributed_optimizer: bool = True
    use_dist_checkpointing: bool = False
    dist_checkpointing_path: Optional[str] = None
    dist_checkpointing_prefix: str = ""
    dist_ckpt_optim_fully_reshardable: bool = False
    distrib_optim_fully_reshardable_mem_efficient: bool = False
    override_ddp_config: dict[str, Any] = field(default_factory=dict)
    override_transformer_config: dict[str, Any] = field(default_factory=dict)
    override_mcore_model_config: dict[str, Any] = field(default_factory=dict)
    use_mbridge: bool = True
    vanilla_mbridge: bool = True
    strategy: str = "megatron"
    def __post_init__(self) -> None:
        """Config validation logics go here.
        Fix: this text previously sat in a string literal AFTER the super() call,
        where it was a no-op expression rather than the method docstring.
        """
        super().__post_init__()
        assert self.strategy == "megatron"
        assert self.dtype in ["bfloat16", "float16"], f"dtype {self.dtype} not supported"
        # Sequence parallelism is meaningless with TP == 1; auto-correct and warn.
        if self.tensor_model_parallel_size == 1:
            warnings.warn("set sequence parallel to false as TP size is 1", stacklevel=2)
            self.sequence_parallel = False
@dataclass
class FSDPEngineConfig(EngineConfig):
    """Configuration for FSDP (Fully Sharded Data Parallel).
    The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
    Args:
        wrap_policy (Dict[str, Any]): Configuration for FSDP wrap policy.
        param_offload (bool): Whether to offload parameters to CPU, default False
        optimizer_offload (bool): Whether to offload optimizer states to CPU, default False
        offload_policy (bool): Whether to offload policy model parameters, default False
        reshard_after_forward (bool): Whether to reshard parameters after forward pass, default True
        fsdp_size (int): FSDP group size. -1 means use all available GPUs.
        forward_prefetch (bool): Whether to prefetch parameters for next forward pass, default False
        model_dtype (str): Model data type used to initialize the transformers model. default "fp32"
        use_orig_params (bool): Whether to use original parameters when initialize FSDP1, default False
        seed (int): Random seed for reproducibility.
        full_determinism (bool): If true, enable_full_determinism is called to ensure reproducible results
            in distributed training. Important: this will negatively impact performance, so only use it for
            debugging.
        mixed_precision (Optional[dict[str, Any]]): Mixed precision configuration for FSDP, default None
        dtype (str): Mixed precision training param dtype, default "bfloat16"
        ulysses_sequence_parallel_size (int): Ulysses sequence parallel size, default 1
        entropy_from_logits_with_chunking (bool): Compute entropy from logits in chunks, default False
        use_torch_compile (bool): Whether to use torch.compile, default True
        entropy_checkpointing (bool): Recompute entropy under checkpointing, default False
    """
    # ulysses_sequence_parallel_size is mutable for backward compatibility
    _mutable_fields = EngineConfig._mutable_fields | {"ulysses_sequence_parallel_size"}
    # fsdp specific flags
    wrap_policy: dict[str, Any] = field(default_factory=dict)
    offload_policy: bool = False
    reshard_after_forward: bool = True
    fsdp_size: int = -1
    forward_prefetch: bool = False
    model_dtype: str = "fp32"
    use_orig_params: bool = False
    mixed_precision: Optional[dict[str, Any]] = None
    ulysses_sequence_parallel_size: int = 1
    entropy_from_logits_with_chunking: bool = False
    use_torch_compile: bool = True
    entropy_checkpointing: bool = False
    strategy: str = "fsdp"
    def __post_init__(self):
        """Validate FSDP engine configuration parameters."""
        super().__post_init__()
        assert self.strategy in ["fsdp", "fsdp2"], f"strategy {self.strategy} not supported"
@dataclass
class VeOmniEngineConfig(EngineConfig):
    """Configuration for VeOmni.
    The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
    Args:
        wrap_policy (Dict[str, Any]): Configuration for FSDP wrap policy.
        param_offload (bool): Whether to offload parameters to CPU, default False
        optimizer_offload (bool): Whether to offload optimizer states to CPU, default False
        offload_policy (bool): Whether to offload policy model parameters, default False
        reshard_after_forward (bool): Whether to reshard parameters after forward pass, default True
        fsdp_size (int): FSDP group size. -1 means use all available GPUs, default -1
        ulysses_parallel_size (int): Ulysses sequence parallel size, default 1
        expert_parallel_size (int): Expert parallel size, default 1
        init_device (str): Device to initialize model weights.
            1. `cpu`: Init parameters on CPU in rank0 only.
            2. `cuda`: Init parameters on GPU.
            3. `meta`: Init parameters on meta.
            4. `npu`: Init parameters on Ascend NPU.
            default "meta"
        enable_full_shard (bool): Enable fully shard for FSDP training (ZeRO-3), default False
        ckpt_manager (str): Checkpoint manager backend; only "dcp" is supported.
        load_checkpoint_path (Optional[str]): Checkpoint path to resume from, default None
        enable_fsdp_offload (bool): Enable CPU offload for FSDP1, default False
        enable_reentrant (bool): Use reentrant gradient checkpointing, default False
        attn_implementation (str): Attention implementation to use.
            1. `eager`
            2. `sdpa`
            3. `flash_attention_2`
            4. `flash_attention_3`
            5. `veomni_flash_attention_2_with_sp`
            6. `veomni_flash_attention_3_with_sp`
            7. `native-sparse`
            default "flash_attention_2"
            Note: In case VeOmni add more attn_implementation, please check https://github.com/ByteDance-Seed/VeOmni/
        moe_implementation (str): MoE implementation to use.
            1. `eager`
            2. `fused`
            default "fused"
            Note: In case VeOmni add more moe_implementation, please check https://github.com/ByteDance-Seed/VeOmni/
        force_use_huggingface (bool): Force loading model from huggingface, default False
        activation_gpu_limit (float): When enabling activation offload, `activation_gpu_limit` GB
            activations are allowed to reserve on GPU, default 0.0
        basic_modules (list[str]): List of basic modules to use, default empty list
        forward_prefetch (bool): Whether to prefetch parameters for next forward pass, default False
        use_orig_params (bool): Whether to use original parameters when initialize FSDP1, default False
        seed (int): Random seed for reproducibility.
        full_determinism (bool): If true, enable_full_determinism is called to ensure reproducible results
            in distributed training. Important: this will negatively impact performance, so only use it for
            debugging.
        mixed_precision (bool): Whether to enable mixed precision training, default False
    """
    wrap_policy: dict[str, Any] = field(default_factory=dict)
    offload_policy: bool = False
    reshard_after_forward: bool = True
    forward_prefetch: bool = False
    use_orig_params: bool = False
    entropy_from_logits_with_chunking: bool = False
    use_torch_compile: bool = True
    entropy_checkpointing: bool = False
    strategy: str = "veomni"
    fsdp_size: int = -1
    ulysses_parallel_size: int = 1
    expert_parallel_size: int = 1
    seed: int = 42
    full_determinism: bool = False
    # NOTE: unlike FSDPEngineConfig, mixed_precision here is a plain flag, not a dict.
    mixed_precision: bool = False
    init_device: str = "meta"
    enable_full_shard: bool = False
    ckpt_manager: Literal["dcp"] = "dcp"
    load_checkpoint_path: Optional[str] = None
    enable_fsdp_offload: bool = False
    enable_reentrant: bool = False
    attn_implementation: str = "flash_attention_2"
    moe_implementation: str = "fused"
    force_use_huggingface: bool = False
    activation_gpu_limit: float = 0.0
    basic_modules: Optional[list[str]] = field(default_factory=list)
    def __post_init__(self):
        """Validate VeOmni engine configuration parameters."""
        super().__post_init__()
        assert self.strategy in ["veomni"], f"strategy {self.strategy} not supported"
@dataclass
class TrainingWorkerConfig(BaseConfig):
    """Bundle of configs a generic training worker needs: model, engine, optimizer,
    checkpoint and profiler. All fields default to None and are expected to be
    filled in by the caller."""
    model_type: Optional[str] = None  # model type (language_model/value_model)
    model_config: Optional[HFModelConfig] = None
    engine_config: Optional[EngineConfig] = None
    optimizer_config: Optional[OptimizerConfig] = None
    checkpoint_config: Optional[CheckpointConfig] = None
    profiler_config: Optional[ProfilerConfig] = None
    # automatically select engine and optimizer function.
    # This function takes model config and the device name as parameter.
    # Users can pass in a higher-order function to take more parameters
    auto_select_engine_optim_fn: Optional[Callable[["HFModelConfig", str], tuple["EngineConfig", "OptimizerConfig"]]] = None
| verl__workers__config__engine.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PEFT configuration of Megatron for VERL."""
def get_peft_cls(model_config, bridge, provider, dtype=None):
    """Get PEFT class from model config.
    Args:
        model_config: Model configuration object; LoRA settings are read from its ``lora`` dict.
        bridge: Megatron-Bridge AutoBridge instance.
        provider: Provider instance.
        dtype: Fallback dtype for LoRA weights when ``lora.dtype`` is not set.
    Returns:
        PEFT configuration object (LoRA, VLMLoRA, CanonicalLoRA, DoRA) or None when LoRA is disabled.
    Raises:
        ValueError: If ``lora.type`` is not one of 'lora', 'vlm_lora', 'canonical_lora', 'dora'.
    """
    if not hasattr(model_config, "lora"):
        return None
    lora_cfg = model_config.lora
    # Only enable if rank > 0
    if lora_cfg.get("rank", 0) <= 0:
        return None
    assert bridge is not None and provider is not None, "LoRA/PEFT only supported via Megatron-Bridge"
    from verl.models.mcore.bridge import CanonicalLoRA, DoRA, LoRA, VLMLoRA
    lora_dtype = lora_cfg.get("dtype", dtype)
    if lora_dtype is not None:
        from verl.utils.torch_dtypes import PrecisionType
        lora_dtype = PrecisionType.to_dtype(lora_dtype)
    lora_type = lora_cfg.get("type", "lora")
    # Keyword arguments shared by every PEFT flavor.
    common_kwargs = {
        "dim": lora_cfg.get("rank"),
        "alpha": lora_cfg.get("alpha", 32),
        "dropout": lora_cfg.get("dropout", 0.0),
        "dropout_position": lora_cfg.get("dropout_position", "pre"),
        "lora_A_init_method": lora_cfg.get("lora_A_init_method", "xavier"),
        "lora_B_init_method": lora_cfg.get("lora_B_init_method", "zero"),
        "exclude_modules": lora_cfg.get("exclude_modules", []),
    }
    # Default target modules for the fused-projection LoRA variants.
    fused_targets = ["linear_qkv", "linear_proj", "linear_fc1", "linear_fc2"]
    # Fix: the original used an inconsistent `if`/`if`/`elif` chain and, for an
    # unknown lora type, printed "Enabling ..." while silently returning None.
    if lora_type == "lora":
        peft_cls = LoRA(
            target_modules=lora_cfg.get("target_modules", fused_targets),
            a2a_experimental=lora_cfg.get("a2a_experimental", False),
            lora_dtype=lora_dtype,
            **common_kwargs,
        )
    elif lora_type == "vlm_lora":
        peft_cls = VLMLoRA(
            target_modules=lora_cfg.get("target_modules", fused_targets),
            a2a_experimental=lora_cfg.get("a2a_experimental", False),
            lora_dtype=lora_dtype,
            freeze_vision_model=lora_cfg.get("freeze_vision_model", True),
            freeze_vision_projection=lora_cfg.get("freeze_vision_projection", True),
            freeze_language_model=lora_cfg.get("freeze_language_model", True),
            **common_kwargs,
        )
    elif lora_type == "canonical_lora":
        peft_cls = CanonicalLoRA(
            target_modules=lora_cfg.get(
                "target_modules",
                [
                    "linear_q",
                    "linear_k",
                    "linear_v",
                    "linear_proj",
                    "linear_fc1_up",
                    "linear_fc1_gate",
                    "linear_fc2",
                ],
            ),
            **common_kwargs,
        )
    elif lora_type == "dora":
        peft_cls = DoRA(
            target_modules=lora_cfg.get("target_modules", fused_targets),
            **common_kwargs,
        )
    else:
        raise ValueError(
            f"Unknown lora type: {lora_type!r}. Expected one of 'lora', 'vlm_lora', 'canonical_lora', 'dora'."
        )
    print(
        f"Enabling {lora_type.upper()} with rank={lora_cfg.get('rank')}, "
        f"alpha={lora_cfg.get('alpha')}, dropout={lora_cfg.get('dropout')}"
    )
    return peft_cls
# Public API of this module.
__all__ = [
    "get_peft_cls",
]
| verl__workers__config__megatron_peft.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Optional
from omegaconf import MISSING
from transformers import AutoConfig
from verl.base_config import BaseConfig
from verl.utils import hf_processor, hf_tokenizer
from verl.utils.fs import copy_to_local
from verl.utils.import_utils import import_external_libs
from verl.utils.model import get_generation_config, update_model_config
# Public API of this module.
__all__ = ["HFModelConfig", "MtpConfig"]
@dataclass
class MtpConfig(BaseConfig):
    """
    Configuration for MTP (multi-token prediction) model.
    Args:
        enable: Enable loading and saving of MTP parameters, but do not use them.
        enable_train: Whether to enable using MTP parameters during training.
        enable_rollout: Whether to enable using MTP parameters during rollout.
    Training parameters:
        detach_encoder: Whether to detach encoder parameters during MTP training.
        mtp_loss_scaling_factor: Loss scaling factor during MTP training.
    vLLM rollout parameters:
        method: "mtp"
        num_speculative_tokens: 1
    SGLang rollout parameters:
        speculative_algorithm: EAGLE
        speculative_num_steps: 3
        speculative_eagle_topk: 1
        speculative_num_draft_tokens: 4
    """
    enable: bool = False
    enable_train: bool = False
    enable_rollout: bool = False
    detach_encoder: bool = False
    mtp_loss_scaling_factor: float = 0.1
    # SGLang speculative-decoding settings
    speculative_algorithm: str = "EAGLE"
    speculative_num_steps: int = 3
    speculative_eagle_topk: int = 1
    speculative_num_draft_tokens: int = 4
    # vLLM speculative-decoding settings
    method: str = "mtp"
    num_speculative_tokens: int = 1
@dataclass
class HFModelConfig(BaseConfig):
    """HuggingFace model configuration wrapper.
    Resolves model/tokenizer/config paths to local copies, loads the tokenizer,
    processor, generation config and AutoConfig, and applies user overrides.
    Note: constructing this config has side effects (file copies via copy_to_local
    and tokenizer/processor loading) — see __post_init__.
    """
    # note that we separate model_path, model_config_path and tokenizer_path in case they are different
    _mutable_fields = {
        "hf_config_path",
        "tokenizer_path",
        "hf_config",
        "generation_config",
        "tokenizer",
        "processor",
        "local_path",
        "architectures",
        "local_hf_config_path",
        "local_tokenizer_path",
    }
    path: str = MISSING
    local_path: Optional[str] = None
    hf_config_path: Optional[str] = None
    local_hf_config_path: Optional[str] = None
    tokenizer_path: Optional[str] = None
    local_tokenizer_path: Optional[str] = None
    # whether to load tokenizer. This is useful when we only want to load model config
    load_tokenizer: bool = True
    hf_config: Any = None
    generation_config: Any = None
    tokenizer: Any = None
    processor: Any = None
    # whether to use shared memory
    use_shm: bool = False
    trust_remote_code: bool = False
    # custom chat template for the model
    custom_chat_template: Optional[str] = None
    external_lib: Optional[str] = None
    override_config: dict = field(default_factory=dict)
    enable_gradient_checkpointing: bool = True
    enable_activation_offload: bool = False
    use_remove_padding: bool = True
    # TODO: unify fsdp and megatron lora config
    # fsdp lora related. We may setup a separate config later
    lora_rank: int = 0
    lora_alpha: int = 16
    target_modules: Optional[Any] = "all-linear"  # allow both "all-linear" and ["q_proj","k_proj"]
    target_parameters: Optional[list[str]] = None  # for lora adapter on nn.Parameter
    exclude_modules: Optional[str] = None
    # megatron lora config
    lora: dict[str, Any] = field(default_factory=dict)
    # path to pre-trained LoRA adapter to load for continued training
    lora_adapter_path: Optional[str] = None
    use_liger: bool = False
    use_fused_kernels: bool = False
    fused_kernel_options: dict = field(default_factory=dict)
    # TiledMLP configuration for memory-efficient MLP computation
    tiled_mlp: dict = field(default_factory=lambda: {"enabled": False, "num_shards": 4})
    architectures: Optional[list[str]] = None
    mtp: MtpConfig = field(default_factory=MtpConfig)
    def __post_init__(self):
        """Resolve paths, load tokenizer/processor and build the HF config.
        Side effects: copies model assets to a local (optionally shared-memory)
        path via copy_to_local — presumably this may fetch remote paths, so it
        can perform network/file I/O (confirm copy_to_local semantics).
        """
        import_external_libs(self.external_lib)
        # Default the config/tokenizer paths to the model path when unset.
        if self.hf_config_path is None:
            self.hf_config_path = self.path
        if self.tokenizer_path is None:
            self.tokenizer_path = self.path
        self.local_path = copy_to_local(self.path, use_shm=self.use_shm)
        # construct tokenizer
        if self.load_tokenizer:
            self.local_tokenizer_path = copy_to_local(self.tokenizer_path, use_shm=self.use_shm)
            self.tokenizer = hf_tokenizer(self.local_tokenizer_path, trust_remote_code=self.trust_remote_code)
            self.processor = hf_processor(self.local_tokenizer_path, trust_remote_code=self.trust_remote_code)
            if self.custom_chat_template is not None:
                # Prefer the processor's chat template when a processor exists.
                if self.processor is not None:
                    self.processor.chat_template = self.custom_chat_template
                else:
                    self.tokenizer.chat_template = self.custom_chat_template
        self.local_hf_config_path = copy_to_local(self.hf_config_path, use_shm=self.use_shm)
        self.generation_config = get_generation_config(
            self.local_hf_config_path, trust_remote_code=self.trust_remote_code
        )
        # construct hf_config
        attn_implementation = self.override_config.get("attn_implementation", "flash_attention_2")
        self.hf_config = AutoConfig.from_pretrained(
            self.local_hf_config_path, trust_remote_code=self.trust_remote_code, attn_implementation=attn_implementation
        )
        override_config_kwargs = {}
        # Align special-token ids with the loaded tokenizer.
        if self.tokenizer is not None:
            override_config_kwargs.update(
                {
                    "bos_token_id": self.tokenizer.bos_token_id,
                    "eos_token_id": self.tokenizer.eos_token_id,
                    "pad_token_id": self.tokenizer.pad_token_id,
                }
            )
        # TODO: (vermouth1992). self.config.model in megatron differs from that of fsdp in the override_config.
        override_config = (
            self.override_config["model_config"] if "model_config" in self.override_config else self.override_config
        )
        override_config_kwargs.update(override_config)
        update_model_config(self.hf_config, override_config_kwargs=override_config_kwargs)
        self.share_embeddings_and_output_weights = getattr(self.hf_config, "tie_word_embeddings", False)
        # get model architectures
        self.architectures = getattr(self.hf_config, "architectures", None)
        assert self.architectures is not None and len(self.architectures) == 1, (
            "Expect only one architecture, got {}".format(self.architectures)
        )
        # per model patch
        if getattr(self.hf_config, "model_type", None) == "kimi_vl":
            self.hf_config.text_config.topk_method = "greedy"
        # Ensure target_modules is a str or list[str] (only if not None)
        if self.target_modules is not None:
            if not isinstance(self.target_modules, (str | list)):
                raise TypeError(
                    "target_modules must be a string or a list of strings, "
                    f"but got {type(self.target_modules).__name__}"
                )
            if isinstance(self.target_modules, list):
                for x in self.target_modules:
                    if not isinstance(x, str):
                        raise TypeError(
                            f"All elements in target_modules list must be strings, but found {type(x).__name__}"
                        )
    def get_processor(self):
        """Return the multimodal processor if available, otherwise fall back to the tokenizer."""
        return self.processor if self.processor is not None else self.tokenizer
| verl__workers__config__model.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from dataclasses import dataclass
from typing import Optional
from omegaconf import MISSING
from verl.base_config import BaseConfig
__all__ = ["OptimizerConfig", "FSDPOptimizerConfig", "McoreOptimizerConfig", "build_optimizer", "VeOmniOptimizerConfig"]
@dataclass
class OptimizerConfig(BaseConfig):
    """Base optimizer configuration.

    Args:
        lr (float): learning rate. Must be specified.
        lr_warmup_steps_ratio (float): Warmup steps ratio; total steps will be injected at runtime.
        total_training_steps (int): Total training steps (must be overridden at runtime).
        weight_decay (float): Weight decay factor.
        lr_warmup_steps (Optional[int]): Number of warmup steps; None or a negative value delegates to
            lr_warmup_steps_ratio (TODO confirm which sentinel the scheduler builder checks).
        betas (tuple[float, float]): Adam-style (beta1, beta2) coefficients.
        clip_grad (float): Gradient clipping norm.
        grad_clip (Optional[float]): Deprecated alias of ``clip_grad``; when set it overrides it.
    """

    _mutable_fields = {"clip_grad", "total_training_steps", "lr_warmup_steps"}

    lr: float = 1e-3
    lr_warmup_steps_ratio: float = 0.0
    total_training_steps: int = -1
    weight_decay: float = 0.01
    lr_warmup_steps: Optional[int] = -1
    betas: tuple[float, float] = (0.9, 0.999)
    clip_grad: float = 1.0
    # deprecate grad_clip
    grad_clip: Optional[float] = None

    def __post_init__(self):
        # Chain up to BaseConfig.__post_init__ so base-class initialization runs for this
        # class and for subclasses that call super().__post_init__() (e.g. FSDPOptimizerConfig).
        # Previously the chain silently stopped here, skipping BaseConfig's post-init.
        super().__post_init__()
        assert self.lr != MISSING
        if self.grad_clip is not None:
            warnings.warn("`grad_clip` is deprecated, use `clip_grad` instead.", DeprecationWarning, stacklevel=2)
            self.clip_grad = self.grad_clip
@dataclass
class VeOmniOptimizerConfig(OptimizerConfig):
    """VeOmni optimizer configuration extending base OptimizerConfig.

    Args:
        optimizer (str): Optimizer name; default is "adamw".
        lr (float): Learning rate.
        lr_min (float): Minimum learning rate.
        lr_start (float): Starting learning rate for warmup.
        lr_decay_ratio (float): LR decay ratio.
        lr_scheduler_type (str): LR scheduler type: "constant" or "cosine".
        override_optimizer_config (Optional[dict]): Extra keyword arguments forwarded to the
            optimizer constructor.
    """

    # inherit mutability rules from the base config
    _mutable_fields = OptimizerConfig._mutable_fields.copy()

    optimizer: str = "adamw"
    lr_min: float = 0.0
    lr_start: float = 0.0
    lr_decay_ratio: float = 1.0
    lr_scheduler_type: str = "constant"
    override_optimizer_config: Optional[dict] = None
@dataclass
class FSDPOptimizerConfig(OptimizerConfig):
    """FSDP optimizer configuration extending base OptimizerConfig.

    Args:
        optimizer (str): Optimizer class name (e.g., "AdamW", "AdamW8bit", "_AdamW").
        optimizer_impl (str): Module path to import optimizer from (e.g., "torch.optim", "torchao.optim",
            "bitsandbytes.optim").
        lr (float): Learning rate.
        min_lr_ratio (Optional[float]): Minimum LR ratio for cosine schedule.
        lr_scheduler_type (str): LR scheduler type: "constant" or "cosine".
        num_cycles (float): Number of cosine cycles in LR schedule.
        warmup_style (Optional[str]): Deprecated alias of ``lr_scheduler_type``.
        override_optimizer_config (Optional[dict]): Extra keyword arguments forwarded verbatim to the
            optimizer constructor (see ``build_optimizer``).
    """

    # lr_scheduler_type must be mutable: the deprecated `warmup_style` overwrites it in __post_init__.
    _mutable_fields = OptimizerConfig._mutable_fields.copy()
    _mutable_fields.add("lr_scheduler_type")

    optimizer: str = "AdamW"
    optimizer_impl: str = "torch.optim"
    min_lr_ratio: Optional[float] = None
    # deprecate warmup_style
    warmup_style: Optional[str] = None
    lr_scheduler_type: str = "constant"
    num_cycles: float = 0.5
    override_optimizer_config: Optional[dict] = None

    def __post_init__(self):
        # Honor the deprecated `warmup_style` by copying it into lr_scheduler_type before validating.
        if self.warmup_style is not None:
            assert self.warmup_style in ["constant", "cosine"]
            warnings.warn(
                "`warmup_style` is deprecated, use `lr_scheduler_type` instead.", DeprecationWarning, stacklevel=2
            )
            self.lr_scheduler_type = self.warmup_style
        assert self.lr_scheduler_type in ["constant", "cosine"]
        return super().__post_init__()
@dataclass
class McoreOptimizerConfig(OptimizerConfig):
    """Mcore optimizer configuration extending base OptimizerConfig.

    Args:
        optimizer (str): Optimizer name; default is "adam".
        lr (float): Learning rate.
        clip_grad (float): Gradient clipping norm.
        lr_warmup_init (float): Initial learning rate for warmup; defaults to 0.0.
        lr_decay_steps (Optional[int]): Number of decay steps.
        lr_decay_style (str): LR decay style: "constant", "linear", "cosine", or "inverse_square_root".
        min_lr (float): Minimum learning rate.
        weight_decay_incr_style (str): Weight decay increment style: "constant" or "cosine".
        lr_wsd_decay_style (str): Warmup-stable-decay (WSD) schedule decay style: "constant",
            "exponential", or "cosine" (matches Megatron-LM's ``--lr-wsd-decay-style``).
        lr_wsd_decay_steps (Optional[int]): Number of steps for the WSD decay phase.
        use_checkpoint_opt_param_scheduler (bool): Whether to use checkpoint optimizer parameter scheduler.
        override_optimizer_config (Optional[dict]): Extra keyword arguments forwarded to the optimizer.
    """

    optimizer: str = "adam"
    lr_warmup_init: float = 0.0
    lr_decay_steps: Optional[int] = None
    lr_decay_style: str = "linear"
    min_lr: float = 0.0
    weight_decay_incr_style: str = "constant"
    lr_wsd_decay_style: str = "exponential"
    lr_wsd_decay_steps: Optional[int] = None
    use_checkpoint_opt_param_scheduler: bool = False
    override_optimizer_config: Optional[dict] = None
def build_optimizer(parameters, config: FSDPOptimizerConfig):
    """Instantiate an optimizer resolved dynamically from the configuration.

    ``config.optimizer_impl`` names the module to import (e.g. "torch.optim",
    "torchao.optim", "bitsandbytes.optim") and ``config.optimizer`` the class
    inside it (e.g. "AdamW", "_AdamW", "AdamW8bit").

    Args:
        parameters: Model parameters to optimize.
        config: FSDPOptimizerConfig with optimizer settings.

    Returns:
        Optimizer instance.

    Raises:
        ImportError: if ``config.optimizer_impl`` cannot be imported.
        AttributeError: if ``config.optimizer`` does not exist in that module.

    Examples:
        # PyTorch AdamW
        config.optimizer_impl = "torch.optim"
        config.optimizer = "AdamW"

        # TorchAO AdamW with bf16 stochastic rounding
        config.optimizer_impl = "torchao.optim"
        config.optimizer = "_AdamW"
        config.override_optimizer_config = {"bf16_stochastic_round": True}

        # BitsAndBytes AdamW 8bit
        config.optimizer_impl = "bitsandbytes.optim"
        config.optimizer = "AdamW8bit"
    """
    import importlib

    kwargs = {
        "lr": config.lr,
        "weight_decay": config.weight_decay,
    }
    # Adam-family optimizers (including AdEMAMix variants) accept beta coefficients.
    name_lc = config.optimizer.lower()
    if any(tag in name_lc for tag in ("adam", "ademamix")):
        kwargs["betas"] = config.betas
    # User-supplied overrides win over the defaults above.
    if config.override_optimizer_config is not None:
        kwargs.update(config.override_optimizer_config)

    try:
        module = importlib.import_module(config.optimizer_impl)
        optimizer_cls = getattr(module, config.optimizer)
    except ImportError as e:
        raise ImportError(
            f"Failed to import module '{config.optimizer_impl}'. Make sure the package is installed. Error: {e}"
        ) from e
    except AttributeError as e:
        raise AttributeError(
            f"Optimizer '{config.optimizer}' not found in module '{config.optimizer_impl}'. "
            f"Available optimizers: {dir(module)}"
        ) from e

    return optimizer_cls(parameters, **kwargs)
| verl__workers__config__optimizer.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from dataclasses import dataclass, field
from typing import Optional
from verl.base_config import BaseConfig
from verl.trainer.config.config import ModuleConfig
from .rollout import RolloutConfig
__all__ = ["SandboxFusionConfig", "RewardConfig", "RewardModelConfig"]
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
@dataclass
class RewardManagerConfig(BaseConfig):
    """Configuration for reward manager.

    A reward manager defines the mechanism of computing rule-based reward and handling different reward sources.

    Args:
        source (str): Source of the reward manager. Options: ``"register"``, ``"importlib"``. Default: ``"register"``.
        name (str, optional):
            - When ``source`` is ``"register"``, the name is used in ``get_reward_manager_cls(name)``.
              See ``verl/experimental/reward/reward_manager.py`` for options. Default: ``"naive"``.
            - When ``source`` is ``"importlib"``, the name is used in ``getattr(module, name)``,
              e.g., ``"DAPORewardManager"``.
        module (ModuleConfig, optional): Optional configuration for the external module defining the reward manager.
    """

    source: str = "register"
    name: str = "naive"
    module: Optional[ModuleConfig] = field(default_factory=ModuleConfig)

    def __post_init__(self):
        super().__post_init__()
        # Validate eagerly so a bad reward-manager reference fails at config time, not mid-training.
        if self.source == "register":
            from verl.experimental.reward_loop.reward_manager.registry import REWARD_MANAGER

            assert self.name in REWARD_MANAGER, (
                f"Reward manager is not registered: {self.name=} ,{REWARD_MANAGER.keys()=}"
            )
        elif self.source == "importlib":
            # NOTE: The existence is not checked since it depends on which machine the config is initialized on.
            assert self.module is not None and self.module.path is not None, (
                "When source is importlib, module.path should be set."
            )
@dataclass
class SandboxFusionConfig(BaseConfig):
    """Configuration for cloud/local sandbox fusion.

    Args:
        url (Optional[str]): Cloud/local function URL for sandbox execution; None disables sandbox use.
        max_concurrent (int): Max concurrent requests allowed to sandbox.
        memory_limit_mb (int): Max memory limit for each sandbox process in MB.
    """

    url: Optional[str] = None
    max_concurrent: int = 64
    memory_limit_mb: int = 1024
@dataclass
class RewardModelConfig(BaseConfig):
    """Configuration for a model-based reward.

    Disabled by default. When ``enable_resource_pool`` is set, ``n_gpus_per_node`` and
    ``nnodes`` presumably size a dedicated resource pool for the reward model —
    confirm against the trainer's resource-pool setup.
    """

    _mutable_fields = BaseConfig._mutable_fields

    # whether a reward model is used at all
    enable: bool = False
    # whether the reward model gets its own resource pool instead of colocating
    enable_resource_pool: bool = False
    n_gpus_per_node: int = 0
    nnodes: int = 0
    # path to the reward model weights (HF repo or local dir)
    model_path: Optional[str] = None
    # inference-engine settings used to serve the reward model
    inference: RolloutConfig = field(default_factory=RolloutConfig)
@dataclass
class RewardConfig(BaseConfig):
    """Top-level reward configuration combining the reward manager, an optional
    reward model, and sandbox-fusion execution settings."""

    _mutable_fields = BaseConfig._mutable_fields

    # reward manager args
    # number of parallel workers used for reward computation
    num_workers: int = 8
    reward_manager: RewardManagerConfig = field(default_factory=RewardManagerConfig)
    # reward model args
    reward_model: RewardModelConfig = field(default_factory=RewardModelConfig)
    # sandbox fusion args
    sandbox_fusion: SandboxFusionConfig = field(default_factory=SandboxFusionConfig)
| verl__workers__config__reward.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from dataclasses import dataclass, field
from typing import Optional
from omegaconf import MISSING
from verl.base_config import BaseConfig
from verl.utils.profiler import ProfilerConfig
from verl.workers.config.model import MtpConfig
__all__ = [
"SamplingConfig",
"MultiTurnConfig",
"CustomAsyncServerConfig",
"AgentLoopConfig",
"TraceConfig",
"ServerConfig",
"PrometheusConfig",
"RolloutConfig",
"CheckpointEngineConfig",
]
@dataclass
class SamplingConfig(BaseConfig):
    """Token sampling parameters for rollout generation."""

    # softmax temperature; 1.0 leaves logits unscaled
    temperature: float = 1.0
    # top-k filtering; -1 disables it
    top_k: int = -1
    # nucleus (top-p) filtering; 1.0 disables it
    top_p: float = 1.0
    # if False, decode greedily instead of sampling
    do_sample: bool = True
    # number of samples generated per prompt
    n: int = 1
@dataclass
class MultiTurnConfig(BaseConfig):
    """Configuration for multi-turn (tool-calling / interactive) rollout."""

    # turn limits may be adjusted at runtime, hence mutable
    _mutable_fields = {"max_assistant_turns", "max_user_turns"}

    # master switch for multi-turn rollout
    enable: bool = False
    # max assistant turns per conversation; None means unlimited (TODO confirm)
    max_assistant_turns: Optional[int] = None
    # path to the tool configuration file
    tool_config_path: Optional[str] = None
    # max user turns per conversation; None means unlimited (TODO confirm)
    max_user_turns: Optional[int] = None
    # number of tool calls executed in parallel
    max_parallel_calls: int = 1
    # tool responses longer than this are truncated
    max_tool_response_length: int = 256
    # which side of an over-long tool response to drop: "left", "middle", or "right" (presumably)
    tool_response_truncate_side: str = "middle"
    interaction_config_path: Optional[str] = None
    # use the inference-time chat template rather than the training one
    use_inference_chat_template: bool = False
    # how strictly to verify tokenization round-trips
    tokenization_sanity_check_mode: str = "strict"
    # tool-call message format
    format: str = "hermes"
    num_repeat_rollouts: Optional[int] = None
@dataclass
class CustomAsyncServerConfig(BaseConfig):
    """Location of a user-provided async server implementation."""

    # file path of the module containing the custom server class
    path: Optional[str] = None
    # class name of the custom server
    name: Optional[str] = None
@dataclass
class AgentLoopConfig(BaseConfig):
    """Configuration for the agent-loop workers driving rollout."""

    # number of agent-loop workers
    num_workers: int = 8
    # agent loop used when a sample does not specify one
    default_agent_loop: str = "single_turn_agent"
    # optional path to a file with additional agent-loop configs
    agent_loop_config_path: Optional[str] = None
    custom_async_server: CustomAsyncServerConfig = field(default_factory=CustomAsyncServerConfig)
    # Fully qualified class name for custom AgentLoopManager (e.g., "mypackage.module.MyManager").
    # Security: This class will be dynamically imported via importlib. Only use trusted class paths.
    agent_loop_manager_class: Optional[str] = None
@dataclass
class TraceConfig(BaseConfig):
    """Configuration for rollout tracing/telemetry."""

    # tracing backend; None disables tracing
    backend: Optional[str] = None
    # also record decoded text alongside token ids
    token2text: bool = False
    # cap on traced samples per step per worker; None means no cap
    max_samples_per_step_per_worker: Optional[int] = None

    def __post_init__(self):
        # Reject negative caps early; None (no cap) is allowed.
        if self.max_samples_per_step_per_worker is not None and self.max_samples_per_step_per_worker < 0:
            raise ValueError("`max_samples_per_step_per_worker` must be a non-negative integer or null.")
@dataclass
class ServerConfig(BaseConfig):
    """
    Configuration for SGLang server when running in server mode
    """

    # per-request timeout in seconds
    timeout: float = 60.0
    # max retries per request
    max_attempts: int = 3
    # delay between retries in seconds
    retry_delay: float = 2.0
    # max simultaneous HTTP connections to the server
    max_connections: int = 1000
    # max seconds to wait for the server to come up
    max_start_wait_time: float = 300.0
@dataclass
class PrometheusConfig(BaseConfig):
    """
    Configuration for Prometheus server
    """

    # whether enable prometheus on server mode rollout
    enable: bool = False
    # Port number that Prometheus listens on, default is 9090
    port: int = 9090
    # Path to Prometheus configuration file
    file: str = "/tmp/ray/session_latest/metrics/prometheus/prometheus.yml"
    # Specify served_model_name to avoid displaying overly long model paths in Grafana
    served_model_name: Optional[str] = None
@dataclass
class CheckpointEngineConfig(BaseConfig):
    """
    Configuration for checkpoint engine to update weights from trainer to rollout
    """

    # Backend for checkpoint engine: naive, nccl, nixl, hccl
    backend: Optional[str] = MISSING
    # Bucket size in MB to transfer multiple weights at one time
    update_weights_bucket_megabytes: int = 2048
    # Additional keyword arguments for checkpoint engine
    engine_kwargs: dict = field(default_factory=dict)
@dataclass
class RolloutConfig(BaseConfig):
    """Top-level configuration for the rollout (generation) worker.

    Groups sampling parameters, engine parallelism sizes, log-prob computation
    settings, and sub-configs for agent loops, tracing, multi-turn rollout,
    server mode, Prometheus monitoring, and checkpoint-engine weight sync.
    """

    # these fields may be patched at runtime, hence mutable
    _mutable_fields = {"max_model_len", "load_format"}

    # rollout backend name (e.g. "vllm", "sglang", "trtllm"); must be provided
    name: Optional[str] = MISSING
    # deprecated; only "async" is supported (validated in __post_init__)
    mode: str = "async"
    # --- sampling parameters (see SamplingConfig for meanings) ---
    temperature: float = 1.0
    top_k: int = -1
    top_p: float = 1.0
    do_sample: bool = True
    n: int = 1
    repetition_penalty: float = 1.0
    # Early termination threshold for multi-turn rollout in sglang.
    # Abort remaining requests when (1 - over_sample_rate) * total_requests are completed.
    over_sample_rate: float = 0.0
    # --- sequence length / engine memory ---
    prompt_length: int = 512
    response_length: int = 512
    dtype: str = "bfloat16"
    gpu_memory_utilization: float = 0.5
    ignore_eos: bool = False
    enforce_eager: bool = True
    cudagraph_capture_sizes: Optional[list] = None
    free_cache_engine: bool = True
    # --- engine parallelism sizes ---
    data_parallel_size: int = 1
    expert_parallel_size: int = 1
    tensor_model_parallel_size: int = 2
    pipeline_model_parallel_size: int = 1
    max_num_batched_tokens: int = 8192
    logprobs_mode: Optional[str] = "processed_logprobs"
    scheduling_policy: Optional[str] = "fcfs"
    # TODO: enable train_kwargs
    # train_sampling_config: SamplingConfig = field(default_factory=SamplingConfig)
    # sampling parameters used during validation
    val_kwargs: SamplingConfig = field(default_factory=SamplingConfig)
    max_model_len: Optional[int] = None
    max_num_seqs: int = 1024
    # note that the logprob computation should belong to the actor
    log_prob_micro_batch_size: Optional[int] = None
    log_prob_micro_batch_size_per_gpu: Optional[int] = None
    log_prob_use_dynamic_bsz: bool = False
    log_prob_max_token_len_per_gpu: int = 16384
    disable_log_stats: bool = True
    multi_stage_wake_up: bool = False
    # extra keyword arguments passed through to the rollout engine
    engine_kwargs: dict = field(default_factory=dict)
    calculate_log_probs: bool = False
    agent: AgentLoopConfig = field(default_factory=AgentLoopConfig)
    trace: TraceConfig = field(default_factory=TraceConfig)
    multi_turn: MultiTurnConfig = field(default_factory=MultiTurnConfig)
    # Server configuration for sglang server mode
    server: ServerConfig = field(default_factory=ServerConfig)
    # Use Prometheus to collect and monitor rollout statistics
    prometheus: PrometheusConfig = field(default_factory=PrometheusConfig)
    # Extension point for custom configurations
    custom: Optional[dict] = None
    # Checkpoint Engine config for update weights from trainer to rollout
    checkpoint_engine: CheckpointEngineConfig = field(default_factory=CheckpointEngineConfig)
    # skip generation and replay dumped rollouts from skip_dump_dir (presumably; confirm in trainer)
    skip_rollout: bool = False
    skip_dump_dir: str = "/tmp/rollout_dump"
    profiler: Optional[ProfilerConfig] = None
    enable_chunked_prefill: bool = True
    enable_prefix_caching: bool = True
    load_format: str = "dummy"
    layered_summon: bool = False
    layer_name_map: dict = field(default_factory=dict)
    sglang_engine_mode: str = "local"
    limit_images: Optional[int] = None
    skip_tokenizer_init: bool = False
    quantization: Optional[str] = None
    quantization_config_file: Optional[str] = None
    enable_rollout_routing_replay: bool = False
    enable_sleep_mode: bool = True
    mtp: MtpConfig = field(default_factory=MtpConfig)
    qat: Optional[dict] = None

    def __post_init__(self):
        """Validate the rollout config"""
        # Deprecation warning for mode field - only async mode is supported
        if self.mode == "sync":
            raise ValueError(
                "Rollout mode 'sync' has been removed. Please set "
                "`actor_rollout_ref.rollout.mode=async` or remove the mode setting entirely."
            )
        if self.mode != "async":
            warnings.warn(
                f"Unknown rollout mode '{self.mode}'. Only 'async' mode is supported. "
                "The 'mode' field is deprecated and will be removed in a future version.",
                DeprecationWarning,
                stacklevel=2,
            )
        # Expert parallelism must exactly cover the TP x DP grid.
        if self.expert_parallel_size > 1:
            assert self.expert_parallel_size == (self.tensor_model_parallel_size * self.data_parallel_size), (
                "expert_parallel_size must be equal to tensor_model_parallel_size * data_parallel_size"
            )
        # Pipeline parallelism is not implemented for these backends.
        if self.pipeline_model_parallel_size > 1:
            if self.name == "vllm" or self.name == "sglang" or self.name == "trtllm":
                raise NotImplementedError(
                    f"Current rollout {self.name=} not implemented pipeline_model_parallel_size > 1 yet."
                )
| verl__workers__config__rollout.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base class for a critic
"""
from abc import ABC, abstractmethod
import torch
from verl import DataProto
__all__ = ["BasePPOCritic"]
class BasePPOCritic(ABC):
    """Abstract base class defining the interface every PPO critic implements."""

    def __init__(self, config):
        super().__init__()
        # Keep the worker config around for subclasses.
        self.config = config

    @abstractmethod
    def compute_values(self, data: DataProto) -> torch.Tensor:
        """Return per-token value estimates for ``data``."""
        ...

    @abstractmethod
    def update_critic(self, data: DataProto):
        """Update the critic parameters using ``data``."""
        ...
| verl__workers__critic__base.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implement a multiprocess PPOCritic
"""
import logging
import os
import torch
import torch.distributed
from torch import nn, optim
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from verl import DataProto
from verl.trainer.ppo import core_algos
from verl.utils.attention_utils import index_first_axis, pad_input, rearrange, unpad_input
from verl.utils.device import get_device_id, get_device_name
from verl.utils.fsdp_utils import FSDPModule, fsdp2_clip_grad_norm_
from verl.utils.profiler import GPUMemoryLogger
from verl.utils.py_functional import append_to_dict
from verl.utils.seqlen_balancing import prepare_dynamic_batch, restore_dynamic_batch
from verl.utils.torch_functional import masked_mean
from verl.utils.ulysses import gather_outputs_and_unpad, ulysses_pad_and_slice_inputs
from verl.workers.critic import BasePPOCritic
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class DataParallelPPOCritic(BasePPOCritic):
    """Data-parallel PPO critic backed by FSDP/FSDP2.

    Supports remove-padding (sequence packing) and Ulysses sequence parallelism
    for the forward pass, and both fixed and dynamic micro-batch sizes for value
    computation and the PPO value-loss update.
    """

    def __init__(self, config, critic_module: nn.Module, critic_optimizer: optim.Optimizer):
        super().__init__(config=config)
        self.critic_module = critic_module
        self.critic_optimizer = critic_optimizer
        # Whether to unpad inputs (sequence packing) before the forward pass.
        self.use_remove_padding = self.config.model.get("use_remove_padding", False)
        print(f"Critic use_remove_padding={self.use_remove_padding}")
        self.ulysses_sequence_parallel_size = self.config.get("ulysses_sequence_parallel_size", 1)
        self.device_name = get_device_name()

    def _forward_micro_batch(self, micro_batch):
        """Run one critic forward pass and return values for response tokens only.

        Args:
            micro_batch (dict): model inputs containing "input_ids", "attention_mask",
                "position_ids", "responses", and optionally "multi_modal_inputs".

        Returns:
            torch.Tensor: value predictions of shape (batch, response_length); the value
            for each response token is taken from the position of the *previous* token
            (values are predicted at the ends of prefixes).
        """
        response_length = micro_batch["responses"].size(-1)
        multi_modal_inputs = {}
        if "multi_modal_inputs" in micro_batch.keys():
            from verl.utils.model import extract_multi_modal_inputs

            multi_modal_inputs = extract_multi_modal_inputs(micro_batch["multi_modal_inputs"])
        with torch.autocast(device_type=self.device_name, dtype=torch.bfloat16):
            input_ids = micro_batch["input_ids"]
            batch, seqlen = input_ids.shape
            attention_mask = micro_batch["attention_mask"]
            position_ids = micro_batch["position_ids"]
            if position_ids.dim() == 3:  # qwen2vl mrope
                position_ids = position_ids.transpose(0, 1)
            if self.use_remove_padding:
                # Drop padding tokens and flatten the batch into a single packed sequence.
                input_ids_rmpad, indices, *_ = unpad_input(
                    input_ids.unsqueeze(-1), attention_mask
                )  # input_ids_rmpad (total_nnz, ...)
                input_ids_rmpad = input_ids_rmpad.transpose(0, 1)  # (1, total_nnz)
                # unpad the position_ids to align the rotary
                if position_ids.dim() == 3:
                    position_ids_rmpad = (
                        index_first_axis(rearrange(position_ids, "c b s ... -> (b s) c ..."), indices)
                        .transpose(0, 1)
                        .unsqueeze(1)
                    )  # (4, bsz, seqlen) -> (4, 1, bsz * seqlen)
                else:
                    position_ids_rmpad = index_first_axis(
                        rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."), indices
                    ).transpose(0, 1)
                # pad and slice the inputs if sp > 1
                if self.ulysses_sequence_parallel_size > 1:
                    input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs(
                        input_ids_rmpad, position_ids_rmpad, sp_size=self.ulysses_sequence_parallel_size
                    )
                # only pass input_ids and position_ids to enable flash_attn_varlen
                output = self.critic_module(
                    input_ids=input_ids_rmpad,
                    attention_mask=None,
                    position_ids=position_ids_rmpad,
                    **multi_modal_inputs,
                    use_cache=False,
                )  # prevent model thinks we are generating
                if hasattr(self.critic_module, "v_head"):
                    # For trl.AutoModelForCausalLMWithValueHead
                    values_rmpad = output[2].squeeze(0).unsqueeze(-1)
                else:
                    values_rmpad = output.logits
                    values_rmpad = values_rmpad.squeeze(0)  # (total_nnz)
                # gather output if sp > 1
                if self.ulysses_sequence_parallel_size > 1:
                    values_rmpad = gather_outputs_and_unpad(
                        values_rmpad, gather_dim=0, unpad_dim=0, padding_size=pad_size
                    )
                # pad it back
                values = pad_input(values_rmpad, indices=indices, batch=batch, seqlen=seqlen).squeeze(-1)
                # shift by one: the value for token t is predicted at position t-1
                values = values[:, -response_length - 1 : -1]
            else:
                output = self.critic_module(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    **multi_modal_inputs,
                    use_cache=False,
                )  # prevent model thinks we are generating
                if hasattr(self.critic_module, "v_head"):
                    # For trl.AutoModelForCausalLMWithValueHead
                    values = output[2]
                else:
                    values = output.logits
                # shift by one: the value for token t is predicted at position t-1
                values = values[:, -response_length - 1 : -1].squeeze(-1)
        return values

    def _optimizer_step(self):
        """Clip gradients, step the optimizer (skipping non-finite grads), and return the grad norm."""
        assert self.config.grad_clip is not None
        # Use the wrapper-appropriate clipping API (FSDP1 vs FSDP2 vs plain module).
        if isinstance(self.critic_module, FSDP):
            grad_norm = self.critic_module.clip_grad_norm_(self.config.grad_clip)
        elif isinstance(self.critic_module, FSDPModule):
            grad_norm = fsdp2_clip_grad_norm_(self.critic_module.parameters(), max_norm=self.config.grad_clip)
        else:
            grad_norm = torch.nn.utils.clip_grad_norm_(self.critic_module.parameters(), max_norm=self.config.grad_clip)
        # if grad_norm is not finite, skip the update
        if not torch.isfinite(grad_norm):
            print(f"WARN: grad_norm is not finite: {grad_norm}")
            self.critic_optimizer.zero_grad()
        else:
            self.critic_optimizer.step()
        return grad_norm

    @GPUMemoryLogger(role="dp critic", logger=logger)
    def compute_values(self, data: DataProto) -> torch.Tensor:
        """Compute value estimates for a batch, micro-batching either by fixed size or by token budget.

        Reads "micro_batch_size", "use_dynamic_bsz" (and "max_token_len" when dynamic)
        from ``data.meta_info``. Returns values masked so only response tokens are non-zero
        (when "response_mask" is present).
        """
        self.critic_module.eval()
        micro_batch_size = data.meta_info["micro_batch_size"]
        use_dynamic_bsz = data.meta_info["use_dynamic_bsz"]
        has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys()
        select_keys = (
            ["responses", "input_ids", "response_mask", "attention_mask", "position_ids"]
            if "response_mask" in data.batch
            else ["responses", "input_ids", "attention_mask", "position_ids"]
        )
        non_tensor_select_keys = ["multi_modal_inputs"] if has_multi_modal_inputs else []
        data = data.select(batch_keys=select_keys, non_tensor_batch_keys=non_tensor_select_keys)
        if use_dynamic_bsz:
            # Each Ulysses SP rank processes a slice, so the per-batch token budget scales with sp size.
            max_token_len = data.meta_info["max_token_len"] * self.ulysses_sequence_parallel_size
            micro_batches, batch_idx_list = prepare_dynamic_batch(data, max_token_len=max_token_len)
        else:
            micro_batches = data.split(micro_batch_size)
        values_lst = []
        for micro_batch in micro_batches:
            micro_batch = micro_batch.to(get_device_id())
            model_inputs = {**micro_batch.batch, **micro_batch.non_tensor_batch}
            with torch.no_grad():
                values = self._forward_micro_batch(model_inputs)
            values_lst.append(values)
        values = torch.concat(values_lst, dim=0)
        if use_dynamic_bsz:
            # Dynamic batching reordered samples; restore the original order.
            values = restore_dynamic_batch(values, batch_idx_list)
        if "response_mask" in data.batch:
            response_mask = data.batch["response_mask"]
            response_mask = response_mask.to(values.device)
            values = values * response_mask  # Only action tokens have values
        return values

    @GPUMemoryLogger(role="dp critic", logger=logger)
    def update_critic(self, data: DataProto):
        """Run PPO value-loss updates over the batch and return a metrics dict.

        Iterates ``ppo_epochs`` times over mini-batches of size ``ppo_mini_batch_size``,
        accumulating gradients across micro-batches before each optimizer step.
        """
        # make sure we are in training mode
        self.critic_module.train()
        # vf_loss is accumulated as a running scalar; other metrics are appended as lists.
        metrics = {
            "critic/vf_loss": 0.0,
        }
        select_keys = ["input_ids", "responses", "response_mask", "attention_mask", "position_ids", "values", "returns"]
        has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys()
        non_tensor_select_keys = ["multi_modal_inputs"] if has_multi_modal_inputs else []
        data = data.select(batch_keys=select_keys, non_tensor_batch_keys=non_tensor_select_keys)
        # Split to make minibatch iterator for updating the actor
        # See PPO paper for details. https://arxiv.org/abs/1707.06347
        mini_batches = data.split(self.config.ppo_mini_batch_size)
        for _ in range(self.config.ppo_epochs):
            for batch_idx, mini_batch in enumerate(mini_batches):
                if self.config.use_dynamic_bsz:
                    max_token_len = self.config.ppo_max_token_len_per_gpu * self.ulysses_sequence_parallel_size
                    micro_batches, _ = prepare_dynamic_batch(mini_batch, max_token_len=max_token_len)
                else:
                    self.gradient_accumulation = (
                        self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu
                    )
                    micro_batches = mini_batch.split(self.config.ppo_micro_batch_size_per_gpu)
                self.critic_optimizer.zero_grad()
                for micro_batch in micro_batches:
                    micro_batch = micro_batch.to(get_device_id())
                    micro_batch_metrics = {}
                    model_inputs = {**micro_batch.batch, **micro_batch.non_tensor_batch}
                    response_mask = model_inputs["response_mask"]
                    values = model_inputs["values"]
                    returns = model_inputs["returns"]
                    vpreds = self._forward_micro_batch(model_inputs)
                    vf_loss, vf_clipfrac = core_algos.compute_value_loss(
                        vpreds=vpreds,
                        values=values,
                        returns=returns,
                        response_mask=response_mask,
                        cliprange_value=self.config.cliprange_value,
                        loss_agg_mode=self.config.loss_agg_mode,
                    )
                    if self.config.use_dynamic_bsz:
                        # relative to the dynamic bsz
                        loss_scale_factor = response_mask.shape[0] / self.config.ppo_mini_batch_size
                        loss = vf_loss * loss_scale_factor
                    else:
                        loss_scale_factor = 1 / self.gradient_accumulation
                        loss = vf_loss * loss_scale_factor
                    loss.backward()
                    micro_batch_metrics.update(
                        {
                            "critic/vf_clipfrac": vf_clipfrac.detach().item(),
                            "critic/vpred_mean": masked_mean(vpreds, response_mask).detach().item(),
                        }
                    )
                    metrics["critic/vf_loss"] += vf_loss.detach().item() * loss_scale_factor
                    append_to_dict(metrics, micro_batch_metrics)
                grad_norm = self._optimizer_step()
                mini_batch_metrics = {"critic/grad_norm": grad_norm.detach().item()}
                append_to_dict(metrics, mini_batch_metrics)
        self.critic_optimizer.zero_grad()
        return metrics
| verl__workers__critic__dp_critic.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implement a multiprocess PPOCritic
"""
import itertools
import logging
import os
from functools import partial
from typing import Iterable
import torch
import torch.distributed
from megatron.core import parallel_state as mpu
from megatron.core.optimizer import DistributedOptimizer, OptimizerConfig
from megatron.core.pipeline_parallel import get_forward_backward_func
from omegaconf import OmegaConf
from torch import nn
from verl import DataProto
from verl.trainer.ppo import core_algos
from verl.utils.device import get_device_id, get_torch_device
from verl.utils.megatron.pipeline_parallel import make_batch_generator
from verl.utils.profiler import GPUMemoryLogger
from verl.utils.py_functional import append_to_dict
from verl.utils.seqlen_balancing import get_reverse_idx, rearrange_micro_batches
from verl.utils.torch_functional import broadcast_dict_tensor, masked_mean
from verl.workers.critic import BasePPOCritic
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class MegatronPPOCritic(BasePPOCritic):
    def __init__(
        self,
        config,
        model_config,
        hf_config,
        tf_config,
        critic_module: nn.ModuleList,
        critic_optimizer: DistributedOptimizer,
        critic_optimizer_config: OptimizerConfig,
    ):
        """Initialize the Megatron PPO critic.

        Args:
            config: Critic worker config (validated by ``_validate_config``).
            model_config: Model-level config.
            hf_config: HuggingFace model config.
            tf_config: Megatron-core transformer config.
            critic_module (nn.ModuleList): Critic model chunks (presumably one per
                virtual pipeline stage — confirm against the model builder).
            critic_optimizer (DistributedOptimizer): Megatron distributed optimizer.
            critic_optimizer_config (OptimizerConfig): Megatron optimizer config.
        """
        super().__init__(config=config)
        self._validate_config(config)
        self.model_config = model_config
        self.hf_config = hf_config  # huggingface config
        self.tf_config = tf_config  # mcore transformer config
        self.critic_module = critic_module
        self.critic_optimizer = critic_optimizer
        self.critic_optimizer_config = critic_optimizer_config
        # we create a separate nametuple for optimizer step so that global args won't affect it.
        self.optimizer_step_args = OmegaConf.create(
            {
                "skip_grad": None,
                "overlap_dp_param_comm": False,
                "overlap_dp_grad_comm": False,
                "gradient_accumulation_steps": 1,
                "sequence_parallel": self.tf_config.sequence_parallel,
                "DDP_impl": "local",
                "layernorm_allreduce_bucket_threshold": 0,
                "reduce_grads_use_alltoall": False,
            }
        )
    def _validate_config(self, config) -> None:
        """Validate config options not implemented for Megatron backend"""
        # Ulysses sequence parallelism is not supported by the Megatron critic.
        assert config.get("ulysses_sequence_parallel_size", 1) == 1
        if config.shuffle:
            assert config.data_loader_seed is not None, "If shuffle dataloader, seed must be manually set"
        # NOTE(review): redundant with BasePPOCritic.__init__ (which already stored config
        # before this method is called) — kept as-is.
        self.config = config
@GPUMemoryLogger("megatron critic", logger=logger)
def compute_values(self, data: DataProto) -> DataProto:
prev_modes = [m.training for m in self.critic_module]
for module in self.critic_module:
module.eval()
responses = data.batch["responses"]
attention_mask = data.batch["attention_mask"]
use_dynamic_bsz = data.meta_info.get("use_dynamic_bsz", False)
micro_batch_size = data.meta_info.get("micro_batch_size", None)
max_token_len = data.meta_info.get("max_token_len", None)
assert micro_batch_size is not None, "micro batch size is needed for forward compute"
if use_dynamic_bsz:
assert max_token_len is not None, "max_token_len must be set when use_dynamic_bsz is True"
max_token_len = max_token_len * self.config.megatron.context_parallel_size
response_length = responses.size(1)
with torch.no_grad():
output = self.forward_backward_batch(
data=data,
forward_only=True,
use_dynamic_bsz=use_dynamic_bsz,
micro_batch_size=micro_batch_size,
max_token_len=max_token_len,
mini_batch_size=None,
)
if mpu.is_pipeline_last_stage(ignore_virtual=True):
# only on last rank. It should be on every tp rank
values = [o["vpreds"] for o in output["output"]] # (bs, seq_size, vocal_size)
values = torch.cat(values, dim=0).to(torch.float32)
if use_dynamic_bsz:
indices = output["indices"]
indices = list(itertools.chain.from_iterable(indices))
assert len(indices) == values.size(0), f"{len(indices)} vs. {values.size()}"
revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long)
values = values[revert_indices]
else:
values = torch.empty_like(attention_mask, dtype=torch.float32)
# each tp ranks should contain the same value
values = values[
:, -response_length - 1 : -1
] # Values are predicted at the ends of prefixes, e.g., the last prompt token
response_mask = attention_mask[:, -response_length:]
values = values * response_mask # Only action tokens have values
values = values.contiguous()
# sync among pp ranks
values = values.to(get_device_id())
torch.distributed.broadcast(
tensor=values,
src=mpu.get_pipeline_model_parallel_last_rank(),
group=mpu.get_pipeline_model_parallel_group(),
)
values = values.to("cpu")
# add empty cache after each compute
get_torch_device().empty_cache()
for module, mode in zip(self.critic_module, prev_modes, strict=False):
module.train(mode)
return values
def make_minibatch_iterator(self, data: DataProto) -> Iterable[DataProto]:
select_keys = ["input_ids", "responses", "attention_mask", "position_ids", "values", "returns"]
data = data.select(batch_keys=select_keys)
return data.make_iterator(
mini_batch_size=self.config.ppo_mini_batch_size,
epochs=self.config.ppo_epochs,
seed=self.config.data_loader_seed,
dataloader_kwargs={"shuffle": self.config.shuffle},
)
def forward_backward_batch(
self,
data: DataProto,
forward_only=False,
use_dynamic_bsz=False,
micro_batch_size=None,
max_token_len=None,
mini_batch_size=None,
):
# broadcast from last pp rank to all other pp ranks
data.to(get_device_id())
mini_batch = data
mini_batch.batch = mini_batch.batch.contiguous()
broadcast_dict_tensor(
mini_batch.batch,
src=mpu.get_pipeline_model_parallel_last_rank(),
group=mpu.get_pipeline_model_parallel_group(),
)
mini_batch.to("cpu")
# split into micro-batches
mini_batch.batch["attention_mask"] = mini_batch.batch["attention_mask"].to(bool)
indices = None
if use_dynamic_bsz:
assert max_token_len is not None, "max_token_len must be set when use_dynamic_bsz is True"
vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size()
if vpp_size is not None and vpp_size > 1:
microbatch_group_size_per_vp_stage = self.tf_config.microbatch_group_size_per_vp_stage
micro_batches, indices = rearrange_micro_batches(
batch=mini_batch.batch,
num_batches_divided_by=microbatch_group_size_per_vp_stage,
max_token_len=max_token_len,
)
assert len(micro_batches) % self.tf_config.microbatch_group_size_per_vp_stage == 0, (
f"micro_batches {micro_batches} must be divisible by microbatch_group_size_per_vp_stage "
f"{microbatch_group_size_per_vp_stage} for megatron backend"
)
else:
micro_batches, indices = rearrange_micro_batches(batch=mini_batch.batch, max_token_len=max_token_len)
total_seqlen = max_token_len
else:
assert micro_batch_size is not None, (
"micro_batch_size is needed to be passed in when not using dynamic batch size"
)
micro_batches = mini_batch.batch.split(micro_batch_size)
seq_len = micro_batches[0]["input_ids"].shape[1]
total_seqlen = micro_batch_size * seq_len
n_micro_batch = len(micro_batches)
forward_backward_func = get_forward_backward_func()
def loss_func(output, data, meta_info):
nonlocal use_dynamic_bsz
if forward_only:
return torch.tensor(1.0, device=output.device), {"vpreds": output}
responses = data["responses"]
attention_mask = data["attention_mask"]
values = data["values"]
returns = data["returns"]
response_length = responses.size(1)
response_mask = attention_mask[:, -response_length:]
cliprange_value = self.config.cliprange_value
vpreds = output # (bs, sequence_length)
vpreds = vpreds[:, -response_length - 1 : -1]
vf_loss, vf_clipfrac = core_algos.compute_value_loss(
vpreds=vpreds,
values=values,
returns=returns,
response_mask=response_mask,
cliprange_value=cliprange_value,
loss_agg_mode=self.config.loss_agg_mode,
)
stats = {
"critic/vf_loss": vf_loss.detach().item(),
"critic/vf_clipfrac": vf_clipfrac.detach().item(),
"critic/vpred_mean": masked_mean(vpreds, response_mask).detach().item(),
}
return vf_loss, stats
def forward_step(batch_iter, model):
batch = next(batch_iter)
batch = batch.to(get_device_id())
batch = batch.contiguous()
input_ids = batch["input_ids"]
attention_mask = batch["attention_mask"]
position_ids = batch["position_ids"]
from verl.models.mcore import get_mcore_forward_fn
forward_fn = get_mcore_forward_fn(self.hf_config)
output = forward_fn(
model,
input_ids,
attention_mask,
position_ids,
{}, # multi_modal_inputs
value_model=True,
)
return output, partial(loss_func, data=batch, meta_info={})
# batch should be a list of batches inside micro-batches
batch_generator = make_batch_generator(micro_batches, vpp_size=len(self.critic_module))
# TODO: we may use the new schedule instead
# for flash-attn: (seq_len, batch_size, hidden_size) = (mbs*seq_len, 1, hidden_size)
if mpu.get_pipeline_model_parallel_world_size() > 1:
losses_reduced = forward_backward_func(
forward_step_func=forward_step,
data_iterator=batch_generator,
model=self.critic_module,
num_microbatches=n_micro_batch,
seq_length=total_seqlen, # no use when input_shapes was set
micro_batch_size=1, # no use when input_shapes was set
forward_only=forward_only,
)
else:
losses_reduced = forward_backward_func(
forward_step_func=forward_step,
data_iterator=batch_generator,
model=self.critic_module,
num_microbatches=n_micro_batch,
seq_length=total_seqlen, # in use for pp = 1
micro_batch_size=1, # in use for pp = 1
forward_only=forward_only,
)
# loss_reduces contains the stats returned from loss_func
losses_reduced = {"output": losses_reduced}
if use_dynamic_bsz:
losses_reduced["indices"] = indices
return losses_reduced
@GPUMemoryLogger("megatron critic", logger=logger)
def update_critic(self, dataloader: Iterable[DataProto]):
metrics = {}
for data in dataloader:
self.critic_optimizer.zero_grad()
# use use_contiguous_buffers_in_local_ddp and no overlap_dp_param_comm
for chunk in self.critic_module:
chunk.zero_grad_buffer()
micro_batch_size = self.config.ppo_micro_batch_size_per_gpu
max_token_len = None
if self.config.use_dynamic_bsz:
max_token_len = self.config.ppo_max_token_len_per_gpu * self.config.megatron.context_parallel_size
metric_micro_batch = self.forward_backward_batch(
data,
forward_only=False,
use_dynamic_bsz=self.config.use_dynamic_bsz,
micro_batch_size=micro_batch_size,
max_token_len=max_token_len,
mini_batch_size=self.config.ppo_mini_batch_size,
)
metric_micro_batch = metric_micro_batch["output"]
update_successful, grad_norm, num_zeros_in_grad = self.critic_optimizer.step()
learning_rate = self.critic_optimizer.param_groups[-1]["lr"]
data = {"critic/grad_norm": grad_norm, "critic/lr": learning_rate}
append_to_dict(metrics, data)
if update_successful:
# allgather already execute in optimizer.step in new megatron
pass
else:
raise NotImplementedError
for metric in metric_micro_batch:
append_to_dict(metrics, metric) # append the metric from this micro-batch to global metrics.
# add empty cache after each compute
get_torch_device().empty_cache()
return metrics
| verl__workers__critic__megatron_critic.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The abstract base class defining the interface for model training engines.
"""
from abc import abstractmethod
from contextlib import nullcontext
from typing import Any, Callable, ContextManager, Generator, Optional
import torch
from tensordict import TensorDict
from verl.utils.device import get_device_name
from verl.utils.tensordict_utils import maybe_fix_3d_position_ids
class BaseEngine:
    """Interface that every model-training engine backend must implement.

    Note: this interface is experimental and may change before release.
    Concrete engines subclass ``BaseEngine`` and provide real behavior for
    each method below.
    """

    def initialize(self):
        """Construct (or load) the model, optimizer and LR scheduler.

        After this call the engine must be fully ready for training or
        evaluation.
        """
        raise NotImplementedError

    @property
    @abstractmethod
    def is_param_offload_enabled(self) -> bool:
        """True when model parameters are offloaded to CPU while idle."""
        raise NotImplementedError

    @property
    @abstractmethod
    def is_optimizer_offload_enabled(self) -> bool:
        """True when optimizer state is offloaded to CPU while idle."""
        raise NotImplementedError

    def train_mode(self, **kwargs):
        """Return a context manager that puts the engine/model into training mode.

        Example:
            with engine.train_mode():
                ...  # training-mode code
        """
        raise NotImplementedError

    def eval_mode(self, **kwargs):
        """Return a context manager that puts the engine/model into evaluation mode.

        Example:
            with engine.eval_mode():
                ...  # evaluation-mode code
        """
        raise NotImplementedError

    def optimizer_zero_grad(self):
        """Clear all accumulated gradients."""
        raise NotImplementedError

    def optimizer_step(self):
        """Apply one optimizer update; returns the gradient norm (see train_batch)."""
        raise NotImplementedError

    def lr_scheduler_step(self):
        """Advance the LR scheduler one step.

        Returns:
            current_lr (float or list[float]): updated learning rate(s).
        """
        raise NotImplementedError

    def forward_backward_batch(self, data: TensorDict, loss_function: Callable, forward_only=False) -> Any:
        """Run a forward pass, and unless ``forward_only``, the backward pass too.

        Args:
            data: batch of tensors plus metadata.
            loss_function: loss to optimize; see ``verl.workers.roles.utils.losses``.
            forward_only: when True, skip the backward pass.

        Returns:
            The forward-pass outputs (predictions, metrics, ...).
        """
        raise NotImplementedError

    def train_batch(self, data: TensorDict, loss_function: Callable) -> Any:
        """Execute one full optimization step over ``data``.

        Args:
            data: batch of tensors plus metadata.
            loss_function: computes loss/metrics from batch and predictions.

        Returns:
            The forward/backward outputs; on the mp source rank the metrics
            dict additionally carries "grad_norm".
        """
        # Best-effort fixup for 3D position_ids before running the model.
        maybe_fix_3d_position_ids(data)
        self.optimizer_zero_grad()
        result = self.forward_backward_batch(data, loss_function, forward_only=False)
        norm = self.optimizer_step()
        if self.is_mp_src_rank_with_outputs():
            assert "grad_norm" not in result["metrics"]
            result["metrics"]["grad_norm"] = norm
        return result

    def infer_batch(self, data: TensorDict, loss_function: Optional[Callable] = None) -> Any:
        """Run a no-grad forward pass over ``data``.

        Args:
            data: batch of tensors plus metadata.
            loss_function: optional loss/metric function, as in ``train_batch``.

        Returns:
            The forward-pass outputs.
        """
        # see comments from train_batch
        maybe_fix_3d_position_ids(data)
        with torch.no_grad():
            return self.forward_backward_batch(data, loss_function, forward_only=True)

    def get_per_tensor_param(self) -> tuple[Generator[tuple[str, torch.Tensor], None, None], Optional[dict]]:
        """Yield per-tensor parameters plus an optional PEFT config.

        Returns:
            A generator of (name, tensor) pairs, and an optional peft config dict.
        """
        raise NotImplementedError

    def get_data_parallel_size(self):
        """Number of data-parallel replicas."""
        raise NotImplementedError

    def get_data_parallel_rank(self):
        """This process's rank within the data-parallel group."""
        raise NotImplementedError

    def get_data_parallel_group(self):
        """The data-parallel process group."""
        raise NotImplementedError

    def to(self, device: str, model: bool = True, optimizer: bool = True, grad: bool = True):
        """Move the model, optimizer state and/or gradient buffers to ``device``.

        Args:
            device: target device identifier.
            model: move the model parameters.
            optimizer: move the optimizer states.
            grad: move the gradient buffer.
        """
        if not model:
            # Optimizer state and grads cannot be moved independently of the model.
            assert not (optimizer or grad), "Model must be moved to device along with optimizer and grad"

    def save_checkpoint(
        self,
        local_path: str,
        hdfs_path: Optional[str] = None,
        global_step: int = 0,
        max_ckpt_to_keep: Optional[int] = None,
        **kwargs,
    ) -> None:
        """Persist model, optimizer and scheduler state.

        Args:
            local_path: local filesystem destination.
            hdfs_path: optional HDFS destination to copy to.
            global_step: training step used for checkpoint naming.
            max_ckpt_to_keep: cap on how many recent checkpoints to retain.
            **kwargs: backend-specific options.
        """
        raise NotImplementedError

    def load_checkpoint(
        self, local_path: str, hdfs_path: Optional[str] = None, del_local_after_load: bool = True, **kwargs
    ) -> None:
        """Restore model, optimizer and scheduler state from a checkpoint.

        Args:
            local_path: local filesystem path of the checkpoint.
            hdfs_path: optional HDFS location the checkpoint lives at.
            del_local_after_load: delete the local copy once loaded.
            **kwargs: backend-specific options.
        """
        raise NotImplementedError

    def is_mp_src_rank_with_outputs(self):
        """True if this rank is the first in its model-parallel group holding outputs."""
        raise NotImplementedError

    def disable_adapter(self) -> ContextManager:
        """Context manager that temporarily disables all LoRA adapters (no-op by default)."""
        return nullcontext()
class BaseEngineCtx:
    """Context manager that loads the engine onto the accelerator on entry and
    offloads it back to CPU on exit, honoring the engine's offload policy."""

    def __init__(self, engine: BaseEngine, mode, **kwargs):
        """
        Args:
            engine: the engine whose model/optimizer placement is managed.
            mode: either "train" or "eval".
            **kwargs: supports ``disable_auto_offload`` to skip all movement.
        """
        self.engine = engine
        self.mode = mode
        assert self.mode in ("train", "eval")
        self.disable_auto_offload = kwargs.pop("disable_auto_offload", False)

    def _context_switch(self, device):
        if self.disable_auto_offload:
            return
        going_to_cpu = device == "cpu"
        # When offloading, only move what the engine's offload policy enables;
        # when loading, always bring everything onto the accelerator.
        move_model = self.engine.is_param_offload_enabled if going_to_cpu else True
        move_optimizer = self.engine.is_optimizer_offload_enabled if going_to_cpu else True
        if self.mode == "eval":
            # Eval never needs optimizer state or gradients.
            self.engine.to(device=device, model=move_model, optimizer=False, grad=False)
        elif self.mode == "train":
            self.engine.to(
                device=device,
                model=move_model,
                optimizer=move_optimizer,
                grad=move_model,
            )

    def __enter__(self):
        self._context_switch(get_device_name())
        self.engine.mode = self.mode

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._context_switch("cpu")
        self.engine.mode = None
class EngineRegistry:
"""
A registry for managing and instantiating different types of training engines.
This class uses a dictionary to store engine classes, mapping a string key to each class.
It provides a decorator `register` to add new engines to the registry and a `new` method
to create an instance of a registered engine.
"""
_engines = {}
@classmethod
def register(cls, model_type: str, backend: list[str] | str, device: list[str] | str = "cuda"):
"""
A class method decorator that registers an engine class with a given key.
This allows for dynamic instantiation of engine classes by their registered key.
Args:
model_type (str): The type of the model
backend (list[str] | str): The backend to use for the model type
device (list[str] | str): The device type (e.g., "cuda", "npu", "cpu") this engine supports,
default is "cuda"
Returns:
A decorator function that takes an engine class and registers it.
"""
def decorator(engine_class):
assert issubclass(engine_class, BaseEngine)
if model_type not in cls._engines:
cls._engines[model_type] = {}
backends = backend if isinstance(backend, list) else [backend]
devices = device if isinstance(device, list) else [device]
for current_backend in backends:
for current_device in devices:
if current_backend not in cls._engines[model_type]:
cls._engines[model_type][current_backend] = {}
if current_device not in cls._engines[model_type][current_backend]:
cls._engines[model_type][current_backend][current_device] = engine_class
return engine_class
return decorator
@classmethod
def get_engine_cls(cls, model_type: str, backend: str):
assert model_type in cls._engines, f"Unknown model_type: {model_type}"
assert backend in cls._engines[model_type], f"Unknown backend: {backend}"
device = get_device_name()
assert device in cls._engines[model_type][backend], (
f"Unknown device: {device} for model_type: {model_type} and backend: {backend}"
)
return cls._engines[model_type][backend][device]
@classmethod
def new(cls, model_type, backend, *args, **kwargs):
"""
Function to create a new training engine instance based on the provided config.
Args:
key: A configuration object containing the engine key and other settings.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
engine: An instance of the training engine corresponding to the config.
Raises:
NotImplementedError: If the engine key in the config does not match any known engines.
"""
engine_cls = cls.get_engine_cls(model_type, backend)
return engine_cls(*args, **kwargs)
| verl__workers__engine__base.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The concrete Engine implementation using PyTorch FullyShardedDataParallel (FSDP)
"""
import gc
import logging
import os
import warnings
from contextlib import nullcontext
from typing import Callable, ContextManager, Optional
import torch
import torch.distributed
from peft import LoraConfig, TaskType, get_peft_model
from tensordict import TensorDict
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import FullStateDictConfig, ShardedStateDictConfig, StateDictType
from torch.distributed.tensor import DTensor
import verl.utils.torch_functional as verl_F
from verl.models.transformers.monkey_patch import apply_monkey_patch
from verl.trainer.config import CheckpointConfig
from verl.utils import tensordict_utils as tu
from verl.utils.activation_offload import enable_activation_offloading
from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager
from verl.utils.dataset.dataset_utils import DatasetPadMode
from verl.utils.debug import log_gpu_memory_usage
from verl.utils.device import get_device_id, get_device_name
from verl.utils.fsdp_utils import (
CPUOffloadPolicy,
FSDPModule,
MixedPrecisionPolicy,
apply_fsdp2,
collect_lora_params,
fsdp2_clip_grad_norm_,
fsdp2_load_full_state_dict,
fsdp_version,
get_fsdp_wrap_policy,
get_init_weight_context_manager,
init_fn,
load_fsdp_model_to_gpu,
load_fsdp_optimizer,
merged_lora_context,
normalize_peft_param_name,
offload_fsdp_model_to_cpu,
offload_fsdp_optimizer,
replace_lora_wrapper,
)
from verl.utils.model import convert_weight_keys, extract_multi_modal_inputs
from verl.utils.py_functional import convert_to_regular_types
from verl.utils.torch_functional import logprobs_from_logits
from verl.utils.ulysses import (
gather_outputs_and_unpad,
get_ulysses_sequence_parallel_group,
set_ulysses_sequence_parallel_group,
ulysses_pad,
ulysses_pad_and_slice_inputs,
)
from verl.workers.config import FSDPEngineConfig, FSDPOptimizerConfig, HFModelConfig
from ..base import BaseEngine, BaseEngineCtx, EngineRegistry
from ..utils import enable_full_determinism, postprocess_batch_func, prepare_micro_batches
from .utils import create_device_mesh, get_sharding_strategy
# Module-level logger; verbosity controlled via the VERL_LOGGING_LEVEL env var.
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))

# Accelerator type ("cuda"/"npu"/...) cached once for device-mesh construction below.
device_name = get_device_name()
class FSDPEngine(BaseEngine):
"""
Concrete Engine implementation using PyTorch FullyShardedDataParallel (FSDP).
Supports model sharding, activation/optimizer offloading, LoRA, and sequence parallelism.
"""
    def __init__(
        self,
        model_config: HFModelConfig,
        engine_config: FSDPEngineConfig,
        optimizer_config: FSDPOptimizerConfig,
        checkpoint_config: CheckpointConfig,
    ):
        """
        Initialize the FSDPEngine.
        Sets up distributed device meshes, LoRA, and offload policies based on config.

        Args:
            model_config: HF model settings (path, patches, LoRA rank, etc.).
            engine_config: FSDP sharding / offload / parallelism settings.
            optimizer_config: optimizer and LR-schedule settings.
            checkpoint_config: checkpoint save/load settings.
        """
        super().__init__()
        self.model_config = model_config
        self.engine_config = engine_config
        self.optimizer_config = optimizer_config
        self.checkpoint_config = checkpoint_config
        self.mode = None  # set by BaseEngineCtx to "train"/"eval"/None
        self.rank = torch.distributed.get_rank()

        # Apply NPU patches for FSDP backend
        from .utils import apply_npu_fsdp_patches

        apply_npu_fsdp_patches()

        # build device mesh for Ulysses Sequence Parallel
        self.use_remove_padding = self.model_config.use_remove_padding
        self._init_device_mesh()

        if self.engine_config.full_determinism:
            enable_full_determinism(seed=self.engine_config.seed)

        # set FSDP offload params
        self._is_offload_param = self.engine_config.param_offload
        self._is_offload_optimizer = self.engine_config.optimizer_offload
        # LoRA is enabled whenever a positive rank is configured.
        self._is_lora = self.model_config.lora_rank > 0

        # Chunked entropy trades speed for lower peak memory on large vocabularies.
        if self.engine_config.entropy_from_logits_with_chunking:
            entropy_from_logits = verl_F.entropy_from_logits_with_chunking
        else:
            entropy_from_logits = verl_F.entropy_from_logits
        self.compute_entropy_from_logits = (
            torch.compile(entropy_from_logits, dynamic=True)
            if self.engine_config.use_torch_compile  # use torch compile by default
            else entropy_from_logits
        )
    @property
    def is_param_offload_enabled(self) -> bool:
        """Whether model parameters are offloaded to CPU while idle."""
        return self._is_offload_param
    @property
    def is_optimizer_offload_enabled(self) -> bool:
        """Whether optimizer state is offloaded to CPU while idle."""
        return self._is_offload_optimizer
def is_mp_src_rank_with_outputs(self):
if self.ulysses_device_mesh is not None:
is_collect = self.ulysses_device_mesh["sp"].get_local_rank() == 0
else:
is_collect = True
return is_collect
    def initialize(self):
        """
        Build the model, optimizer, and learning rate scheduler under FSDP.
        Applies device, dtype, and precision configurations, including mixed precision.
        Sets up checkpoint manager and FLOPs counter.
        """
        # This is used to import external_lib into the huggingface systems
        self._build_model_optimizer()

        # Checkpointing covers model, optimizer, LR scheduler and the HF processor.
        self.checkpoint_manager = FSDPCheckpointManager(
            model=self.module,
            optimizer=self.optimizer,
            lr_scheduler=self.lr_scheduler,
            processing_class=self.model_config.get_processor(),
            checkpoint_config=self.checkpoint_config,
            trust_remote_code=self.model_config.trust_remote_code,
        )
        # Immediately honor the offload policy so idle accelerator memory stays low.
        self.to(
            device="cpu",
            model=self._is_offload_param,
            optimizer=self._is_offload_optimizer,
            grad=self._is_offload_param,
        )
        log_gpu_memory_usage("After offload model/optimizer/grad during init", logger=logger)
def _init_device_mesh(self):
world_size = torch.distributed.get_world_size()
from torch.distributed.device_mesh import init_device_mesh
fsdp_size = self.engine_config.fsdp_size
self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=fsdp_size)
self.ulysses_device_mesh = None
self.ulysses_parallel_group = None
self.ulysses_sequence_parallel_size = self.engine_config.ulysses_sequence_parallel_size
dp_size = self.get_data_parallel_size()
if self.ulysses_sequence_parallel_size > 1:
self.ulysses_device_mesh = init_device_mesh(
device_name, mesh_shape=(dp_size, self.ulysses_sequence_parallel_size), mesh_dim_names=["dp", "sp"]
)
self.ulysses_parallel_group = self.ulysses_device_mesh["sp"].get_group()
self.use_ulysses_sp = self.ulysses_sequence_parallel_size > 1
    def _build_module(self):
        """Instantiate the HF model and apply dtype, kernel and patching options.

        Order matters here: Liger kernels are applied to the instance first,
        then verl's monkey patches (remove-padding / Ulysses SP / fused
        kernels), then the final dtype cast and optional gradient checkpointing.

        Returns:
            The unsharded torch module, ready for FSDP wrapping.
        """
        from verl.utils.model import get_hf_auto_model_class
        from verl.utils.torch_dtypes import PrecisionType

        torch_dtype = self.engine_config.model_dtype
        if torch_dtype is None:
            # if it is training, we force torch_dtype to fp32
            torch_dtype = torch.float32 if not self.engine_config.forward_only else torch.bfloat16
        torch_dtype = PrecisionType.to_dtype(torch_dtype)

        # Meta-tensor init cannot be used with tied word embeddings.
        init_context = get_init_weight_context_manager(
            use_meta_tensor=not self.model_config.hf_config.tie_word_embeddings, mesh=self.device_mesh
        )
        with init_context(), warnings.catch_warnings():
            warnings.simplefilter("ignore")
            auto_class = get_hf_auto_model_class(hf_config=self.model_config.hf_config)
            module = auto_class.from_pretrained(
                pretrained_model_name_or_path=self.model_config.local_path,
                torch_dtype=torch_dtype,
                config=self.model_config.hf_config,
                trust_remote_code=self.model_config.trust_remote_code,
            )

            use_liger = self.model_config.use_liger
            # Apply Liger kernel to the model if use_liger is set to True
            if use_liger:
                from liger_kernel.transformers.monkey_patch import _apply_liger_kernel_to_instance

                _apply_liger_kernel_to_instance(model=module)

            fused_kernel_options = self.model_config.fused_kernel_options
            fused_kernels_backend = (
                fused_kernel_options.get("impl_backend", None) if fused_kernel_options is not None else None
            )

            use_fused_kernels = self.model_config.use_fused_kernels
            apply_monkey_patch(
                model=module,
                use_remove_padding=self.use_remove_padding,
                ulysses_sp_size=self.ulysses_sequence_parallel_size,
                use_fused_kernels=use_fused_kernels,
                fused_kernels_backend=fused_kernels_backend,
            )

            # some parameters may not in torch_dtype
            module.to(torch_dtype)

            if self.model_config.enable_gradient_checkpointing:
                module.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False})
        return module
def _build_lora_module(self, module):
module.enable_input_require_grads()
lora_adapter_path = getattr(self.model_config, "lora_adapter_path", None)
if lora_adapter_path is not None:
from peft import PeftModel
from verl.utils.fs import copy_to_local
print(f"Loading pre-trained LoRA adapter to from: {lora_adapter_path}")
# Copy adapter to local if needed
local_adapter_path = copy_to_local(lora_adapter_path, use_shm=self.model_config.use_shm)
module = PeftModel.from_pretrained(module, local_adapter_path, is_trainable=True)
peft_config = module.peft_config["default"]
# Ensure task_type is TaskType enum, not string
if isinstance(peft_config.task_type, str):
peft_config.task_type = TaskType.CAUSAL_LM
else:
# Convert config to regular Python types before creating PEFT model
lora_config = {
"task_type": TaskType.CAUSAL_LM,
"r": self.model_config.lora_rank,
"lora_alpha": self.model_config.lora_alpha,
"target_modules": convert_to_regular_types(self.model_config.target_modules),
"target_parameters": convert_to_regular_types(self.model_config.target_parameters),
"exclude_modules": convert_to_regular_types(self.model_config.exclude_modules),
"bias": "none",
}
module = get_peft_model(module, LoraConfig(**lora_config))
return module
    def _build_fsdp_module(self, module):
        """Shard ``module`` with FSDP v1 or FSDP2 (fully_shard) per engine config.

        Configures mixed precision, optional CPU offload, activation offloading
        and the state-dict type used for checkpointing.

        Args:
            module: the unsharded (possibly PEFT-wrapped) module.

        Returns:
            The sharded FSDP module.
        """
        # TODO(ziheng): need to improve
        from torch.distributed.fsdp import CPUOffload, MixedPrecision

        from verl.utils.torch_dtypes import PrecisionType

        # Mixed-precision policy; defaults to bf16 params with fp32 reduce/buffers.
        mixed_precision_config = self.engine_config.mixed_precision
        if mixed_precision_config is not None:
            param_dtype = PrecisionType.to_dtype(mixed_precision_config.get("param_dtype", "bf16"))
            reduce_dtype = PrecisionType.to_dtype(mixed_precision_config.get("reduce_dtype", "fp32"))
            buffer_dtype = PrecisionType.to_dtype(mixed_precision_config.get("buffer_dtype", "fp32"))
        else:
            param_dtype = torch.bfloat16
            reduce_dtype = torch.float32
            buffer_dtype = torch.float32

        mixed_precision = MixedPrecision(param_dtype=param_dtype, reduce_dtype=reduce_dtype, buffer_dtype=buffer_dtype)

        auto_wrap_policy = get_fsdp_wrap_policy(
            module=module,
            config=self.engine_config.wrap_policy,
            is_lora=self.model_config.lora_rank > 0,
        )

        fsdp_mesh = self.device_mesh
        sharding_strategy = get_sharding_strategy(fsdp_mesh)

        # Note: We force turn off CPUOffload because it causes incorrect results when using grad accumulation
        if self.engine_config.strategy == "fsdp":
            # cpu_offload:
            # - actor: None
            # - critic: None
            # - ref: CPUOffload(offload_params=True)
            # We force reference policy to use CPUOffload to save memory.
            # We force turn off CPUOffload for actor because it causes incorrect results when using grad accumulation
            cpu_offload = None
            if self.engine_config.forward_only:
                cpu_offload = CPUOffload(offload_params=True)
                # FSDP-managed offload supersedes our manual param/optimizer offloading.
                self._is_offload_param = False
                self._is_offload_optimizer = False

            module = FSDP(
                module,
                param_init_fn=init_fn,
                auto_wrap_policy=auto_wrap_policy,
                device_id=get_device_id(),
                sharding_strategy=sharding_strategy,
                mixed_precision=mixed_precision,
                sync_module_states=True,
                device_mesh=self.device_mesh,
                forward_prefetch=self.engine_config.forward_prefetch,
                use_orig_params=self.engine_config.use_orig_params,
                cpu_offload=cpu_offload,
            )
        elif self.engine_config.strategy == "fsdp2":
            # - actor: offload_policy
            # - critic: offload_policy
            # - ref: CPUOffloadPolicy(pin_memory=True)
            assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)"

            mp_policy = MixedPrecisionPolicy(
                param_dtype=param_dtype, reduce_dtype=reduce_dtype, cast_forward_inputs=True
            )
            offload_policy = None
            if self.engine_config.offload_policy or self.engine_config.forward_only:
                self._is_offload_param = False
                self._is_offload_optimizer = False
                offload_policy = CPUOffloadPolicy(pin_memory=True)

            fsdp_kwargs = {
                "mesh": fsdp_mesh,
                "mp_policy": mp_policy,
                "offload_policy": offload_policy,
                "reshard_after_forward": self.engine_config.reshard_after_forward,
            }
            # Capture the full state dict before sharding so it can be reloaded
            # into the FSDP2-wrapped module afterwards.
            full_state = module.state_dict()
            apply_fsdp2(module, fsdp_kwargs, self.engine_config)
            fsdp2_load_full_state_dict(module, full_state, fsdp_mesh, offload_policy)
        else:
            raise NotImplementedError(f"Unknown strategy {self.engine_config.strategy}")

        if self.model_config.enable_activation_offload:
            enable_gradient_checkpointing = self.model_config.enable_gradient_checkpointing
            enable_activation_offloading(module, self.engine_config.strategy, enable_gradient_checkpointing)

        # Single-rank FSDP1 can use a plain full state dict; multi-rank uses sharded.
        if torch.distributed.get_world_size() == 1 and fsdp_version(module) == 1:
            FSDP.set_state_dict_type(
                module,
                state_dict_type=StateDictType.FULL_STATE_DICT,
                state_dict_config=FullStateDictConfig(),
            )
        elif fsdp_version(module) == 1:
            FSDP.set_state_dict_type(
                module,
                state_dict_type=StateDictType.SHARDED_STATE_DICT,
                state_dict_config=ShardedStateDictConfig(),
            )
        return module
def _build_optimizer(self, module):
from verl.workers.config.optimizer import build_optimizer
optimizer = build_optimizer(module.parameters(), self.optimizer_config)
return optimizer
def _build_lr_scheduler(self, optimizer):
    """Create a warmup LR scheduler ("constant" or "cosine") from the optimizer config.

    Raises:
        NotImplementedError: for any other scheduler type.
    """
    from verl.utils.torch_functional import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup

    cfg = self.optimizer_config
    total_steps = cfg.total_training_steps
    num_warmup_steps = cfg.lr_warmup_steps
    # A non-positive warmup step count means "derive it from the warmup ratio".
    if num_warmup_steps <= 0:
        num_warmup_steps = int(cfg.lr_warmup_steps_ratio * total_steps)

    if self.rank == 0:
        print(f"Total steps: {total_steps}, num_warmup_steps: {num_warmup_steps}")

    scheduler_type = cfg.lr_scheduler_type
    if scheduler_type == "constant":
        return get_constant_schedule_with_warmup(optimizer=optimizer, num_warmup_steps=num_warmup_steps)
    if scheduler_type == "cosine":
        return get_cosine_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=total_steps,
            min_lr_ratio=cfg.min_lr_ratio,
            num_cycles=cfg.num_cycles,
        )
    raise NotImplementedError(f"LR scheduler type {scheduler_type} is not supported")
def _build_model_optimizer(self):
    """Build the module, wrap it with FSDP, and (unless forward-only) build optimizer + LR scheduler."""
    from verl.utils.model import print_model_size

    # Load base model with specified configuration and dtype
    module = self._build_module()

    if self._is_lora:
        # Attach LoRA adapters before sharding so FSDP wraps the adapted module.
        module = self._build_lora_module(module)

    # Synchronize all distributed processes before proceeding
    torch.distributed.barrier()
    if self.rank == 0:
        print_model_size(module)
        log_gpu_memory_usage("After init model from HF AutoModel", logger=logger)

    # Wrap model with FSDP for distributed training (sharding, mixed precision, etc.)
    log_gpu_memory_usage("Before FSDP", logger=None)
    module = self._build_fsdp_module(module)
    log_gpu_memory_usage("After FSDP", logger=None)

    if self.engine_config.forward_only:
        # Inference-only engines never update weights, so skip optimizer state.
        optimizer = None
        lr_scheduler = None
    else:
        optimizer = self._build_optimizer(module)
        lr_scheduler = self._build_lr_scheduler(optimizer)

    self.module = module
    self.optimizer = optimizer
    self.lr_scheduler = lr_scheduler
def train_mode(self, **kwargs):
    """Enter training mode via a context manager.

    The returned context handles FSDP-specific concerns, including parameter
    and optimizer offload on entry and exit.
    """
    return EngineTrainModeCtx(self, **kwargs)
def eval_mode(self, **kwargs):
    """Enter evaluation mode via a context manager.

    The returned context handles FSDP-specific concerns, including activation
    offload on entry and exit.
    """
    return EngineEvalModeCtx(self, **kwargs)
def get_data_parallel_rank(self):
    """Return this process's data-parallel rank (SP-mesh aware when ulysses is enabled)."""
    mesh = self.ulysses_device_mesh
    if mesh is None:
        # No SP mesh: every rank is its own DP replica.
        return torch.distributed.get_rank()
    return mesh["dp"].get_local_rank()
def get_data_parallel_size(self):
    """Number of data-parallel replicas: world size divided by the SP group size."""
    world_size = torch.distributed.get_world_size()
    return world_size // self.ulysses_sequence_parallel_size
def get_data_parallel_group(self):
    """Return the process group spanning the data-parallel dimension."""
    mesh = self.ulysses_device_mesh
    if mesh is None:
        # Without an SP mesh the whole world is one DP group.
        return torch.distributed.group.WORLD
    return mesh.get_group(mesh_dim="dp")
def forward_backward_batch(self, data: TensorDict, loss_function: Callable, forward_only=False) -> list[TensorDict]:
    """Split *data* into micro-batches, run forward (and backward unless *forward_only*), and return outputs.

    Gradient accumulation is implicit: ``loss.backward()`` runs per micro-batch
    with no optimizer step here; the caller is responsible for stepping.
    """
    # note that the global_batch_size should include data on all the dp
    tu.assign_non_tensor(data, sp_size=self.ulysses_sequence_parallel_size)
    # compute num_tokens in global batch for loss normalization
    batch_num_tokens = data["loss_mask"].sum().to(get_device_id())
    # Sum token counts across the DP group so every rank normalizes by the same global count.
    torch.distributed.all_reduce(
        batch_num_tokens, op=torch.distributed.ReduceOp.SUM, group=self.get_data_parallel_group()
    )
    tu.assign_non_tensor(data, batch_num_tokens=batch_num_tokens.item())
    tu.assign_non_tensor(data, dp_size=self.get_data_parallel_size())
    # same_micro_num_in_dp keeps all DP ranks in lock-step for collective ops.
    micro_batches, indices = prepare_micro_batches(
        data=data, dp_group=self.get_data_parallel_group(), same_micro_num_in_dp=True
    )
    output_lst = []
    # Disable autograd entirely for pure inference passes.
    ctx = torch.no_grad() if forward_only else nullcontext()
    for micro_batch in micro_batches:
        with ctx:
            loss, meta_info = self.forward_step(micro_batch, loss_function=loss_function, forward_only=forward_only)
            if not forward_only:
                loss.backward()
        output_lst.append(meta_info)
    # postprocess and return
    return postprocess_batch_func(output_lst=output_lst, indices=indices, data=data)
def forward_step(self, micro_batch: TensorDict, loss_function, forward_only):
    """Run one micro-batch forward pass; abstract — subclasses must override.

    Raises:
        NotImplementedError: always, on the base class.
    """
    raise NotImplementedError("forward_step must be implemented in subclass")
def optimizer_zero_grad(self):
    """Clear accumulated gradients on the wrapped optimizer."""
    self.optimizer.zero_grad()
def optimizer_step(self):
    """
    Clip gradients, skip update if non-finite, and step optimizer.
    Returns:
        grad_norm (float): Norm of gradients before clipping.
    """
    assert self.optimizer_config.clip_grad is not None
    # Each wrapper needs its own clipping entry point: FSDP1 exposes a method,
    # FSDP2 (FSDPModule) needs a dedicated helper, plain modules use torch's utility.
    if isinstance(self.module, FSDP):
        grad_norm = self.module.clip_grad_norm_(self.optimizer_config.clip_grad)
    elif isinstance(self.module, FSDPModule):
        grad_norm = fsdp2_clip_grad_norm_(self.module.parameters(), max_norm=self.optimizer_config.clip_grad)
    else:
        grad_norm = torch.nn.utils.clip_grad_norm_(
            self.module.parameters(), max_norm=self.optimizer_config.clip_grad
        )
    # FSDP2 can return a DTensor; materialize the full value before the finiteness check.
    if isinstance(grad_norm, DTensor):
        grad_norm = grad_norm.full_tensor()
    # if grad_norm is not finite, skip the update
    if not torch.isfinite(grad_norm):
        print(f"WARN: grad_norm is not finite: {grad_norm}")
        self.optimizer.zero_grad()
    else:
        self.optimizer.step()
    return grad_norm.item()
def lr_scheduler_step(self):
    """Advance the LR scheduler one step and return the updated learning rate."""
    self.lr_scheduler.step()
    # Report only the first param group's LR.
    return self.lr_scheduler.get_last_lr()[0]
def to(self, device: str, model: bool = True, optimizer: bool = True, grad: bool = True):
    """
    Move FSDP model and/or optimizer to CPU or GPU with offload support.
    Note that this function executes irrespective of offload config. It serves as manual control

    Args:
        device: Target device — the accelerator name (e.g. "cuda") or "cpu".
        model: Whether to move model parameters.
        optimizer: Whether to move optimizer state.
        grad: Whether to move gradients (handled by the base class).
    """
    super().to(device=device, model=model, optimizer=optimizer, grad=grad)
    if self.engine_config.forward_only:
        # force cpu_offload
        return
    device_name = get_device_name()
    assert device in (device_name, "cpu")
    if device == device_name:
        if model:
            load_fsdp_model_to_gpu(self.module)
        if optimizer and self.optimizer is not None:
            load_fsdp_optimizer(self.optimizer, device)
        # Reclaim host-side buffers released by the move.
        gc.collect()
    elif device == "cpu":
        if model:
            offload_fsdp_model_to_cpu(self.module)
        if optimizer and self.optimizer is not None:
            offload_fsdp_optimizer(self.optimizer)
    else:
        raise ValueError(f"Invalid device type: {device}")
def save_checkpoint(
    self,
    local_path: str,
    hdfs_path: Optional[str] = None,
    global_step: int = 0,
    max_ckpt_to_keep: Optional[int] = None,
    **kwargs,
) -> None:
    """
    Save FSDP checkpoint, handling parameter offload as needed.

    Args:
        local_path: Local directory to write the checkpoint to.
        hdfs_path: Optional remote (HDFS) destination.
        global_step: Training step recorded with the checkpoint.
        max_ckpt_to_keep: If set, older checkpoints beyond this count are pruned.
    """
    # Parameters must be on the accelerator for the checkpoint manager to gather them;
    # load them if they are offloaded (by config, or observed to be on CPU).
    origin_module_device = next(self.module.parameters()).device.type
    if self._is_offload_param or origin_module_device == "cpu":
        load_fsdp_model_to_gpu(self.module)
    self.checkpoint_manager.save_checkpoint(
        local_path=local_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep
    )
    torch.distributed.barrier()
    # Restore the configured offload state.
    if self._is_offload_param:
        offload_fsdp_model_to_cpu(self.module)
def load_checkpoint(
    self, local_path: str, hdfs_path: Optional[str] = None, del_local_after_load: bool = True, **kwargs
) -> None:
    """
    Load FSDP checkpoint, restoring parameters and optimizer state.

    Args:
        local_path: Local directory containing the checkpoint.
        hdfs_path: Optional remote (HDFS) path to fetch the checkpoint from.
        del_local_after_load: Whether to delete the local copy after loading.
    """
    # Fix: annotation corrected from `int` to `bool` (value is a flag), and the
    # redundant function-local `import torch` removed — torch is already in
    # module scope (used by every sibling method without a local import).
    if self._is_offload_param:
        load_fsdp_model_to_gpu(self.module)
    self.checkpoint_manager.load_checkpoint(
        local_path=local_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load
    )
    torch.distributed.barrier()
    # Restore the configured offload state now that weights/optimizer are loaded.
    if self._is_offload_param:
        offload_fsdp_model_to_cpu(self.module)
    if self._is_offload_optimizer:
        offload_fsdp_optimizer(self.optimizer)
def get_per_tensor_param(self, layered_summon=False, base_sync_done=False, **kwargs):
    """Collect current weights as (name, tensor) pairs for rollout-engine sync.

    Handles LoRA models (optionally merging adapters) and converts DTensors to
    full bf16 tensors lazily via a generator.

    Args:
        layered_summon: Gather LoRA params layer-by-layer to bound peak memory.
        base_sync_done: True when base weights were already synced, so only
            adapter weights need collecting.

    Returns:
        tuple: (iterable of (name, tensor) pairs, peft config as a dict or None).
    """
    log_gpu_memory_usage("Before load_fsdp_model_to_gpu", logger=logger)
    load_fsdp_model_to_gpu(self.module)
    log_gpu_memory_usage("After load_fsdp_model_to_gpu", logger=logger)
    peft_config = None
    merge_lora = self.model_config.lora.get("merge", False)
    # FSDP1 nests the user module under `_fsdp_wrapped_module`; fall back to self.module otherwise.
    peft_model = getattr(self.module, "_fsdp_wrapped_module", self.module)
    if hasattr(peft_model, "peft_config"):  # LoRA
        if not merge_lora:
            peft_config = peft_model.peft_config.get("default", None)
            params = collect_lora_params(
                module=self.module,
                layered_summon=layered_summon,
                base_sync_done=base_sync_done,
            )
            if not base_sync_done:
                params = {replace_lora_wrapper(k, peft_config): v for k, v in params.items()}
        else:  # merge lora
            with merged_lora_context(self.module, backup_adapters=True):
                params = self.module.state_dict()
            params = normalize_peft_param_name(params)
    else:
        params = self.module.state_dict()
        params = convert_weight_keys(params, getattr(self.module, "_fsdp_wrapped_module", self.module))
    log_gpu_memory_usage("Before offload_fsdp_model_to_cpu", logger=logger)
    if self._is_offload_param:
        offload_fsdp_model_to_cpu(self.module)
    log_gpu_memory_usage("After offload_fsdp_model_to_cpu", logger=logger)
    if peft_config is not None and base_sync_done:
        # Adapter-only sync: weights are small, ship them as-is.
        per_tensor_param = params.items()
    else:
        device = get_device_id()  # used when fsdp2 set cpu_offload_policy
        # TODO: cast fp32 to bf16 to reduce weight sync overhead, need more fine-grained control, e.g MoE gate
        per_tensor_param = (
            (
                name,
                param.to(device, non_blocking=True).full_tensor().to(torch.bfloat16, non_blocking=True)
                if isinstance(param, DTensor)
                else param,
            )
            for name, param in params.items()
        )
    # Convert peft_config to dict for vLLM compatibility (PEFTHelper.from_dict expects dict)
    peft_config_dict = peft_config.to_dict() if peft_config is not None else None
    return per_tensor_param, peft_config_dict
def disable_adapter(self) -> ContextManager:
    """Delegate to the wrapped PEFT module's adapter-disabling context manager."""
    return self.module.disable_adapter()
class EngineEvalModeCtx(BaseEngineCtx):
    """Context manager that switches an FSDPEngine into evaluation mode.

    On entry it installs the engine's ulysses SP group and calls ``module.eval()``;
    on exit it restores the previous SP group and reshards the root FSDP module.
    """

    def __init__(self, engine: FSDPEngine, **kwargs):
        super().__init__(engine=engine, mode="eval", **kwargs)

    def __enter__(self):
        assert isinstance(self.engine, FSDPEngine)
        super().__enter__()
        # Remember the ambient SP group so it can be restored on exit.
        self.prev_sp_group = get_ulysses_sequence_parallel_group()
        set_ulysses_sequence_parallel_group(self.engine.ulysses_parallel_group)
        self.engine.module.eval()

    def __exit__(self, exc_type, exc_value, traceback):
        assert isinstance(self.engine, FSDPEngine)
        set_ulysses_sequence_parallel_group(self.prev_sp_group)
        # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes
        # unshard the root FSDP module
        if self.engine.engine_config.fsdp_size > 1:
            if fsdp_version(self.engine.module) == 1:
                self.engine.module._handle.reshard(True)
            elif fsdp_version(self.engine.module) == 2:
                self.engine.module.reshard()
        super().__exit__(exc_type, exc_value, traceback)
class EngineTrainModeCtx(BaseEngineCtx):
    """Context manager that switches an FSDPEngine into training mode.

    On entry it installs the engine's ulysses SP group and calls ``module.train()``;
    on exit it restores the previous SP group and zeroes optimizer gradients.
    """

    def __init__(self, engine: FSDPEngine, **kwargs):
        super().__init__(engine=engine, mode="train", **kwargs)

    def __enter__(self):
        assert isinstance(self.engine, FSDPEngine)
        super().__enter__()
        # Remember the ambient SP group so it can be restored on exit.
        self.prev_sp_group = get_ulysses_sequence_parallel_group()
        set_ulysses_sequence_parallel_group(self.engine.ulysses_parallel_group)
        self.engine.module.train()

    def __exit__(self, exc_type, exc_value, traceback):
        assert isinstance(self.engine, FSDPEngine)
        set_ulysses_sequence_parallel_group(self.prev_sp_group)
        # Leave no stale gradients behind after a training section.
        self.engine.optimizer_zero_grad()
        super().__exit__(exc_type, exc_value, traceback)
@EngineRegistry.register(model_type="language_model", backend=["fsdp", "fsdp2"], device=["cuda", "npu"])
class FSDPEngineWithLMHead(FSDPEngine):
    """FSDP engine specialization for models with a language-model head.

    Prepares inputs (padding removal, ulysses sequence parallelism, per-sample
    temperature) and post-processes outputs into per-token log-probs and,
    optionally, entropy.
    """

    def prepare_model_inputs(self, micro_batch: TensorDict):
        """Build kwargs for the HF forward pass plus args needed to post-process outputs.

        Returns:
            tuple: (model_inputs dict passed to the module,
                    output_args dict consumed by ``prepare_model_outputs``).
        """
        use_remove_padding = tu.get_non_tensor_data(data=micro_batch, key="use_remove_padding", default=True)
        pad_mode = tu.get_non_tensor_data(data=micro_batch, key="pad_mode", default=DatasetPadMode.NO_PADDING)
        use_fused_kernels = tu.get_non_tensor_data(data=micro_batch, key="use_fused_kernels", default=False)
        temperature = micro_batch["temperature"]
        # Keep the raw (possibly scalar) temperature for the fused-kernel path.
        temperature_item = temperature
        if use_fused_kernels:
            assert not isinstance(temperature, torch.Tensor), (
                "use_fused_kernels does not support per sample temperature yet"
            )
            assert pad_mode == DatasetPadMode.NO_PADDING, f"pad_mode {pad_mode} not supported"
        multi_modal_inputs = extract_multi_modal_inputs(micro_batch.get("multi_modal_inputs", []))
        input_ids = micro_batch["input_ids"]
        position_ids = micro_batch["position_ids"]
        # Broadcast a scalar temperature to one float32 value per sample.
        if not isinstance(temperature, torch.Tensor):
            temperature = torch.tensor([temperature] * input_ids.shape[0], device=input_ids.device)
        temperature = temperature.to(torch.float32)
        assert temperature.shape[0] == input_ids.shape[0]
        # args used to get outputs
        output_args = {}
        if use_remove_padding:
            # support per sample temperature
            # temperature (bsz,)
            # input_ids (bsz, j1)
            temperature_rmpad = verl_F.expand_as_nested(temperature, input_ids).values()  # (total_nnz,)
            temperature_rmpad = temperature_rmpad.unsqueeze(0)  # (1, total_nnz)
            if pad_mode == DatasetPadMode.NO_PADDING:
                input_ids_rmpad = input_ids.values().unsqueeze(0)  # (1, total_nnz)
                if position_ids.dim() == 3:
                    # 3-D position ids (e.g. multimodal rope) keep their leading dim.
                    position_ids_rmpad = position_ids.values().unsqueeze(1)  # (4, 1, total_nnz)
                else:
                    position_ids_rmpad = position_ids.values().unsqueeze(0)  # (1, total_nnz)
            else:
                raise NotImplementedError(f"pad_mode {pad_mode} not implemented")
            # for compute the log_prob
            input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=1)  # (1, total_nnz)
            # pad and slice the inputs if sp > 1
            if self.use_ulysses_sp:
                is_vlm_model = hasattr(getattr(self.module, "module", self.module).config, "vision_config")
                if is_vlm_model:
                    # vlm model's inputs will be sliced after embedding
                    input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad(
                        input_ids_rmpad,
                        position_ids_rmpad=position_ids_rmpad,
                        sp_size=self.ulysses_sequence_parallel_size,
                    )
                else:
                    input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs(
                        input_ids_rmpad,
                        position_ids_rmpad=position_ids_rmpad,
                        sp_size=self.ulysses_sequence_parallel_size,
                        skip_position_ids_rmpad=True if self.__class__.__name__ == "VeOmniEngineWithLMHead" else False,
                    )
                input_ids_rmpad_rolled, _, _ = ulysses_pad_and_slice_inputs(
                    input_ids_rmpad_rolled,
                    position_ids_rmpad=None,
                    sp_size=self.ulysses_sequence_parallel_size,
                )
                # pad_value=1 so padded positions divide logits by 1 (no-op).
                temperature_rmpad, _, _ = ulysses_pad_and_slice_inputs(
                    temperature_rmpad, position_ids_rmpad=None, sp_size=self.ulysses_sequence_parallel_size, pad_value=1
                )
                output_args["pad_size"] = pad_size
            input_ids_rmpad_rolled = input_ids_rmpad_rolled.squeeze(0)  # ((total_nnz / sp) + pad)
            temperature_rmpad = temperature_rmpad.squeeze(0)
            output_args["input_ids_rmpad_rolled"] = input_ids_rmpad_rolled
            output_args["temperature_rmpad"] = temperature_rmpad
            # only pass input_ids and position_ids to enable flash_attn_varlen
            model_inputs = {
                "input_ids": input_ids_rmpad,
                "attention_mask": None,
                "position_ids": position_ids_rmpad,
            }
        else:
            if pad_mode == DatasetPadMode.NO_PADDING:
                input_ids = micro_batch["input_ids"]
                position_ids = micro_batch["position_ids"]
                loss_mask = micro_batch["loss_mask"]
                pad_token_id = tu.get_non_tensor_data(data=micro_batch, key="pad_token_id", default=0)
                batch_size = micro_batch.batch_size[0]
                # Per-sample effective lengths from the jagged layout.
                seq_len_effective = input_ids.offsets().diff()
                max_seq_len = max(seq_len_effective)
                input_ids_rmpad_rolled = torch.roll(input_ids.values(), shifts=-1, dims=0)
                output_args["input_ids_rmpad_rolled"] = input_ids_rmpad_rolled
                # we store the per sample temperature
                output_args["temperature"] = temperature
                # Densify jagged tensors to (batch_size, max_seq_len) for the padded path.
                input_ids = torch.nested.to_padded_tensor(
                    input_ids, padding=pad_token_id, output_size=(batch_size, max_seq_len)
                )
                if position_ids.dim() == 3:
                    position_ids = torch.nested.to_padded_tensor(
                        position_ids, padding=0, output_size=(batch_size, 4, max_seq_len)
                    ).transpose(0, 1)  # (4, batch_size, max_seq_len)
                else:
                    position_ids = torch.nested.to_padded_tensor(
                        position_ids, padding=0, output_size=(batch_size, max_seq_len)
                    )
                # Attention mask: ones over real tokens (same jagged shape as loss_mask), zero-padded.
                attention_mask_list = [torch.ones_like(t, dtype=torch.int32) for t in loss_mask]
                attention_mask = torch.nested.as_nested_tensor(attention_mask_list, layout=torch.jagged)
                attention_mask = torch.nested.to_padded_tensor(
                    attention_mask, padding=0, output_size=(batch_size, max_seq_len)
                )
                model_inputs = {
                    "input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "position_ids": position_ids,
                }
            else:
                raise NotImplementedError(f"pad_mode {pad_mode} not implemented")
        extra_args = {}
        if use_fused_kernels:
            extra_args["temperature"] = temperature_item
            extra_args["return_dict"] = True
        model_inputs.update(multi_modal_inputs)
        model_inputs.update(extra_args)
        return model_inputs, output_args

    def prepare_model_outputs(self, output, output_args, micro_batch: TensorDict):
        """Convert raw model output into per-token log-probs (and optionally entropy).

        Returns:
            dict: {"log_probs": jagged tensor, optionally "entropy": jagged tensor}.
        """
        use_remove_padding = tu.get_non_tensor_data(data=micro_batch, key="use_remove_padding", default=True)
        pad_mode = tu.get_non_tensor_data(data=micro_batch, key="pad_mode", default=DatasetPadMode.NO_PADDING)
        use_fused_kernels = tu.get_non_tensor_data(data=micro_batch, key="use_fused_kernels", default=False)
        calculate_entropy = tu.get_non_tensor_data(data=micro_batch, key="calculate_entropy", default=False)
        model_output = {}
        input_ids = micro_batch["input_ids"]
        if use_remove_padding:
            input_ids_rmpad_rolled = output_args["input_ids_rmpad_rolled"]
            temperature_rmpad = output_args["temperature_rmpad"]
            if use_fused_kernels:
                # temperature is singleton
                log_probs = output.log_probs.squeeze(0)  # (total_nnz,)
                entropy_rmpad = output.entropy.squeeze(0)  # (total_nnz,)
            else:
                logits_rmpad = output.logits.squeeze(0)  # (total_nnz, vocab_size)
                # Per-token temperature scaling; clamp guards against divide-by-zero.
                logits_rmpad.div_(temperature_rmpad.clamp(min=1e-8).unsqueeze(-1).to(logits_rmpad.dtype))
                # if use_sp: ((total_nnz / sp) + pad) ; if not use_sp: (batch, seqlen)
                # In-place backward saves memory but would clobber logits needed for entropy.
                inplace_backward = True
                if calculate_entropy:
                    inplace_backward = False
                log_probs = logprobs_from_logits(
                    logits=logits_rmpad,
                    labels=input_ids_rmpad_rolled,
                    inplace_backward=inplace_backward,
                )
                # compute entropy
                if calculate_entropy:
                    if not self.engine_config.entropy_checkpointing:
                        entropy_rmpad = self.compute_entropy_from_logits(logits_rmpad)  # ((total_nnz / sp) + pad)
                    else:
                        # Recompute entropy in backward to trade compute for memory.
                        entropy_rmpad = torch.utils.checkpoint.checkpoint(
                            self.compute_entropy_from_logits, logits_rmpad
                        )
            # gather log_prob if sp > 1
            if self.use_ulysses_sp:
                pad_size = output_args["pad_size"]
                # gather and unpad for the ulysses sp
                log_probs = gather_outputs_and_unpad(
                    log_probs,
                    gather_dim=0,
                    unpad_dim=0,
                    padding_size=pad_size,
                )
                if calculate_entropy:
                    entropy_rmpad = gather_outputs_and_unpad(
                        entropy_rmpad,
                        gather_dim=0,
                        unpad_dim=0,
                        padding_size=pad_size,
                    )
            if pad_mode == DatasetPadMode.NO_PADDING:
                cu_seqlens = input_ids.offsets()
                # (bsz, j1), for each sample, is the length of each sample: [real_prompt length + real_response length]
                log_probs = torch.nested.nested_tensor_from_jagged(log_probs, cu_seqlens)
                if calculate_entropy:
                    entropy = torch.nested.nested_tensor_from_jagged(entropy_rmpad, cu_seqlens)
            else:
                raise NotImplementedError(f"pad_mode {pad_mode} not implemented")
        else:  # not using rmpad and no ulysses sp
            response_length = tu.get_non_tensor_data(data=micro_batch, key="max_response_length", default=1024)
            if use_fused_kernels:
                # Slice the response segment (shifted by one for next-token prediction).
                log_probs = output.log_probs[:, -response_length - 1 : -1]
                entropy = output.entropy[:, -response_length - 1 : -1]  # (bsz, response_length)
            else:
                logits = output.logits  # (bsz, response_length, vocab_size)
                temperature = output_args["temperature"]  # (bsz,)
                temperature = temperature.unsqueeze(-1).unsqueeze(-1)
                logits.div_(temperature.clamp(min=1e-8).to(logits.dtype))
                if calculate_entropy:
                    if not self.engine_config.entropy_checkpointing:
                        entropy = verl_F.entropy_from_logits(logits)
                    else:
                        entropy = torch.utils.checkpoint.checkpoint(verl_F.entropy_from_logits, logits)
                if pad_mode == DatasetPadMode.NO_PADDING:
                    cu_seqlens = input_ids.offsets()
                    seq_lengths = cu_seqlens.diff()
                    starts = torch.zeros_like(seq_lengths, dtype=torch.int64)
                    # Trim padded logits back down to each sample's real length.
                    logits = torch.nested.narrow(logits, 1, starts, seq_lengths, layout=torch.jagged)
                    logits_rmpad = torch.cat([t for t in logits.unbind()])
                    input_ids_rmpad_rolled = output_args["input_ids_rmpad_rolled"]
                    log_probs = logprobs_from_logits(logits=logits_rmpad, labels=input_ids_rmpad_rolled)
                    # (bsz, j1), for each sample, length of each sample: [real_prompt_length + real_response_length]
                    log_probs = torch.nested.nested_tensor_from_jagged(log_probs, cu_seqlens)
                    if calculate_entropy:
                        entropy = torch.nested.narrow(entropy, 1, starts, seq_lengths, layout=torch.jagged)
                        entropy_rmpad = torch.cat([t for t in entropy.unbind()])
                        entropy = torch.nested.nested_tensor_from_jagged(entropy_rmpad, cu_seqlens)
                else:
                    raise NotImplementedError(f"pad_mode {pad_mode} not implemented")
        model_output["log_probs"] = log_probs
        if calculate_entropy:
            model_output["entropy"] = entropy
        return model_output

    def forward_step(self, micro_batch: TensorDict, loss_function, forward_only):
        """Run one micro-batch forward pass under bf16 autocast and compute the loss.

        Returns:
            tuple: (loss tensor for backward, dict with model_output / loss / metrics).
        """
        device_name = get_device_name()
        # actually, we should avoid assigning like this...
        micro_batch = micro_batch.to(get_device_id())
        model_inputs, output_args = self.prepare_model_inputs(micro_batch=micro_batch)
        with torch.autocast(device_type=device_name, dtype=torch.bfloat16):
            raw_output = self.module(
                **model_inputs,
                use_cache=False,
            )  # prevent model thinks we are generating
            model_output = self.prepare_model_outputs(
                output=raw_output, output_args=output_args, micro_batch=micro_batch
            )
            if loss_function is not None:
                loss, metrics = loss_function(
                    model_output=model_output, data=micro_batch, dp_group=self.get_data_parallel_group()
                )
            else:
                assert forward_only, "forward_only must be True when loss_function is None"
                # Dummy loss: never backpropagated in forward-only mode.
                loss = torch.tensor(1.0, device=device_name)
                metrics = {}
            output = {
                "model_output": model_output,
                "loss": loss.detach().item(),
                "metrics": metrics,
            }
            return loss, output
@EngineRegistry.register(model_type="value_model", backend=["fsdp", "fsdp2"], device=["cuda", "npu"])
class FSDPEngineWithValueHead(FSDPEngineWithLMHead):
    """
    The only difference between critic and actor is how the raw model output is processed
    """

    def prepare_model_outputs(self, output, output_args, micro_batch: TensorDict):
        """Extract per-token value predictions from the raw critic output.

        Returns:
            dict: {"values": jagged tensor of per-token values, shape (bsz, j1)}.
        """
        use_remove_padding = tu.get_non_tensor_data(data=micro_batch, key="use_remove_padding", default=True)
        pad_mode = tu.get_non_tensor_data(data=micro_batch, key="pad_mode", default=DatasetPadMode.NO_PADDING)
        input_ids = micro_batch["input_ids"]
        if use_remove_padding:
            if hasattr(self.module, "v_head"):
                # For trl.AutoModelForCausalLMWithValueHead
                values_rmpad = output[2].squeeze(0).unsqueeze(-1)
            else:
                values_rmpad = output.logits
                values_rmpad = values_rmpad.squeeze(0)  # (total_nnz, 1)
            # critic model arch is like Qwen3ForTokenClassfication and num_labels=1
            # so we squeeze the last dimension here to get the value for each token
            values_rmpad = values_rmpad.squeeze(-1)
            # gather output if sp > 1
            if self.use_ulysses_sp:
                pad_size = output_args["pad_size"]
                values_rmpad = gather_outputs_and_unpad(values_rmpad, gather_dim=0, unpad_dim=0, padding_size=pad_size)
            if pad_mode == DatasetPadMode.NO_PADDING:
                cu_seqlens = input_ids.offsets()
                # (bsz, j1), for each sample, is the length of each sample: [real_prompt length + real_response length]
                values = torch.nested.nested_tensor_from_jagged(values_rmpad, cu_seqlens)
            else:
                raise NotImplementedError(f"pad_mode {pad_mode} not implemented")
        else:
            if hasattr(self.module, "v_head"):
                # For trl.AutoModelForCausalLMWithValueHead
                values = output[2]
            else:
                values = output.logits
            if pad_mode == DatasetPadMode.NO_PADDING:
                cu_seqlens = input_ids.offsets()
                seq_lengths = cu_seqlens.diff()
                starts = torch.zeros_like(seq_lengths, dtype=torch.int64)
                # Trim padded values back down to each sample's real length.
                values = torch.nested.narrow(values, 1, starts, seq_lengths, layout=torch.jagged)
                values_rmpad = torch.cat([t for t in values.unbind()])
                # (bsz, j1), for each sample, length of each sample: [real_prompt_length + real_response_length]
                values = torch.nested.nested_tensor_from_jagged(values_rmpad, cu_seqlens)
            else:
                raise NotImplementedError(f"pad_mode {pad_mode} not implemented")
        return {"values": values}
| verl__workers__engine__fsdp__transformer_impl.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import torch
from torch.distributed.device_mesh import init_device_mesh
from verl.utils.device import get_device_name, is_npu_available
# Module-level logger; verbosity is controlled via the VERL_LOGGING_LEVEL env var.
# NOTE(review): getLogger(__file__) keys the logger by file path; __name__ is the
# conventional choice — confirm whether the path-based name is intentional.
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def apply_npu_fsdp_patches():
    """Apply NPU patches for FSDP backend if NPU is available."""
    if not is_npu_available:
        return
    try:
        import verl.models.transformers.npu_patch  # noqa

        dist = torch.distributed
        # Only rank 0 logs, and only once the process group exists.
        if dist.is_initialized() and dist.get_rank() == 0:
            logger.info("Applied NPU patches for FSDP backend")
    except Exception as e:
        logger.warning(f"Failed to apply NPU patches: {e}")
def create_device_mesh(world_size, fsdp_size):
    """Build the device mesh used for FSDP distributed training.

    A non-positive ``fsdp_size`` or one covering the whole world yields a flat
    1-D "fsdp" mesh; otherwise a 2-D ("ddp", "fsdp") hybrid mesh is created.

    Args:
        world_size (int): Total number of processes in the distributed setup.
        fsdp_size (int): Size of each FSDP shard group.

    Returns:
        torch.distributed.device_mesh.DeviceMesh: The initialized device mesh.
    """
    device = get_device_name()
    flat_mesh = fsdp_size < 0 or fsdp_size >= world_size
    if flat_mesh:
        return init_device_mesh(device, mesh_shape=(world_size,), mesh_dim_names=["fsdp"])
    return init_device_mesh(
        device, mesh_shape=(world_size // fsdp_size, fsdp_size), mesh_dim_names=["ddp", "fsdp"]
    )
def get_sharding_strategy(device_mesh):
    """Map a device mesh's dimensionality onto an FSDP sharding strategy.

    1-D meshes shard fully (FULL_SHARD); 2-D meshes shard within a node and
    replicate across nodes (HYBRID_SHARD).

    Args:
        device_mesh (torch.distributed.device_mesh.DeviceMesh): Mesh used for training.

    Returns:
        torch.distributed.fsdp.ShardingStrategy: Strategy to hand to FSDP.

    Raises:
        NotImplementedError: If the mesh is neither 1-D nor 2-D.
    """
    from torch.distributed.fsdp import ShardingStrategy

    ndim = device_mesh.ndim
    if ndim == 1:
        return ShardingStrategy.FULL_SHARD
    if ndim == 2:
        return ShardingStrategy.HYBRID_SHARD
    raise NotImplementedError(f"Get device mesh ndim={ndim}, but only support 1 or 2")
| verl__workers__engine__fsdp__utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from functools import partial
from typing import Any, Callable, ContextManager, Iterator, Optional
import torch
import torch.distributed
from megatron.core import parallel_state as mpu
from megatron.core.pipeline_parallel import get_forward_backward_func
from omegaconf import OmegaConf
from tensordict import TensorDict
import verl.utils.torch_functional as verl_F
from verl.models.mcore import get_mcore_forward_fused_no_padding_fn, get_mcore_weight_converter
from verl.trainer.config import CheckpointConfig
from verl.utils import tensordict_utils as tu
from verl.utils.checkpoint.megatron_checkpoint_manager import MegatronCheckpointManager
from verl.utils.dataset.dataset_utils import DatasetPadMode
from verl.utils.debug import log_gpu_memory_usage
from verl.utils.device import get_device_id, get_device_name
from verl.utils.megatron.pipeline_parallel import make_batch_generator
from verl.utils.megatron.router_replay_patch import RouterReplay, RouterReplayAction, apply_router_replay_patch
from verl.utils.megatron.router_replay_utils import (
RouterReplayHelper,
set_router_replay_data,
)
from verl.utils.megatron.tensor_parallel import vocab_parallel_entropy, vocab_parallel_log_probs_from_logits
from verl.utils.megatron_peft_utils import add_base_layer_suffix, build_peft_config_for_vllm
from verl.utils.megatron_utils import (
check_mtp_config,
get_megatron_module_device,
get_megatron_mtp_loss,
load_megatron_model_to_gpu,
load_megatron_optimizer,
offload_megatron_model_to_cpu,
offload_megatron_optimizer,
patch_engine_mtp,
register_megatron_training_hooks,
unwrap_model,
)
from verl.utils.model import extract_multi_modal_inputs, load_mcore_dist_weights
from verl.workers.config import HFModelConfig, McoreEngineConfig, McoreOptimizerConfig
from ..base import BaseEngine, BaseEngineCtx, EngineRegistry
from ..utils import postprocess_batch_func, prepare_micro_batches
from .utils import set_random_seed
# Module-level logger; verbosity is controlled via the VERL_LOGGING_LEVEL env var.
# NOTE(review): getLogger(__file__) keys the logger by file path; __name__ is the
# conventional choice — confirm whether the path-based name is intentional.
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class MegatronEngine(BaseEngine):
def __init__(
    self,
    model_config: HFModelConfig,
    engine_config: McoreEngineConfig,
    optimizer_config: McoreOptimizerConfig,
    checkpoint_config: CheckpointConfig,
):
    """Megatron-backed training engine.

    Stores the configs, initializes Megatron model-parallel state, seeds RNGs,
    and records offload flags. Requires mbridge (``use_mbridge=True``).

    Args:
        model_config: HF model configuration wrapper.
        engine_config: Megatron engine/parallelism configuration.
        optimizer_config: Optimizer configuration.
        checkpoint_config: Checkpointing configuration.
    """
    super().__init__()
    self.model_config = model_config
    self.engine_config = engine_config
    self.optimizer_config = optimizer_config
    self.checkpoint_config = checkpoint_config
    assert self.engine_config.use_mbridge, "use_mbridge must be True"
    self._init_device_mesh()
    set_random_seed(seed=self.engine_config.seed)
    # Offload flags: whether params/grads/optimizer state live on CPU between steps.
    self._is_offload_param = self.engine_config.param_offload
    self._is_offload_grad = self.engine_config.grad_offload
    self._is_offload_optimizer = self.engine_config.optimizer_offload
    self.mode = None
    # Substrings used to locate qkv / gate-proj layers during weight conversion.
    self.layer_name_mapping = {
        "qkv_layer_name": "self_attention.linear_qkv.",
        "gate_proj_layer_name": "linear_fc1.",
    }
    self.weight_converter = None
    # Router replay configuration for MoE models
    self.enable_routing_replay = self.engine_config.router_replay.mode != "disabled"
    logger.info(f"enable_routing_replay in MegatronEngine: {self.enable_routing_replay}")
    if self.enable_routing_replay:
        apply_router_replay_patch()
def _init_device_mesh(self):
    """Initialize Megatron model-parallel groups (idempotent: no-op if already set up)."""
    # TODO: set different parallelism for actor, critic, ref
    if mpu.is_initialized():
        return
    mpu.initialize_model_parallel(
        tensor_model_parallel_size=self.engine_config.tensor_model_parallel_size,
        pipeline_model_parallel_size=self.engine_config.pipeline_model_parallel_size,
        virtual_pipeline_model_parallel_size=self.engine_config.virtual_pipeline_model_parallel_size,
        use_sharp=False,
        context_parallel_size=self.engine_config.context_parallel_size,
        expert_model_parallel_size=self.engine_config.expert_model_parallel_size,
        expert_tensor_parallel_size=self.engine_config.expert_tensor_parallel_size,
        nccl_communicator_config_path=None,
    )
def _build_tf_config(self):
    """Build (or defer) the Megatron TransformerConfig via an HF<->Megatron bridge.

    Two paths:
      - vanilla mbridge: converts the HF config directly; a concrete ``tf_config``
        is available immediately.
      - Megatron-Bridge: builds a model *provider*; ``tf_config`` stays ``None``
        here and is filled in after model creation in ``_build_megatron_module``.

    Side effects: sets ``self.param_dtype``, ``self.dtype``, ``self.provider``,
    ``self.vanilla_bridge``, ``self.bridge``, ``self.tf_config``,
    ``self.weight_converter`` (no-bridge fallback only) and ``self.peft_cls``.
    """
    from verl.utils.megatron_utils import mapping_string_to_attn_backend
    from verl.utils.torch_dtypes import PrecisionType

    check_mtp_config(self.model_config, self.engine_config)
    self.param_dtype = PrecisionType.to_dtype(self.engine_config.dtype)
    # NOTE(review): to_dtype() applied to an already-resolved torch dtype;
    # presumably idempotent so self.dtype == self.param_dtype — confirm.
    self.dtype = PrecisionType.to_dtype(self.param_dtype)
    # Normalize string attention-backend names in user overrides to enum values.
    override_transformer_config = mapping_string_to_attn_backend({**self.engine_config.override_transformer_config})
    if self.enable_routing_replay:
        override_transformer_config["enable_routing_replay"] = True
    self.provider = None
    self.vanilla_bridge = self.engine_config.vanilla_mbridge
    if self.vanilla_bridge:
        from verl.models.mcore.mbridge import AutoBridge

        bridge = AutoBridge.from_config(self.model_config.hf_config, dtype=self.param_dtype)
        bridge.set_extra_args(**override_transformer_config)
        tf_config = bridge.config
        tf_config.fp16 = self.param_dtype == torch.float16
        tf_config.bf16 = self.param_dtype == torch.bfloat16
    else:
        from verl.models.mcore.bridge import AutoBridge

        # Use Megatron-Bridge to convert HF config to Megatron config
        bridge = AutoBridge.from_hf_pretrained(
            self.model_config.local_path, trust_remote_code=self.model_config.trust_remote_code
        )
        # Get Megatron provider and configure it
        provider = bridge.to_megatron_provider(load_weights=False)
        # In case of invalid overrides, we need to make sure some critical params are set correctly
        provider.params_dtype = self.param_dtype
        # Ensure dtype settings propagate to Megatron-Bridge/TE
        provider.fp16 = self.param_dtype == torch.float16
        provider.bf16 = self.param_dtype == torch.bfloat16
        # Pass distributed info
        provider.tensor_model_parallel_size = self.engine_config.tensor_model_parallel_size
        provider.pipeline_model_parallel_size = self.engine_config.pipeline_model_parallel_size
        provider.expert_model_parallel_size = self.engine_config.expert_model_parallel_size
        provider.expert_tensor_parallel_size = self.engine_config.expert_tensor_parallel_size
        provider.virtual_pipeline_model_parallel_size = self.engine_config.virtual_pipeline_model_parallel_size
        provider.context_parallel_size = self.engine_config.context_parallel_size
        provider.sequence_parallel = self.engine_config.sequence_parallel
        # Match verl implementation (need variable_seq_lengths)
        from megatron.core.transformer.enums import AttnBackend

        provider.attention_backend = AttnBackend.flash
        provider.variable_seq_lengths = True
        provider.moe_token_dispatcher_type = "alltoall"
        provider.moe_router_load_balancing_type = "none"
        # Apply transformer config overrides
        for key, value in override_transformer_config.items():
            setattr(provider, key, value)
        provider.finalize()
        self.provider = provider
        tf_config = None  # Will be set after model creation
    self.bridge = bridge
    if not self.bridge:
        # No-bridge fallback: convert weights by explicit name mapping instead.
        self.weight_converter = get_mcore_weight_converter(self.model_config.hf_config, self.dtype)
    if torch.distributed.get_rank() == 0:
        if tf_config is not None:
            print(f"TF config: {tf_config}")
    self.tf_config = tf_config
    from verl.workers.config.megatron_peft import get_peft_cls

    self.peft_cls = get_peft_cls(
        model_config=self.model_config, bridge=self.bridge, provider=self.provider, dtype=self.param_dtype
    )
def _build_megatron_module(self):
    """Instantiate Megatron model chunks (one per virtual pipeline stage) and load weights.

    Returns:
        list: wrapped Megatron module chunks (DDP-wrapped unless forward_only).

    Side effects: sets ``self.is_value_model`` and updates ``self.tf_config``.
    """
    from verl.utils.megatron_utils import McoreModuleWrapperConfig, make_megatron_module
    from verl.utils.model import print_model_size

    # TODO: add more cases
    # Heuristic: token/sequence-classification architectures are treated as value models.
    is_value_model = (
        "ForTokenClassification" in self.model_config.architectures[0]
        or "ForSequenceClassification" in self.model_config.architectures[0]
    )
    self.is_value_model = is_value_model
    # Inference-only engines skip the DDP wrapper (no gradients needed).
    if self.engine_config.forward_only:
        wrap_with_ddp = False
    else:
        wrap_with_ddp = True
    wrap_config = McoreModuleWrapperConfig(
        is_value_model=is_value_model,  # actor is not value model
        share_embeddings_and_output_weights=self.model_config.share_embeddings_and_output_weights,
        wrap_with_ddp=wrap_with_ddp,
        use_distributed_optimizer=self.engine_config.use_distributed_optimizer,
    )
    module, updated_tf_config = make_megatron_module(
        wrap_config=wrap_config,
        tf_config=self.tf_config,
        hf_config=self.model_config.hf_config,
        bridge=self.bridge,
        provider=self.provider,
        override_model_config=self.engine_config.override_mcore_model_config,
        override_ddp_config=self.engine_config.override_ddp_config,
        peft_cls=self.peft_cls,
        peft_config=self.model_config.get("lora", None),
    )
    # Megatron-Bridge path builds tf_config during model creation (it was None before).
    self.tf_config = updated_tf_config
    print(f"module: {len(module)}")
    # Weight loading: Megatron dist-checkpoint, or HF weights through the bridge.
    if self.engine_config.use_dist_checkpointing:
        load_mcore_dist_weights(module, self.engine_config.dist_checkpointing_path, is_value_model=is_value_model)
    else:
        if self.vanilla_bridge:
            self.bridge.load_weights(module, self.model_config.local_path)
        else:
            allowed_mismatched_params = []
            if self.is_value_model:
                # The value head is allowed to mismatch the HF checkpoint's LM head.
                allowed_mismatched_params = ["output_layer.weight"]
            self.bridge.load_hf_weights(
                module, self.model_config.local_path, allowed_mismatched_params=allowed_mismatched_params
            )
    if torch.distributed.get_rank() == 0:
        print_model_size(module[0])
    if self.enable_routing_replay:
        print(f"routing replay layers: {len(RouterReplay.router_instances)}")
    return module
def _maybe_enable_fused_kernels(self):
    """Patch model chunks with the fused forward path when supported.

    Fused kernels are skipped (and the config flag cleared) for value models
    and when MTP is enabled.
    """
    if not self.engine_config.use_fused_kernels:
        return
    if self.is_value_model or self.model_config.mtp.enable:
        logger.warning_once(
            "Fused kernels are not supported for value models or when MTP is enabled in Megatron engine; disabling."
        )
        # Clear the flag so downstream code (e.g. forward_step) takes the non-fused path.
        self.engine_config.use_fused_kernels = False
        return
    from verl.models.mcore.model_forward_fused import patch_fused_forward

    for model in self.module:
        patch_fused_forward(model)
def _build_optimizer(self):
    """Create the Megatron optimizer over all model chunks and register training hooks."""
    from verl.utils.megatron.optimizer import get_megatron_optimizer, init_megatron_optim_config

    optim_config_megatron = init_megatron_optim_config(
        self.optimizer_config,
        use_distributed_optimizer=self.engine_config.use_distributed_optimizer,
        # fp16 training needs loss-scaling support in the optimizer.
        fp16=self.param_dtype == torch.float16,
    )
    optimizer = get_megatron_optimizer(model=self.module, config=optim_config_megatron)
    register_megatron_training_hooks(self.module, optimizer)
    return optimizer
def _build_lr_scheduler(self):
    """Create the Megatron LR (optimizer-param) scheduler bound to self.optimizer."""
    from verl.utils.megatron.optimizer import get_megatron_optimizer_param_scheduler

    scheduler = get_megatron_optimizer_param_scheduler(optimizer=self.optimizer, config=self.optimizer_config)
    return scheduler
@property
def is_param_offload_enabled(self) -> bool:
    """Whether model parameters are configured to be offloaded to CPU."""
    enabled = self._is_offload_param
    return enabled
@property
def is_optimizer_offload_enabled(self) -> bool:
    """Whether optimizer states are configured to be offloaded to CPU."""
    enabled = self._is_offload_optimizer
    return enabled
def is_mp_src_rank_with_outputs(self):
    """Return True iff this rank is TP rank 0, CP rank 0 and on the last PP stage.

    That is the single model-parallel rank holding the final model outputs.
    """
    if mpu.get_tensor_model_parallel_rank() != 0:
        return False
    if mpu.get_context_parallel_rank() != 0:
        return False
    last_pp_stage = mpu.get_pipeline_model_parallel_world_size() - 1
    return mpu.get_pipeline_model_parallel_rank() == last_pp_stage
def initialize(self):
    """Build config, model, optimizer, scheduler and checkpoint manager; apply initial offload.

    For ``forward_only`` engines only the model is built (no optimizer, LR
    scheduler or checkpoint manager).
    """
    self._build_tf_config()
    self.module = self._build_megatron_module()
    self._maybe_enable_fused_kernels()
    if self.model_config.mtp.enable:
        patch_engine_mtp(self.module, self.model_config)
    # For forward_only, we don't need optimizer, lr_scheduler, checkpoint_mananager
    if self.engine_config.forward_only:
        self.optimizer = None
        self.lr_scheduler = None
        return
    self.optimizer = self._build_optimizer()
    self.lr_scheduler = self._build_lr_scheduler()
    full_reshardable = self.engine_config.dist_ckpt_optim_fully_reshardable
    mem_eff = self.engine_config.distrib_optim_fully_reshardable_mem_efficient
    # Minimal OmegaConf view in the shape MegatronCheckpointManager expects.
    tmp_config = OmegaConf.create(
        {
            "model": {"path": self.model_config.local_path},
            "megatron": {
                "dist_ckpt_optim_fully_reshardable": full_reshardable,
                "distrib_optim_fully_reshardable_mem_efficient": mem_eff,
            },
        }
    )
    role = "actor" if not self.is_value_model else "critic"
    # NOTE: the attribute name "checkpoint_mananager" (sic) is misspelled but used
    # consistently throughout this class; renaming would require touching all users.
    self.checkpoint_mananager = MegatronCheckpointManager(
        config=tmp_config,
        checkpoint_config=self.checkpoint_config,
        model_config=self.model_config.hf_config,
        transformer_config=self.tf_config,
        role=role,
        model=self.module,
        arch=self.model_config.architectures[0],
        hf_config=self.model_config.hf_config,
        param_dtype=self.param_dtype,
        share_embeddings_and_output_weights=self.model_config.share_embeddings_and_output_weights,
        processing_class=self.model_config.get_processor(),
        optimizer=self.optimizer,
        optimizer_scheduler=self.lr_scheduler,
        use_distributed_optimizer=self.engine_config.use_distributed_optimizer,
        use_checkpoint_opt_param_scheduler=self.optimizer_config.use_checkpoint_opt_param_scheduler,
        bridge=self.bridge,
        provider=self.provider,
        peft_cls=self.peft_cls,
        use_dist_checkpointing=self.engine_config.use_dist_checkpointing,
    )
    # Honor the offload config right away so init leaves GPU memory as configured.
    self.to(
        device="cpu",
        model=self._is_offload_param,
        optimizer=self._is_offload_optimizer,
        grad=self._is_offload_param,
    )
    log_gpu_memory_usage("After offload model/optimizer/grad during init", logger=logger)
def train_mode(self, **kwargs):
    """
    Context manager entry for switching the engine and model into training mode.

    Usage:
        with engine.train_mode():
            # runs in training mode
    """
    ctx = EngineTrainModeCtx(self, **kwargs)
    return ctx
def eval_mode(self, **kwargs):
    """
    Context manager entry for switching the engine and model into evaluation mode.

    Usage:
        with engine.eval_mode():
            # runs in evaluation mode
    """
    ctx = EngineEvalModeCtx(self, **kwargs)
    return ctx
def optimizer_zero_grad(self):
    """Zero optimizer grads and every model chunk's grad buffer before a new backward pass."""
    self.optimizer.zero_grad()
    # use use_contiguous_buffers_in_local_ddp and no overlap_dp_param_comm
    for model_chunk in self.module:
        # if use distributed optimizer, zero grad buffer will be handled by optimizer
        model_chunk.zero_grad_buffer()
def optimizer_step(self):
    """
    Perform an optimization step to update model parameters based on accumulated gradients.

    Returns:
        grad_norm (float): The norm of the gradients before clipping or update.
    """
    update_successful, grad_norm, _num_zeros_in_grad = self.optimizer.step()
    if not update_successful:
        raise NotImplementedError("Megatron optimizer step failed. This should not happen")
    # allgather already execute in optimizer.step in new megatron
    return grad_norm
def lr_scheduler_step(self):
    """
    Advance the learning rate scheduler by one step.

    Returns:
        current_lr (float or list[float]): Updated learning rate(s).
    """
    from verl.utils.megatron.optimizer import get_megatron_last_lr

    # Megatron's param scheduler advances by an explicit increment (1 optimizer step).
    self.lr_scheduler.step(1)
    return get_megatron_last_lr(self.optimizer)
def to(self, device: str, model: bool = True, optimizer: bool = True, grad: bool = True):
    """
    Move model parameters, optimizer states, or both to the specified device.

    Note that this function executes irrespective of offload config. It serves as manual control.

    Args:
        device: Target device identifier; must be the accelerator device name or "cpu".
        model: If True, move the model.
        optimizer: If True, move the optimizer states.
        grad: If True (and model), also load gradients when moving to the accelerator.
    """
    super().to(device=device, model=model, optimizer=optimizer, grad=grad)
    device_name = get_device_name()
    assert device in (device_name, "cpu")
    if device == device_name:
        if model:
            load_megatron_model_to_gpu(self.module, load_grad=grad)
        if optimizer and self.optimizer is not None:
            load_megatron_optimizer(self.optimizer)
    elif device == "cpu":
        if model:
            offload_megatron_model_to_cpu(self.module)
        if optimizer and self.optimizer is not None:
            offload_megatron_optimizer(self.optimizer)
    else:
        # Unreachable while assertions are enabled (see assert above); kept as a
        # defensive fallback under `python -O`.
        raise ValueError(f"Invalid device type: {device}")
def get_data_parallel_rank(self):
    """Return this process's rank within its data-parallel group."""
    dp_rank = mpu.get_data_parallel_rank()
    return dp_rank
def get_data_parallel_size(self):
    """Return the data-parallel world size."""
    dp_size = mpu.get_data_parallel_world_size()
    return dp_size
def get_data_parallel_group(self):
    """Return the torch.distributed process group for data parallelism."""
    dp_group = mpu.get_data_parallel_group()
    return dp_group
def save_checkpoint(
    self,
    local_path: str,
    hdfs_path: Optional[str] = None,
    global_step: int = 0,
    max_ckpt_to_keep: Optional[int] = None,
    **kwargs,
) -> None:
    """
    Save model, optimizer, and scheduler states to a checkpoint.

    Args:
        local_path: Local filesystem path to save checkpoint.
        hdfs_path: Optional HDFS path to copy checkpoint.
        global_step: Integer training step number for naming.
        max_ckpt_to_keep: Maximum number of recent checkpoints to retain.
    """
    # Temporarily bring params (and grads) back if the model is offloaded;
    # NOTE(review): checkpoint saving presumably requires GPU-resident tensors — confirm.
    origin_module_device = get_megatron_module_device(self.module)
    if self._is_offload_param or origin_module_device == "cpu":
        load_megatron_model_to_gpu(self.module, load_grad=True)
    self.checkpoint_mananager.save_checkpoint(
        local_path=local_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep
    )
    # Ensure every rank finished writing before anyone proceeds (or re-offloads).
    torch.distributed.barrier()
    if self._is_offload_param:
        offload_megatron_model_to_cpu(self.module)
def load_checkpoint(
    self, local_path: str, hdfs_path: Optional[str] = None, del_local_after_load: bool = True, **kwargs
) -> None:
    """
    Load model, optimizer, and scheduler states from a checkpoint.

    Args:
        local_path: Local filesystem path of the checkpoint.
        hdfs_path: Optional HDFS path where checkpoint is stored.
        del_local_after_load: Whether to delete local copy after loading.
    """
    # Bring params back to the accelerator before loading if they were offloaded.
    if self._is_offload_param:
        load_megatron_model_to_gpu(self.module)
    self.checkpoint_mananager.load_checkpoint(
        local_path=local_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load
    )
    # Restore the configured offload state once loading is complete.
    if self._is_offload_param:
        offload_megatron_model_to_cpu(self.module)
    if self._is_offload_optimizer:
        offload_megatron_optimizer(self.optimizer)
def forward_backward_batch(self, data: TensorDict, loss_function: Callable, forward_only=False) -> Any:
    """Run Megatron's pipeline schedule (forward, and backward unless forward_only) on a batch.

    Splits ``data`` into micro batches, drives the Megatron forward_backward
    schedule with ``self.forward_step``, and reassembles per-micro-batch outputs
    on the last pipeline stage.

    Args:
        data: Global batch TensorDict carrying non-tensor metadata keys.
        loss_function: Per-micro-batch loss callable; may be None only when
            forward_only is True.
        forward_only: If True, skip the backward pass.

    Returns:
        dict with "model_output"/"loss"/"metrics" on the last PP stage; {} elsewhere.
    """
    tu.assign_non_tensor(data, sp_size=self.engine_config.context_parallel_size)
    # compute num_tokens in global batch for loss normalization
    batch_num_tokens = data["loss_mask"].sum().to(get_device_id())
    torch.distributed.all_reduce(
        batch_num_tokens, op=torch.distributed.ReduceOp.SUM, group=self.get_data_parallel_group()
    )
    tu.assign_non_tensor(data, batch_num_tokens=batch_num_tokens.item())
    tu.assign_non_tensor(data, dp_size=self.get_data_parallel_size())
    vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size()
    if vpp_size is not None and vpp_size > 1:
        # Interleaved schedule: micro-batch count must align with the VPP grouping.
        num_batches_divided_by = self.tf_config.microbatch_group_size_per_vp_stage
    else:
        num_batches_divided_by = None
    micro_batches, indices = prepare_micro_batches(
        data=data,
        dp_group=self.get_data_parallel_group(),
        num_batches_divided_by=num_batches_divided_by,
        same_micro_num_in_dp=True,
        min_num_micro_batch=None,
    )
    if num_batches_divided_by is not None:
        assert len(micro_batches) % num_batches_divided_by == 0, (
            f"micro_batches {micro_batches} must be divisible by num_batches_divided_by "
            f"{num_batches_divided_by} for megatron backend"
        )
    # compute input shapes for pp stages
    n_micro_batch = len(micro_batches)
    # Each micro batch needs the total count so losses can be rescaled in postprocess.
    for micro_batch in micro_batches:
        tu.assign_non_tensor(micro_batch, num_micro_batch=n_micro_batch)
    forward_backward_func = get_forward_backward_func()
    postprocess_micro_batch_func = partial(
        self.postprocess_micro_batch_func,
        forward_only=forward_only,
        loss_function=loss_function,
    )
    tu.assign_non_tensor(data, num_micro_batch=n_micro_batch)
    forward_step = partial(self.forward_step, postprocess_micro_batch_func=postprocess_micro_batch_func)
    enable_routing_replay = tu.get_non_tensor_data(data, key="enable_routing_replay", default=False)
    if enable_routing_replay:
        RouterReplay.set_global_router_replay_action(RouterReplayAction.REPLAY_FORWARD)
    # batch should be a list of batches inside micro-batches
    batch_generator = make_batch_generator(micro_batches, vpp_size=len(self.module))
    # TODO: we may use the new schedule instead
    # for flash-attn: (seq_len, batch_size, hidden_size) = (mbs*seq_len, 1, hidden_size)
    losses_reduced = forward_backward_func(
        forward_step_func=forward_step,
        data_iterator=batch_generator,
        model=self.module,
        num_microbatches=n_micro_batch,
        seq_length=1,  # the communication shape is obtained via p2p comm
        micro_batch_size=1,  # the communication shape is obtained via p2p comm
        forward_only=forward_only,
    )
    if enable_routing_replay:
        if self.engine_config.router_replay.mode in ["R3"]:
            RouterReplay.clear_global_indices()
        RouterReplay.clear_global_router_replay_action()
    if self.model_config.mtp.enable and self.is_mp_src_rank_with_outputs():
        # add mtp_losses
        metrics = get_megatron_mtp_loss(n_micro_batch)
        if "metrics" not in losses_reduced[0]:
            losses_reduced[0]["metrics"] = {}
        losses_reduced[0]["metrics"].update(metrics)
    if mpu.is_pipeline_last_stage(ignore_virtual=True):
        output = postprocess_batch_func(output_lst=losses_reduced, indices=indices, data=data)
        return output
    else:
        return {}
def get_per_tensor_param(self, base_sync_done=False, **kwargs):
    """Export model weights (HF naming) for rollout weight synchronization.

    Args:
        base_sync_done: True once base weights have already been synced; with a
            non-merged LoRA setup only adapter weights are exported then.

    Returns:
        tuple: (per-tensor parameter iterator, optional PEFT config for vLLM).
    """
    # Weights must be resident on the accelerator to export; gradients not needed.
    load_megatron_model_to_gpu(self.module, load_grad=False)
    peft_config = None
    # LoRA without merging: adapters are shipped separately from base weights.
    non_merge_lora_sync = self.peft_cls is not None and not self.model_config.lora.get("merge", False)
    if self.vanilla_bridge:
        per_tensor_param = self.bridge.export_weights(self.module)
    elif base_sync_done and non_merge_lora_sync:
        # Only export adapter weights
        peft_config = build_peft_config_for_vllm(self.model_config.lora)
        per_tensor_param = self.bridge.export_adapter_weights(self.module)
    else:
        per_tensor_param = self.bridge.export_hf_weights(self.module)
        if non_merge_lora_sync:
            # Rename base weights so they are distinguishable from adapter weights.
            per_tensor_param = add_base_layer_suffix(
                per_tensor_param, model_type=self.model_config.hf_config.model_type
            )
    return per_tensor_param, peft_config
def disable_adapter(self) -> ContextManager:
    """Return a context manager that temporarily disables PEFT/LoRA adapters."""
    ctx = self.peft_cls.disable_adapter(self.module)
    return ctx
def forward_step(self, batch_iter, model, postprocess_micro_batch_func):
    """Abstract hook: run one micro-batch forward pass. Subclasses must override."""
    raise NotImplementedError("forward_step must be implemented in subclass")
def postprocess_micro_batch_func(self, output, data: TensorDict, forward_only: bool, loss_function):
raise NotImplementedError("postprocess_micro_batch_func must be implemented in subclass")
class EngineEvalModeCtx(BaseEngineCtx):
    """Context manager that puts every Megatron model chunk into eval mode."""

    def __init__(self, engine: MegatronEngine, **kwargs):
        super().__init__(engine=engine, mode="eval", **kwargs)

    def __enter__(self):
        assert isinstance(self.engine, MegatronEngine)
        super().__enter__()
        # An mcore module is a list of model chunks, one per virtual pipeline stage.
        for chunk in self.engine.module:
            chunk.eval()

    def __exit__(self, exc_type, exc_value, traceback):
        assert isinstance(self.engine, MegatronEngine)
        super().__exit__(exc_type, exc_value, traceback)
class EngineTrainModeCtx(BaseEngineCtx):
    """Context manager that puts every Megatron model chunk into train mode.

    On exit, gradients are zeroed so the next step starts clean.
    """

    def __init__(self, engine: MegatronEngine, **kwargs):
        super().__init__(engine=engine, mode="train", **kwargs)

    def __enter__(self):
        assert isinstance(self.engine, MegatronEngine)
        super().__enter__()
        # An mcore module is a list of model chunks, one per virtual pipeline stage.
        for chunk in self.engine.module:
            chunk.train()

    def __exit__(self, exc_type, exc_value, traceback):
        assert isinstance(self.engine, MegatronEngine)
        self.engine.optimizer_zero_grad()
        super().__exit__(exc_type, exc_value, traceback)
@EngineRegistry.register(model_type="language_model", backend="megatron")
class MegatronEngineWithLMHead(MegatronEngine):
    """Megatron engine for causal-LM models.

    Implements forward_step/postprocess to produce per-token log-probs (and
    optionally entropy) under the no-padding (jagged) data layout.
    """

    def prepare_model_inputs(self, batch: TensorDict):
        """Pull model-forward inputs out of a micro-batch TensorDict."""
        input_ids = batch["input_ids"]
        loss_mask = batch["loss_mask"].to(bool)
        multi_modal_inputs = extract_multi_modal_inputs(batch.get("multi_modal_inputs", []))
        # Pre-recorded MoE routing indices; empty unless router replay is active.
        routed_experts = batch.get("routed_experts", [])
        return {
            "input_ids": input_ids,
            "loss_mask": loss_mask,
            "multi_modal_inputs": multi_modal_inputs,
            "routed_experts": routed_experts,
        }

    def prepare_model_outputs(self, output: dict, data: TensorDict):
        """Select the outputs to expose downstream: log_probs, plus entropy when requested."""
        calculate_entropy = tu.get_non_tensor_data(data, key="calculate_entropy", default=False)
        log_prob = output["log_probs"]
        model_output = {"log_probs": log_prob}
        if calculate_entropy:
            entropy = output["entropy"]
            model_output["entropy"] = entropy
        return model_output

    def forward_step(self, batch_iter: Iterator[TensorDict], model, postprocess_micro_batch_func):
        """Run one micro-batch forward pass through a (possibly virtual-pipeline) model chunk.

        Handles router-replay bookkeeping, temperature scaling of logits, and the
        choice between the fused and regular forward paths.

        Returns:
            tuple: (model forward output, postprocess callable bound to this batch)
        """
        batch: TensorDict = next(batch_iter)
        batch = batch.to(get_device_id())
        use_fused_kernels = tu.get_non_tensor_data(batch, key="use_fused_kernels", default=False)
        calculate_entropy = tu.get_non_tensor_data(batch, key="calculate_entropy", default=False)
        pad_mode = tu.get_non_tensor_data(batch, key="pad_mode", default=DatasetPadMode.NO_PADDING)
        temperature = batch["temperature"]
        model_inputs = self.prepare_model_inputs(batch)
        input_ids = model_inputs["input_ids"]
        multi_modal_inputs = model_inputs["multi_modal_inputs"]
        loss_mask = model_inputs["loss_mask"]
        unwrapped_model = unwrap_model(model)
        # vp_stage identifies which virtual-pipeline chunk this model object is.
        if hasattr(unwrapped_model, "vp_stage"):
            vp_rank = unwrapped_model.vp_stage
        else:
            vp_rank = 0
        # Router replay: re-arm routers for a replayed forward ahead of a backward pass.
        if RouterReplayHelper.is_replay_backward_action(self.tf_config, vp_rank):
            router_instance_list = RouterReplayHelper.get_micro_batch_router_list(self.tf_config, vp_rank)
            for router in router_instance_list:
                router.set_router_replay_action(RouterReplayAction.REPLAY_FORWARD)
        if RouterReplayHelper.is_replay_forward_action(self.tf_config, vp_rank):
            layers_topk_idx = model_inputs["routed_experts"]
            set_router_replay_data(layers_topk_idx, None, self.tf_config, vp_rank)
        if pad_mode == DatasetPadMode.NO_PADDING:
            # Labels are the input ids themselves (shifting happens downstream).
            label = input_ids.clone()
        else:
            raise NotImplementedError(f"Pad mode {pad_mode} is not supported for megatron engine")

        from verl.models.mcore import get_mcore_forward_no_padding_fn

        # Validate fused-kernel preconditions; fall back to the regular path otherwise.
        if use_fused_kernels:
            if not self.engine_config.use_remove_padding:
                logger.warning_once(
                    "Fused kernels require `use_remove_padding=True` for Megatron engine. Falling back to non-fused."
                )
                use_fused_kernels = False
            elif isinstance(temperature, torch.Tensor):
                if temperature.numel() != 1:
                    logger.warning_once(
                        "Fused kernels do not support per-sample temperature. Falling back to non-fused."
                    )
                    use_fused_kernels = False
                else:
                    temperature_value = float(temperature.item())
            else:
                temperature_value = float(temperature)
        if use_fused_kernels:
            # NOTE(review): get_mcore_forward_fused_no_padding_fn is presumably imported
            # at module top (only the non-fused fn is imported locally above) — confirm.
            fused_forward_fn = get_mcore_forward_fused_no_padding_fn(self.model_config.hf_config)
            output = fused_forward_fn(
                model=model,
                input_ids=input_ids,
                labels=label,
                multi_modal_inputs=multi_modal_inputs,
                temperature=temperature_value,
                calculate_entropy=calculate_entropy,
                pad_token_id=self.model_config.tokenizer.pad_token_id,
            )
        else:
            # Broadcast temperature to per-sample/per-token shape for logits scaling.
            if not isinstance(temperature, torch.Tensor):
                temperature = torch.tensor([temperature] * input_ids.shape[0], device=input_ids.device)
            temperature = temperature.to(torch.float32)
            assert temperature.shape[0] == input_ids.shape[0]
            temperature = verl_F.expand_as_nested(temperature, input_ids)  # (bsz, j1)
            forward_fn = get_mcore_forward_no_padding_fn(self.model_config.hf_config)

            def logits_processor(logits, label, temperature):
                """Scale logits by temperature, then compute log-probs (and optional entropy)."""
                assert logits.shape[:2] == label.shape[:2]
                # avoid non-positive temperature such as padding
                temperature[temperature <= 0] = 1e-8
                assert torch.all(temperature > 0).item(), f"temperature tensor must be positive. Got {temperature}"
                logits.div_(temperature.unsqueeze(dim=-1).to(logits.dtype))
                ret = {}
                if calculate_entropy:
                    # clone() protects the logits used for log-probs from any work inside
                    # vocab_parallel_entropy (correctness over memory; see disabled hint below).
                    logits_bak = logits.clone()
                    # # disable the hint until the fused_kernel is optimized for triton>=3.3
                    # if torch.distributed.get_rank() == 0:
                    #     logger.warning_once(
                    #         "For memory-efficient computation, enable fused kernels via "
                    #         "`actor_rollout_ref.model.use_fused_kernels=True`. "
                    #         "The current `clone()` operation ensures correctness but increases memory usage."
                    #     )
                    entropy = vocab_parallel_entropy(logits)
                    ret["entropy"] = entropy
                else:
                    logits_bak = logits
                log_probs = vocab_parallel_log_probs_from_logits(logits_bak, label)
                ret["log_probs"] = log_probs
                return ret

            logits_processor_args = {"label": label, "temperature": temperature, "loss_mask": loss_mask}
            output = forward_fn(
                model,
                input_ids,
                multi_modal_inputs,
                logits_processor=logits_processor,
                logits_processor_args=logits_processor_args,
                vision_model=hasattr(self.model_config.hf_config, "vision_config"),
                pad_token_id=self.model_config.tokenizer.pad_token_id,
                data_format="thd" if self.engine_config.use_remove_padding else "bshd",
                enable_mtp=self.model_config.mtp.enable_train,
            )
        # Router replay: switch to backward replay mode for next backward pass
        if RouterReplayHelper.is_replay_forward_action(self.tf_config, vp_rank):
            router_instance_list = RouterReplayHelper.get_micro_batch_router_list(self.tf_config, vp_rank)
            for router in router_instance_list:
                router.set_router_replay_action(RouterReplayAction.REPLAY_BACKWARD)
        return output, partial(postprocess_micro_batch_func, data=batch)

    def postprocess_micro_batch_func(self, output, data: TensorDict, forward_only: bool, loss_function):
        """Apply the loss function to one micro batch and package loss/metrics.

        Returns:
            tuple: (loss scaled for Megatron's schedule, dict with model_output/loss/metrics)
        """
        # For memory efficiency
        # We move calculation of entropy to compute_log_probs, forward_only == True
        device = data["input_ids"].device
        model_output = self.prepare_model_outputs(output, data)
        if loss_function is not None:
            loss, metrics = loss_function(model_output=model_output, data=data, dp_group=self.get_data_parallel_group())
            # scale loss by num_micro_batch because megatron will scale loss
            # by n_micro_batch inside pp schedule
            scaled_loss = loss * data["num_micro_batch"]
        else:
            assert forward_only, "forward_only must be True when loss_function is None"
            # Dummy loss keeps the schedule's bookkeeping happy when only outputs matter.
            loss = torch.tensor(1.0, device=device)
            scaled_loss = loss
            metrics = {}
        output = {
            "model_output": model_output,
            "loss": loss.detach().item(),
            "metrics": metrics,
        }
        # return loss and stats
        return scaled_loss, output
@EngineRegistry.register(model_type="value_model", backend="megatron")
class MegatronEngineWithValueHead(MegatronEngineWithLMHead):
    """Megatron engine for critic/value models: forward produces per-token values."""

    # for value head
    def forward_step(self, batch_iter, model, postprocess_micro_batch_func):
        """Run one micro-batch forward pass through the value model (no logits processing)."""
        batch: TensorDict = next(batch_iter)
        batch = batch.to(get_device_id())
        model_inputs = self.prepare_model_inputs(batch)
        input_ids = model_inputs["input_ids"]
        multi_modal_inputs = model_inputs["multi_modal_inputs"]

        from verl.models.mcore import get_mcore_forward_no_padding_fn

        forward_fn = get_mcore_forward_no_padding_fn(self.model_config.hf_config)
        output = forward_fn(
            model,
            input_ids,
            multi_modal_inputs,
            value_model=True,
            vision_model=hasattr(self.model_config.hf_config, "vision_config"),
            pad_token_id=self.model_config.tokenizer.pad_token_id,
            enable_mtp=self.model_config.mtp.enable_train,
        )
        return output, partial(postprocess_micro_batch_func, data=batch)

    def prepare_model_outputs(self, output: dict | torch.Tensor, data: TensorDict):
        """Wrap the raw value predictions under the "values" key."""
        return {"values": output}
| verl__workers__engine__megatron__transformer_impl.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from verl.utils.device import get_torch_device
def set_random_seed(seed):
    """Seed python, numpy and torch RNGs; also seed Megatron's tensor-parallel RNG
    when an accelerator device is present."""
    import random

    import numpy as np
    import torch

    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if get_torch_device().device_count() > 0:
        from megatron.core import tensor_parallel

        tensor_parallel.model_parallel_cuda_manual_seed(seed)
    # FIXME: torch cumsum not support deterministic (used in vllm sampler),
    # https://github.com/pytorch/pytorch/issues/89492
    # torch.use_deterministic_algorithms(True, warn_only=True)
    # os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
| verl__workers__engine__megatron__utils.py |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
try:
from mindspeed.megatron_adaptor import repatch
except ImportError:
repatch = None
from verl.trainer.config import CheckpointConfig
from verl.workers.config import HFModelConfig, McoreEngineConfig, McoreOptimizerConfig
from ..base import EngineRegistry
from ..megatron import MegatronEngineWithLMHead
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
@EngineRegistry.register(model_type="language_model", backend="megatron", device="npu")
class MindspeedEngineWithLMHead(MegatronEngineWithLMHead):
    """Ascend-NPU variant of the Megatron LM-head engine, patched via MindSpeed."""

    def __init__(
        self,
        model_config: HFModelConfig,
        engine_config: McoreEngineConfig,
        optimizer_config: McoreOptimizerConfig,
        checkpoint_config: CheckpointConfig,
    ):
        """Build the base Megatron engine, then apply MindSpeed's megatron repatch.

        Raises:
            ImportError: if mindspeed is not installed (the guarded import at
                module top left ``repatch`` as None).
        """
        super().__init__(model_config, engine_config, optimizer_config, checkpoint_config)
        # Bug fix: `repatch` is None when the mindspeed import failed; calling it
        # unconditionally raised `TypeError: 'NoneType' object is not callable`.
        # Fail with an actionable message instead.
        if repatch is None:
            raise ImportError(
                "mindspeed is required for MindspeedEngineWithLMHead but could not be imported"
            )
        repatch_config = {"use_flash_attn": True}
        if self.engine_config.context_parallel_size > 1:
            repatch_config["context_parallel_size"] = self.engine_config.context_parallel_size
        repatch(repatch_config)
| verl__workers__engine__mindspeed__transformer_impl.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import numpy as np
import torch
from tensordict import TensorDict
from verl.utils import tensordict_utils as tu
from verl.utils.dataset.dataset_utils import DatasetPadMode
from verl.utils.device import is_npu_available
from verl.utils.py_functional import append_to_dict
from verl.utils.seqlen_balancing import rearrange_micro_batches, restore_dynamic_batch
def enable_full_determinism(seed: int):
    """
    Helper function for reproducibility in distributed training.
    See https://pytorch.org/docs/stable/notes/randomness.html for details.
    """
    deterministic_env = {
        "PYTHONHASHSEED": str(seed),
        "CUBLAS_WORKSPACE_CONFIG": ":16:8",
        "NCCL_DETERMINISTIC": "1",
        "FLASH_ATTENTION_DETERMINISTIC": "1",
    }
    if is_npu_available:
        # The environment variable required to enable deterministic mode on Ascend NPUs.
        deterministic_env["NCCL_DETERMINISTIC"] = "true"
        deterministic_env["CLOSE_MATMUL_K_SHIFT"] = "1"
    os.environ.update(deterministic_env)

    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.use_deterministic_algorithms(True, warn_only=True)
    # Force deterministic CUDNN behaviour (and disable CUDNN outright).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = False
    if is_npu_available:
        torch.npu.manual_seed(seed)
        torch.npu.manual_seed_all(seed)
def prepare_micro_batches(
    data: TensorDict,
    dp_group=None,
    num_batches_divided_by=None,
    same_micro_num_in_dp=True,
    min_num_micro_batch=None,
    use_dynamic_bsz_balance=True,
):
    """
    Prepare micro batches from data.

    When the non-tensor key ``use_dynamic_bsz`` in ``data`` is True (default),
    micro batches are rearranged by token budget (``max_token_len_per_gpu`` times
    ``sp_size``); otherwise ``data`` is chunked into fixed-size micro batches.

    Args:
        data: Batch to split; carries non-tensor keys ``use_dynamic_bsz`` and ``sp_size``.
        dp_group: Data-parallel process group used to equalize micro-batch counts.
        num_batches_divided_by: If set, force the micro-batch count to be divisible by it.
        same_micro_num_in_dp: Require equal micro-batch counts across DP ranks.
        min_num_micro_batch: Optional lower bound on the number of micro batches.
        use_dynamic_bsz_balance: Balance token counts across micro batches when dynamic.

    Returns:
        tuple: (micro_batches, batch_idx_list); the index list is None for fixed-size
        chunking (there is no reordering to undo).
    """
    use_dynamic_bsz = tu.get_non_tensor_data(data=data, key="use_dynamic_bsz", default=True)
    sp_size = tu.get_non_tensor_data(data=data, key="sp_size", default=1)
    if use_dynamic_bsz:
        assert "max_token_len_per_gpu" in data.keys(), "max_token_len_per_gpu must be set when use_dynamic_bsz is True"
        max_token_len_per_gpu = data["max_token_len_per_gpu"]
        # Sequence-parallel ranks share a sequence, so the budget scales with sp_size.
        max_token_len = max_token_len_per_gpu * sp_size
        micro_batches, batch_idx_list = rearrange_micro_batches(
            data,
            max_token_len=max_token_len,
            dp_group=dp_group,
            num_batches_divided_by=num_batches_divided_by,
            same_micro_num_in_dp=same_micro_num_in_dp,
            min_num_micro_batch=min_num_micro_batch,
            use_dynamic_bsz_balance=use_dynamic_bsz_balance,
        )
    else:
        micro_batch_size_per_gpu = data["micro_batch_size_per_gpu"]
        micro_batches = tu.chunk_tensordict(data, len(data) // micro_batch_size_per_gpu)
        batch_idx_list = None
    return micro_batches, batch_idx_list
def postprocess_batch_func(output_lst, indices, data: TensorDict):
    """Merge per-micro-batch outputs of a forward_backward_batch into one result.

    Each element of ``output_lst`` is a dict with up to three keys:
    ``model_output`` (tensors to concatenate), ``loss`` (scalar) and ``metrics``.
    Model-output tensors are re-assembled as jagged nested tensors and, when
    dynamic batch sizing was used, restored to the original sample order via
    ``indices``.

    Intended to run on the last pipeline stage (on every TP rank); other PP
    ranks have no outputs to postprocess.

    Returns:
        dict: {"model_output": ..., "loss": [per-micro-batch losses], "metrics": ...}
    """
    use_dynamic_bsz = tu.get_non_tensor_data(data=data, key="use_dynamic_bsz", default=True)
    pad_mode = tu.get_non_tensor_data(data=data, key="pad_mode", default=DatasetPadMode.NO_PADDING)
    assert pad_mode == DatasetPadMode.NO_PADDING, "postprocess_batch_func only support NO_PADDING pad_mode"
    model_output = {}
    losses = []
    aggregated_metrics = {}
    # model output: gather per-key lists across micro batches
    for o in output_lst:
        if "model_output" in o:
            for key, val in o["model_output"].items():
                if key not in model_output:
                    model_output[key] = []
                model_output[key].append(val)
    # concat results from micro batches
    for key, val in model_output.items():
        if pad_mode == DatasetPadMode.NO_PADDING:
            # Flatten each micro batch's nested tensor, then rebuild one jagged nested tensor.
            tensors = [tensor for nt in model_output[key] for tensor in nt.unbind()]
            model_output[key] = torch.nested.as_nested_tensor(tensors, layout=torch.jagged)
        else:
            raise NotImplementedError(f"pad_mode {pad_mode} not implemented")
        # reverse with dynamic bsz
        if use_dynamic_bsz:
            model_output[key] = restore_dynamic_batch(model_output[key], indices)
    # loss
    for o in output_lst:
        if "loss" in o:
            losses.append(o["loss"])
    # metrics
    for o in output_lst:
        if "metrics" in o:
            metrics = o["metrics"]
            append_to_dict(aggregated_metrics, metrics)
    output = {
        "model_output": model_output,
        "loss": losses,
        "metrics": aggregated_metrics,
    }
    return output
| verl__workers__engine__utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from dataclasses import dataclass, field
from typing import Any, Callable, Optional, Sequence
import torch
import torch.distributed as dist
from tensordict import TensorDict
from torch.distributed.tensor import DTensor
from veomni.distributed import parallel_state
from veomni.distributed.offloading import build_activation_offloading_context
from veomni.distributed.torch_parallelize import build_parallelize_model
from veomni.models.auto import build_foundation_model
from veomni.optim import build_lr_scheduler, build_optimizer
import verl.utils.torch_functional as verl_F
from verl.trainer.config import CheckpointConfig
from verl.utils import tensordict_utils as tu
from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager
from verl.utils.device import get_device_id, get_device_name
from verl.utils.fsdp_utils import fsdp_version
from verl.utils.model import convert_weight_keys
from verl.utils.profiler import log_gpu_memory_usage
from verl.utils.ulysses import (
get_ulysses_sequence_parallel_group,
set_ulysses_sequence_parallel_group,
)
from verl.workers.config import HFModelConfig, VeOmniEngineConfig, VeOmniOptimizerConfig
from ..base import BaseEngineCtx, EngineRegistry
from ..fsdp.transformer_impl import FSDPEngine, FSDPEngineWithLMHead
from ..utils import enable_full_determinism, postprocess_batch_func, prepare_micro_batches
from .utils import (
MOE_PARAM_HANDERS,
VL_TYPE2INDEX,
load_veomni_model_to_gpu,
load_veomni_optimizer,
offload_veomni_model_to_cpu,
offload_veomni_optimizer,
)
logger = logging.getLogger(__file__)
class VeOmniEngine(FSDPEngine):
    """Training engine backed by VeOmni's FSDP2 parallelization stack.

    Reuses the FSDP engine plumbing but delegates model construction,
    sharding, optimizer/scheduler building, and CPU offload to VeOmni APIs.
    """

    def __init__(
        self,
        model_config: HFModelConfig,
        engine_config: VeOmniEngineConfig,
        optimizer_config: VeOmniOptimizerConfig,
        checkpoint_config: CheckpointConfig,
        **kwargs,
    ):
        """
        Initialize the VeOmniEngine.
        Sets up VeOmni parallel state (dp replicate/shard, expert parallel, Ulysses SP)
        and records LoRA/offload policies from the configs.
        Args:
            model_config: HF model settings (path, lora_rank, remove-padding flag, ...).
            engine_config: VeOmni engine settings (fsdp_size, ulysses/expert parallel
                sizes, offload flags, determinism, torch.compile usage, ...).
            optimizer_config: optimizer and LR scheduler settings.
            checkpoint_config: checkpoint manager settings.
        """
        self.model_config = model_config
        self.engine_config = engine_config
        self.optimizer_config = optimizer_config
        self.checkpoint_config = checkpoint_config
        # VeOmniEngine only supports fsdp2.
        self.data_parallel_mode = "fsdp2"
        self.rank = dist.get_rank()
        fsdp_size = self.engine_config.fsdp_size
        world_size = dist.get_world_size()
        # Ranks not used for sequence parallelism form the data-parallel dimension.
        dp_size = world_size // self.engine_config.ulysses_parallel_size
        if fsdp_size < 0 or fsdp_size >= dp_size:
            # fsdp_size unset (or covers all of dp): shard across the whole dp dim.
            data_parallel_replicate_size = 1
            data_parallel_shard_size = dp_size
        else:
            if dp_size % fsdp_size != 0:
                raise ValueError(
                    f"Data parallel size ({dp_size}) must be divisible by fsdp_size ({fsdp_size}). "
                    "Please adjust your parallel configuration."
                )
            # HSDP-style layout: replicate groups of fsdp_size shards.
            data_parallel_replicate_size = dp_size // fsdp_size
            data_parallel_shard_size = fsdp_size
        parallel_state.init_parallel_state(
            dp_size=dp_size,
            dp_replicate_size=data_parallel_replicate_size,
            dp_shard_size=data_parallel_shard_size,
            ep_size=self.engine_config.expert_parallel_size,
            ulysses_size=self.engine_config.ulysses_parallel_size,
            dp_mode=self.data_parallel_mode,
        )
        if self.engine_config.full_determinism:
            enable_full_determinism(seed=self.engine_config.seed)
        self.use_remove_padding = self.model_config.use_remove_padding
        self._is_offload_param = self.engine_config.param_offload
        self._is_offload_optimizer = self.engine_config.optimizer_offload
        self._is_lora = self.model_config.lora_rank > 0
        self.use_ulysses_sp = parallel_state.get_parallel_state().sp_enabled
        self.ulysses_sequence_parallel_size = self.engine_config.ulysses_parallel_size
        if self.use_ulysses_sp:
            # NOTE(review): the mesh dim is "sp" here but "ulysses" in
            # is_mp_src_rank_with_outputs() — verify both names exist on the mesh.
            self.ulysses_parallel_group = parallel_state.get_parallel_state().device_mesh["sp"].get_group()
        else:
            self.ulysses_parallel_group = None
        if self.engine_config.entropy_from_logits_with_chunking:
            # Chunked variant trades speed for lower peak memory on large vocab.
            entropy_from_logits = verl_F.entropy_from_logits_with_chunking
        else:
            entropy_from_logits = verl_F.entropy_from_logits
        self.compute_entropy_from_logits = (
            torch.compile(entropy_from_logits, dynamic=True)
            if self.engine_config.use_torch_compile  # use torch compile by default
            else entropy_from_logits
        )
    def initialize(self):
        """
        Build the model, optimizer, and learning rate scheduler under VeOmni.
        Applies device, dtype, and precision configurations, including mixed precision.
        Sets up the checkpoint manager, then offloads model/optimizer state to CPU
        if the corresponding offload flags are enabled.
        """
        self._build_model_optimizer()
        self.checkpoint_manager = FSDPCheckpointManager(
            model=self.module,
            optimizer=self.optimizer,
            lr_scheduler=self.lr_scheduler,
            processing_class=self.model_config.get_processor(),
            checkpoint_config=self.checkpoint_config,
            trust_remote_code=self.model_config.trust_remote_code,
        )
        # Honor the configured offload policy right after construction to free
        # accelerator memory until the first train/eval context is entered.
        self.to(
            device="cpu",
            model=self._is_offload_param,
            optimizer=self._is_offload_optimizer,
            grad=self._is_offload_optimizer,
        )
        log_gpu_memory_usage("After offload model/optimizer/grad during init", logger=logger)
    def _build_optimizer(self, module):
        """Build a VeOmni optimizer and attach the model's optional step pre-hook."""
        optimizer = build_optimizer(
            module,
            lr=self.optimizer_config.lr,
            betas=self.optimizer_config.betas,
            weight_decay=self.optimizer_config.weight_decay,
            optimizer_type=self.optimizer_config.optimizer,
        )
        # Some VeOmni models expose a hook to run before every optimizer step
        # (e.g. MoE-specific bookkeeping) — register it when present.
        get_optimizer_pre_hook = getattr(module, "get_optimizer_pre_hook", None)
        if get_optimizer_pre_hook is not None:
            optimizer_pre_hook = get_optimizer_pre_hook(module, module.config, self.data_parallel_mode)
            optimizer.register_step_pre_hook(optimizer_pre_hook)
        return optimizer
    def _build_lr_scheduler(self, optimizer):
        """Build the VeOmni LR scheduler from the optimizer config (warmup + decay)."""
        optim_config = self.optimizer_config
        lr_scheduler = build_lr_scheduler(
            optimizer,
            train_steps=optim_config.total_training_steps,
            lr=optim_config.lr,
            lr_min=optim_config.lr_min,
            lr_decay_style=optim_config.lr_scheduler_type,
            lr_decay_ratio=optim_config.lr_decay_ratio,
            lr_warmup_ratio=optim_config.lr_warmup_steps_ratio,
            lr_start=optim_config.lr_start,
        )
        return lr_scheduler
    def _build_model_optimizer(self):
        """Build and parallelize the model; build optimizer/scheduler unless forward-only."""
        # Load base model with specified configuration and dtype
        module = build_foundation_model(
            config_path=self.model_config.hf_config_path,
            weights_path=self.model_config.path,
            # float32 master weights when mixed precision is on; pure bf16 otherwise.
            torch_dtype="float32" if self.engine_config.mixed_precision else "bfloat16",
            attn_implementation=self.engine_config.attn_implementation,
            moe_implementation=self.engine_config.moe_implementation,
            init_device=self.engine_config.init_device,
        )
        log_gpu_memory_usage("After load base model", logger=logger)
        # Applies parallel strategies to the model.
        log_gpu_memory_usage("Before parallelize model", logger=logger)
        module = build_parallelize_model(
            module,
            init_device=self.engine_config.init_device,
            weights_path=self.model_config.path,
            enable_full_shard=self.engine_config.enable_full_shard,
            enable_mixed_precision=self.engine_config.mixed_precision,
            enable_gradient_checkpointing=self.model_config.enable_gradient_checkpointing,
            enable_fsdp_offload=self.engine_config.enable_fsdp_offload,
            basic_modules=module._no_split_modules + self.engine_config.basic_modules,
            enable_reentrant=self.engine_config.enable_reentrant,
            enable_forward_prefetch=self.engine_config.forward_prefetch,
        )
        log_gpu_memory_usage("After parallelize model", logger=logger)
        if not self.engine_config.forward_only:
            # Initialize optimizer with model parameters and config settings
            optimizer = self._build_optimizer(module)
            # Create learning rate scheduler with warmup and decay settings
            lr_scheduler = self._build_lr_scheduler(optimizer)
        else:
            # Inference-only engine: no optimizer state needed.
            optimizer = None
            lr_scheduler = None
        self.module = module
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        # Contexts wrapping forward/backward to optionally offload activations to CPU.
        self.model_fwd_context, self.model_bwd_context = build_activation_offloading_context(
            self.model_config.enable_activation_offload,
            self.model_config.enable_gradient_checkpointing,
            self.engine_config.activation_gpu_limit,
        )
    def optimizer_step(self):
        """
        Clip gradients and perform an optimization step.

        Returns:
            float: the (pre-clip) global grad norm. If it is non-finite, the
            step is skipped and gradients are zeroed instead.
        """
        if hasattr(self.module, "clip_grad_norm_"):
            # Prefer the model's own clipping (FSDP-aware implementations).
            grad_norm = self.module.clip_grad_norm_(self.optimizer_config.clip_grad)
        else:
            grad_norm = torch.nn.utils.clip_grad_norm_(self.module.parameters(), self.optimizer_config.clip_grad)
        if isinstance(grad_norm, DTensor):
            # Materialize the sharded norm so .item() below sees the global value.
            grad_norm = grad_norm.full_tensor()
        # if grad_norm is not finite, skip the update
        if not torch.isfinite(grad_norm):
            print(f"WARN: grad_norm is not finite: {grad_norm}")
            self.optimizer.zero_grad()
        else:
            self.optimizer.step()
        return grad_norm.item()
    def forward_backward_batch(self, data: TensorDict, loss_function: Callable, forward_only=False) -> Any:
        """
        Perform a forward pass and optionally a backward pass on a batch of data.
        Args:
            data: The input data for the forward pass, typically containing tensors and metadata.
            loss_function: The loss function to optimize. See `verl.workers.roles.utils.losses` for examples.
            forward_only: If True, perform only the forward pass. If False, perform forward and backward pass.
        Returns:
            Any: The output of the forward pass, which can be used for loss computation or other purposes.
        """
        tu.assign_non_tensor(data, sp_size=parallel_state.get_parallel_state().ulysses_size)
        # compute num_tokens in global batch for loss normalization
        batch_num_tokens = data["loss_mask"].sum().to(get_device_id())
        torch.distributed.all_reduce(
            batch_num_tokens, op=torch.distributed.ReduceOp.SUM, group=self.get_data_parallel_group()
        )
        tu.assign_non_tensor(data, batch_num_tokens=batch_num_tokens.item())
        tu.assign_non_tensor(data, dp_size=self.get_data_parallel_size())
        # Split into micro-batches (dynamic-bsz aware); `indices` allows restoring order.
        micro_batches, indices = prepare_micro_batches(
            data=data, dp_group=self.get_data_parallel_group(), same_micro_num_in_dp=True
        )
        output_lst = []
        for micro_batch in micro_batches:
            with self.model_fwd_context:
                loss, meta_info = self.forward_step(micro_batch, loss_function=loss_function, forward_only=forward_only)
            if not forward_only:
                with self.model_bwd_context:
                    loss.backward()
            output_lst.append(meta_info)
        return postprocess_batch_func(output_lst=output_lst, indices=indices, data=data)
    def get_data_parallel_rank(self):
        """Rank of this process within the data-parallel mesh dimension."""
        return parallel_state.get_parallel_state().device_mesh.get_local_rank("dp")
    def get_data_parallel_size(self):
        """Number of data-parallel replicas (world size divided by SP size)."""
        return torch.distributed.get_world_size() // parallel_state.get_parallel_state().ulysses_size
    def get_data_parallel_group(self):
        """Process group spanning the dp dimension (WORLD when SP is disabled)."""
        if parallel_state.get_parallel_state().ulysses_size > 1:
            return parallel_state.get_parallel_state().device_mesh.get_group(mesh_dim="dp")
        else:
            return torch.distributed.group.WORLD
    def is_mp_src_rank_with_outputs(self):
        """
        Whether the current rank is the first rank in model parallel group that contains model outputs
        """
        if parallel_state.get_parallel_state().ulysses_size > 1:
            is_collect = parallel_state.get_parallel_state().device_mesh["ulysses"].get_local_rank() == 0
        else:
            is_collect = True
        return is_collect
    def train_mode(self, **kwargs):
        """
        Return a context manager that switches to training mode with VeOmni-specific handling.
        Includes parameter and optimizer offload entry/exit.
        """
        return EngineTrainModeCtx(self, **kwargs)
    def eval_mode(self, **kwargs):
        """
        Return a context manager that switches to evaluation mode with VeOmni-specific handling.
        Includes activation offload entry/exit.
        """
        return EngineEvalModeCtx(self, **kwargs)
    def to(self, device: str, model: bool = True, optimizer: bool = True, grad: bool = True):
        """
        Move model parameters, optimizer states, or both to the specified device.
        Note that this function executes irrespective of offload config. It serves as manual control.
        Args:
            device: Target device identifier (current accelerator name or "cpu").
            model: If True, move the model.
            optimizer: If True, move the optimizer states.
            grad: Forwarded to the base-class handling of gradient state.
        """
        # Let the base (non-FSDP-specific) implementation handle grad state first.
        super(FSDPEngine, self).to(device=device, model=model, optimizer=optimizer, grad=grad)
        device_name = get_device_name()
        assert device in (device_name, "cpu")
        if device == device_name:
            if model:
                load_veomni_model_to_gpu(self.module)
            if optimizer and self.optimizer is not None:
                load_veomni_optimizer(self.optimizer, device)
        elif device == "cpu":
            if model:
                offload_veomni_model_to_cpu(self.module)
            if optimizer and self.optimizer is not None:
                offload_veomni_optimizer(self.optimizer)
        else:
            # NOTE(review): unreachable — the assert above already restricts `device`.
            raise ValueError(f"Invalid device type: {device}")
    def save_checkpoint(
        self,
        local_path: str,
        hdfs_path: Optional[str] = None,
        global_step: int = 0,
        max_ckpt_to_keep: Optional[int] = None,
        **kwargs,
    ) -> None:
        """
        Save VeOmni checkpoint, handling parameter offload as needed.
        """
        # The model may sit on CPU (offload policy or manual .to); saving requires GPU.
        origin_module_device = next(self.module.parameters()).device.type
        if self._is_offload_param or origin_module_device == "cpu":
            load_veomni_model_to_gpu(self.module)
        self.checkpoint_manager.save_checkpoint(
            local_path=local_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep
        )
        torch.distributed.barrier()
        if self._is_offload_param:
            offload_veomni_model_to_cpu(self.module)
    def load_checkpoint(
        self, local_path: str, hdfs_path: Optional[str] = None, del_local_after_load: int = True, **kwargs
    ) -> None:
        """
        Load VeOmni checkpoint, restoring parameters and optimizer state.
        Re-applies the configured offload policy afterwards.
        """
        if self._is_offload_param:
            load_veomni_model_to_gpu(self.module)
        self.checkpoint_manager.load_checkpoint(
            local_path=local_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load
        )
        torch.distributed.barrier()
        if self._is_offload_param:
            offload_veomni_model_to_cpu(self.module)
        if self._is_offload_optimizer:
            # load_checkpoint materializes optimizer state on device; push it back to CPU.
            offload_veomni_optimizer(self.optimizer)
    def get_per_tensor_param(self, **kwargs):
        """
        Return a generator over full (unsharded) `(name, tensor)` parameter pairs,
        used for weight sync with the rollout engine.

        Expert-parallel MoE projection weights are all-gathered across the EP
        group and expanded into per-expert entries via MOE_PARAM_HANDERS.
        Returns a `(generator, None)` tuple; the second slot is reserved for
        LoRA metadata (not yet supported, see TODO below).
        """
        load_veomni_model_to_gpu(self.module)
        params = self.module.state_dict()
        params = convert_weight_keys(params, getattr(self.module, "_fsdp_wrapped_module", self.module))
        if self._is_offload_param:
            offload_veomni_model_to_cpu(self.module)
        device = get_device_id()
        ps = parallel_state.get_parallel_state()
        model_type = getattr(self.module.config, "model_type", "default")
        # Default handler passes the tensor through unchanged.
        process_func = MOE_PARAM_HANDERS.get(model_type, lambda n, t: iter([(n, t)]))
        def param_generator():
            for name, param in params.items():
                # NOTE: params were offloaded above, but DTensor.full_tensor()
                # gathers lazily here, one parameter at a time.
                unsharded_tensor = param.full_tensor() if isinstance(param, DTensor) else param
                is_expert_layer = "mlp.experts." in name
                is_proj = any(p in name for p in ["down_proj", "gate_proj", "up_proj", "gate_up_proj"])
                if is_expert_layer and is_proj and ps.ep_enabled:
                    output_shape = list(unsharded_tensor.shape)
                    output_shape[0] *= ps.ep_size
                    stacked_tensor = torch.empty(output_shape, dtype=unsharded_tensor.dtype, device=device)
                    # all gather expert tensors [32, H, I] -> [128, H, I]
                    torch.distributed.all_gather_into_tensor(stacked_tensor, unsharded_tensor, group=ps.ep_group)
                    yield from process_func(name, stacked_tensor)
                    del stacked_tensor
                else:
                    if is_expert_layer:
                        yield from process_func(name, unsharded_tensor)
                    else:
                        yield name, unsharded_tensor
        # TODO: support VeOmni LoRA
        return param_generator(), None
class EngineEvalModeCtx(BaseEngineCtx):
    """Context manager for evaluation passes on a VeOmniEngine.

    On entry, routes Ulysses SP collectives to this engine's group; on exit,
    restores the previous SP group and reshards the root FSDP module so
    eval-time all-gathered parameters are freed.
    """
    def __init__(self, engine: VeOmniEngine, **kwargs):
        super().__init__(engine=engine, mode="eval", **kwargs)
    def __enter__(self):
        assert isinstance(self.engine, VeOmniEngine)
        super().__enter__()
        # Save/replace the process-wide SP group so nested contexts restore correctly.
        self.prev_sp_group = get_ulysses_sequence_parallel_group()
        set_ulysses_sequence_parallel_group(self.engine.ulysses_parallel_group)
        # NOTE(review): this eval context calls module.train(), not module.eval().
        # It mirrors the TODO in EngineTrainModeCtx and is presumably a deliberate
        # VeOmni workaround — confirm before "fixing".
        self.engine.module.train()
    def __exit__(self, exc_type, exc_value, traceback):
        assert isinstance(self.engine, VeOmniEngine)
        set_ulysses_sequence_parallel_group(self.prev_sp_group)
        # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes
        # unshard the root FSDP module
        if parallel_state.get_parallel_state().dp_shard_size > 1:
            if fsdp_version(self.engine.module) == 1:
                # FSDP1 has no public reshard API; uses the private handle.
                self.engine.module._handle.reshard(True)
            elif fsdp_version(self.engine.module) == 2:
                self.engine.module.reshard()
        super().__exit__(exc_type, exc_value, traceback)
class EngineTrainModeCtx(BaseEngineCtx):
    """Context manager for training passes on a VeOmniEngine.

    On entry, routes Ulysses SP collectives to this engine's group and puts
    the module in train mode; on exit, restores the previous SP group and
    zeroes gradients.
    """
    def __init__(self, engine: VeOmniEngine, **kwargs):
        super().__init__(engine=engine, mode="train", **kwargs)
    def __enter__(self):
        assert isinstance(self.engine, VeOmniEngine)
        super().__enter__()
        # Save/replace the process-wide SP group so it can be restored on exit.
        self.prev_sp_group = get_ulysses_sequence_parallel_group()
        set_ulysses_sequence_parallel_group(self.engine.ulysses_parallel_group)
        # TODO: Switch to eval mode after Integrating the CI environment
        # VeOmni (ref: https://github.com/ByteDance-Seed/VeOmni/pull/421)
        self.engine.module.train()
    def __exit__(self, exc_type, exc_value, traceback):
        assert isinstance(self.engine, VeOmniEngine)
        set_ulysses_sequence_parallel_group(self.prev_sp_group)
        # Clear gradients so the next train context starts from a clean slate.
        self.engine.optimizer_zero_grad()
        super().__exit__(exc_type, exc_value, traceback)
@dataclass
class OmniSequenceShardCollator:
    """
    Data collator that shards batch features along the sequence dimension for
    Ulysses-style sequence parallelism: each SP rank keeps only its own chunk.

    Operates in place on a dict of feature tensors (see `__call__`).
    """
    # features to slice sequence dimension; value = dim index to slice along
    sp_slice_features: dict[str, int] = field(
        default_factory=lambda: {
            "input_ids": -1,
            "labels": -1,
            "pixel_values": 0,
            "pixel_values_videos": 0,
        },
        metadata={"help": "features to slice sequence dimension."},
    )
    # features to padding sequence dimension; value = pad fill value
    padding_features: dict[str, int] = field(
        default_factory=lambda: {
            "pixel_values": 0,
        },
        metadata={"help": "features to padding sequence dimension."},
    )
    # padding scale for padding features (alignment multiple on top of sp_size)
    padding_scale: dict[str, int] = field(
        default_factory=lambda: {"pixel_values": 4}, metadata={"help": "padding scale for padding features."}
    )
    def __post_init__(self):
        # Resolve the SP topology once from the global parallel state.
        self.sp_size = parallel_state.get_parallel_state().sp_size
        self.sp_rank = parallel_state.get_parallel_state().sp_rank
    def sp_slice(self, feature: torch.Tensor, dim: int = -1) -> "torch.Tensor":
        """Return this rank's chunk of ``feature`` along ``dim``.

        NOTE(review): assumes feature.size(dim) is a multiple of sp_size
        (after sp_padding); otherwise narrow() would run out of bounds on the
        last rank — confirm upstream alignment for non-padded features.
        """
        seq_length = feature.size(dim)
        sp_chunk_size = (seq_length + self.sp_size - 1) // self.sp_size
        return feature.narrow(dim, self.sp_rank * sp_chunk_size, sp_chunk_size)
    def sp_padding(
        self, tensor: "torch.Tensor", dim: int = -1, pad_value: int = 0, pad_scale: int = 1
    ) -> "torch.Tensor":
        """
        Pad ``tensor`` along ``dim`` with ``pad_value`` so its length becomes a
        multiple of ``sp_size * pad_scale``. Returns the tensor unchanged when
        already aligned.
        """
        seq_length = tensor.size(dim)
        scale_sp_size = self.sp_size * pad_scale
        sp_chunk_size = (seq_length + scale_sp_size - 1) // scale_sp_size
        pad_size = sp_chunk_size * scale_sp_size - seq_length
        if pad_size == 0:
            return tensor
        pad_shape = list(tensor.shape)
        pad_shape[dim] = pad_size
        pad = torch.full(pad_shape, fill_value=pad_value, dtype=tensor.dtype, device=tensor.device)
        return torch.cat((tensor, pad), dim=dim)
    def __call__(self, batch: dict[str, "torch.Tensor"]) -> dict[str, "torch.Tensor"]:
        """Pad then shard the configured features of ``batch`` in place.

        Fix: the previous annotation claimed ``Sequence[dict[str, Tensor]]``,
        but the implementation indexes a single mapping by feature name.
        """
        # sp padding (only features that need alignment beyond sp_size)
        for key in batch:
            if key in self.padding_features:
                batch[key] = self.sp_padding(
                    batch[key],
                    dim=self.sp_slice_features.get(key, -1),
                    pad_value=self.padding_features[key],
                    pad_scale=self.padding_scale.get(key, 1),
                )
        # sp slice
        for key in batch:
            if key in self.sp_slice_features:
                batch[key] = self.sp_slice(batch[key], dim=self.sp_slice_features[key])
        return batch
@EngineRegistry.register(model_type="language_model", backend=["veomni"], device=["cuda", "npu"])
class VeOmniEngineWithLMHead(VeOmniEngine, FSDPEngineWithLMHead):
    """VeOmni engine variant that prepares LM-head model inputs, adding
    image/video placeholder masks for known VL model types and applying
    sequence-parallel sharding when SP is enabled."""

    def prepare_model_inputs(self, micro_batch: TensorDict):
        # TODO: Cannot work properly for qwen_vl ulysses
        model_inputs, output_args = super().prepare_model_inputs(micro_batch)
        token_index = VL_TYPE2INDEX.get(self.module.config.model_type)
        if token_index is not None:
            ids = model_inputs["input_ids"]
            # Boolean masks marking image/video placeholder positions.
            model_inputs["image_mask"] = ids == token_index["IMAGE_INPUT_INDEX"]
            model_inputs["video_mask"] = ids == token_index["VIDEO_INPUT_INDEX"]
        if parallel_state.get_parallel_state().sp_enabled:
            # Shard the prepared inputs in place along the sequence dimension.
            OmniSequenceShardCollator()(model_inputs)
        return model_inputs, output_args
| verl__workers__engine__veomni__transformer_impl.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from verl.utils.device import get_device_id, get_torch_device
# Placeholder token ids used to locate image/video positions in input_ids for
# supported vision-language model types.
# NOTE(review): 151655/151656 look like the Qwen-VL image/video pad token ids —
# confirm against each model's tokenizer config before extending this table.
VL_TYPE2INDEX = {
    "qwen2_5_vl": {
        "IMAGE_INPUT_INDEX": 151655,
        "VIDEO_INPUT_INDEX": 151656,
    },
    "qwen3_vl": {
        "IMAGE_INPUT_INDEX": 151655,
        "VIDEO_INPUT_INDEX": 151656,
    },
    "qwen3_vl_moe": {
        "IMAGE_INPUT_INDEX": 151655,
        "VIDEO_INPUT_INDEX": 151656,
    },
}
@torch.no_grad()
def offload_veomni_model_to_cpu(model, empty_cache: bool = True):
    """Reshard an FSDP2-wrapped model and move it to CPU to free accelerator memory.

    NOTE(review): relies on private torch FSDP2 internals
    (`_get_module_fsdp_state`, `_training_state`) — each param group is forced
    to IDLE so that `model.reshard()` is legal regardless of the training
    state the module was left in. Pin the torch version when upgrading.

    Args:
        model: FSDP2 (fully_shard) wrapped module.
        empty_cache: if True, release cached accelerator memory afterwards.
    """
    from torch.distributed.fsdp._fully_shard._fsdp_common import TrainingState
    from torch.distributed.fsdp._fully_shard._fsdp_state import _get_module_fsdp_state
    for module in model.modules():
        state = _get_module_fsdp_state(module)
        if state is None:
            # Not an FSDP-managed module; nothing to reset.
            continue
        fsdp_param_group = state._fsdp_param_group
        if fsdp_param_group is None:
            continue
        fsdp_param_group._training_state = TrainingState.IDLE
    model.reshard()
    model.cpu()
    if empty_cache:
        get_torch_device().empty_cache()
@torch.no_grad()
def load_veomni_model_to_gpu(model):
    """Move a (possibly CPU-offloaded) model back onto the current accelerator."""
    model.to(get_device_id())
def _iter_sub_optimizers(optimizer):
    """Yield each concrete optimizer behind ``optimizer``.

    VeOmni may wrap several optimizers in a MultiOptimizer (used for ep and
    non-ep parameters when ep+fsdp2 is enabled); unwrap it transparently.
    """
    if hasattr(optimizer, "_is_multi_optimizer") and optimizer._is_multi_optimizer:
        yield from optimizer.optimizers_dict.values()
    else:
        yield optimizer


def _move_optimizer_state(optimizer, device):
    """Move every tensor in the optimizer state to ``device`` (non-blocking).

    Shared implementation for offload/load below — the two previous copies of
    this traversal were byte-for-byte duplicates except for the target device.
    """
    for opt in _iter_sub_optimizers(optimizer):
        if not opt.state:
            # No state materialized yet (e.g. before the first step).
            continue
        for param_group in opt.param_groups:
            for param in param_group["params"]:
                state = opt.state[param]
                for key, value in state.items():
                    if isinstance(value, torch.Tensor):
                        state[key] = value.to(device, non_blocking=True)


@torch.no_grad()
def offload_veomni_optimizer(optimizer):
    """Offload all optimizer state tensors to CPU to free accelerator memory."""
    _move_optimizer_state(optimizer, "cpu")


@torch.no_grad()
def load_veomni_optimizer(optimizer, device_id):
    """Load all optimizer state tensors back onto ``device_id``."""
    _move_optimizer_state(optimizer, device_id)
def _map_moe_params_qwen3_moe(name, tensor):
    """Expand a stacked Qwen3-MoE expert parameter into per-expert entries.

    ``tensor`` is stacked along dim 0 (one slice per expert). Each slice is
    renamed from ``...mlp.experts.<suffix>`` to
    ``...mlp.experts.<i>.<suffix>.weight`` and moved to the current device.
    """
    for expert_idx, expert_weight in enumerate(tensor.unbind(0)):
        per_expert_name = name.replace("mlp.experts.", f"mlp.experts.{expert_idx}.") + ".weight"
        yield per_expert_name, expert_weight.to(get_device_id(), non_blocking=True)
# Maps hf model_type -> generator that expands a fused/stacked MoE expert
# parameter into per-expert (name, tensor) pairs for weight sync.
# NOTE(review): "HANDERS" is a typo for "HANDLERS", but the name is imported
# elsewhere (e.g. veomni transformer_impl) — renaming requires touching all
# import sites, so it is kept as-is here.
MOE_PARAM_HANDERS = {
    "qwen3_moe": _map_moe_params_qwen3_moe,
}
| verl__workers__engine__veomni__utils.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import os
from contextlib import nullcontext
from functools import partial
from itertools import chain
import torch
from codetiming import Timer
from omegaconf import DictConfig, open_dict
from tensordict import NonTensorData, TensorDict
from torch.distributed.device_mesh import init_device_mesh
try:
from verl.workers.engine.mindspeed.transformer_impl import repatch
except ImportError:
repatch = None
from verl.checkpoint_engine import CheckpointEngineRegistry
from verl.single_controller.base import Worker
from verl.single_controller.base.decorator import Dispatch, make_nd_compute_dataproto_dispatch_fn, register
from verl.utils import tensordict_utils as tu
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.device import get_device_name, set_expandable_segments
from verl.utils.distributed import initialize_global_process_group_ray
from verl.utils.flops_counter import FlopsCounter
from verl.utils.memory_utils import aggressive_empty_cache
from verl.utils.metric.utils import Metric
from verl.utils.profiler import DistProfiler, DistProfilerExtension, ProfilerConfig, log_gpu_memory_usage
from verl.utils.py_functional import append_to_dict
from verl.utils.tensordict_utils import maybe_fix_3d_position_ids
from verl.utils.torch_functional import allgather_dict_into_dict
from verl.workers.config import ActorConfig, HFModelConfig, RolloutConfig, TrainingWorkerConfig
from verl.workers.rollout.base import BaseRollout, get_rollout_class
from verl.workers.utils.losses import ppo_loss
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def _with_routing_replay_flag(enabled: bool):
    """Decorator factory for worker methods taking ``(self, data, ...)``.

    When the worker has routing replay turned on, tag the incoming TensorDict
    with ``enable_routing_replay = enabled`` before delegating; otherwise the
    call passes through untouched.
    """
    def _decorate(method):
        @functools.wraps(method)
        def _wrapped(self, data: TensorDict, *args, **kwargs):
            if self.enable_routing_replay:
                tu.assign_non_tensor_data(data, "enable_routing_replay", enabled)
            return method(self, data, *args, **kwargs)
        return _wrapped
    return _decorate
class TrainingWorker(Worker, DistProfilerExtension):
"""
TrainingWorker provides a Tinker-like API (https://thinkingmachines.ai/tinker/) as a RayWorkerGroup
to a single controller. Currently, we only provide more coarse grained APIs,
and do not provide exact APIs as Tinker does. But this can be added in the future.
"""
    def __init__(self, config: TrainingWorkerConfig):
        """Initialize the worker: process group, engine selection/creation,
        profiler, dispatch info, and FLOPs counter.

        Args:
            config: full worker configuration (model/engine/optimizer/
                checkpoint/profiler sub-configs and the engine model_type).
        """
        Worker.__init__(self)
        from verl.workers.engine import BaseEngine, EngineRegistry
        initialize_global_process_group_ray(timeout_second=None)
        self.config = config
        self.model_config = self.config.model_config
        self.engine_config = self.config.engine_config
        self.optimizer_config = self.config.optimizer_config
        self.checkpoint_config = self.config.checkpoint_config
        self.device_name = get_device_name()
        if self.engine_config is None:
            # Engine/optimizer configs come as a pair: either both explicit,
            # or both derived by the auto-selection callback below.
            assert self.optimizer_config is None
            if self.config.auto_select_engine_optim_fn is None:
                raise ValueError(
                    "engine_config is not provided and auto_select_engine_optim_fn is not set. "
                    "Cannot determine engine backend."
                )
            # Support automatically select engine backend given model config
            self.engine_config, self.optimizer_config = self.config.auto_select_engine_optim_fn(
                self.model_config, self.device_name
            )
        # we use the one defined in model
        # TODO: this is not elegant and should refactor later
        self.engine_config.use_remove_padding = self.model_config.use_remove_padding
        self.engine_config.use_fused_kernels = self.model_config.use_fused_kernels
        if repatch is not None:
            # NPU MindSpeed patch, will be refactored with MindSpeedEngine.
            repatch(self.engine_config.get("override_transformer_config", {}))
        # TODO: add DistProfilerExtension
        self.profiler_config = self.config.profiler_config
        if self.profiler_config is not None:
            self.profiler_tool_config = self.profiler_config.tool_config.get(self.profiler_config.tool, {})
        else:
            self.profiler_tool_config = None
        DistProfilerExtension.__init__(
            self, DistProfiler(rank=self.rank, config=self.profiler_config, tool_config=self.profiler_tool_config)
        )
        # Instantiate the concrete engine (FSDP/VeOmni/...) from the registry.
        self.engine: BaseEngine = EngineRegistry.new(
            model_type=self.config.model_type,
            backend=self.engine_config.strategy,
            model_config=self.model_config,
            engine_config=self.engine_config,
            optimizer_config=self.optimizer_config,
            checkpoint_config=self.checkpoint_config,
        )
        # build dispatch info
        self._register_dispatch_collect_info(
            mesh_name="train",
            dp_rank=self.engine.get_data_parallel_rank(),
            is_collect=self.engine.is_mp_src_rank_with_outputs(),
        )
        self.flops_counter = FlopsCounter(self.model_config.hf_config)
        # Loss function must be injected via set_loss_fn before train_batch.
        self.loss_fn = None
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def to(self, device, model=True, optimizer=True, grad=True):
"""Manual control of load/offload"""
assert device in ["cpu", "device"]
if device == "device":
device = get_device_name()
self.engine.to(device=device, model=model, optimizer=optimizer, grad=grad)
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def set_loss_fn(self, loss_fn):
        # Install the loss function used by train_batch/train_mini_batch on
        # every rank; train_batch asserts it has been set.
        self.loss_fn = loss_fn
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def reset(self):
        """
        Reset the model engine to the initial state. If the engine is not initialized,
        we initialize it. Otherwise, reload ckpt and reset states.

        NOTE(review): the body currently only calls engine.initialize(); the
        "reload ckpt" path described above is not implemented here — confirm
        whether initialize() internally covers the re-initialization case.
        """
        self.engine.initialize()
    def _postprocess_output(self, output, *, global_token_num, delta_time, forward_only, images_seqlens):
        """Aggregate per-rank engine output into final metrics.

        Args:
            output: dict containing "loss" (list of micro-batch losses),
                "model_output" and "metrics" from the engine.
            global_token_num: per-sequence token counts of the global batch
                (used for MFU); None disables MFU computation.
            delta_time: wall-clock seconds of the engine call.
            forward_only: True for log-prob style passes (scales MFU by 1/3,
                since no backward was run).
            images_seqlens: optional vision sequence lengths for FLOPs estimate.
        Returns:
            TensorDict with the (possibly empty) model outputs and a
            non-tensor "metrics" dict aggregated across the dp group.
        """
        # TODO: whether to log memory
        # metrics["perf/max_memory_allocated_gb"] = get_torch_device().max_memory_allocated() / (1024 ** 3)
        # metrics["perf/max_memory_reserved_gb"] = get_torch_device().max_memory_reserved() / (1024 ** 3)
        # metrics["perf/cpu_memory_used_gb"] = psutil.virtual_memory().used / (1024 ** 3)
        metrics: dict = output.pop("metrics")
        # perform all gather in dp group to ensure that it's correct.
        # Here each metric in metrics can be a list (micro-batch metrics) or a singleton
        # we should always sum the loss of each micro-batch as we scale by global_bsz/global_token
        loss = torch.sum(torch.tensor(output.pop("loss"), device=self.device_name))
        torch.distributed.all_reduce(
            loss, op=torch.distributed.ReduceOp.AVG, group=self.engine.get_data_parallel_group()
        )
        loss = loss.item()
        # For grad_norm, we do not perform all reduce because it is already been done when clipping grad
        grad_norm = metrics.pop("grad_norm", None)
        lr = metrics.pop("lr", None)
        # For other metrics, we perform all gather in dp group
        final_metrics = allgather_dict_into_dict(data=metrics, group=self.engine.get_data_parallel_group())
        final_metrics["loss"] = loss
        if grad_norm is not None:
            final_metrics["grad_norm"] = grad_norm
        if lr is not None:
            final_metrics["lr"] = lr
        # TODO: confirm the mtp loss IS same across dp
        for k, v in final_metrics.items():
            if k.startswith("mtp_losses"):
                flatten_v = [sublist[0] for sublist in v]  # sublist should be single element
                final_metrics[k] = sum(flatten_v) / len(flatten_v)
        # compute mfu
        if global_token_num is not None:
            estimated_flops, promised_flops = self.flops_counter.estimate_flops(
                global_token_num, delta_time, images_seqlens=images_seqlens
            )
            final_metrics["mfu"] = estimated_flops / promised_flops / torch.distributed.get_world_size()
            if forward_only:
                # Forward-only work is ~1/3 the FLOPs of fwd+bwd.
                final_metrics["mfu"] /= 3.0
        # model outputs
        model_output = output.pop("model_output", {})
        # We only return final_metrics
        final_output = tu.get_tensordict(tensor_dict=model_output, non_tensor_dict={"metrics": final_metrics})
        return final_output
    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="train"), blocking=False)
    def train_mini_batch(self, data: TensorDict) -> TensorDict:
        """Split a batch into N mini-batches and run them for multiple epochs.

        Control keys are popped from ``data`` as non-tensor entries:
        disable_auto_offload, mini_batch_size (global) OR num_mini_batch,
        epochs, seed, dataloader_kwargs.

        Returns:
            On the mp-src rank: a CPU TensorDict whose "metrics" aggregates
            the per-mini-batch metrics; None on other ranks.
        """
        maybe_fix_3d_position_ids(data)
        batch_size_per_dp = data.shape[0]
        disable_auto_offload = tu.pop(data, key="disable_auto_offload", default=False)
        mini_batch_size = tu.pop(data, key="mini_batch_size", default=None)
        num_mini_batch = tu.pop(data, key="num_mini_batch", default=None)
        epochs = tu.pop(data, key="epochs", default=1)
        seed = tu.pop(data, key="seed", default=42)
        dataloader_kwargs = tu.pop(data, key="dataloader_kwargs", default={})
        assert mini_batch_size is not None or num_mini_batch is not None
        if mini_batch_size is None:
            assert batch_size_per_dp % num_mini_batch == 0, f"Got {batch_size_per_dp=} and {num_mini_batch=}"
            mini_batch_size_per_gpu = batch_size_per_dp // num_mini_batch
        else:
            # mini_batch_size is global; convert to per-dp-rank size.
            assert mini_batch_size % self.engine.get_data_parallel_size() == 0, (
                f"Got {mini_batch_size=} and {self.engine.get_data_parallel_size()=}"
            )
            mini_batch_size_per_gpu = mini_batch_size // self.engine.get_data_parallel_size()
        # make iterator; offset the seed per dp rank so shuffling differs across ranks
        dataloader = tu.make_iterator(
            data,
            mini_batch_size=mini_batch_size_per_gpu,
            epochs=epochs,
            seed=seed + self.engine.get_data_parallel_rank(),
            dataloader_kwargs=dataloader_kwargs,
        )
        with (
            self.engine.train_mode(disable_auto_offload=disable_auto_offload),
            Timer(name="train_batch", logger=None),
        ):
            # update
            output_lst = []
            total_num_iterations = data.shape[0] // mini_batch_size_per_gpu * epochs
            for batch_idx, mini_batch_td in enumerate(dataloader):
                # add global token num
                global_token_num = mini_batch_td["input_ids"].offsets().diff().tolist()  # (total_nnz,)
                # allgather from dp rank
                global_token_num_output = [None] * self.engine.get_data_parallel_size()
                torch.distributed.all_gather_object(
                    global_token_num_output, global_token_num, self.engine.get_data_parallel_group()
                )
                global_token_num = [x for xs in global_token_num_output for x in xs]
                tu.assign_non_tensor(
                    mini_batch_td,
                    global_token_num=NonTensorData(global_token_num),
                    # Only step the LR scheduler on the very last mini-batch.
                    update_lr_scheduler=batch_idx == total_num_iterations - 1,
                    disable_auto_offload=True,
                )
                actor_output = self.train_batch(mini_batch_td)
                output_lst.append(actor_output)
        if self.engine.is_mp_src_rank_with_outputs():
            actor_output = [tu.get(output, "metrics") for output in output_lst]
            metrics = {}
            for output in actor_output:
                for key, val in output.items():
                    # flatten dp and micro batch
                    if isinstance(val, list):
                        output[key] = (
                            Metric.aggregate_dp(val)
                            if isinstance(val[0], Metric)
                            else list(chain.from_iterable(val))
                        )
                append_to_dict(metrics, output)
            output = tu.get_tensordict(tensor_dict={}, non_tensor_dict={"metrics": metrics}).cpu()
        else:
            output = None
        return output
    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="train"), blocking=False)
    def train_batch(self, data: TensorDict) -> TensorDict:
        """Run a single optimization step of the engine on one mini-batch.

        Args:
            data: TensorDict of batched tensors plus non-tensor control keys:
                ``global_token_num`` (list of per-sequence token counts across dp ranks),
                ``disable_auto_offload``, ``update_lr_scheduler`` and optionally
                ``images_seqlens``.

        Returns:
            Post-processed TensorDict of loss/metrics moved to CPU on the
            model-parallel source rank; ``None`` on all other ranks.
        """
        assert self.loss_fn is not None, "loss function can't be None when calling train_batch"
        assert not self.engine_config.forward_only, "Can't run `train_batch` when forward_only is in the engine config."
        # global_token_num should be a list of number of tokens of each seq in this batch
        global_token_num = tu.get(data, key="global_token_num")
        disable_auto_offload = tu.get(data, key="disable_auto_offload", default=False)
        images_seqlens = tu.get(data, key="images_seqlens", default=None)
        # inject engineering parameters if not specified (caller-provided keys win)
        default_keys = dict(
            use_remove_padding=self.model_config.use_remove_padding,
            use_dynamic_bsz=self.engine_config.use_dynamic_bsz,
            max_token_len_per_gpu=self.engine_config.max_token_len_per_gpu,
            micro_batch_size_per_gpu=self.engine_config.micro_batch_size_per_gpu,
            use_fused_kernels=self.engine_config.use_fused_kernels,
        )
        for key, val in default_keys.items():
            if key not in data.keys():
                tu.assign_non_tensor(data, **{key: val})
        with (
            self.engine.train_mode(disable_auto_offload=disable_auto_offload),
            Timer(name="train_batch", logger=None) as timer,
        ):
            output = self.engine.train_batch(data, loss_function=self.loss_fn)
            # containing loss, model_output and metrics
            # for training, we only care about loss and metrics
        delta_time = timer.last
        update_lr_scheduler = tu.get(data, key="update_lr_scheduler", default=False)
        # update lr scheduler (typically requested on the last mini-batch of an epoch)
        if update_lr_scheduler:
            lr = self.engine.lr_scheduler_step()
        else:
            lr = None
        if self.engine.is_mp_src_rank_with_outputs():
            # we don't need model_output in training. Maybe we change our mind later
            output.pop("model_output")
            if lr is not None:
                output["metrics"]["lr"] = lr
            final_output = self._postprocess_output(
                output,
                global_token_num=global_token_num,
                delta_time=delta_time,
                forward_only=False,
                images_seqlens=images_seqlens,
            ).cpu()
        else:
            final_output = None
        return final_output
    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="train"), blocking=False)
    def infer_batch(self, data: TensorDict) -> TensorDict:
        """Run a forward-only pass (optionally computing the loss) on one mini-batch.

        Args:
            data: TensorDict of batched tensors plus non-tensor control keys:
                ``global_token_num``, ``compute_loss`` (default True),
                ``disable_auto_offload``, ``no_lora_adapter`` (popped; bypasses the
                LoRA adapter when True) and optionally ``images_seqlens``.

        Returns:
            Post-processed TensorDict moved to CPU on the model-parallel source
            rank; ``None`` on all other ranks.
        """
        # add mfu calculator
        global_token_num = tu.get(data, key="global_token_num")
        compute_loss = tu.get(data, key="compute_loss", default=True)
        disable_auto_offload = tu.get(data, key="disable_auto_offload", default=False)
        no_lora_adapter = tu.pop(data, key="no_lora_adapter", default=False)
        images_seqlens = tu.get(data, key="images_seqlens", default=None)
        # inject inference-time engineering defaults only for keys the caller did not set
        default_keys = dict(
            use_remove_padding=self.model_config.use_remove_padding,
            use_dynamic_bsz=self.engine_config.use_dynamic_bsz,
            max_token_len_per_gpu=self.engine_config.infer_max_token_len_per_gpu,
            micro_batch_size_per_gpu=self.engine_config.infer_micro_batch_size_per_gpu,
            use_fused_kernels=self.engine_config.use_fused_kernels,
        )
        for key, val in default_keys.items():
            if key not in data.keys():
                tu.assign_non_tensor(data, **{key: val})
        # for sft training, we need to compute loss in eval
        loss_function = self.loss_fn if compute_loss else None
        with (
            self.engine.eval_mode(disable_auto_offload=disable_auto_offload),
            Timer(name="eval_batch", logger=None) as timer,
        ):
            # optionally score with the base model by disabling the LoRA adapter
            adapter_ctx = self.engine.disable_adapter() if no_lora_adapter else nullcontext()
            with adapter_ctx:
                output = self.engine.infer_batch(data, loss_function=loss_function)
        delta_time = timer.last
        if self.engine.is_mp_src_rank_with_outputs():
            final_output = self._postprocess_output(
                output,
                global_token_num=global_token_num,
                delta_time=delta_time,
                forward_only=True,
                images_seqlens=images_seqlens,
            ).cpu()
        else:
            final_output = None
        return final_output
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None):
return self.engine.save_checkpoint(local_path, hdfs_path, global_step, max_ckpt_to_keep)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=False):
return self.engine.load_checkpoint(local_path, hdfs_path, del_local_after_load)
class ActorRolloutRefWorker(Worker, DistProfilerExtension):
    """Hybrid worker that includes actor model, rollout and optional ref model.

    For standalone actor or rollout, use ActorWorker or BaseRollout respectively.
    NOTE: ActorRolloutRefWorker no longer support spmd mode and run native server mode.
    """

    def __init__(self, config: DictConfig, role: str, **kwargs):
        """Store the config and role; heavy model construction is deferred to ``init_model``.

        Args:
            config: DictConfig with ``actor``/``rollout``/``ref``/``model`` sections.
            role: One of "actor", "rollout", "ref", "actor_rollout", "actor_rollout_ref".
        """
        Worker.__init__(self)
        self.config = config
        self.role = role
        # sub-workers are created lazily in init_model depending on the role
        self.actor: TrainingWorker = None
        self.ref: TrainingWorker = None
        self.rollout: BaseRollout = None
        assert self.role in ["actor", "rollout", "ref", "actor_rollout", "actor_rollout_ref"]
        self._is_actor = self.role in ["actor", "actor_rollout", "actor_rollout_ref"]
        self._is_rollout = self.role in ["rollout", "actor_rollout", "actor_rollout_ref"]
        self._is_ref = self.role in ["ref", "actor_rollout_ref"]
        # pick the profiler section of the most specific role that applies
        if self._is_actor:
            omega_profiler_config = config.actor.get("profiler", {})
        elif self._is_rollout:
            # NOTE: In colocation mode, rollout config may not take effect (follow the actor config)
            # This is for extendability in AsyncRL cases
            omega_profiler_config = config.rollout.get("profiler", {})
        else:
            omega_profiler_config = config.ref.get("profiler", {})
        profiler_config = omega_conf_to_dataclass(omega_profiler_config, dataclass_type=ProfilerConfig)
        if omega_profiler_config.get("tool", None) in ["npu", "nsys", "torch", "torch_memory"]:
            tool_config = omega_conf_to_dataclass(
                omega_profiler_config.get("tool_config", {}).get(omega_profiler_config.get("tool"))
            )
        else:
            tool_config = None
        # NOTE(review): reads `config.actor` unconditionally — presumably always present
        # even for rollout/ref-only roles; confirm against the config schema.
        self.enable_routing_replay = (
            self.config.actor.strategy == "megatron" and self.config.actor.megatron.router_replay.mode != "disabled"
        )
        DistProfilerExtension.__init__(
            self, DistProfiler(rank=self.rank, config=profiler_config, tool_config=tool_config)
        )

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def set_loss_fn(self, loss_fn):
        """Forward the training loss function to the actor sub-worker."""
        self.actor.set_loss_fn(loss_fn=loss_fn)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def to(self, device, model=True, optimizer=True, grad=True):
        """Manual control of load/offload"""
        self.actor.to(device=device, model=model, optimizer=optimizer, grad=grad)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def init_model(self):
        """Build the ref/actor training workers and the rollout engine per ``self.role``."""
        model_config: HFModelConfig = omega_conf_to_dataclass(self.config.model)
        # 1. build reference model
        if "ref" in self.role:
            # TODO: align ref config with actor config
            # map log_prob_* knobs onto the ppo_* names the ActorConfig expects
            with open_dict(self.config.ref):
                self.config.ref.ppo_mini_batch_size = self.config.actor.ppo_mini_batch_size
                self.config.ref.ppo_micro_batch_size = self.config.ref.pop("log_prob_micro_batch_size", None)
                self.config.ref.ppo_micro_batch_size_per_gpu = self.config.ref.pop(
                    "log_prob_micro_batch_size_per_gpu", None
                )
                self.config.ref.use_dynamic_bsz = self.config.ref.pop("log_prob_use_dynamic_bsz", False)
                self.config.ref.ppo_max_token_len_per_gpu = self.config.ref.pop("log_prob_max_token_len_per_gpu", None)
            ref_config: ActorConfig = omega_conf_to_dataclass(self.config.ref)
            ref_config.model_config = model_config
            # construct TrainingWorkerConfig
            ref_training_config = TrainingWorkerConfig(
                model_type="language_model",
                model_config=ref_config.model_config,
                engine_config=ref_config.engine,
                optimizer_config=ref_config.optim,
                checkpoint_config=ref_config.checkpoint,
            )
            # assign engine configs (ref runs inference only, so only infer_* knobs)
            ref_training_config.engine_config.use_dynamic_bsz = self.config.ref.use_dynamic_bsz
            ref_training_config.engine_config.infer_max_token_len_per_gpu = self.config.ref.ppo_max_token_len_per_gpu
            ref_training_config.engine_config.infer_micro_batch_size_per_gpu = (
                self.config.ref.ppo_micro_batch_size_per_gpu
            )
            ref_training_config.engine_config.use_remove_padding = model_config.use_remove_padding
            self.ref = TrainingWorker(config=ref_training_config)
            self.ref.reset()
            self.set_dispatch_collect(mesh_name="ref", **self.ref.get_dispatch_collect())
        # 2. build actor model
        if "actor" in self.role:
            actor_config: ActorConfig = omega_conf_to_dataclass(self.config.actor)
            actor_config.model_config = model_config
            actor_training_config = TrainingWorkerConfig(
                model_type="language_model",
                model_config=actor_config.model_config,
                engine_config=actor_config.engine,
                optimizer_config=actor_config.optim,
                checkpoint_config=actor_config.checkpoint,
            )
            assert self.config.actor.use_dynamic_bsz == self.config.rollout.log_prob_use_dynamic_bsz
            # assign engine configs: infer_* comes from rollout log_prob settings,
            # train-time knobs come from the actor ppo settings
            actor_training_config.engine_config.use_dynamic_bsz = self.config.actor.use_dynamic_bsz
            actor_training_config.engine_config.infer_max_token_len_per_gpu = (
                self.config.rollout.log_prob_max_token_len_per_gpu
            )
            actor_training_config.engine_config.infer_micro_batch_size_per_gpu = (
                self.config.rollout.log_prob_micro_batch_size_per_gpu
            )
            actor_training_config.engine_config.max_token_len_per_gpu = self.config.actor.ppo_max_token_len_per_gpu
            actor_training_config.engine_config.micro_batch_size_per_gpu = (
                self.config.actor.ppo_micro_batch_size_per_gpu
            )
            actor_training_config.engine_config.use_remove_padding = model_config.use_remove_padding
            # the batching mode determines which pair of knobs must be present
            if self.config.actor.use_dynamic_bsz:
                assert self.config.rollout.log_prob_max_token_len_per_gpu is not None
                assert self.config.actor.ppo_max_token_len_per_gpu is not None
            else:
                assert self.config.rollout.log_prob_micro_batch_size_per_gpu is not None
                assert self.config.actor.ppo_micro_batch_size_per_gpu is not None
            self.loss_fn = partial(ppo_loss, config=actor_config)
            self.actor = TrainingWorker(config=actor_training_config)
            self.actor.reset()
            self.actor.set_loss_fn(self.loss_fn)
            self.set_dispatch_collect(mesh_name="actor", **self.actor.get_dispatch_collect())
        # 3. build rollout engine
        if "rollout" in self.role:
            rollout_config: RolloutConfig = omega_conf_to_dataclass(self.config.rollout)
            # TODO: move rollout_device_mesh into ServerAdapter
            # 3.1 build rollout device mesh (sglang need only)
            infer_tp = rollout_config.tensor_model_parallel_size * rollout_config.data_parallel_size
            infer_pp = rollout_config.pipeline_model_parallel_size
            infer_world_size = infer_tp * infer_pp
            dp = self.world_size // infer_world_size
            assert self.world_size % infer_world_size == 0, (
                f"rollout world_size: {self.world_size} is not divisible by infer_world_size: {infer_world_size}"
            )
            rollout_device_mesh = init_device_mesh(
                get_device_name(), mesh_shape=(dp, infer_tp, infer_pp), mesh_dim_names=["dp", "infer_tp", "infer_pp"]
            )
            # 3.2 initialize rollout engine
            rollout_cls: type[BaseRollout] = get_rollout_class(rollout_config.name, rollout_config.mode)
            self.rollout = rollout_cls(
                config=rollout_config, model_config=model_config, device_mesh=rollout_device_mesh
            )
            # used for LoRA: "dummy" load_format means real weights still need a first full sync
            self.base_sync_done: bool = "dummy" not in self.config.rollout.load_format
            self.layered_summon = self.config.rollout.get("layered_summon", False)
            self.peft_merge: bool = model_config.lora.get("merge", False)
        # 4. build checkpoint engine (only meaningful on the trainer side)
        if "actor" in self.role:
            checkpoint_engine_config = omega_conf_to_dataclass(self.config.rollout.checkpoint_engine)
            backend = checkpoint_engine_config.backend
            # config value is in MiB; shift converts to bytes
            bucket_size = checkpoint_engine_config.update_weights_bucket_megabytes << 20
            engine_kwargs = checkpoint_engine_config.engine_kwargs.get(backend, {})
            self.checkpoint_engine = CheckpointEngineRegistry.new(
                backend, is_master=(torch.distributed.get_rank() == 0), bucket_size=bucket_size, **engine_kwargs
            )

    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="ref"))
    @DistProfiler.annotate(color="olive", role="ref_compute_log_prob")
    @_with_routing_replay_flag(enabled=False)
    def compute_ref_log_prob(self, data: TensorDict) -> TensorDict:
        """Forward-only log-prob computation with the frozen reference model."""
        output = self.ref.infer_batch(data=data)
        return output.cpu() if output is not None else None

    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
    @DistProfiler.annotate(color="blue", role="actor_compute_log_prob")
    @_with_routing_replay_flag(enabled=True)
    def compute_log_prob(self, data: TensorDict) -> TensorDict:
        """Forward-only log-prob computation with the current actor model."""
        output = self.actor.infer_batch(data)
        return output.cpu() if output is not None else None

    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
    @DistProfiler.annotate(color="red", role="actor_update")
    @_with_routing_replay_flag(enabled=True)
    def update_actor(self, data: TensorDict) -> TensorDict:
        """Run PPO mini-batch updates on the actor and return aggregated metrics."""
        output = self.actor.train_mini_batch(data=data)
        return output.cpu() if output is not None else None

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=False):
        assert "actor" in self.role, "load_checkpoint only support actor role"
        self.actor.load_checkpoint(local_path, hdfs_path, del_local_after_load)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None):
        assert "actor" in self.role, "save_checkpoint only support actor role"
        self.actor.save_checkpoint(local_path, hdfs_path, global_step, max_ckpt_to_keep)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
    async def update_weights(self):
        """Update weights from trainer to rollout.

        1. For sync training with colocated trainer and rollout, update rollout directly from model engine.
            - before update_weights: rollout should be in sleep mode.
            - after update_weights: rollout should be in wake_up mode.
        2. For async training with disaggregated trainer and rollout, send_weights only by checkpoint engine.
        """
        assert self.checkpoint_engine is not None
        # 0. send_weights only for async training with disaggregated trainer and rollout
        if self.config.rollout.checkpoint_engine.backend != "naive":
            # fix: this worker has no `self.engine` attribute — the training engine
            # lives on the actor sub-worker (see the colocated path below)
            per_tensor_param, _ = self.actor.engine.get_per_tensor_param()
            await self.checkpoint_engine.send_weights(per_tensor_param)
            return
        set_expandable_segments(False)
        log_gpu_memory_usage("Before resume weights", logger=logger)
        # 1. resume weights and update weights
        if self.config.rollout.free_cache_engine:
            await self.rollout.resume(tags=["weights"])
        log_gpu_memory_usage("After resume weights", logger=logger)
        # 2. get per tensor generator from engine, this will load model to gpu
        per_tensor_param, peft_config = self.actor.engine.get_per_tensor_param(
            layered_summon=self.layered_summon, base_sync_done=True
        )
        await self.rollout.update_weights(per_tensor_param, peft_config=peft_config, base_sync_done=True)
        do_lora_base_sync = False
        if not self.peft_merge and peft_config is not None:
            # set sleep level for LoRA adapter weights only sync
            # TODO: make this configurable so that users with small
            # main memory can trade sync time to avoid OOM
            self.rollout.sleep_level = 1
            do_lora_base_sync = (not self.base_sync_done) or (
                self.rollout.sleep_level != 1 and self.config.rollout.free_cache_engine
            )
        if do_lora_base_sync:
            # base weights have never been synced (or are dropped on sleep): push them too
            per_tensor_base_params, _ = self.actor.engine.get_per_tensor_param(
                layered_summon=self.layered_summon, base_sync_done=False
            )
            await self.rollout.update_weights(per_tensor_base_params, peft_config=peft_config, base_sync_done=False)
        log_gpu_memory_usage("After update_weights", logger=logger)
        # 3. offload model to cpu
        self.actor.engine.to("cpu", model=True, optimizer=False, grad=False)
        aggressive_empty_cache(force_sync=True)
        # 4. resume kv_cache
        if self.config.rollout.free_cache_engine:
            await self.rollout.resume(tags=["kv_cache"])
        log_gpu_memory_usage("After resume kv_cache", logger=logger)
        self.base_sync_done = True
        set_expandable_segments(True)

    @register(dispatch_mode=Dispatch.DP_COMPUTE, blocking=False)
    def execute_checkpoint_engine(self, method: str, *args, **kwargs):
        """Execute checkpoint engine method.

        Args:
            method (str): Checkpoint engine method name.
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.
        """
        return getattr(self.checkpoint_engine, method)(*args, **kwargs)
| verl__workers__engine_workers.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The main entry point to run the PPO algorithm
"""
import datetime
import json
import logging
import os
import warnings
from dataclasses import asdict
import psutil
import torch
import torch.distributed
import torch.distributed as dist
from codetiming import Timer
from omegaconf import DictConfig, OmegaConf, open_dict
from omegaconf.errors import ConfigAttributeError
from peft import LoraConfig, TaskType, get_peft_model
from safetensors.torch import save_file
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import FullStateDictConfig, ShardedStateDictConfig, StateDictType
try:
# for torch 2.5+
from torch.distributed.tensor import DTensor
except ImportError:
from torch.distributed._tensor import DTensor
from verl import DataProto
from verl.models.transformers.monkey_patch import apply_monkey_patch
from verl.single_controller.base import Worker
from verl.single_controller.base.decorator import Dispatch, make_nd_compute_dataproto_dispatch_fn, register
from verl.utils import hf_processor, hf_tokenizer
from verl.utils.activation_offload import enable_activation_offloading
from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.device import (
get_device_id,
get_device_name,
get_nccl_backend,
get_torch_device,
set_expandable_segments,
)
from verl.utils.flops_counter import FlopsCounter
from verl.utils.fs import copy_to_local
from verl.utils.fsdp_utils import (
CPUOffloadPolicy,
MixedPrecisionPolicy,
apply_fsdp2,
collect_lora_params,
fsdp2_load_full_state_dict,
fsdp_version,
get_fsdp_wrap_policy,
get_init_weight_context_manager,
get_shard_placement_fn,
init_fn,
layered_summon_lora_params,
load_fsdp_model_to_gpu,
load_fsdp_optimizer,
offload_fsdp_model_to_cpu,
offload_fsdp_optimizer,
replace_lora_wrapper,
)
from verl.utils.import_utils import import_external_libs
from verl.utils.memory_utils import aggressive_empty_cache
from verl.utils.model import convert_weight_keys
from verl.utils.profiler import DistProfiler, DistProfilerExtension, ProfilerConfig, log_gpu_memory_usage, simple_timer
from verl.utils.profiler.performance import reduce_timing, topk_reduce_ratio_min_max
from verl.utils.py_functional import convert_to_regular_types
# QAT support
from verl.utils.qat import apply_qat, enable_qat_fuse
from verl.utils.ray_utils import get_event_loop
from verl.workers.config import FSDPCriticConfig, FSDPEngineConfig, HFModelConfig, RolloutConfig
from verl.workers.config.optimizer import build_optimizer
from verl.workers.rollout import get_rollout_class
from verl.workers.sharding_manager.fsdp_ulysses import FSDPUlyssesShardingManager
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
device_name = get_device_name()
def create_device_mesh(world_size, fsdp_size):
    """Build the device mesh used by FSDP.

    A 1-D ("fsdp") mesh is returned when fsdp_size is unset (negative) or covers
    all ranks; otherwise a 2-D ("ddp", "fsdp") mesh enables hybrid sharding.
    """
    spans_all_ranks = fsdp_size < 0 or fsdp_size >= world_size
    if spans_all_ranks:
        return init_device_mesh(device_name, mesh_shape=(world_size,), mesh_dim_names=["fsdp"])
    return init_device_mesh(
        device_name, mesh_shape=(world_size // fsdp_size, fsdp_size), mesh_dim_names=["ddp", "fsdp"]
    )
def get_sharding_strategy(device_mesh, zero3_enable=True):
    """Map a 1-D or 2-D device mesh onto the matching FSDP sharding strategy.

    zero3_enable=True selects full (ZeRO-3 style) sharding; False replicates
    parameters and shards only gradients/optimizer state (ZeRO-2 style).
    """
    from torch.distributed.fsdp import ShardingStrategy

    if device_mesh.ndim == 1:
        return ShardingStrategy.FULL_SHARD if zero3_enable else ShardingStrategy.SHARD_GRAD_OP
    if device_mesh.ndim == 2:
        # 2-D mesh means hybrid sharding: shard within a node, replicate across
        return ShardingStrategy.HYBRID_SHARD if zero3_enable else ShardingStrategy._HYBRID_SHARD_ZERO2
    raise NotImplementedError(f"Get device mesh ndim={device_mesh.ndim}, but only support 1 or 2")
def get_vl_model_vision_tower(vl_model_instance):
    """
    Util to extract Vision Tower from a VL model instance
    """
    # transformers >= 4.52.0 nests the tower under `.model.visual`
    inner_model = getattr(vl_model_instance, "model", None)
    if inner_model is not None and hasattr(inner_model, "visual"):
        return inner_model.visual
    # transformers < 4.52.0 exposes it directly as `.visual`
    if hasattr(vl_model_instance, "visual"):
        return vl_model_instance.visual
    return None
class ActorRolloutRefWorker(Worker, DistProfilerExtension):
"""
This worker can be instantiated as a standalone actor or a standalone rollout or a standalone reference policy
or a hybrid engine based on the config.rollout
"""
    def __init__(self, config: DictConfig, role: str, **kwargs):
        """Initialize distributed state, device meshes and normalized batch sizes.

        Args:
            config: DictConfig with ``actor``/``rollout``/``ref``/``model`` sections.
            role: One of "actor", "rollout", "ref", "actor_rollout", "actor_rollout_ref".
        """
        Worker.__init__(self)
        self.config = config
        import torch.distributed

        # lazily create the global process group the first time a worker is built
        if not torch.distributed.is_initialized():
            rank = int(os.environ.get("RANK", 0))
            world_size = int(os.environ.get("WORLD_SIZE", 1))
            torch.distributed.init_process_group(
                backend=f"cpu:gloo,{get_device_name()}:{get_nccl_backend()}",
                rank=rank,
                world_size=world_size,
                timeout=datetime.timedelta(seconds=self.config.get("nccl_timeout", 600)),
                init_method=os.environ.get("DIST_INIT_METHOD", None),
            )
        # Apply NPU patches for FSDP backend
        from verl.workers.engine.fsdp.utils import apply_npu_fsdp_patches

        apply_npu_fsdp_patches()
        # build device mesh for FSDP
        world_size = torch.distributed.get_world_size()
        # TODO(sgm): support FSDP hybrid shard for larger model
        self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=self.config.actor.fsdp_config.fsdp_size)
        # build device mesh for Ulysses Sequence Parallel
        self.ulysses_device_mesh = None
        self.ulysses_sequence_parallel_size = self.config.actor.get("ulysses_sequence_parallel_size", 1)
        dp = world_size // self.ulysses_sequence_parallel_size
        if self.ulysses_sequence_parallel_size > 1:
            self.ulysses_device_mesh = init_device_mesh(
                device_name, mesh_shape=(dp, self.ulysses_sequence_parallel_size), mesh_dim_names=["dp", "sp"]
            )
        # create training dispatch
        if self.ulysses_device_mesh is not None:
            # with sequence parallel, only sp-rank-0 workers collect/return outputs
            is_collect = self.ulysses_device_mesh["sp"].get_local_rank() == 0
            self._register_dispatch_collect_info(
                "actor", dp_rank=self.ulysses_device_mesh["dp"].get_local_rank(), is_collect=is_collect
            )
        else:
            self._register_dispatch_collect_info("actor", dp_rank=self.rank, is_collect=True)
        self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh)
        # LoRA is enabled either by a pre-trained adapter path or a positive rank
        self._lora_rank = self.config.model.get("lora_rank", 0)
        self._is_lora = self.config.model.get("lora_adapter_path") is not None or self._lora_rank > 0
        self.role = role
        assert self.role in ["actor", "rollout", "ref", "actor_rollout", "actor_rollout_ref"]
        self._is_actor = self.role in ["actor", "actor_rollout", "actor_rollout_ref"]
        self._is_rollout = self.role in ["rollout", "actor_rollout", "actor_rollout_ref"]
        self._is_ref = self.role in ["ref", "actor_rollout_ref"]
        self.use_orig_params = self.config.actor.fsdp_config.get("use_orig_params", False)
        # TODO(haibin.lin):
        # As of now the type of config is DictConfig, if we assign config.profiler with ProfilerConfig,
        # it will actually convert the ProfilerConfig dataclass back to a DictConfig.
        # We can still use ProfilerConfig for testing purpose (tests/utils/test_nvtx_profile.py)
        # as they provides DictConfig-like interface
        # The benefit of creating the dataclass config is to perform validation during __post_init__
        if self._is_actor:
            omega_profiler_config = config.actor.get("profiler", {})
        elif self._is_rollout:
            # NOTE: In colocation mode, rollout config may not take effect (follow the actor config)
            # This is for extendability in AsyncRL cases
            omega_profiler_config = config.rollout.get("profiler", {})
        elif self._is_ref:
            omega_profiler_config = config.ref.get("profiler", {})
        else:
            raise ValueError(
                f"Invalid role {self.role}, should be one of "
                "['actor', 'rollout', 'ref', 'actor_rollout', 'actor_rollout_ref']"
            )
        # omega_profiler_config is DictConfig
        # profiler_config is a ProfilerConfig dataclass
        profiler_config = omega_conf_to_dataclass(omega_profiler_config, dataclass_type=ProfilerConfig)
        if omega_profiler_config.get("tool", None) in ["npu", "nsys", "torch", "torch_memory"]:
            tool_config = omega_conf_to_dataclass(
                omega_profiler_config.get("tool_config", {}).get(omega_profiler_config.get("tool"))
            )
        else:
            tool_config = None
        DistProfilerExtension.__init__(
            self, DistProfiler(rank=self.rank, config=profiler_config, tool_config=tool_config)
        )
        self._is_offload_param = False
        self._is_offload_optimizer = False
        if self._is_actor:
            self._is_offload_param = self.config.actor.fsdp_config.get("param_offload", False)
            self._is_offload_optimizer = self.config.actor.fsdp_config.get("optimizer_offload", False)
        elif self._is_ref:
            # TODO: it seems that manual offload is slower than FSDP offload
            self._is_offload_param = self.config.ref.fsdp_config.get("param_offload", False)
        # normalize config
        if self._is_actor:
            # scale by rollout.n (responses per prompt), then divide by the
            # data-parallel world size to get the per-dp-rank mini batch size
            self.config.actor.ppo_mini_batch_size *= self.config.rollout.n
            self.config.actor.ppo_mini_batch_size //= self.device_mesh.size() // self.ulysses_sequence_parallel_size
            assert self.config.actor.ppo_mini_batch_size > 0, (
                f"ppo_mini_batch_size {self.config.actor.ppo_mini_batch_size} should be larger than 0 after "
                f"normalization"
            )
            # micro bsz
            if self.config.actor.ppo_micro_batch_size is not None:
                self.config.actor.ppo_micro_batch_size //= (
                    self.device_mesh.size() // self.ulysses_sequence_parallel_size
                )
                self.config.actor.ppo_micro_batch_size_per_gpu = self.config.actor.ppo_micro_batch_size
            if self.config.actor.ppo_micro_batch_size_per_gpu is not None:
                assert self.config.actor.ppo_mini_batch_size % self.config.actor.ppo_micro_batch_size_per_gpu == 0, (
                    f"normalized ppo_mini_batch_size {self.config.actor.ppo_mini_batch_size} should be divisible by "
                    f"ppo_micro_batch_size_per_gpu {self.config.actor.ppo_micro_batch_size_per_gpu}"
                )
                assert self.config.actor.ppo_mini_batch_size // self.config.actor.ppo_micro_batch_size_per_gpu > 0, (
                    f"normalized ppo_mini_batch_size {self.config.actor.ppo_mini_batch_size} should be larger than "
                    f"ppo_micro_batch_size_per_gpu {self.config.actor.ppo_micro_batch_size_per_gpu}"
                )
        # normalize rollout config
        if self._is_rollout and self.config.rollout.log_prob_micro_batch_size is not None:
            self.config.rollout.log_prob_micro_batch_size //= (
                self.device_mesh.size() // self.ulysses_sequence_parallel_size
            )
            self.config.rollout.log_prob_micro_batch_size_per_gpu = self.config.rollout.log_prob_micro_batch_size
        # normalize ref config
        if self._is_ref and self.config.ref.log_prob_micro_batch_size is not None:
            self.config.ref.log_prob_micro_batch_size //= self.device_mesh.size() // self.ulysses_sequence_parallel_size
            self.config.ref.log_prob_micro_batch_size_per_gpu = self.config.ref.log_prob_micro_batch_size
def _init_qat_config(self):
"""Initialize QAT configuration from actor.qat."""
try:
self.qat_config = self.config.actor.qat
self._qat_enabled = self.qat_config.enable
if self._qat_enabled:
logger.info(
f"QAT enabled: mode={self.qat_config.mode}, config_path={self.qat_config.quantization_config_path}"
)
except (AttributeError, KeyError, ConfigAttributeError):
# QAT config not provided, disable QAT
self._qat_enabled = False
self.qat_config = None
def _restore_w4a4_input_scales(self, model, model_path):
"""Restore input_global_scale and input_amax from checkpoint for W4A4 mode."""
import glob
from safetensors import safe_open
safetensor_files = glob.glob(f"{model_path}/model*.safetensors")
loaded_count = 0
for sf_path in safetensor_files:
with safe_open(sf_path, framework="pt") as f:
for key in f.keys():
if "input_global_scale" in key:
module_path = key.replace(".input_global_scale", "")
amax_key = f"{module_path}.input_amax"
module = model
for part in module_path.split("."):
module = getattr(module, part)
scale_val = f.get_tensor(key)
val = scale_val.item() if scale_val.numel() == 1 else scale_val.max().item()
module.input_global_scale.fill_(val)
amax_val = f.get_tensor(amax_key)
amax = amax_val.item() if amax_val.numel() == 1 else amax_val.max().item()
module.input_amax.fill_(amax)
loaded_count += 1
if self.rank == 0:
logger.info(f"[W4A4] Loaded {loaded_count} input scales from checkpoint")
def _build_model_optimizer(
self,
model_path,
fsdp_config: FSDPEngineConfig,
optim_config,
override_model_config,
use_remove_padding=False,
use_fused_kernels=False,
enable_gradient_checkpointing=False,
trust_remote_code=False,
use_liger=False,
role="actor",
enable_activation_offload=False,
use_prefix_grouper=False,
use_tiled_mlp=False,
tiled_mlp_shards=4,
):
from torch.distributed.fsdp import CPUOffload, MixedPrecision
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForImageTextToText,
AutoModelForVision2Seq,
)
from verl.utils.model import get_generation_config, print_model_size, update_model_config
from verl.utils.torch_dtypes import PrecisionType
assert role in ["actor", "ref"]
# TiledMLP requires FSDP2 for correct gradient computation
if use_tiled_mlp and self.config.actor.strategy == "fsdp":
raise ValueError("TiledMLP requires FSDP2. Set `actor_rollout_ref.actor.strategy=fsdp2`.")
log_gpu_memory_usage(f"Before init {role} from HF AutoModel", logger=logger)
local_path = model_path
# note that we have to create model in fp32. Otherwise, the optimizer is in bf16, which is incorrect
# TODO(zhangchi.usc1992): 1. support create from random initialized model. 2. Support init with FSDP directly
self.tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code)
self.processor = hf_processor(local_path, trust_remote_code=trust_remote_code)
if self.config.model.get("custom_chat_template", None) is not None:
if self.processor is not None:
self.processor.chat_template = self.config.model.custom_chat_template
else:
self.tokenizer.chat_template = self.config.model.custom_chat_template
torch_dtype = fsdp_config.get("model_dtype", None)
if torch_dtype is None:
torch_dtype = torch.float32 if self._is_actor else torch.bfloat16
else:
torch_dtype = PrecisionType.to_dtype(torch_dtype)
# override model kwargs
attn_implementation = override_model_config.get("attn_implementation", "flash_attention_2")
actor_model_config = AutoConfig.from_pretrained(
local_path, trust_remote_code=trust_remote_code, attn_implementation=attn_implementation
)
# TODO: VL models use VisionAttention, which directly uses flash_attention in transformers>=4.53
# which will be patched by _ulysses_flash_attention_forward, but errorly misses position_ids
# Maybe support Ulysses in VisionAttention in the future and remove this patch
if self.ulysses_sequence_parallel_size > 1 and hasattr(actor_model_config, "vision_config"):
actor_model_config.vision_config._attn_implementation = "eager"
# patch for qwen2.5-vl: when using flash_attention_3, set vision tower to use flash_attention_2
# because the vision tower does not support flash_attention_3
if (
getattr(actor_model_config, "model_type", None) == "qwen2_5_vl"
and attn_implementation == "flash_attention_3"
and hasattr(actor_model_config, "vision_config")
):
actor_model_config.vision_config._attn_implementation = "flash_attention_2"
# patch for kimi-vl
if getattr(actor_model_config, "model_type", None) == "kimi_vl":
actor_model_config.text_config.topk_method = "greedy"
self.generation_config = get_generation_config(local_path, trust_remote_code=trust_remote_code)
override_config_kwargs = {
"bos_token_id": self.tokenizer.bos_token_id,
"eos_token_id": self.tokenizer.eos_token_id,
"pad_token_id": self.tokenizer.pad_token_id,
}
if self.config.model.get("mtp", {}).get("enable", False):
raise NotImplementedError("Right now, MTP is not supported in FSDP")
else:
if hasattr(actor_model_config, "num_nextn_predict_layers"):
actor_model_config.num_nextn_predict_layers = 0
override_config_kwargs.update(override_model_config)
update_model_config(actor_model_config, override_config_kwargs=override_config_kwargs)
if self.rank == 0:
print(f"Model config after override: {actor_model_config}")
# NOTE(fix me): tie_word_embedding causes meta_tensor init to hang
init_context = get_init_weight_context_manager(
use_meta_tensor=not actor_model_config.tie_word_embeddings, mesh=self.device_mesh
)
with init_context(), warnings.catch_warnings():
warnings.simplefilter("ignore")
has_remote_code = hasattr(actor_model_config, "auto_map") and any(
actor_model_config.architectures[0] in val for val in actor_model_config.auto_map.values()
)
if has_remote_code:
auto_class = next(
k for k, v in actor_model_config.auto_map.items() if actor_model_config.architectures[0] in v
)
match auto_class:
case "AutoModelForVision2Seq":
actor_module_class = AutoModelForVision2Seq
case "AutoModelForCausalLM":
actor_module_class = AutoModelForCausalLM
case "AutoModelForImageTextToText":
actor_module_class = AutoModelForImageTextToText
case _:
actor_module_class = AutoModel
else:
if type(actor_model_config) in AutoModelForVision2Seq._model_mapping.keys():
actor_module_class = AutoModelForVision2Seq
elif type(actor_model_config) in AutoModelForCausalLM._model_mapping.keys():
actor_module_class = AutoModelForCausalLM
elif type(actor_model_config) in AutoModelForImageTextToText._model_mapping.keys():
actor_module_class = AutoModelForImageTextToText
else:
actor_module_class = AutoModel
actor_module = actor_module_class.from_pretrained(
pretrained_model_name_or_path=local_path,
torch_dtype=torch_dtype,
config=actor_model_config,
trust_remote_code=trust_remote_code,
attn_implementation=attn_implementation,
)
# Apply Liger kernel to the model if use_liger is set to True
if use_liger:
from liger_kernel.transformers.monkey_patch import _apply_liger_kernel_to_instance
_apply_liger_kernel_to_instance(model=actor_module)
fused_kernel_options = self.config.model.get("fused_kernel_options", None)
fused_kernels_backend = (
fused_kernel_options.get("impl_backend", None) if fused_kernel_options is not None else None
)
apply_monkey_patch(
model=actor_module,
use_remove_padding=use_remove_padding,
ulysses_sp_size=self.ulysses_sequence_parallel_size,
use_fused_kernels=use_fused_kernels,
fused_kernels_backend=fused_kernels_backend,
use_prefix_grouper=use_prefix_grouper,
use_tiled_mlp=use_tiled_mlp,
tiled_mlp_shards=tiled_mlp_shards,
)
# some parameters may not in torch_dtype. TODO(zhangchi.usc1992) remove this after we switch to fsdp2
actor_module.to(torch_dtype)
if enable_gradient_checkpointing:
actor_module.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False})
if self._is_lora:
print("Applying LoRA to actor module")
actor_module.enable_input_require_grads()
lora_adapter_path = self.config.model.get("lora_adapter_path")
if lora_adapter_path is not None:
from peft import PeftModel
print(f"Loading pre-trained LoRA adapter to {role} from: {lora_adapter_path}")
# Copy adapter to local if needed
local_adapter_path = copy_to_local(lora_adapter_path, use_shm=self.config.model.get("use_shm", False))
actor_module = PeftModel.from_pretrained(actor_module, local_adapter_path, is_trainable=True)
peft_config = actor_module.peft_config["default"]
# Ensure task_type is TaskType enum, not string
if isinstance(peft_config.task_type, str):
peft_config.task_type = TaskType.CAUSAL_LM
else:
# Convert config to regular Python types before creating PEFT model
lora_config = {
"task_type": TaskType.CAUSAL_LM,
"r": self.config.model.lora_rank,
"lora_alpha": self.config.model.lora_alpha,
"target_modules": convert_to_regular_types(self.config.model.target_modules),
"exclude_modules": convert_to_regular_types(self.config.model.exclude_modules),
"bias": "none",
}
actor_module = get_peft_model(actor_module, LoraConfig(**lora_config))
self.use_orig_params = fsdp_config.get("use_orig_params", False)
if self.config.actor.get("freeze_vision_tower", False):
vision_tower = get_vl_model_vision_tower(actor_module)
if vision_tower is not None:
vision_tower.requires_grad_(False)
self.use_orig_params = True
if self.rank == 0:
print("[actor model] Vision tower is set to not trainable.")
else:
if self.rank == 0:
print("[actor model] No vision tower found.")
# Apply QAT before FSDP wrapping (actor only)
if role == "actor" and self._qat_enabled:
actor_module = apply_qat(actor_module, self.qat_config)
enable_qat_fuse(actor_module)
if self.qat_config.mode == "w4a4":
self._restore_w4a4_input_scales(actor_module, self.config.model.path)
torch.distributed.barrier()
if self.rank == 0:
print_model_size(actor_module)
log_gpu_memory_usage(f"After init {role} from HF AutoModel", logger=logger)
# We wrap FSDP for rollout as well
mixed_precision_config = fsdp_config.get("mixed_precision", None)
if mixed_precision_config is not None:
param_dtype = PrecisionType.to_dtype(mixed_precision_config.get("param_dtype", "bf16"))
reduce_dtype = PrecisionType.to_dtype(mixed_precision_config.get("reduce_dtype", "fp32"))
buffer_dtype = PrecisionType.to_dtype(mixed_precision_config.get("buffer_dtype", "fp32"))
else:
param_dtype = PrecisionType.to_dtype(fsdp_config.dtype)
reduce_dtype = torch.float32
buffer_dtype = torch.float32
mixed_precision = MixedPrecision(param_dtype=param_dtype, reduce_dtype=reduce_dtype, buffer_dtype=buffer_dtype)
# Store param_dtype for QAT quantizer
self._param_dtype = param_dtype
auto_wrap_policy = get_fsdp_wrap_policy(
module=actor_module,
config=fsdp_config.get("wrap_policy", None),
is_lora=self._is_lora,
)
# if self._is_rollout and self.config.rollout.name == "hf":
# # TODO(zhangchi.usc1992, shengguangming) fix me.
# Current, auto_wrap_policy causes HFRollout to hang in Gemma
# auto_wrap_policy = None
if self.rank == 0:
print(f"wrap_policy: {auto_wrap_policy}")
fsdp_mesh = self.device_mesh
fsdp_enable_zero3 = fsdp_config.reshard_after_forward
sharding_strategy = get_sharding_strategy(fsdp_mesh, fsdp_enable_zero3)
# TODO: add transformer policy
# We force reference policy to use CPUOffload to save memory.
# We force turn off CPUOffload for actor because it causes incorrect results when using grad accumulation
cpu_offload = None if role == "actor" else CPUOffload(offload_params=True)
fsdp_strategy = self.config.actor.strategy
if fsdp_strategy == "fsdp":
actor_module_fsdp = FSDP(
actor_module,
cpu_offload=cpu_offload,
param_init_fn=init_fn,
auto_wrap_policy=auto_wrap_policy,
device_id=get_device_id(),
sharding_strategy=sharding_strategy, # zero3
mixed_precision=mixed_precision,
sync_module_states=True,
device_mesh=self.device_mesh,
use_orig_params=self.use_orig_params,
forward_prefetch=fsdp_config.get("forward_prefetch", False),
)
elif fsdp_strategy == "fsdp2":
assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)"
mp_policy = MixedPrecisionPolicy(
param_dtype=param_dtype, reduce_dtype=reduce_dtype, cast_forward_inputs=True
)
if role == "actor" and fsdp_config.offload_policy:
cpu_offload = CPUOffloadPolicy(pin_memory=True)
self._is_offload_param = False
self._is_offload_optimizer = False
else:
cpu_offload = None if role == "actor" else CPUOffloadPolicy(pin_memory=True)
fsdp_kwargs = {
"mesh": fsdp_mesh,
"mp_policy": mp_policy,
"offload_policy": cpu_offload,
"reshard_after_forward": fsdp_config.reshard_after_forward,
"shard_placement_fn": get_shard_placement_fn(fsdp_size=self.device_mesh.shape[-1]),
}
full_state = actor_module.state_dict()
apply_fsdp2(actor_module, fsdp_kwargs, fsdp_config)
fsdp2_load_full_state_dict(actor_module, full_state, fsdp_mesh, cpu_offload)
actor_module_fsdp = actor_module
else:
raise NotImplementedError(f"not implement {fsdp_strategy}")
if enable_activation_offload:
enable_activation_offloading(actor_module_fsdp, fsdp_strategy, enable_gradient_checkpointing)
log_gpu_memory_usage(f"After {role} FSDP init", logger=logger)
# TODO: add more optimizer args into config
if role == "actor" and optim_config is not None:
from verl.utils.torch_functional import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup
actor_optimizer = build_optimizer(actor_module_fsdp.parameters(), optim_config)
total_steps = optim_config.get("total_training_steps", 0)
num_warmup_steps = int(optim_config.get("lr_warmup_steps", -1))
lr_scheduler_type = optim_config.get("lr_scheduler_type", "constant")
min_lr_ratio = optim_config.get("min_lr_ratio", 0.0)
num_cycles = optim_config.get("num_cycles", 0.5)
if num_warmup_steps < 0:
num_warmup_steps_ratio = optim_config.get("lr_warmup_steps_ratio", 0.0)
num_warmup_steps = int(num_warmup_steps_ratio * total_steps)
if self.rank == 0:
print(f"Total steps: {total_steps}, num_warmup_steps: {num_warmup_steps}")
if lr_scheduler_type == "constant":
actor_lr_scheduler = get_constant_schedule_with_warmup(
optimizer=actor_optimizer, num_warmup_steps=num_warmup_steps
)
elif lr_scheduler_type == "cosine":
actor_lr_scheduler = get_cosine_schedule_with_warmup(
optimizer=actor_optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=total_steps,
min_lr_ratio=min_lr_ratio,
num_cycles=num_cycles,
)
else:
raise NotImplementedError(f"LR scheduler type {lr_scheduler_type} is not supported")
log_gpu_memory_usage(f"After {role} optimizer init", logger=logger)
else:
actor_optimizer = None
actor_lr_scheduler = None
return actor_module_fsdp, actor_optimizer, actor_lr_scheduler, actor_model_config
    def _build_rollout(self, trust_remote_code=False):
        """Build the rollout (inference) engine and its device mesh.

        Steps: parse the rollout/model OmegaConf nodes into dataclasses,
        create a (dp, infer_tp, infer_pp) device mesh, register dispatch/
        collect info so DataProto routing knows which ranks hold unique
        outputs, instantiate the rollout engine, and configure the FSDP
        state-dict type used later for weight sync.

        Args:
            trust_remote_code (bool): whether remote HF code is trusted.
                NOTE(review): not referenced in this body — model config is
                parsed from ``self.config.model``; confirm callers rely on it.
        """
        from torch.distributed.device_mesh import init_device_mesh

        # 1. parse rollout and huggingface model config
        rollout_config: RolloutConfig = omega_conf_to_dataclass(self.config.rollout)
        model_config: HFModelConfig = omega_conf_to_dataclass(self.config.model, dataclass_type=HFModelConfig)
        self.model_config = model_config

        # 2. build rollout device mesh
        # "infer_tp" folds inference tensor-parallel and inference data-parallel together.
        infer_tp = self.config.rollout.tensor_model_parallel_size * self.config.rollout.data_parallel_size
        infer_pp = self.config.rollout.pipeline_model_parallel_size
        infer_world_size = infer_tp * infer_pp
        dp = self.world_size // infer_world_size
        assert self.world_size % infer_world_size == 0, (
            f"rollout world_size: {self.world_size} is not divisible by infer_world_size: {infer_world_size}"
        )
        rollout_device_mesh = init_device_mesh(
            device_name, mesh_shape=(dp, infer_tp, infer_pp), mesh_dim_names=["dp", "infer_tp", "infer_pp"]
        )
        rollout_name = self.config.rollout.name
        self.rollout_device_mesh = rollout_device_mesh

        if rollout_name == "hf":
            # HF rollout: every rank generates independently, so every rank collects.
            self._register_dispatch_collect_info("rollout", dp_rank=self.rank, is_collect=True)
        else:
            # Only tp-rank 0 / pp-rank 0 within each dp group holds a unique output copy.
            is_collect = (
                rollout_device_mesh["infer_tp"].get_local_rank() == 0
                and rollout_device_mesh["infer_pp"].get_local_rank() == 0
            )
            self._register_dispatch_collect_info(
                "rollout", dp_rank=rollout_device_mesh["dp"].get_local_rank(), is_collect=is_collect
            )

        # 4. build rollout model
        log_gpu_memory_usage(f"Before building {self.config.rollout.name} rollout", logger=logger)
        self.rollout = get_rollout_class(rollout_config.name, rollout_config.mode)(
            config=rollout_config, model_config=model_config, device_mesh=rollout_device_mesh
        )
        log_gpu_memory_usage(f"After building {self.config.rollout.name} rollout", logger=logger)

        # Configure how FSDP v1 materializes state dicts for weight sync:
        # full params when running on a single rank, sharded otherwise.
        if torch.distributed.get_world_size() == 1 and fsdp_version(self.actor_module_fsdp) == 1:
            FSDP.set_state_dict_type(
                self.actor_module_fsdp,
                state_dict_type=StateDictType.FULL_STATE_DICT,
                state_dict_config=FullStateDictConfig(),
            )
        elif fsdp_version(self.actor_module_fsdp) == 1:
            FSDP.set_state_dict_type(
                self.actor_module_fsdp,
                state_dict_type=StateDictType.SHARDED_STATE_DICT,
                state_dict_config=ShardedStateDictConfig(),
            )

        # used for LoRA: a "dummy" load_format means the rollout engine was
        # initialized with placeholder weights, so the base model still needs syncing.
        self.base_sync_done: bool = "dummy" not in self.config.rollout.load_format
        self.layered_summon = self.config.rollout.get("layered_summon", False)

        # 5. switch to trainer mode
        # NOTE: It's critical that hybrid engine in trainer mode initially to load checkpoint.
        # For async mode, we can't call run_until_complete here, so we will switch to trainer mode in AgentLoopManager.
        # Note: sync mode is deprecated and rejected in RolloutConfig.__post_init__
async def rollout_mode(self):
"""Context switch hybridengine to rollout mode."""
aggressive_empty_cache(force_sync=True)
log_gpu_memory_usage("Before load_fsdp_model_to_gpu", logger=logger)
if self._is_offload_param:
load_fsdp_model_to_gpu(self.actor_module_fsdp)
log_gpu_memory_usage("After load_fsdp_model_to_gpu", logger=logger)
peft_config = None
peft_model = getattr(self.actor_module_fsdp, "_fsdp_wrapped_module", self.actor_module_fsdp)
if hasattr(peft_model, "peft_config"): # LoRA
peft_config = peft_model.peft_config.get("default", None)
params = collect_lora_params(
module=self.actor_module_fsdp,
layered_summon=self.config.rollout.get("layered_summon", False),
base_sync_done=self.base_sync_done,
)
if not self.base_sync_done:
params = {replace_lora_wrapper(k, peft_config): v for k, v in params.items()}
else:
params = self.actor_module_fsdp.state_dict()
params = convert_weight_keys(
params, getattr(self.actor_module_fsdp, "_fsdp_wrapped_module", self.actor_module_fsdp)
)
# Special handling for LoRA with sleep_level=2:
# When sleep_level=2, base model weights are destroyed during each sleep cycle.
# separately collect and update LoRA weights and base model weights through their respective interfaces.
# Here: params contains LoRA weights, base_model_params contains base model weights.
# Only needed if the rollout engine actually sleeps/frees weights (free_cache_engine=True).
if (
peft_config is not None
and getattr(self.rollout, "sleep_level", None) == 2
and self.config.rollout.free_cache_engine
):
base_model_params = collect_lora_params(
module=self.actor_module_fsdp,
layered_summon=self.layered_summon,
base_sync_done=False,
)
base_model_params = {replace_lora_wrapper(k, peft_config): v for k, v in base_model_params.items()}
base_model_params = convert_weight_keys(
base_model_params, getattr(self.actor_module_fsdp, "_fsdp_wrapped_module", self.actor_module_fsdp)
)
log_gpu_memory_usage("Before offload_fsdp_model_to_cpu", logger=logger)
if self._is_offload_param:
offload_fsdp_model_to_cpu(self.actor_module_fsdp)
log_gpu_memory_usage("After offload_fsdp_model_to_cpu", logger=logger)
set_expandable_segments(False)
if peft_config is not None and self.base_sync_done:
per_tensor_param = params.items() if isinstance(params, dict) else params # Fixed: handle dict case
else:
device = get_device_id() # used when fsdp2 set cpu_offload_policy
per_tensor_param = (
(name, param.to(device, non_blocking=True).full_tensor() if isinstance(param, DTensor) else param)
for name, param in params.items()
)
# QAT: quantize weights before sending to vLLM
if self._qat_enabled:
from verl.utils.qat.quantizer import QATQuantizer
quantizer = QATQuantizer(
mode=self.qat_config.mode,
group_size=self.qat_config.group_size,
ignore_patterns=self.qat_config.ignore_patterns,
device=torch.device(get_device_id()),
param_dtype=self._param_dtype,
)
per_tensor_param = quantizer.quantize_with_fusion(
per_tensor_param,
target_device=torch.device("cpu"),
)
aggressive_empty_cache(force_sync=True)
if self.config.rollout.free_cache_engine:
await self.rollout.resume(tags=["weights"])
log_gpu_memory_usage("After resume weights", logger=logger)
if (
peft_config is not None
and getattr(self.rollout, "sleep_level", None) == 2
and self.config.rollout.free_cache_engine
):
per_tensor_base_params = (
(name, param.to(device, non_blocking=True).full_tensor() if isinstance(param, DTensor) else param)
for name, param in base_model_params.items()
)
await self.rollout.update_weights(per_tensor_base_params, base_sync_done=False)
del base_model_params, per_tensor_base_params
await self.rollout.update_weights(per_tensor_param, peft_config=peft_config, base_sync_done=self.base_sync_done)
log_gpu_memory_usage("After update_weights", logger=logger)
del params, per_tensor_param
aggressive_empty_cache(force_sync=True)
if self.config.rollout.free_cache_engine:
await self.rollout.resume(tags=["kv_cache"])
log_gpu_memory_usage("After resume kv_cache", logger=logger)
self.base_sync_done = True
set_expandable_segments(True)
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def init_model(self):
        """Build every sub-module this worker's roles require.

        Depending on ``self._is_actor`` / ``self._is_rollout`` / ``self._is_ref``
        this creates: the FSDP-wrapped actor model, optimizer and LR scheduler;
        the rollout engine; a standalone reference policy; and a checkpoint
        manager. Runs once per worker before any compute RPC.
        """
        from verl.workers.actor import DataParallelPPOActor

        # This is used to import external_lib into the huggingface systems
        import_external_libs(self.config.model.get("external_lib", None))

        # Initialize QAT config before _build_model_optimizer
        self._init_qat_config()

        override_model_config = OmegaConf.to_container(OmegaConf.create(self.config.model.get("override_config", {})))
        use_remove_padding = self.config.model.get("use_remove_padding", False)
        use_shm = self.config.model.get("use_shm", False)
        use_fused_kernels = self.config.model.get("use_fused_kernels", False)

        if self._is_actor or self._is_rollout:
            # we need the model for actor and rollout
            if self._is_actor:
                optim_config = self.config.actor.optim
                fsdp_config = omega_conf_to_dataclass(self.config.actor.fsdp_config)
            else:
                # rollout-only worker: no optimizer, default FSDP engine settings
                optim_config = None
                fsdp_config = FSDPEngineConfig()
            local_path = copy_to_local(self.config.model.path, use_shm=use_shm)

            # TiledMLP configuration for memory-efficient MLP computation
            tiled_mlp_config = self.config.model.get("tiled_mlp", {})
            use_tiled_mlp = tiled_mlp_config.get("enabled", False)
            tiled_mlp_shards = tiled_mlp_config.get("num_shards", 4)

            (
                self.actor_module_fsdp,
                self.actor_optimizer,
                self.actor_lr_scheduler,
                self.actor_model_config,
            ) = self._build_model_optimizer(
                model_path=local_path,
                fsdp_config=fsdp_config,
                optim_config=optim_config,
                override_model_config=override_model_config,
                use_remove_padding=use_remove_padding,
                use_fused_kernels=use_fused_kernels,
                enable_gradient_checkpointing=self.config.model.get("enable_gradient_checkpointing", False),
                trust_remote_code=self.config.model.get("trust_remote_code", False),
                use_liger=self.config.model.get("use_liger", False),
                role="actor",
                enable_activation_offload=self.config.model.get("enable_activation_offload", False),
                use_prefix_grouper=self.config.actor.get("use_prefix_grouper", False),
                use_tiled_mlp=use_tiled_mlp,
                tiled_mlp_shards=tiled_mlp_shards,
            )

            # get the original unwrapped module (FSDP v1 keeps it under _fsdp_wrapped_module)
            if fsdp_version(self.actor_module_fsdp) == 1:
                self.actor_module = self.actor_module_fsdp._fsdp_wrapped_module

            # Park params/optimizer on CPU right away when offloading is configured.
            if self._is_offload_param:
                offload_fsdp_model_to_cpu(self.actor_module_fsdp)
                log_gpu_memory_usage("After offload actor model during init", logger=logger)
            if self._is_offload_optimizer:
                offload_fsdp_optimizer(optimizer=self.actor_optimizer)
                log_gpu_memory_usage("After offload actor optimizer during init", logger=logger)

        if self._is_actor:
            actor_cfg = omega_conf_to_dataclass(self.config.actor)
            self.actor = DataParallelPPOActor(
                config=actor_cfg, actor_module=self.actor_module_fsdp, actor_optimizer=self.actor_optimizer
            )

        if self._is_rollout:
            self._build_rollout(trust_remote_code=self.config.model.get("trust_remote_code", False))

        if self._is_ref:
            # Reference model may point at a different checkpoint than the actor.
            ref_model_path = self.config.model.path
            ref_model = self.config.ref.get("model", None)
            if ref_model is not None:
                ref_model_path = ref_model.get("path", self.config.model.path)
            if self.rank == 0:
                print("reference model:", ref_model_path)
            local_path = copy_to_local(ref_model_path, use_shm=use_shm)
            use_prefix_grouper = hasattr(self.config, "actor") and self.config.actor.get("use_prefix_grouper", False)

            # TiledMLP for ref model: use ref config if specified, otherwise use actor config
            ref_tiled_mlp_config = self.config.ref.get("tiled_mlp", None)
            if ref_tiled_mlp_config is None:
                ref_tiled_mlp_config = self.config.model.get("tiled_mlp", {})
            ref_use_tiled_mlp = ref_tiled_mlp_config.get("enabled", False)
            ref_tiled_mlp_shards = ref_tiled_mlp_config.get("num_shards", 4)

            # Only the module (index 0) is needed; ref has no optimizer/scheduler.
            self.ref_module_fsdp = self._build_model_optimizer(
                model_path=local_path,
                fsdp_config=omega_conf_to_dataclass(self.config.ref.fsdp_config),
                optim_config=None,
                override_model_config=override_model_config,
                use_remove_padding=use_remove_padding,
                use_fused_kernels=use_fused_kernels,
                trust_remote_code=self.config.model.get("trust_remote_code", False),
                use_liger=self.config.model.get("use_liger", False),
                role="ref",
                use_prefix_grouper=use_prefix_grouper,
                use_tiled_mlp=ref_use_tiled_mlp,
                tiled_mlp_shards=ref_tiled_mlp_shards,
            )[0]
            # Record the effective flags on the ref config (open_dict allows new keys).
            OmegaConf.set_struct(self.config.ref, True)
            with open_dict(self.config.ref):
                self.config.ref.use_remove_padding = use_remove_padding
                self.config.ref.use_fused_kernels = use_fused_kernels
                if use_prefix_grouper:
                    self.config.ref.use_prefix_grouper = use_prefix_grouper
            self.ref_policy = DataParallelPPOActor(config=self.config.ref, actor_module=self.ref_module_fsdp)

        if self._is_actor:
            self.flops_counter = FlopsCounter(self.actor_model_config)
            self.checkpoint_manager = FSDPCheckpointManager(
                model=self.actor_module_fsdp,
                optimizer=self.actor.actor_optimizer,
                lr_scheduler=self.actor_lr_scheduler,
                processing_class=self.processor if self.processor is not None else self.tokenizer,
                checkpoint_config=self.config.actor.checkpoint,
                trust_remote_code=self.config.model.get("trust_remote_code", False),
            )

        if not self._is_actor and self._is_rollout:
            # If ActorRolloutRefWorker is initialized as a standalone rollout,
            # create a checkpoint manager for FSDP model to allow loading FSDP checkpoints for rollout.
            checkpoint_contents = OmegaConf.create({"load_contents": ["model"], "save_contents": []})
            self.checkpoint_manager = FSDPCheckpointManager(
                model=self.actor_module_fsdp,
                optimizer=None,
                lr_scheduler=None,
                processing_class=self.processor if self.processor is not None else self.tokenizer,
                checkpoint_config=checkpoint_contents,
            )
    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
    @DistProfiler.annotate(color="red", role="actor_update")
    def update_actor(self, data: DataProto):
        """Run one PPO policy update on this worker's shard of ``data``.

        Loads offloaded model/optimizer to GPU, updates the policy under the
        Ulysses sharding context, records perf metrics (MFU, memory, LR),
        steps the LR scheduler, then re-offloads if configured.

        Returns:
            DataProto: empty tensors, ``meta_info["metrics"]`` carries results.
        """
        assert self._is_actor
        if self._is_offload_param:
            load_fsdp_model_to_gpu(self.actor_module_fsdp)
        if self._is_offload_optimizer:
            load_fsdp_optimizer(optimizer=self.actor_optimizer, device_id=get_device_id())

        with self.ulysses_sharding_manager:
            data = data.to("cpu")  # data will to device with each micro batch on actor.update_policy
            data.meta_info.setdefault("pad_token_id", self.tokenizer.pad_token_id)

            # perform training
            with Timer(name="update_policy", logger=None) as timer:
                metrics = self.actor.update_policy(data=data)
            delta_time = timer.last

            global_num_tokens = data.meta_info["global_token_num"]
            images_seqlens = data.meta_info.get("images_seqlens", None)
            estimated_flops, promised_flops = self.flops_counter.estimate_flops(
                global_num_tokens, delta_time, images_seqlens=images_seqlens
            )
            # MFU = achieved FLOPs (x ppo_epochs passes) / peak FLOPs, averaged per rank.
            metrics["perf/mfu/actor"] = (
                estimated_flops * self.config.actor.ppo_epochs / promised_flops / self.world_size
            )
            metrics["perf/max_memory_allocated_gb"] = get_torch_device().max_memory_allocated() / (1024**3)
            metrics["perf/max_memory_reserved_gb"] = get_torch_device().max_memory_reserved() / (1024**3)
            metrics["perf/cpu_memory_used_gb"] = psutil.virtual_memory().used / (1024**3)

            # Report the LR in effect for this step, then advance the schedule.
            lr = self.actor_lr_scheduler.get_last_lr()[0]
            metrics["actor/lr"] = lr.item() if torch.is_tensor(lr) else lr
            self.actor_lr_scheduler.step()

            # TODO: here, we should return all metrics
            output = DataProto(meta_info={"metrics": metrics})
            output = output.to("cpu")

        if self._is_offload_param:
            offload_fsdp_model_to_cpu(self.actor_module_fsdp)
            log_gpu_memory_usage("After offload actor model during update_actor", logger=logger)
        if self._is_offload_optimizer:
            offload_fsdp_optimizer(optimizer=self.actor_optimizer)
            log_gpu_memory_usage("After offload actor optimizer during update_actor", logger=logger)
        return output
    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="rollout"))
    @DistProfiler.annotate(color="red", role="rollout_generate")
    def generate_sequences(self, prompts: DataProto):
        """Generate rollout sequences for ``prompts``.

        For a hybrid actor+rollout worker this switches the engine to rollout
        mode (weight sync), generates, then switches back to trainer mode.
        Cross-rank timing statistics are reduced so every rank returns the
        same ``meta_info["timing"]``.
        """
        # Support all hardwares
        assert self._is_rollout
        prompts = prompts.to(get_device_id())

        # Prefer generation_config token ids; fall back to the tokenizer's.
        meta_info = {
            "eos_token_id": self.generation_config.eos_token_id
            if self.generation_config is not None
            else self.tokenizer.eos_token_id,
            "pad_token_id": self.generation_config.pad_token_id
            if self.generation_config is not None
            else self.tokenizer.pad_token_id,
        }
        prompts.meta_info.update(meta_info)

        timing_generate = {}
        if self._is_actor:  # For rollout only, we do not switch context.
            loop = get_event_loop()
            loop.run_until_complete(self.rollout_mode())
            log_gpu_memory_usage("After switch to rollout mode", logger=logger)

        with simple_timer("generate_sequences", timing_generate):
            output = self.rollout.generate_sequences(prompts=prompts)

        if self._is_actor:
            loop.run_until_complete(self.trainer_mode())
            log_gpu_memory_usage("After switch to trainer mode", logger=logger)

        # We calculate the average timing across all ranks
        # to make sure meta_info["timing"] is the same
        timing_generate_topk_ratio, timing_generate_min, timing_generate_max = topk_reduce_ratio_min_max(
            timing_generate["generate_sequences"]
        )
        timing_generate = reduce_timing(timing_generate)
        timing_generate.update(
            {
                "generation_timing/max": timing_generate_max,
                "generation_timing/min": timing_generate_min,
                "generation_timing/topk_ratio": timing_generate_topk_ratio,
            }
        )
        output.meta_info["timing"] = timing_generate
        output = output.to("cpu")

        # clear kv cache
        get_torch_device().empty_cache()
        return output
    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
    @DistProfiler.annotate(color="blue", role="actor_compute_log_prob")
    def compute_log_prob(self, data: DataProto):
        """Recompute log-probs of ``data`` under the current actor.

        When ``meta_info["is_lora"]`` is set, LoRA adapters are disabled for
        the forward pass so the base model's log-probs serve as reference
        log-probs (returned under ``ref_log_prob`` instead of
        ``old_log_probs``, without entropy).
        """
        # when is_lora is True, we use the actor without lora applied to calculate the log_prob
        # which is mostly used for ref log_prob calculation
        assert self._is_actor
        if self._is_offload_param:
            load_fsdp_model_to_gpu(self.actor_module_fsdp)

        # Support all hardwares
        from contextlib import nullcontext

        is_lora = data.meta_info.pop("is_lora", False)
        # disable_adapter() makes the PEFT model behave as the base (reference) model.
        adapter_ctx = self.actor.actor_module.disable_adapter() if is_lora else nullcontext()

        # we should always recompute old_log_probs when it is HybridEngine
        # Batch-size/token-limit knobs come from ref config in LoRA-ref mode, rollout config otherwise.
        config_source = self.config.ref if is_lora else self.config.rollout
        data.meta_info["micro_batch_size"] = config_source.log_prob_micro_batch_size_per_gpu
        data.meta_info["max_token_len"] = config_source.log_prob_max_token_len_per_gpu
        data.meta_info["use_dynamic_bsz"] = config_source.log_prob_use_dynamic_bsz
        data.meta_info["temperature"] = self.config.rollout.temperature
        data.meta_info.setdefault("pad_token_id", self.tokenizer.pad_token_id)

        # perform recompute log_prob
        calculate_entropy = not is_lora
        with self.ulysses_sharding_manager:
            with adapter_ctx:
                outputs = self.actor.compute_log_prob(data=data, calculate_entropy=calculate_entropy)
            if not is_lora:
                tensors = {"old_log_probs": outputs["log_probs"]}
            else:
                tensors = {"ref_log_prob": outputs["log_probs"]}
            if calculate_entropy:
                tensors["entropys"] = outputs["entropys"]
            if "sum_pi_squared" in outputs:
                tensors["sum_pi_squared"] = outputs["sum_pi_squared"]
            output = DataProto.from_dict(
                tensors=tensors,
                meta_info={"temperature": self.config.rollout.temperature},
            )
            output = output.to("cpu")

        # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes
        # unshard the root FSDP module
        if self.world_size > 1 and fsdp_version(self.actor.actor_module) == 1:
            self.actor.actor_module._handle.reshard(True)

        if self._is_offload_param:
            offload_fsdp_model_to_cpu(self.actor_module_fsdp)
            log_gpu_memory_usage("After offload actor model during compute_log_prob", logger=logger)
        return output
@register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
@DistProfiler.annotate(color="olive", role="ref_compute_log_prob")
def compute_ref_log_prob(self, data: DataProto):
if self._is_lora:
# if _is_lora, actor without lora applied is the ref
data.meta_info["is_lora"] = True
return self.compute_log_prob(data)
assert self._is_ref
# else:
# otherwise, the class have a standalone ref model
micro_batch_size = self.config.ref.log_prob_micro_batch_size_per_gpu
data.meta_info["micro_batch_size"] = micro_batch_size
data.meta_info["temperature"] = self.config.rollout.temperature
data.meta_info["max_token_len"] = self.config.ref.log_prob_max_token_len_per_gpu
data.meta_info["use_dynamic_bsz"] = self.config.ref.log_prob_use_dynamic_bsz
data.meta_info.setdefault("pad_token_id", self.tokenizer.pad_token_id)
with self.ulysses_sharding_manager:
data = data.to("cpu") # data will to device with each micro batch on ref.compute_log_prob
outputs = self.ref_policy.compute_log_prob(data=data, calculate_entropy=False)
output = DataProto.from_dict(tensors={"ref_log_prob": outputs["log_probs"]})
output = output.to("cpu")
# https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes
# unshard the root FSDP module
if self.world_size > 1:
if fsdp_version(self.ref_policy.actor_module) == 1:
self.ref_policy.actor_module._handle.reshard(True)
elif fsdp_version(self.ref_policy.actor_module) == 2:
self.ref_policy.actor_module.reshard()
return output
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None):
        """Save the actor checkpoint; additionally export a standalone LoRA adapter.

        The main checkpoint goes through ``self.checkpoint_manager``. When LoRA
        is active, the adapter weights and config are also written to
        ``<local_path>/lora_adapter`` in the standard PEFT layout (rank 0 does
        the file writes; all ranks participate in parameter summoning).
        """
        from verl.utils.logger import log_with_rank

        # only support save and load ckpt for actor
        assert self._is_actor
        if self._is_offload_param:
            load_fsdp_model_to_gpu(self.actor_module_fsdp)

        self.checkpoint_manager.save_checkpoint(
            local_path=local_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep
        )
        dist.barrier()

        if self._is_lora and hasattr(getattr(self, "actor_module", self.actor_module_fsdp), "peft_config"):
            lora_save_path = os.path.join(local_path, "lora_adapter")
            peft_model = getattr(self, "actor_module", self.actor_module_fsdp)
            peft_config = {}
            if dist.get_rank() == 0:
                os.makedirs(lora_save_path, exist_ok=True)
                # Serialize the PEFT config with JSON-friendly (non-enum) values.
                # NOTE(review): asdict() on the `{}` fallback would raise; this
                # presumes a "default" adapter always exists here — confirm.
                peft_config = asdict(peft_model.peft_config.get("default", {}))
                peft_config["task_type"] = peft_config["task_type"].value
                peft_config["peft_type"] = peft_config["peft_type"].value
                peft_config["target_modules"] = list(peft_config["target_modules"])
            try:
                if fsdp_version(self.actor_module_fsdp) > 0:
                    self.actor_module_fsdp = self.actor_module_fsdp.to(get_device_name())
                # All ranks summon the (sharded) LoRA params layer by layer.
                lora_params = layered_summon_lora_params(self.actor_module_fsdp)
                if dist.get_rank() == 0:
                    save_file(lora_params, os.path.join(lora_save_path, "adapter_model.safetensors"))
                    with open(os.path.join(lora_save_path, "adapter_config.json"), "w", encoding="utf-8") as f:
                        json.dump(peft_config, f, ensure_ascii=False, indent=4)
            except Exception as e:
                # Best-effort: a failed adapter export must not abort the main checkpoint.
                log_with_rank(
                    f"Save LoRA Adapter Error ({e})", rank=dist.get_rank(), logger=logger, log_only_rank_0=True
                )
            dist.barrier()
            log_with_rank(
                f"[rank-{self.rank}]: Saved LoRA adapter to: {lora_save_path}",
                rank=dist.get_rank(),
                logger=logger,
                log_only_rank_0=True,
            )

        if self._is_offload_param:
            offload_fsdp_model_to_cpu(self.actor_module_fsdp)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=False):
assert self._is_actor or (not self._is_actor and self._is_rollout), (
f"Checkpoint loading is only supported for Actor or standalone Rollout Workers, but got "
f"{self._is_actor} and {self._is_rollout}"
)
# No checkpoint to load, just offload the model and optimizer to CPU
if local_path is None:
if self._is_offload_param:
offload_fsdp_model_to_cpu(self.actor_module_fsdp)
if self._is_offload_optimizer:
offload_fsdp_optimizer(self.actor_optimizer)
return
if self._is_offload_param:
load_fsdp_model_to_gpu(self.actor_module_fsdp)
self.checkpoint_manager.load_checkpoint(
local_path=local_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load
)
if self._is_offload_param:
offload_fsdp_model_to_cpu(self.actor_module_fsdp)
if self._is_offload_optimizer:
offload_fsdp_optimizer(self.actor_optimizer)
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def start_profile(self, **kwargs) -> None:
        """Start profiling for the current rank in the current training step.

        Args:
            **kwargs: forwarded verbatim to ``self.profiler.start``.
        """
        self.profiler.start(**kwargs)
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def stop_profile(self) -> None:
        """Stop profiling for the current rank in the current training step."""
        self.profiler.stop()
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def dump_memory_snapshot(self, tag: str = "manual", sub_dir: str = None) -> None:
"""Manually trigger a CUDA memory snapshot dump on all ranks."""
# Memory snapshot is now handled by the profiler system
# This method is kept for backward compatibility but delegates to profiler
if hasattr(self, "profiler") and hasattr(self.profiler, "_impl"):
try:
# Try to use the profiler's memory snapshot functionality
if hasattr(self.profiler._impl, "sampler"):
out_dir = OmegaConf.select(self.config, "actor.profiler.save_path") or "."
self.profiler._impl.sampler.dump_memory_snapshot(out_dir=out_dir, tag=tag, sub_dir=sub_dir)
except Exception:
# silently ignore if profiler doesn't support memory snapshots
pass
class CriticWorker(Worker, DistProfilerExtension):
    """FSDP/FSDP2-based PPO critic worker.

    Builds a value-head model wrapped in (optionally Ulysses sequence-parallel)
    FSDP, exposes value computation and critic updates, and manages optional
    CPU offload of parameters/optimizer state plus checkpointing.
    """

    def __init__(self, config: FSDPCriticConfig):
        """Initialize distributed state, device meshes, profiler, and normalize batch-size config in place."""
        Worker.__init__(self)
        # Build the distributed profiler from the optional ``profiler`` config section.
        omega_profiler_config = config.get("profiler", {})
        profiler_config = omega_conf_to_dataclass(omega_profiler_config, dataclass_type=ProfilerConfig)
        if omega_profiler_config.get("tool", None) in ["npu", "nsys", "torch", "torch_memory"]:
            tool_config = omega_conf_to_dataclass(
                omega_profiler_config.get("tool_config", {}).get(omega_profiler_config.get("tool"))
            )
        else:
            tool_config = None
        DistProfilerExtension.__init__(
            self, DistProfiler(rank=self.rank, config=profiler_config, tool_config=tool_config)
        )
        import torch.distributed

        self.config = config
        if not torch.distributed.is_initialized():
            torch.distributed.init_process_group(
                backend=get_nccl_backend(),
                timeout=datetime.timedelta(seconds=self.config.get("nccl_timeout", 600)),
                init_method=os.environ.get("DIST_INIT_METHOD", None),
            )
        # Re-assign with the concrete dataclass annotation for type checkers.
        self.config: FSDPCriticConfig = config
        # build device mesh for Ulysses Sequence Parallel
        world_size = torch.distributed.get_world_size()
        from torch.distributed.device_mesh import init_device_mesh

        fsdp_size = self.config.model.fsdp_config.fsdp_size
        self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=fsdp_size)
        self.ulysses_device_mesh = None
        self.ulysses_sequence_parallel_size = self.config.get("ulysses_sequence_parallel_size", 1)
        # Data-parallel size is world size divided by the sequence-parallel size.
        dp = world_size // self.ulysses_sequence_parallel_size
        if self.ulysses_sequence_parallel_size > 1:
            self.ulysses_device_mesh = init_device_mesh(
                device_name, mesh_shape=(dp, self.ulysses_sequence_parallel_size), mesh_dim_names=["dp", "sp"]
            )
        # create training dispatch: only SP-rank-0 collects outputs to avoid duplicates.
        if self.ulysses_device_mesh is not None:
            is_collect = self.ulysses_device_mesh["sp"].get_local_rank() == 0
            self._register_dispatch_collect_info(
                "critic", dp_rank=self.ulysses_device_mesh["dp"].get_local_rank(), is_collect=is_collect
            )
        else:
            self._register_dispatch_collect_info("critic", dp_rank=self.rank, is_collect=True)
        self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh)
        # set FSDP offload params
        self._is_offload_param = self.config.model.fsdp_config.param_offload
        self._is_offload_optimizer = self.config.model.fsdp_config.optimizer_offload
        # normalize config: scale mini-batch by rollout_n, then divide global
        # batch sizes by the data-parallel world size to get per-DP-rank sizes.
        self.config.ppo_mini_batch_size *= self.config.rollout_n
        self.config.ppo_mini_batch_size //= torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size
        if self.config.ppo_micro_batch_size is not None:
            self.config.ppo_micro_batch_size //= (
                torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size
            )
            self.config.forward_micro_batch_size //= (
                torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size
            )
            self.config.ppo_micro_batch_size_per_gpu = self.config.ppo_micro_batch_size
            self.config.forward_micro_batch_size_per_gpu = self.config.forward_micro_batch_size
        if self.config.ppo_micro_batch_size_per_gpu is not None:
            assert self.config.ppo_mini_batch_size % self.config.ppo_micro_batch_size_per_gpu == 0, (
                f"normalized ppo_mini_batch_size {self.config.ppo_mini_batch_size} should be divisible by "
                f"ppo_micro_batch_size_per_gpu {self.config.ppo_micro_batch_size_per_gpu}"
            )
            assert self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu > 0, (
                f"normalized ppo_mini_batch_size {self.config.ppo_mini_batch_size} should be larger than "
                f"ppo_micro_batch_size_per_gpu {self.config.ppo_micro_batch_size_per_gpu}"
            )
        # LoRA is enabled either via a pre-trained adapter path or a positive lora_rank.
        self._is_lora = (
            self.config.model.get("lora_adapter_path") is not None or self.config.model.get("lora_rank", 0) > 0
        )
        self.use_orig_params = self.config.model.fsdp_config.get("use_orig_params", False)

    def _build_critic_model_optimizer(self, config: FSDPCriticConfig):
        """Build the value-head critic module (FSDP/FSDP2-wrapped), its optimizer and LR scheduler.

        Returns:
            Tuple of (critic_module, critic_optimizer, critic_lr_scheduler).
        """
        # the following line is necessary
        from torch.distributed.fsdp import MixedPrecision

        from verl.utils.model import load_valuehead_model, print_model_size
        from verl.utils.torch_dtypes import PrecisionType

        use_shm = config.model.get("use_shm", False)
        local_path = copy_to_local(config.model.path, use_shm=use_shm)
        # note that the tokenizer between actor and critic may be different. So override tokenizer info with actor info
        # using random initialized model from any architecture. May not be the same as Actor.
        tokenizer_path = copy_to_local(config.model.tokenizer_path, use_shm=use_shm)
        self.tokenizer = hf_tokenizer(tokenizer_path, trust_remote_code=config.model.get("trust_remote_code", False))
        self.processor = hf_processor(tokenizer_path, trust_remote_code=config.model.get("trust_remote_code", False))
        if self.config.model.get("custom_chat_template", None) is not None:
            # Prefer patching the processor's template when a processor exists (VLM case).
            if self.processor is not None:
                self.processor.chat_template = self.config.model.custom_chat_template
            else:
                self.tokenizer.chat_template = self.config.model.custom_chat_template
        override_config = OmegaConf.to_container(OmegaConf.create(self.config.model.get("override_config", {})))
        override_config_kwargs = {
            "bos_token_id": self.tokenizer.bos_token_id,
            "eos_token_id": self.tokenizer.eos_token_id,
            "pad_token_id": self.tokenizer.pad_token_id,
        }
        override_config_kwargs.update(override_config)
        if self.rank == 0:
            print(f"Critic overriding config {override_config_kwargs}")
        torch_dtype = self.config.model.fsdp_config.get("model_dtype", "fp32")
        torch_dtype = PrecisionType.to_dtype(torch_dtype)
        from transformers import AutoConfig

        # override model kwargs
        attn_implementation = override_config.get("attn_implementation", "flash_attention_2")
        critic_model_config = AutoConfig.from_pretrained(
            local_path,
            attn_implementation=attn_implementation,
            trust_remote_code=config.model.get("trust_remote_code", False),
        )
        # TODO: VL models use VisionAttention, which directly uses flash_attention in transformers>=4.53
        # which will be patched by _ulysses_flash_attention_forward, but errorly misses position_ids
        # Maybe support Ulysses in VisionAttention in the future and remove this patch
        if self.ulysses_sequence_parallel_size > 1 and hasattr(critic_model_config, "vision_config"):
            critic_model_config.vision_config._attn_implementation = "eager"
        # Value model: a single scalar head.
        critic_model_config.num_labels = 1
        # patch for kimi-vl
        if getattr(critic_model_config, "model_type", None) == "kimi_vl":
            critic_model_config.text_config.topk_method = "greedy"
        init_context = get_init_weight_context_manager(
            use_meta_tensor=not critic_model_config.tie_word_embeddings, mesh=self.device_mesh
        )
        # TiledMLP configuration for memory-efficient MLP computation
        tiled_mlp_config = config.model.get("tiled_mlp", {})
        use_tiled_mlp = tiled_mlp_config.get("enabled", False)
        tiled_mlp_shards = tiled_mlp_config.get("num_shards", 4)
        # TiledMLP requires FSDP2 for correct gradient computation
        if use_tiled_mlp and config.strategy == "fsdp":
            raise ValueError("TiledMLP requires FSDP2. Set `critic.strategy=fsdp2`.")
        with init_context(), warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Disable all dropout for value estimation.
            critic_model_config.classifier_dropout = 0.0
            # NOTE(review): string "0" (unlike the float dropouts around it) —
            # presumably what some HF configs expect; confirm before changing.
            critic_model_config.hidden_dropout = "0"
            critic_model_config.summary_dropout_prob = 0.0
            critic_module = load_valuehead_model(
                local_path,
                torch_dtype,
                critic_model_config,
                config.model.get("trust_remote_code", False),
            )
            use_remove_padding = config.model.get("use_remove_padding", False)
            apply_monkey_patch(
                model=critic_module,
                use_remove_padding=use_remove_padding,
                ulysses_sp_size=self.ulysses_sequence_parallel_size,
                use_tiled_mlp=use_tiled_mlp,
                tiled_mlp_shards=tiled_mlp_shards,
            )
            # some parameters may not in torch_dtype
            critic_module.to(torch_dtype)
            if config.model.get("enable_gradient_checkpointing", False):
                critic_module.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False})
        if self._is_lora:
            print("Applying LoRA to critic module")
            critic_module.enable_input_require_grads()
            # Check if we should load a pre-trained LoRA adapter
            lora_adapter_path = self.config.model.get("lora_adapter_path")
            if lora_adapter_path is not None:
                from peft import PeftModel

                print(f"Loading pre-trained LoRA adapter to critic from: {lora_adapter_path}")
                # Copy adapter to local if needed
                local_adapter_path = copy_to_local(lora_adapter_path, use_shm=self.config.model.get("use_shm", False))
                critic_module = PeftModel.from_pretrained(critic_module, local_adapter_path, is_trainable=True)
                peft_config = critic_module.peft_config["default"]
                # Ensure task_type is TaskType enum, not string
                # Use TOKEN_CLS for Critic since it's loaded as AutoModelForTokenClassification
                if isinstance(peft_config.task_type, str):
                    peft_config.task_type = TaskType.TOKEN_CLS
            else:
                # Convert config to regular Python types before creating PEFT model
                # Use TOKEN_CLS for Critic since it's loaded as AutoModelForTokenClassification
                lora_config = {
                    "task_type": TaskType.TOKEN_CLS,
                    "r": self.config.model.lora_rank,
                    "lora_alpha": self.config.model.lora_alpha,
                    "target_modules": convert_to_regular_types(self.config.model.target_modules),
                    "bias": "none",
                }
                critic_module = get_peft_model(critic_module, LoraConfig(**lora_config))
        if self.rank == 0:
            print_model_size(critic_module)
        self.critic_model_config = critic_model_config
        fsdp_config = self.config.model.fsdp_config
        mixed_precision_config = fsdp_config.get("mixed_precision", None)
        if mixed_precision_config is not None:
            param_dtype = PrecisionType.to_dtype(mixed_precision_config.get("param_dtype", "bf16"))
            reduce_dtype = PrecisionType.to_dtype(mixed_precision_config.get("reduce_dtype", "fp32"))
            buffer_dtype = PrecisionType.to_dtype(mixed_precision_config.get("buffer_dtype", "fp32"))
        else:
            # Defaults: bf16 params, fp32 gradient reduction and buffers.
            param_dtype = torch.bfloat16
            reduce_dtype = torch.float32
            buffer_dtype = torch.float32
        mixed_precision = MixedPrecision(param_dtype=param_dtype, reduce_dtype=reduce_dtype, buffer_dtype=buffer_dtype)
        auto_wrap_policy = get_fsdp_wrap_policy(
            module=critic_module,
            config=self.config.model.fsdp_config.wrap_policy,
            is_lora=self._is_lora,
        )
        log_gpu_memory_usage("Before critic FSDP", logger=None)
        fsdp_mesh = self.device_mesh
        sharding_strategy = get_sharding_strategy(fsdp_mesh)
        self.use_orig_params = fsdp_config.get("use_orig_params", False)
        if self.config.model.get("freeze_vision_tower", False):
            vision_tower = get_vl_model_vision_tower(critic_module)
            if vision_tower is not None:
                vision_tower.requires_grad_(False)
                # Frozen params require use_orig_params=True under FSDP1.
                self.use_orig_params = True
                if self.rank == 0:
                    print("[critic model] Vision tower is set to not trainable.")
            else:
                if self.rank == 0:
                    print("[critic model] No vision tower found.")
        # Note: We force turn off CPUOffload for critic because it causes incorrect results when using grad accumulation
        if config.strategy == "fsdp":
            critic_module = FSDP(
                critic_module,
                param_init_fn=init_fn,
                use_orig_params=self.use_orig_params,
                auto_wrap_policy=auto_wrap_policy,
                device_id=get_device_id(),
                sharding_strategy=sharding_strategy,
                mixed_precision=mixed_precision,
                sync_module_states=True,
                forward_prefetch=self.config.model.fsdp_config.forward_prefetch,
                device_mesh=self.device_mesh,
                cpu_offload=None,
            )
        elif config.strategy == "fsdp2":
            assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)"
            mp_policy = MixedPrecisionPolicy(
                param_dtype=param_dtype, reduce_dtype=reduce_dtype, cast_forward_inputs=True
            )
            offload_policy = None
            if fsdp_config.offload_policy:
                # FSDP2's own offload policy supersedes manual param/optimizer offload.
                self._is_offload_param = False
                self._is_offload_optimizer = False
                offload_policy = CPUOffloadPolicy(pin_memory=True)
            fsdp_kwargs = {
                "mesh": fsdp_mesh,
                "mp_policy": mp_policy,
                "offload_policy": offload_policy,
                "reshard_after_forward": fsdp_config.reshard_after_forward,
                "shard_placement_fn": get_shard_placement_fn(fsdp_size=self.device_mesh.shape[-1]),
            }
            # Snapshot full weights before sharding, then reload into the sharded module.
            full_state = critic_module.state_dict()
            apply_fsdp2(critic_module, fsdp_kwargs, fsdp_config)
            fsdp2_load_full_state_dict(critic_module, full_state, fsdp_mesh, offload_policy)
        else:
            raise NotImplementedError(f"Unknown strategy {config.strategy}")
        if config.model.get("enable_activation_offload", False):
            enable_gradient_checkpointing = config.model.get("enable_gradient_checkpointing", False)
            enable_activation_offloading(critic_module, config.strategy, enable_gradient_checkpointing)
        log_gpu_memory_usage("After critic FSDP", logger=None)
        critic_optimizer = build_optimizer(critic_module.parameters(), config.optim)
        total_steps = config.optim.get("total_training_steps", 0)
        num_warmup_steps = int(config.optim.get("lr_warmup_steps", -1))
        lr_scheduler_type = config.optim.get("lr_scheduler_type", "constant")
        if num_warmup_steps < 0:
            # Negative warmup steps means "derive from ratio of total steps".
            num_warmup_steps_ratio = config.optim.get("lr_warmup_steps_ratio", 0.0)
            num_warmup_steps = int(num_warmup_steps_ratio * total_steps)
        if self.rank == 0:
            print(f"Total steps: {total_steps}, num_warmup_steps: {num_warmup_steps}")
        from verl.utils.torch_functional import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup

        if lr_scheduler_type == "constant":
            critic_lr_scheduler = get_constant_schedule_with_warmup(
                optimizer=critic_optimizer, num_warmup_steps=num_warmup_steps
            )
        elif lr_scheduler_type == "cosine":
            min_lr_ratio = config.optim.get("min_lr_ratio", 0.0)
            num_cycles = config.optim.get("num_cycles", 0.5)
            critic_lr_scheduler = get_cosine_schedule_with_warmup(
                optimizer=critic_optimizer,
                num_warmup_steps=num_warmup_steps,
                num_training_steps=total_steps,
                min_lr_ratio=min_lr_ratio,
                num_cycles=num_cycles,
            )
        else:
            raise NotImplementedError(f"LR scheduler type {lr_scheduler_type} is not supported")
        return critic_module, critic_optimizer, critic_lr_scheduler

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def init_model(self):
        """Build the critic model/optimizer/scheduler, set up checkpointing, and apply initial offload."""
        # This is used to import external_lib into the huggingface systems
        import_external_libs(self.config.model.get("external_lib", None))
        from verl.workers.critic import DataParallelPPOCritic

        self.critic_module, self.critic_optimizer, self.critic_lr_scheduler = self._build_critic_model_optimizer(
            self.config
        )
        if self._is_offload_param:
            offload_fsdp_model_to_cpu(self.critic_module)
            log_gpu_memory_usage("After offload critic model during init", logger=logger)
        if self._is_offload_optimizer:
            offload_fsdp_optimizer(optimizer=self.critic_optimizer)
            log_gpu_memory_usage("After offload critic optimizer during init", logger=logger)
        self.critic = DataParallelPPOCritic(
            config=self.config, critic_module=self.critic_module, critic_optimizer=self.critic_optimizer
        )
        self.flops_counter = FlopsCounter(self.critic_model_config)
        self.checkpoint_manager = FSDPCheckpointManager(
            model=self.critic_module,
            optimizer=self.critic_optimizer,
            lr_scheduler=self.critic_lr_scheduler,
            processing_class=self.processor if self.processor is not None else self.tokenizer,
            checkpoint_config=self.config.checkpoint,
            trust_remote_code=self.config.model.get("trust_remote_code", False),
        )

    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="critic"))
    @DistProfiler.annotate(color="cyan", role="compute_values")
    def compute_values(self, data: DataProto):
        """Compute value estimates for a batch; returns a CPU DataProto with key "values"."""
        if self._is_offload_param:
            load_fsdp_model_to_gpu(self.critic_module)
        micro_batch_size = self.config.forward_micro_batch_size_per_gpu
        data.meta_info["micro_batch_size"] = micro_batch_size
        data.meta_info["max_token_len"] = self.config.forward_max_token_len_per_gpu
        data.meta_info["use_dynamic_bsz"] = self.config.use_dynamic_bsz
        # perform forward computation
        with self.ulysses_sharding_manager:
            data = data.to("cpu")  # data will to device with each micro batch on critic.compute_values
            values = self.critic.compute_values(data=data)
            output = DataProto.from_dict(tensors={"values": values})
            output = output.to("cpu")
        if self._is_offload_param:
            offload_fsdp_model_to_cpu(self.critic_module)
        return output

    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="critic"))
    @DistProfiler.annotate(color="pink", role="critic_update")
    def update_critic(self, data: DataProto):
        """Run one critic update over the batch; returns a CPU DataProto carrying training metrics."""
        if self._is_offload_param:
            load_fsdp_model_to_gpu(self.critic_module)
        if self._is_offload_optimizer:
            load_fsdp_optimizer(optimizer=self.critic_optimizer, device_id=get_device_id())
        # perform forward computation
        with self.ulysses_sharding_manager:
            data = data.to("cpu")  # data will to device with each micro batch on critic.update_critic
            with Timer(name="update_critic", logger=None) as timer:
                metrics = self.critic.update_critic(data=data)
            delta_time = timer.last
            # MFU = achieved FLOPs (scaled by epochs) / hardware peak / world size.
            global_num_tokens = data.meta_info["global_token_num"]
            estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time)
            metrics["perf/mfu/critic"] = estimated_flops * self.config.ppo_epochs / promised_flops / self.world_size
            lr = self.critic_lr_scheduler.get_last_lr()[0]
            metrics["critic/lr"] = lr
            self.critic_lr_scheduler.step()
            output = DataProto(batch=None, meta_info={"metrics": metrics})
        if self._is_offload_param:
            offload_fsdp_model_to_cpu(self.critic_module)
        if self._is_offload_optimizer:
            offload_fsdp_optimizer(optimizer=self.critic_optimizer)
        output = output.to("cpu")
        return output

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None):
        """Save the critic checkpoint on all ranks (model must be on GPU for FSDP state_dict)."""
        import torch

        if self._is_offload_param:
            load_fsdp_model_to_gpu(self.critic_module)
        self.checkpoint_manager.save_checkpoint(
            local_path=local_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep
        )
        torch.distributed.barrier()
        if self._is_offload_param:
            offload_fsdp_model_to_cpu(self.critic_module)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=True):
        """Load a critic checkpoint on all ranks, then re-apply configured offloads."""
        import torch

        if self._is_offload_param:
            load_fsdp_model_to_gpu(self.critic_module)
        self.checkpoint_manager.load_checkpoint(
            local_path=local_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load
        )
        torch.distributed.barrier()
        if self._is_offload_param:
            offload_fsdp_model_to_cpu(self.critic_module)
        if self._is_offload_optimizer:
            offload_fsdp_optimizer(self.critic_optimizer)
# ================================= Async related workers =================================
class AsyncActorRolloutRefWorker(ActorRolloutRefWorker):
    """Async variant of ActorRolloutRefWorker for asynchronous rollout engines."""

    @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
    async def update_weights(self):
        """Switch the worker into rollout mode and report completion.

        Non-blocking ONE_TO_ALL call. Awaits the parent's ``rollout_mode``
        (presumably syncing updated actor weights into the rollout engine —
        confirm against the parent implementation) and returns True so the
        caller can await confirmation.
        """
        await self.rollout_mode()
        return True
| verl__workers__fsdp_workers.py |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The main entry point to run the PPO algorithm
"""
import datetime
import logging
import os
import time
import psutil
import torch
import torch.distributed
from codetiming import Timer
from omegaconf import DictConfig, OmegaConf
try:
from verl.workers.engine.mindspeed.transformer_impl import repatch
except ImportError:
repatch = None
from contextlib import nullcontext
from megatron.core import parallel_state as mpu
from verl import DataProto
from verl.models.mcore import get_mcore_weight_converter
from verl.single_controller.base import Worker
from verl.single_controller.base.decorator import Dispatch, make_nd_compute_dataproto_dispatch_fn, register
from verl.utils import hf_tokenizer
from verl.utils.checkpoint.megatron_checkpoint_manager import MegatronCheckpointManager
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.device import (
get_device_id,
get_device_name,
get_nccl_backend,
get_torch_device,
set_expandable_segments,
)
from verl.utils.distributed import set_numa_affinity
from verl.utils.flops_counter import FlopsCounter
from verl.utils.fs import copy_to_local
from verl.utils.megatron.router_replay_patch import RouterReplay, RouterReplayAction, apply_router_replay_patch
from verl.utils.megatron_peft_utils import add_base_layer_suffix, build_peft_config_for_vllm
from verl.utils.megatron_utils import (
load_megatron_model_to_gpu,
load_megatron_optimizer,
offload_megatron_model_to_cpu,
offload_megatron_optimizer,
per_tensor_generator,
register_megatron_training_hooks,
)
from verl.utils.memory_utils import aggressive_empty_cache
from verl.utils.model import get_hf_model_path, load_mcore_dist_weights, load_megatron_gptmodel_weights
from verl.utils.profiler import (
DistProfiler,
DistProfilerExtension,
GPUMemoryLogger,
ProfilerConfig,
log_gpu_memory_usage,
simple_timer,
)
from verl.utils.profiler.performance import reduce_timing, topk_reduce_ratio_min_max
from verl.utils.ray_utils import get_event_loop
from verl.utils.torch_functional import use_original_torch_compile
from verl.workers.actor.megatron_actor import MegatronPPOActor
from verl.workers.config import HFModelConfig, McoreCriticConfig, RolloutConfig
from verl.workers.critic.megatron_critic import MegatronPPOCritic
from verl.workers.rollout import get_rollout_class
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def set_random_seed(seed, only_rollout=False):
    """Seed every relevant RNG for reproducibility.

    Args:
        seed: integer seed applied to torch, numpy and Python's ``random``.
        only_rollout: when True, skip Megatron's tensor-parallel RNG seeding
            (used by rollout-only workers that hold no model-parallel state).
    """
    import random

    import numpy as np
    import torch

    for seed_fn in (torch.manual_seed, np.random.seed, random.seed):
        seed_fn(seed)
    if only_rollout:
        return
    # Seed Megatron's model-parallel CUDA RNG only when accelerators exist.
    if get_torch_device().device_count() > 0:
        from megatron.core import tensor_parallel

        tensor_parallel.model_parallel_cuda_manual_seed(seed)
    # FIXME: torch cumsum not support deterministic (used in vllm sampler),
    # https://github.com/pytorch/pytorch/issues/89492
    # torch.use_deterministic_algorithms(True, warn_only=True)
    # os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
class MegatronWorker(Worker):
    """Common base for Megatron-backed workers.

    Provides the shared initialization that converts a HuggingFace model config
    into a Megatron transformer config (directly or through an mbridge /
    Megatron-Bridge), and sets up tokenizer/processor and optional PEFT.
    """

    def _init_hf_config_and_tf_config(
        self,
        model_path,
        tokenizer_or_path,
        dtype,
        override_model_config,
        override_transformer_config,
        trust_remote_code=False,
        megatron_config=None,
        enable_mtp=False,
    ):
        """Initialize tokenizer/processor, HF config, and the Megatron TF config.

        Sets (among others) ``self.tokenizer``, ``self.processor``,
        ``self.hf_config``, ``self.tf_config``, ``self.bridge``,
        ``self.provider`` and ``self.peft_cls`` as side effects.

        Args:
            model_path: HF model path (remote or local) to load the config from.
            tokenizer_or_path: None (reuse model path), a path string, or an
                already-constructed tokenizer/processor object.
            dtype: torch dtype for model parameters (fp16/bf16 handled specially).
            override_model_config: overrides applied to the HF config.
            override_transformer_config: overrides applied to the Megatron config.
            trust_remote_code: forwarded to HF loaders.
            megatron_config: Megatron parallelism config (required for mbridge/MTP).
            enable_mtp: enable multi-token prediction (actor only).
        """
        from transformers import AutoConfig

        from verl.models.mcore import hf_to_mcore_config
        from verl.utils import hf_processor
        from verl.utils.model import update_model_config

        # Step 1: initialize the tokenizer
        self.local_path = copy_to_local(model_path)
        if tokenizer_or_path is None:
            self.tokenizer = hf_tokenizer(self.local_path, trust_remote_code=trust_remote_code)
            self.processor = hf_processor(self.local_path, trust_remote_code=trust_remote_code)
        elif isinstance(tokenizer_or_path, str):
            self.tokenizer = hf_tokenizer(copy_to_local(tokenizer_or_path), trust_remote_code=trust_remote_code)
            self.processor = hf_processor(copy_to_local(tokenizer_or_path), trust_remote_code=trust_remote_code)
        else:
            # Caller passed a ready-made tokenizer/processor object.
            self.tokenizer = tokenizer_or_path
            self.processor = tokenizer_or_path
        if self.config.model.get("custom_chat_template", None) is not None:
            if self.processor is not None:
                self.processor.chat_template = self.config.model.custom_chat_template
            else:
                self.tokenizer.chat_template = self.config.model.custom_chat_template
        # Step 2: get the hf
        hf_config = AutoConfig.from_pretrained(self.local_path, trust_remote_code=trust_remote_code)
        # Step 3: override the hf config
        override_config_kwargs = {
            "bos_token_id": self.tokenizer.bos_token_id,
            "eos_token_id": self.tokenizer.eos_token_id,
            "pad_token_id": self.tokenizer.pad_token_id,
        }
        override_config_kwargs.update(override_model_config.get("model_config", {}))
        self.share_embeddings_and_output_weights = getattr(hf_config, "tie_word_embeddings", False)
        # only actor need enable mtp
        if enable_mtp:
            assert hf_config.num_nextn_predict_layers > 0, "MTP requires at least one nextn_predict_layer"
            assert megatron_config.use_mbridge, "MTP requires use_mbridge to be True"
            assert megatron_config.vanilla_mbridge, "MTP requires vanilla_mbridge to be True"
            override_transformer_config["mtp_loss_scaling_factor"] = self.config.model.mtp.mtp_loss_scaling_factor
        else:
            # Disable MTP layers entirely when not requested.
            if hasattr(hf_config, "num_nextn_predict_layers"):
                hf_config.num_nextn_predict_layers = 0
        self.enable_mtp = enable_mtp
        update_model_config(hf_config, override_config_kwargs=override_config_kwargs)
        self.architectures = getattr(hf_config, "architectures", None)
        if self.rank == 0:
            print(f"Model config after override: {hf_config}")
        from verl.models.mcore.config_converter import mapping_string_to_attn_backend

        # todo: remove this line after mcore adopt mbridge 0.15, now for compatibility
        override_transformer_config = mapping_string_to_attn_backend(override_transformer_config)
        fp16 = dtype == torch.float16
        bf16 = dtype == torch.bfloat16
        if fp16:
            assert megatron_config.use_mbridge, "fp16 mode requires use_mbridge to be True"
        self.provider = None
        self.vanilla_bridge = megatron_config.get("vanilla_mbridge", True)
        if megatron_config.use_mbridge:
            if self.vanilla_bridge:
                # Vanilla mbridge path: derive TF config directly from the HF config.
                from verl.models.mcore.mbridge import AutoBridge

                bridge = AutoBridge.from_config(hf_config, dtype=dtype)
                bridge.set_extra_args(**override_transformer_config)
                tf_config = bridge.config
                tf_config.fp16 = fp16
                tf_config.bf16 = bf16
            else:
                from verl.models.mcore.bridge import AutoBridge

                # Use Megatron-Bridge to convert HF config to Megatron config
                bridge = AutoBridge.from_hf_pretrained(self.local_path, trust_remote_code=trust_remote_code)
                # Get Megatron provider and configure it
                provider = bridge.to_megatron_provider(load_weights=False)
                # In case of invalid overrides, we need to make sure some critical params are set correctly
                provider.params_dtype = dtype
                # Ensure dtype settings propagate to Megatron-Bridge/TE
                provider.fp16 = fp16
                provider.bf16 = bf16
                # Pass distributed info
                provider.tensor_model_parallel_size = megatron_config.tensor_model_parallel_size
                provider.pipeline_model_parallel_size = megatron_config.pipeline_model_parallel_size
                provider.expert_model_parallel_size = megatron_config.expert_model_parallel_size
                provider.expert_tensor_parallel_size = megatron_config.expert_tensor_parallel_size
                provider.virtual_pipeline_model_parallel_size = megatron_config.virtual_pipeline_model_parallel_size
                provider.context_parallel_size = megatron_config.context_parallel_size
                provider.sequence_parallel = megatron_config.sequence_parallel
                # Match verl implementation (need variable_seq_lengths)
                from megatron.core.transformer.enums import AttnBackend

                provider.attention_backend = AttnBackend.flash
                provider.variable_seq_lengths = True
                provider.moe_token_dispatcher_type = "alltoall"
                provider.moe_router_load_balancing_type = "none"
                # Apply transformer config overrides
                for key, value in override_transformer_config.items():
                    setattr(provider, key, value)
                provider.finalize()
                self.provider = provider
                tf_config = None  # Will be set after model creation
            self.bridge = bridge
        else:
            # No bridge: convert the HF config to a Megatron config directly.
            tf_config = hf_to_mcore_config(hf_config, dtype, **override_transformer_config)
            self.bridge = None
        if torch.distributed.get_rank() == 0:
            if tf_config is not None:
                print(f"TF config: {tf_config}")
        self.hf_config = hf_config
        self.tf_config = tf_config
        # Get PEFT config from model.lora if specified
        from verl.workers.config.megatron_peft import get_peft_cls

        self.peft_cls = get_peft_cls(
            model_config=self.config.model, bridge=self.bridge, provider=self.provider, dtype=dtype
        )
class ActorRolloutRefWorker(MegatronWorker, DistProfilerExtension):
"""
This worker can be instantiated as a standalone actor or a standalone rollout or a standalone reference policy
or a hybrid engine based on the config.rollout
"""
    def __init__(self, config: DictConfig, role: str, **kwargs):
        """Initialize the Megatron actor/rollout/ref worker.

        Sets up torch.distributed + Megatron model-parallel groups, dispatch
        collection info, router-replay, random seeds, the distributed profiler,
        offload flags, and normalizes per-role batch-size config in place.

        Args:
            config: full worker config (actor/rollout/ref sections).
            role: one of "actor", "rollout", "ref", "actor_rollout",
                "actor_rollout_ref"; determines which sub-configs apply.
        """
        Worker.__init__(self)
        self.config = config
        if repatch is not None:
            # NPU MindSpeed patch, will be refactored with MindSpeedEngine.
            repatch(self.config.actor.megatron.get("override_transformer_config", {}))
        self.role = role
        assert self.role in ["actor", "rollout", "ref", "actor_rollout", "actor_rollout_ref"]
        # Role flags: a single worker may combine actor, rollout and ref duties.
        self._is_actor = self.role in ["actor", "actor_rollout", "actor_rollout_ref"]
        self._is_rollout = self.role in ["rollout", "actor_rollout", "actor_rollout_ref"]
        self._is_ref = self.role in ["ref", "actor_rollout_ref"]
        # NOTE(sgm): We utilize colocate WorkerGroup by default.
        # As a result, Workers for different model share the same process.
        # Therefore, we only require one distribute initialization.
        # To utilize different parallel strategy in different models:
        # 1, users should disable WorkerDict; 2.assign different ResourcePool to different models,
        # 3. and apply the following patch in ray==2.10, https://github.com/ray-project/ray/pull/44385
        if not torch.distributed.is_initialized():
            set_numa_affinity()
            rank = int(os.environ["LOCAL_RANK"])
            torch.distributed.init_process_group(
                backend=f"cpu:gloo,{get_device_name()}:{get_nccl_backend()}",
                timeout=datetime.timedelta(seconds=self.config.get("nccl_timeout", 600)),
                init_method=os.environ.get("DIST_INIT_METHOD", None),
            )
            get_torch_device().set_device(rank)
            if self._is_actor or self._is_ref:
                mpu.initialize_model_parallel(
                    tensor_model_parallel_size=self.config.actor.megatron.tensor_model_parallel_size,
                    pipeline_model_parallel_size=self.config.actor.megatron.pipeline_model_parallel_size,
                    virtual_pipeline_model_parallel_size=self.config.actor.megatron.virtual_pipeline_model_parallel_size,
                    use_sharp=False,
                    context_parallel_size=self.config.actor.megatron.context_parallel_size,
                    expert_model_parallel_size=self.config.actor.megatron.expert_model_parallel_size,
                    expert_tensor_parallel_size=self.config.actor.megatron.expert_tensor_parallel_size,
                    nccl_communicator_config_path=None,
                )
        if self._is_actor or self._is_ref:
            # Only TP-rank-0 / last-PP-stage / CP-rank-0 collects, to avoid duplicated outputs.
            is_collect = (
                mpu.get_tensor_model_parallel_rank() == 0
                and mpu.get_pipeline_model_parallel_rank() == mpu.get_pipeline_model_parallel_world_size() - 1
                and mpu.get_context_parallel_rank() == 0
            )
            self._register_dispatch_collect_info(
                mesh_name="actor", dp_rank=mpu.get_data_parallel_rank(), is_collect=is_collect
            )
        only_rollout = self._is_rollout and not self._is_actor
        self.enable_routing_replay = False
        if self._is_actor:
            self.router_replay = self.config.actor.router_replay
            self.enable_routing_replay = self.router_replay.mode != "disabled"
            if self.enable_routing_replay:
                apply_router_replay_patch()
        set_random_seed(seed=self.config.actor.megatron.seed, only_rollout=only_rollout)
        # Pick the profiler config from whichever role this worker fills.
        if self._is_actor:
            omega_profiler_config = config.actor.get("profiler", {})
        elif self._is_rollout:
            # NOTE: In colocation mode, rollout config may not take effect (follow the actor config)
            # This is for extendability in AsyncRL cases
            omega_profiler_config = config.rollout.get("profiler", {})
        elif self._is_ref:
            omega_profiler_config = config.ref.get("profiler", {})
        else:
            raise ValueError(
                f"Invalid role {self.role}, should be one of "
                "['actor', 'rollout', 'ref', 'actor_rollout', 'actor_rollout_ref']"
            )
        # omega_profiler_config is DictConfig
        # profiler_config is a ProfilerConfig dataclass
        profiler_config = omega_conf_to_dataclass(omega_profiler_config, dataclass_type=ProfilerConfig)
        if omega_profiler_config.get("tool", None) in ["npu", "nsys", "torch", "torch_memory"]:
            tool_config = omega_conf_to_dataclass(
                omega_profiler_config.get("tool_config", {}).get(omega_profiler_config.get("tool"))
            )
        else:
            tool_config = None
        DistProfilerExtension.__init__(
            self, DistProfiler(rank=self.rank, config=profiler_config, tool_config=tool_config)
        )
        # TODO(sgm): Currently, we only support reference model param offload
        # will support other offload later
        self._is_offload_param = False
        self._is_offload_grad = False
        self._is_offload_optimizer = False
        # Initialize LoRA-related attributes (will be updated in _build_rollout if needed)
        self.base_sync_done = False
        self.peft_merge = False
        # normalize config: divide global batch sizes by the DP world size to get per-rank sizes.
        if self._is_actor:
            self.config.actor.ppo_mini_batch_size *= self.config.rollout.n
            self.config.actor.ppo_mini_batch_size //= mpu.get_data_parallel_world_size()
            if self.config.actor.get("ppo_micro_batch_size", None):
                self.config.actor.ppo_micro_batch_size //= mpu.get_data_parallel_world_size()
                self.config.rollout.log_prob_micro_batch_size //= mpu.get_data_parallel_world_size()
                self.config.actor.ppo_micro_batch_size_per_gpu = self.config.actor.ppo_micro_batch_size
                self.config.rollout.log_prob_micro_batch_size_per_gpu = self.config.rollout.log_prob_micro_batch_size
            self._is_offload_param = self.config.actor.megatron.get("param_offload", False)
            self._is_offload_grad = self.config.actor.megatron.get("grad_offload", False)
            self._is_offload_optimizer = self.config.actor.megatron.get("optimizer_offload", False)
        elif self._is_ref:
            if self.config.ref.get("log_prob_micro_batch_size", None):
                self.config.ref.log_prob_micro_batch_size //= mpu.get_data_parallel_world_size()
                self.config.ref.log_prob_micro_batch_size_per_gpu = self.config.ref.log_prob_micro_batch_size
            else:
                assert self.config.ref.get("log_prob_micro_batch_size_per_gpu", None) is not None, (
                    "Please note that in the ref policy configuration, `log_prob_micro_batch_size_per_gpu` and "
                    "`log_prob_micro_batch_size` should not be None at the same time."
                )
            self._ref_is_offload_param = self.config.ref.megatron.get("param_offload", False)
    def _build_model_optimizer(
        self, model_path, optim_config, override_model_config, override_transformer_config, override_ddp_config=None
    ):
        """Build the Megatron module for this worker's role, plus optimizer state for the actor.

        Args:
            model_path: HF model path used to derive hf_config/tf_config.
            optim_config: optimizer config (consumed on the actor path only).
            override_model_config: overrides applied to the HF model config.
            override_transformer_config: overrides applied to the Megatron transformer config.
            override_ddp_config: optional DDP overrides (actor/rollout path only).

        Returns:
            For the ref worker: ``(ref_module, hf_config)`` (early return).
            Otherwise: ``(actor_module, actor_optimizer, actor_optimizer_scheduler, hf_config, optim_config)``.
        """
        from verl.utils.megatron.optimizer import (
            get_megatron_optimizer,
            get_megatron_optimizer_param_scheduler,
            init_megatron_optim_config,
        )
        from verl.utils.megatron_utils import McoreModuleWrapperConfig, make_megatron_module
        from verl.utils.model import get_generation_config, print_model_size

        # Derive hf_config/tf_config; the megatron sub-config comes from the
        # actor or ref section depending on the role this worker plays.
        self._init_hf_config_and_tf_config(
            model_path,
            self.config.model.get("tokenizer_path") or model_path,
            self.dtype,
            override_model_config,
            override_transformer_config,
            self.config.model.get("trust_remote_code", False),
            self.config.actor.megatron if not self._is_ref else self.config.ref.megatron,
            self.config.model.get("mtp", {}).get("enable", False),
        )
        self.generation_config = get_generation_config(
            self.local_path,
            self.config.model.get("trust_remote_code", False),
        )

        if self._is_actor or self._is_rollout:
            wrap_config = McoreModuleWrapperConfig(
                is_value_model=False,  # actor is not value model
                share_embeddings_and_output_weights=self.share_embeddings_and_output_weights,
                wrap_with_ddp=True,
                use_distributed_optimizer=self.config.actor.megatron.use_distributed_optimizer,
            )
            actor_module, updated_tf_config = make_megatron_module(
                wrap_config=wrap_config,
                tf_config=self.tf_config,
                hf_config=self.hf_config,
                bridge=self.bridge,
                provider=self.provider,
                override_model_config=override_model_config,
                override_ddp_config=override_ddp_config,
                peft_cls=self.peft_cls,
                peft_config=self.config.model.get("lora", None),
            )
            self.tf_config = updated_tf_config
            # actor_module is a list of model chunks (pp/vpp compatibility).
            print(f"actor_module: {len(actor_module)}")
            if self.config.actor.load_weight:
                # Three loading paths: Megatron dist-checkpoint, bridge
                # (vanilla or HF), or direct HF->GPTModel weight conversion.
                if self.config.actor.megatron.use_dist_checkpointing:
                    load_mcore_dist_weights(
                        actor_module,
                        self.config.actor.megatron.dist_checkpointing_path,
                        is_value_model=False,
                        prefix=self.config.actor.megatron.dist_checkpointing_prefix,
                    )
                else:
                    if self.bridge is not None:
                        local_model_path = get_hf_model_path(self.config)
                        if self.vanilla_bridge:
                            self.bridge.load_weights(actor_module, local_model_path)
                        else:
                            self.bridge.load_hf_weights(actor_module, local_model_path)
                    else:
                        load_megatron_gptmodel_weights(
                            self.config, self.hf_config, actor_module, params_dtype=self.dtype, is_value_model=False
                        )
            if self.rank == 0:
                print_model_size(actor_module[0])
            log_gpu_memory_usage("After MegatronPPOActor init", logger=logger)
        elif self._is_ref:
            wrap_config = McoreModuleWrapperConfig(
                is_value_model=False,  # ref is not value model
                share_embeddings_and_output_weights=self.share_embeddings_and_output_weights,
                wrap_with_ddp=False,  # ref never trains, so no DDP wrapper
                use_distributed_optimizer=self.config.ref.megatron.use_distributed_optimizer,
            )
            ref_module, updated_tf_config = make_megatron_module(
                wrap_config=wrap_config,
                tf_config=self.tf_config,
                hf_config=self.hf_config,
                bridge=self.bridge,
                provider=self.provider,
                override_model_config=override_model_config,
            )
            self.tf_config = updated_tf_config
            if self.config.ref.load_weight:  # should align with the actor:
                assert self.config.actor.load_weight == self.config.ref.load_weight
                print("load ref weight start")
                if self.config.ref.megatron.use_dist_checkpointing:
                    load_mcore_dist_weights(
                        ref_module,
                        self.config.ref.megatron.dist_checkpointing_path,
                        is_value_model=False,
                        prefix=self.config.ref.megatron.dist_checkpointing_prefix,
                    )
                else:
                    if self.bridge is not None:
                        local_model_path = get_hf_model_path(self.config)
                        if self.vanilla_bridge:
                            self.bridge.load_weights(ref_module, local_model_path)
                        else:
                            self.bridge.load_hf_weights(ref_module, local_model_path)
                    else:
                        load_megatron_gptmodel_weights(
                            self.config, self.hf_config, ref_module, params_dtype=self.dtype, is_value_model=False
                        )
            log_gpu_memory_usage("After ref module init", logger=logger)
            # Ref worker builds no optimizer: early return with a 2-tuple.
            return ref_module, self.hf_config

        # TODO: add more optimizer args into config
        if self._is_actor:
            optim_config_megatron = init_megatron_optim_config(
                optim_config,
                use_distributed_optimizer=wrap_config.use_distributed_optimizer,
                fp16=self.dtype == torch.float16,
            )
            actor_optimizer = get_megatron_optimizer(model=actor_module, config=optim_config_megatron)
            actor_optimizer_scheduler = get_megatron_optimizer_param_scheduler(
                optimizer=actor_optimizer, config=optim_config
            )
        else:
            # Rollout-only worker: no optimizer state at all.
            optim_config = None
            actor_optimizer = None
            actor_optimizer_scheduler = None

        log_gpu_memory_usage("After actor optimizer init", logger=logger)
        register_megatron_training_hooks(actor_module, actor_optimizer)
        return actor_module, actor_optimizer, actor_optimizer_scheduler, self.hf_config, optim_config
    def _build_rollout(self, trust_remote_code=False):
        """Build the rollout (inference) engine and its device mesh.

        Sets ``self.rollout``, ``self.rollout_device_mesh``, ``self.base_sync_done``
        and ``self.peft_merge``.

        Note: the ``trust_remote_code`` parameter is currently unused in this
        body; the model config carries its own trust_remote_code flag.
        """
        from torch.distributed.device_mesh import init_device_mesh

        # 1. parse rollout and huggingface model config
        rollout_config: RolloutConfig = omega_conf_to_dataclass(self.config.rollout)
        model_config: HFModelConfig = omega_conf_to_dataclass(self.config.model)

        # 2. build rollout device mesh: dp x infer_tp x infer_pp must tile the
        # whole worker world size exactly.
        infer_tp = self.config.rollout.tensor_model_parallel_size * self.config.rollout.data_parallel_size
        infer_pp = self.config.rollout.pipeline_model_parallel_size
        infer_world_size = infer_tp * infer_pp
        dp = self.world_size // infer_world_size
        assert self.world_size % infer_world_size == 0, (
            f"rollout world_size: {self.world_size} is not divisible by infer_world_size: {infer_world_size}"
        )
        rollout_device_mesh = init_device_mesh(
            get_device_name(), mesh_shape=(dp, infer_tp, infer_pp), mesh_dim_names=["dp", "infer_tp", "infer_pp"]
        )
        self.rollout_device_mesh = rollout_device_mesh

        # 3. register dispatch/collect info: only the (infer_tp=0, infer_pp=0)
        # rank of each inference group collects outputs.
        is_collect = (
            rollout_device_mesh["infer_tp"].get_local_rank() == 0
            and rollout_device_mesh["infer_pp"].get_local_rank() == 0
        )
        self._register_dispatch_collect_info(
            "rollout", dp_rank=rollout_device_mesh["dp"].get_local_rank(), is_collect=is_collect
        )

        # 4. build rollout model
        log_gpu_memory_usage(f"Before building {self.config.rollout.name} rollout", logger=logger)
        self.rollout = get_rollout_class(rollout_config.name, rollout_config.mode)(
            config=rollout_config, model_config=model_config, device_mesh=rollout_device_mesh
        )
        log_gpu_memory_usage(f"After building {self.config.rollout.name} rollout", logger=logger)

        # Initialize base_sync_done for LoRA: a "dummy" load_format means the
        # real base weights still need to be synced into the rollout engine.
        self.base_sync_done: bool = "dummy" not in self.config.rollout.load_format
        self.peft_merge: bool = model_config.lora.get("merge", False)

        # 5. switch to trainer mode
        # NOTE: It's critical that hybrid engine in trainer mode initially to load checkpoint.
        # For async mode, we can't call run_until_complete here, so we will switch to trainer mode in AgentLoopManager.
        # Note: sync mode is deprecated and rejected in RolloutConfig.__post_init__
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def init_model(self):
        """Build the actor / rollout / ref models (per this worker's role flags),
        the actor optimizer, and the checkpoint manager on every rank."""
        if self.config.model.get("external_lib", None) is not None:
            # This is used to import external_lib into the huggingface systems
            import importlib

            importlib.import_module(self.config.model.external_lib)

        from verl.utils.torch_dtypes import PrecisionType

        override_model_config = OmegaConf.to_container(OmegaConf.create(self.config.model.get("override_config", {})))
        if self._is_actor:
            override_transformer_config = OmegaConf.to_container(
                OmegaConf.create(self.config.actor.megatron.get("override_transformer_config", {}))
            )
            if self.enable_routing_replay:
                override_transformer_config["enable_routing_replay"] = True
            # DDP overrides are only defined (and consumed) on the actor path.
            override_ddp_config = OmegaConf.to_container(
                OmegaConf.create(self.config.actor.megatron.get("override_ddp_config", {}))
            )
        elif self._is_ref:
            override_transformer_config = OmegaConf.to_container(
                OmegaConf.create(self.config.ref.megatron.get("override_transformer_config", {}))
            )
        else:
            override_transformer_config = {}
        self.param_dtype = PrecisionType.to_dtype(self.config.actor.megatron.dtype)
        log_gpu_memory_usage("Before init actor model and optimizer", logger=logger)
        self.dtype = PrecisionType.to_dtype(self.param_dtype)
        if self._is_actor:
            # we need the model for actor and rollout
            optim_config = self.config.actor.optim if self._is_actor else None
            (
                self.actor_module,
                self.actor_optimizer,
                self.actor_optimizer_scheduler,
                self.actor_model_config,
                self.actor_optim_config,
            ) = self._build_model_optimizer(
                model_path=self.config.model.path,
                optim_config=optim_config,
                override_model_config=override_model_config,
                override_transformer_config=override_transformer_config,
                override_ddp_config=override_ddp_config,
            )
            # Park params / optimizer state on CPU until they are needed.
            if self._is_offload_param:
                offload_megatron_model_to_cpu(self.actor_module)
                log_gpu_memory_usage("After offload actor params and grad during init", logger=logger)
            if self._is_offload_optimizer:
                offload_megatron_optimizer(self.actor_optimizer)
                log_gpu_memory_usage("After offload actor optimizer during init", logger=logger)
        if self._is_actor:
            actor_cfg = omega_conf_to_dataclass(self.config.actor)
            self.actor = MegatronPPOActor(
                config=actor_cfg,
                model_config=self.actor_model_config,
                hf_config=self.hf_config,
                tf_config=self.tf_config,
                actor_module=self.actor_module,
                actor_optimizer=self.actor_optimizer,
                mtp_config=self.config.model.mtp if self.config.model.mtp.enable else None,
            )
            print(f"routing replay layers: {len(RouterReplay.router_instances)}")
            log_gpu_memory_usage("After MegatronPPOActor init", logger=logger)
        if self._is_rollout:
            with use_original_torch_compile():
                self._build_rollout(trust_remote_code=self.config.model.get("trust_remote_code", False))
            log_gpu_memory_usage("After rollout init", logger=logger)
        if self._is_ref:
            # Ref path of _build_model_optimizer returns only (module, config).
            self.ref_module, self.ref_model_config = self._build_model_optimizer(
                model_path=self.config.model.path,
                optim_config=None,
                override_model_config=override_model_config,
                override_transformer_config=override_transformer_config,
            )
            log_gpu_memory_usage("After ref model init", logger=logger)
            self.ref_policy = MegatronPPOActor(
                config=self.config.ref,
                model_config=self.ref_model_config,
                hf_config=self.hf_config,
                tf_config=self.tf_config,
                actor_module=self.ref_module,
                actor_optimizer=None,
            )
            if self._ref_is_offload_param:
                offload_megatron_model_to_cpu(self.ref_module)
                log_gpu_memory_usage("After offload ref params during init", logger=logger)
        if self._is_actor:
            self.flops_counter = FlopsCounter(self.actor_model_config)
            self.checkpoint_mananager = MegatronCheckpointManager(
                config=self.config,
                checkpoint_config=self.config.actor.checkpoint,
                model_config=self.actor_model_config,
                transformer_config=self.tf_config,
                role="actor",
                model=self.actor_module,
                arch=self.architectures[0],
                hf_config=self.hf_config,
                param_dtype=self.param_dtype,
                share_embeddings_and_output_weights=self.share_embeddings_and_output_weights,
                processing_class=self.processor if self.processor is not None else self.tokenizer,
                optimizer=self.actor_optimizer,
                optimizer_scheduler=self.actor_optimizer_scheduler,
                use_distributed_optimizer=self.config.actor.megatron.use_distributed_optimizer,
                use_checkpoint_opt_param_scheduler=self.config.actor.optim.use_checkpoint_opt_param_scheduler,
                bridge=self.bridge,
                provider=self.provider,
                use_dist_checkpointing=self.config.actor.megatron.use_dist_checkpointing,
                peft_cls=self.peft_cls,
            )
        # Name fragments used by the weight converter when streaming weights to
        # the rollout engine.
        self.layer_name_mapping = {
            "qkv_layer_name": "self_attention.linear_qkv.",
            "gate_proj_layer_name": "linear_fc1.",
        }
        self.weight_converter = None
        if not self.config.actor.megatron.use_mbridge:
            self.weight_converter = get_mcore_weight_converter(self.actor_model_config, self.dtype)
        get_torch_device().empty_cache()
        log_gpu_memory_usage("After init_model finish", logger=logger)
    async def rollout_mode(self):
        """Context switch hybridengine to rollout mode.

        Loads actor params onto GPU (if offloaded), streams weights into the
        rollout engine (full weights, or LoRA adapter weights only), then
        re-offloads the actor and resumes the KV cache.
        """
        aggressive_empty_cache(force_sync=True)
        set_expandable_segments(False)

        if self._is_offload_param:
            load_megatron_model_to_gpu(self.actor.actor_module, load_grad=False)
            log_gpu_memory_usage("After load actor params during rollout_mode", logger=logger)

        # Build peft_config for vLLM LoRA support
        peft_config = None
        do_lora_base_sync = False
        if not self.peft_merge and self.peft_cls is not None:
            peft_config = build_peft_config_for_vllm(self.config.model.get("lora", {}))
            # set sleep level for LoRA adapter weights only sync
            # TODO: make this configurable so that users with small
            # main memory can trade sync time to avoid OOM
            self.rollout.sleep_level = 1
            # NOTE(review): with sleep_level pinned to 1 just above, the second
            # clause below is always False here, so base layers sync only until
            # base_sync_done flips — revisit together with the TODO above.
            do_lora_base_sync = (not self.base_sync_done) or (
                self.rollout.sleep_level != 1 and self.config.rollout.free_cache_engine
            )

        # Choose the weight export path: bridge (vanilla / adapter-only / HF)
        # or the generic per-tensor generator.
        if self.bridge is not None:
            if self.vanilla_bridge:
                per_tensor_param = self.bridge.export_weights(self.actor.actor_module)
            elif not self.peft_merge and self.peft_cls is not None:
                # Only export adapter weights
                per_tensor_param = self.bridge.export_adapter_weights(self.actor.actor_module)
            else:
                per_tensor_param = self.bridge.export_hf_weights(self.actor.actor_module)
        else:
            per_tensor_param = per_tensor_generator(
                self.actor.actor_module,
                self.actor_model_config,
                self.weight_converter,
                self.tf_config,
                self.layer_name_mapping,
            )
        if self.config.rollout.free_cache_engine:
            await self.rollout.resume(tags=["weights"])
        if do_lora_base_sync:
            # Base layer sync
            # NOTE(review): this path uses self.bridge unconditionally —
            # assumes a bridge is present whenever LoRA base sync is needed;
            # confirm for non-bridge setups.
            per_tensor_param_lora_base = self.bridge.export_hf_weights(
                self.actor.actor_module, merge_adapter_weights=False
            )
            await self.rollout.update_weights(
                add_base_layer_suffix(per_tensor_param_lora_base, model_type=self.hf_config.model_type),
                peft_config=peft_config,
                base_sync_done=False,
            )
            # Mark base sync as done after first successful sync
            self.base_sync_done = True
        await self.rollout.update_weights(per_tensor_param, peft_config=peft_config, base_sync_done=True)
        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.actor.actor_module)
        aggressive_empty_cache(force_sync=True)
        if self.config.rollout.free_cache_engine:
            await self.rollout.resume(tags=["kv_cache"])
        set_expandable_segments(True)
    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
    @GPUMemoryLogger(role="update_actor", logger=logger)
    @DistProfiler.annotate(color="red", role="actor_update")
    def update_actor(self, data: DataProto):
        """Run PPO policy updates on the batch and return training metrics (on CPU)."""
        assert self._is_actor
        # Onload params/optimizer before training when offloading is enabled.
        if self._is_offload_param:
            load_megatron_model_to_gpu(self.actor_module)
            log_gpu_memory_usage("After load actor params and grad during update_actor", logger=logger)
        if self._is_offload_optimizer:
            load_megatron_optimizer(self.actor_optimizer)
            log_gpu_memory_usage("After load actor optimizer during update_actor", logger=logger)

        micro_batch_size = self.config.actor.ppo_micro_batch_size_per_gpu
        data.meta_info["micro_batch_size"] = micro_batch_size
        dataloader = self.actor.make_minibatch_iterator(data=data)
        with Timer(name="update_policy", logger=None) as timer:
            metrics = self.actor.update_policy(dataloader=dataloader)
        delta_time = timer.last

        # MFU: achieved FLOPs (scaled by ppo_epochs) over hardware peak, per rank.
        global_num_tokens = data.meta_info["global_token_num"]
        images_seqlens = data.meta_info.get("images_seqlens", None)
        estimated_flops, promised_flops = self.flops_counter.estimate_flops(
            global_num_tokens, delta_time, images_seqlens=images_seqlens
        )
        metrics["perf/mfu/actor"] = estimated_flops * self.config.actor.ppo_epochs / promised_flops / self.world_size
        metrics["perf/max_memory_allocated_gb"] = get_torch_device().max_memory_allocated() / (1024**3)
        metrics["perf/max_memory_reserved_gb"] = get_torch_device().max_memory_reserved() / (1024**3)
        metrics["perf/cpu_memory_used_gb"] = psutil.virtual_memory().used / (1024**3)

        from verl.utils.megatron.optimizer import get_megatron_last_lr

        metrics["actor/lr"] = get_megatron_last_lr(self.actor_optimizer)
        # Advance the LR scheduler one step per update call.
        self.actor_optimizer_scheduler.step(1)

        # TODO: here, we should return all metrics
        output = DataProto(meta_info={"metrics": metrics})
        output = output.to("cpu")
        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.actor_module)
            log_gpu_memory_usage("After offload actor params and grad during update_actor", logger=logger)
        if self._is_offload_optimizer:
            offload_megatron_optimizer(self.actor_optimizer)
            log_gpu_memory_usage("After offload actor optimizer during update_actor", logger=logger)
        aggressive_empty_cache(force_sync=True)
        return output
    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="rollout"))
    @GPUMemoryLogger(role="generate_sequences", logger=logger)
    @DistProfiler.annotate(color="red", role="rollout_generate")
    def generate_sequences(self, prompts: DataProto):
        """Generate rollout sequences for a prompt batch, switching the hybrid
        engine into rollout mode and back to trainer mode around generation."""
        assert self._is_rollout
        prompts = prompts.to(get_device_name())
        # Prefer tokens from generation_config, falling back to the tokenizer.
        meta_info = {
            "eos_token_id": self.generation_config.eos_token_id
            if self.generation_config is not None
            else self.tokenizer.eos_token_id,
            "pad_token_id": self.generation_config.pad_token_id
            if self.generation_config is not None
            else self.tokenizer.pad_token_id,
        }
        prompts.meta_info.update(meta_info)
        # Release optimizer GPU memory before generation.
        if self._is_offload_optimizer:
            offload_megatron_optimizer(self.actor_optimizer)
        timing_generate = {}
        if self._is_actor:  # For rollout only, we do not switch context.
            loop = get_event_loop()
            loop.run_until_complete(self.rollout_mode())
            log_gpu_memory_usage("After switch to rollout mode", logger=logger)
        with simple_timer("generate_sequences", timing_generate):
            output = self.rollout.generate_sequences(prompts=prompts)
        if self._is_actor:
            loop.run_until_complete(self.trainer_mode())
            log_gpu_memory_usage("After switch to trainer mode", logger=logger)

        # We calculate the average timing across all ranks
        # to make sure meta_info["timing"] is the same
        timing_generate_topk_ratio, timing_generate_min, timing_generate_max = topk_reduce_ratio_min_max(
            timing_generate["generate_sequences"]
        )
        timing_generate = reduce_timing(timing_generate)
        timing_generate.update(
            {
                "generation_timing/max": timing_generate_max,
                "generation_timing/min": timing_generate_min,
                "generation_timing/topk_ratio": timing_generate_topk_ratio,
            }
        )
        output.meta_info["timing"] = timing_generate
        output = output.to("cpu")
        # clear kv cache
        aggressive_empty_cache(force_sync=True)
        return output
    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
    @GPUMemoryLogger(role="compute_ref_log_prob", logger=logger)
    @DistProfiler.annotate(color="olive", role="ref_compute_log_prob")
    def compute_ref_log_prob(self, data: DataProto):
        """Compute reference-policy log-probs; with LoRA, reuse the actor with
        adapters disabled instead of a standalone ref model."""
        if self.peft_cls is not None:
            # if is lora, actor without lora applied is the ref
            data.meta_info["is_lora"] = True
            return self.compute_log_prob(data)
        assert self._is_ref
        if self._ref_is_offload_param:
            load_megatron_model_to_gpu(self.ref_module, load_grad=False)
            log_gpu_memory_usage("After load ref params and grad during compute_ref_log_prob", logger=logger)
        micro_batch_size = self.config.ref.log_prob_micro_batch_size_per_gpu
        data.meta_info["micro_batch_size"] = micro_batch_size
        data.meta_info["max_token_len"] = self.config.ref.log_prob_max_token_len_per_gpu
        data.meta_info["use_dynamic_bsz"] = self.config.ref.log_prob_use_dynamic_bsz
        # Use the rollout sampling temperature for log-prob computation.
        data.meta_info["temperature"] = self.config.rollout.temperature
        output, _, _ = self.ref_policy.compute_log_prob(data=data, calculate_entropy=False)
        output = DataProto.from_dict(tensors={"ref_log_prob": output})
        output = output.to("cpu")
        if self._ref_is_offload_param:
            offload_megatron_model_to_cpu(self.ref_module)
            log_gpu_memory_usage("After offload ref params and grad during compute_ref_log_prob", logger=logger)
        aggressive_empty_cache(force_sync=True)
        return output
    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
    @GPUMemoryLogger(role="compute_log_prob", logger=logger)
    @DistProfiler.annotate(color="blue", role="actor_compute_log_prob")
    def compute_log_prob(self, data: DataProto):
        """Recompute log-probs under the current actor.

        Doubles as the LoRA ref path: when ``meta_info["is_lora"]`` is set,
        adapters are disabled and the result is labeled ``ref_log_prob``.
        """
        assert self._is_actor
        if self._is_offload_param:
            load_megatron_model_to_gpu(self.actor_module, load_grad=False)
            log_gpu_memory_usage("After load actor params and grad during compute_log_prob", logger=logger)
        is_lora = data.meta_info.pop("is_lora", False)
        # LoRA-ref path: run the base model with adapters disabled.
        adapter_ctx = self.peft_cls.disable_adapter(self.actor_module) if is_lora else nullcontext()
        # we should always recompute old_log_probs when it is HybridEngine
        # Batching knobs come from the ref section on the LoRA-ref path, else rollout.
        config_source = self.config.ref if is_lora else self.config.rollout
        data.meta_info["micro_batch_size"] = config_source.log_prob_micro_batch_size_per_gpu
        data.meta_info["max_token_len"] = config_source.log_prob_max_token_len_per_gpu
        data.meta_info["use_dynamic_bsz"] = config_source.log_prob_use_dynamic_bsz
        data.meta_info["temperature"] = self.config.rollout.temperature
        # R2 records routing decisions during this forward pass; R3 replays
        # previously recorded ones.
        if self.enable_routing_replay and self.config.actor.router_replay.mode == "R2":
            RouterReplay.set_global_router_replay_action(RouterReplayAction.RECORD)
        if self.enable_routing_replay and self.config.actor.router_replay.mode == "R3":
            RouterReplay.set_global_router_replay_action(RouterReplayAction.REPLAY_FORWARD)
        with adapter_ctx:
            output, entropys, layers_topk_idx = self.actor.compute_log_prob(data=data, calculate_entropy=not is_lora)
        # LoRA-ref results become ref_log_prob; the normal path yields
        # old_log_probs plus entropys.
        tensors = {"ref_log_prob": output} if is_lora else {"old_log_probs": output}
        if not is_lora:
            tensors["entropys"] = entropys
        output = DataProto.from_dict(
            tensors=tensors,
            meta_info={"temperature": self.config.rollout.temperature},
        )
        # NOTE(review): unlike the guards above, these reads don't check
        # enable_routing_replay first — assumes config.actor.router_replay is
        # always present; confirm.
        if self.config.actor.router_replay.mode == "R2":
            output.batch["routed_experts"] = layers_topk_idx
        if self.config.actor.router_replay.mode in ["R2", "R3"]:
            RouterReplay.clear_global_indices()
            RouterReplay.clear_global_router_replay_action()
        output = output.to("cpu")
        # clear kv cache
        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.actor_module)
            log_gpu_memory_usage("After offload actor params and grad during compute_log_prob", logger=logger)
        aggressive_empty_cache(force_sync=True)
        return output
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def load_checkpoint(self, checkpoint_path, hdfs_path=None, del_local_after_load=True):
# No checkpoint to load, just offload the model and optimizer to CPU
if checkpoint_path is None:
if self._is_offload_param:
offload_megatron_model_to_cpu(self.actor_module)
if self._is_offload_optimizer:
offload_megatron_optimizer(self.actor_optimizer)
log_gpu_memory_usage("After offload actor params and optimizer during load_checkpoint", logger=logger)
return
if self._is_offload_param:
load_megatron_model_to_gpu(self.actor_module)
self.checkpoint_mananager.load_checkpoint(
local_path=checkpoint_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load
)
if self._is_offload_param:
offload_megatron_model_to_cpu(self.actor_module)
if self._is_offload_optimizer:
offload_megatron_optimizer(self.actor_optimizer)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def load_pretrained_model(self, checkpoint_path, del_local_after_load=True):
pass
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def save_checkpoint(self, checkpoint_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None):
if self._is_offload_param:
load_megatron_model_to_gpu(self.actor_module)
if self.checkpoint_mananager.checkpoint_config.async_save and self._is_offload_optimizer:
load_megatron_optimizer(self.actor_optimizer)
self.checkpoint_mananager.save_checkpoint(
local_path=checkpoint_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep
)
torch.distributed.barrier()
if self._is_offload_param:
offload_megatron_model_to_cpu(self.actor_module)
if self.checkpoint_mananager.checkpoint_config.async_save and self._is_offload_optimizer:
offload_megatron_optimizer(self.actor_optimizer)
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def async_calls_finalize_fn_exec(self, blocking=False):
        """Finalize pending async dist-checkpoint save calls on all ranks (optionally blocking)."""
        from megatron.core.dist_checkpointing.strategies.base import async_calls

        async_calls.maybe_finalize_async_calls(blocking=blocking)
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def start_profile(self, **kwargs) -> None:
        """Start profiling for the current rank in the current training step.

        Extra keyword args are forwarded to the underlying profiler's start().
        """
        self.profiler.start(**kwargs)
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def stop_profile(self) -> None:
        """Stop profiling for the current rank in the current training step."""
        # Delegates directly to self.profiler.
        self.profiler.stop()
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def dump_memory_snapshot(self, tag: str = "manual", sub_dir: str = None) -> None:
"""Manually trigger a CUDA memory snapshot dump on all ranks."""
# Memory snapshot is now handled by the profiler system
# This method is kept for backward compatibility but delegates to profiler
if hasattr(self, "profiler") and hasattr(self.profiler, "_impl"):
try:
# Try to use the profiler's memory snapshot functionality
if hasattr(self.profiler._impl, "sampler"):
out_dir = OmegaConf.select(self.config, "actor.profiler.save_path") or "."
self.profiler._impl.sampler.dump_memory_snapshot(out_dir=out_dir, tag=tag, sub_dir=sub_dir)
except Exception as e:
# Log a warning if memory snapshot fails. This might be expected if the profiler doesn't support it.
logger.warning(f"Failed to dump memory snapshot: {e}")
class AsyncActorRolloutRefWorker(ActorRolloutRefWorker):
    """Async variant of ActorRolloutRefWorker exposing a non-blocking weight sync endpoint."""

    @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
    async def update_weights(self):
        # Switching the hybrid engine into rollout mode pushes the latest actor
        # weights into the rollout engine; True acts as a completion ack.
        await self.rollout_mode()
        return True
class CriticWorker(MegatronWorker, DistProfilerExtension):
    def __init__(self, config: McoreCriticConfig):
        """Initialize the critic worker: profiler, distributed / model-parallel
        setup, offload flags, and batch-size normalization.

        Args:
            config: the critic section of the trainer config.
        """
        Worker.__init__(self)
        # Set up the distributed profiler; tool_config only for known tools.
        omega_profiler_config = config.get("profiler", {})
        profiler_config = omega_conf_to_dataclass(omega_profiler_config, dataclass_type=ProfilerConfig)
        if omega_profiler_config.get("tool", None) in ["npu", "nsys", "torch", "torch_memory"]:
            tool_config = omega_conf_to_dataclass(
                omega_profiler_config.get("tool_config", {}).get(omega_profiler_config.get("tool"))
            )
        else:
            tool_config = None
        DistProfilerExtension.__init__(
            self, DistProfiler(rank=self.rank, config=profiler_config, tool_config=tool_config)
        )
        self.config: McoreCriticConfig = config

        # NOTE(sgm): We utilize colocate WorkerGroup by default.
        # As a result, Workers for different model share the same process.
        # Therefore, we only require one distribute initialization.
        # To utilize different parallel strategy in different models:
        # 1, users should disable WorkerDict; 2.assign different ResourcePool to different models,
        # 3. and apply the following patch in ray==2.10, https://github.com/ray-project/ray/pull/44385
        if not torch.distributed.is_initialized():
            set_numa_affinity()
            rank = int(os.environ["LOCAL_RANK"])
            torch.distributed.init_process_group(
                backend=get_nccl_backend(),
                timeout=datetime.timedelta(seconds=self.config.get("nccl_timeout", 600)),
                init_method=os.environ.get("DIST_INIT_METHOD", None),
            )
            get_torch_device().set_device(rank)
            mpu.initialize_model_parallel(
                tensor_model_parallel_size=self.config.megatron.tensor_model_parallel_size,
                pipeline_model_parallel_size=self.config.megatron.pipeline_model_parallel_size,
                virtual_pipeline_model_parallel_size=self.config.megatron.virtual_pipeline_model_parallel_size,
                use_sharp=False,
                context_parallel_size=self.config.megatron.context_parallel_size,
                expert_model_parallel_size=self.config.megatron.expert_model_parallel_size,
                expert_tensor_parallel_size=self.config.megatron.expert_tensor_parallel_size,
                nccl_communicator_config_path=None,
            )

        # Only tp rank 0 on the last pp stage (with cp rank 0) collects outputs.
        is_collect = (
            mpu.get_tensor_model_parallel_rank() == 0
            and mpu.get_pipeline_model_parallel_rank() == mpu.get_pipeline_model_parallel_world_size() - 1
            and mpu.get_context_parallel_rank() == 0
        )
        self._register_dispatch_collect_info(
            mesh_name="critic", dp_rank=mpu.get_data_parallel_rank(), is_collect=is_collect
        )

        set_random_seed(seed=self.config.megatron.seed)

        # set Megatron CPU-offload flags for params and optimizer state
        self._is_offload_param = self.config.megatron.param_offload
        self._is_offload_optimizer = self.config.megatron.optimizer_offload

        # normalize config: scale mini-batch by rollout_n, then shard by DP size.
        self.config.ppo_mini_batch_size *= self.config.rollout_n
        self.config.ppo_mini_batch_size //= mpu.get_data_parallel_world_size()
        if self.config.get("ppo_micro_batch_size", None):
            self.config.ppo_micro_batch_size //= mpu.get_data_parallel_world_size()
            self.config.ppo_micro_batch_size_per_gpu = self.config.ppo_micro_batch_size
# TODO(sgm): support critic model offload
    def _build_critic_model_optimizer(
        self, model_path, optim_config, override_model_config, override_transformer_config, override_ddp_config
    ):
        """Build the Megatron critic (value) module, its optimizer, and LR scheduler.

        Returns:
            ``(critic_module, critic_optimizer, critic_optimizer_scheduler, hf_config, optim_config)``.
        """
        from verl.utils.megatron.optimizer import (
            get_megatron_optimizer,
            get_megatron_optimizer_param_scheduler,
            init_megatron_optim_config,
        )
        from verl.utils.megatron_utils import McoreModuleWrapperConfig, make_megatron_module
        from verl.utils.model import print_model_size

        self._init_hf_config_and_tf_config(
            model_path,
            self.config.model.get("tokenizer_path") or model_path,
            self.dtype,
            override_model_config,
            override_transformer_config,
            self.config.model.get("trust_remote_code", False),
            self.config.megatron,
        )
        wrap_config = McoreModuleWrapperConfig(
            is_value_model=True,  # critic is value model
            share_embeddings_and_output_weights=False,
            wrap_with_ddp=True,
            use_distributed_optimizer=self.config.megatron.use_distributed_optimizer,
        )
        critic_module, updated_tf_config = make_megatron_module(
            wrap_config=wrap_config,
            tf_config=self.tf_config,
            hf_config=self.hf_config,
            bridge=self.bridge,
            provider=self.provider,
            override_model_config=override_model_config,
            override_ddp_config=override_ddp_config,
            peft_cls=self.peft_cls,
            peft_config=self.config.model.get("lora", None),
        )
        self.tf_config = updated_tf_config
        # note that here critic_module will be a list to be compatible with the construction of interleaved pp (vpp).
        # but here, we do not use pp (vpp) yet. For simplicity, we remove the list
        # critic_module = nn.ModuleList(critic_module)

        if self.config.load_weight:
            t0 = time.time()
            # Three loading paths: Megatron dist-checkpoint, bridge (vanilla or
            # HF), or direct HF->GPTModel weight conversion.
            if self.config.megatron.use_dist_checkpointing:
                load_mcore_dist_weights(
                    critic_module,
                    self.config.megatron.dist_checkpointing_path,
                    is_value_model=True,
                    prefix=self.config.megatron.dist_checkpointing_prefix,
                )
            else:
                if self.bridge is not None:
                    local_model_path = get_hf_model_path(self.config)
                    if self.vanilla_bridge:
                        self.bridge.load_weights(critic_module, local_model_path)
                    else:
                        # output_layer.weight may mismatch — presumably the
                        # value head differs from the HF LM head; confirm.
                        self.bridge.load_hf_weights(
                            critic_module, local_model_path, allowed_mismatched_params=["output_layer.weight"]
                        )
                else:
                    load_megatron_gptmodel_weights(
                        self.config, self.hf_config, critic_module, params_dtype=self.dtype, is_value_model=True
                    )
            t1 = time.time()
            if torch.distributed.get_rank() == 0:
                print(f"critic load_weight time: {t1 - t0}")
        if self.rank == 0:
            print_model_size(critic_module[0])

        # TODO: add more optimizer args into config
        optim_config_megatron = init_megatron_optim_config(
            optim_config,
            use_distributed_optimizer=wrap_config.use_distributed_optimizer,
            fp16=self.dtype == torch.float16,
        )
        critic_optimizer = get_megatron_optimizer(model=critic_module, config=optim_config_megatron)
        critic_optimizer_scheduler = get_megatron_optimizer_param_scheduler(
            optimizer=critic_optimizer, config=optim_config
        )
        get_torch_device().empty_cache()
        register_megatron_training_hooks(critic_module, critic_optimizer)
        return critic_module, critic_optimizer, critic_optimizer_scheduler, self.hf_config, optim_config
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def init_model(self):
        """Build the critic model/optimizer, the MegatronPPOCritic wrapper,
        FLOPs counter, and checkpoint manager on every rank."""
        # create critic
        from verl.utils.torch_dtypes import PrecisionType

        if self.config.model.get("external_lib", None) is not None:
            # This is used to import external_lib into the huggingface systems
            import importlib

            importlib.import_module(self.config.model.external_lib)
        override_model_config = OmegaConf.to_container(OmegaConf.create(self.config.model.get("override_config", {})))
        override_transformer_config = OmegaConf.to_container(
            OmegaConf.create(self.config.megatron.get("override_transformer_config", {}))
        )
        override_ddp_config = OmegaConf.to_container(
            OmegaConf.create(self.config.megatron.get("override_ddp_config", {}))
        )
        self.param_dtype = PrecisionType.to_dtype(self.config.megatron.dtype)
        self.dtype = PrecisionType.to_dtype(self.param_dtype)
        (
            self.critic_module,
            self.critic_optimizer,
            self.critic_optimizer_scheduler,
            self.critic_model_config,
            critic_optimizer_config,
        ) = self._build_critic_model_optimizer(
            model_path=self.config.model.path,
            optim_config=self.config.optim,
            override_model_config=override_model_config,
            override_transformer_config=override_transformer_config,
            override_ddp_config=override_ddp_config,
        )
        # Park params / optimizer state on CPU when offloading is enabled.
        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.critic_module)
        if self._is_offload_optimizer:
            offload_megatron_optimizer(self.critic_optimizer)
        self.critic = MegatronPPOCritic(
            config=self.config,
            model_config=self.critic_model_config,
            hf_config=self.hf_config,
            tf_config=self.tf_config,
            critic_module=self.critic_module,
            critic_optimizer=self.critic_optimizer,
            critic_optimizer_config=critic_optimizer_config,
        )
        self.flops_counter = FlopsCounter(self.critic_model_config)
        self.checkpoint_mananager = MegatronCheckpointManager(
            config=self.config,
            checkpoint_config=self.config.checkpoint,
            model_config=self.critic_model_config,
            transformer_config=self.tf_config,
            role="critic",
            model=self.critic_module,
            arch=self.architectures[0],
            hf_config=self.hf_config,
            param_dtype=self.param_dtype,
            share_embeddings_and_output_weights=False,
            processing_class=self.processor if self.processor is not None else self.tokenizer,
            optimizer=self.critic_optimizer,
            optimizer_scheduler=self.critic_optimizer_scheduler,
            use_distributed_optimizer=self.config.megatron.use_distributed_optimizer,
            use_checkpoint_opt_param_scheduler=self.config.optim.use_checkpoint_opt_param_scheduler,
            bridge=self.bridge,
            provider=self.provider,
            use_dist_checkpointing=self.config.megatron.use_dist_checkpointing,
            peft_cls=self.peft_cls,
        )
@register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="critic"))
@DistProfiler.annotate(color="cyan", role="compute_values")
def compute_values(self, data: DataProto):
micro_batch_size = self.config.ppo_micro_batch_size_per_gpu
data.meta_info["micro_batch_size"] = micro_batch_size
data.meta_info["max_token_len"] = self.config.forward_max_token_len_per_gpu
data.meta_info["use_dynamic_bsz"] = self.config.use_dynamic_bsz
data = data.to(get_device_id())
if self._is_offload_param:
load_megatron_model_to_gpu(self.critic_module)
values = self.critic.compute_values(data=data)
output = DataProto.from_dict(tensors={"values": values})
output = output.to("cpu")
if self._is_offload_param:
offload_megatron_model_to_cpu(self.critic_module)
return output
@register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="critic"))
@DistProfiler.annotate(color="pink", role="critic_update")
def update_critic(self, data: DataProto):
data = data.to(get_device_id())
if self._is_offload_param:
load_megatron_model_to_gpu(self.critic_module)
if self._is_offload_optimizer:
load_megatron_optimizer(self.critic_optimizer)
dataloader = self.critic.make_minibatch_iterator(data)
with Timer(name="update_critic", logger=None) as timer:
metrics = self.critic.update_critic(dataloader=dataloader)
delta_time = timer.last
global_num_tokens = data.meta_info["global_token_num"]
estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time)
metrics["perf/mfu/critic"] = estimated_flops * self.config.ppo_epochs / promised_flops / self.world_size
from verl.utils.megatron.optimizer import get_megatron_last_lr
metrics["critic/lr"] = get_megatron_last_lr(self.critic_optimizer)
self.critic_optimizer_scheduler.step(1)
output = DataProto(batch=None, meta_info={"metrics": metrics})
if self._is_offload_param:
offload_megatron_model_to_cpu(self.critic_module)
if self._is_offload_optimizer:
offload_megatron_optimizer(self.critic_optimizer)
output = output.to("cpu")
return output
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def load_checkpoint(self, checkpoint_path, hdfs_path=None, del_local_after_load=True):
        """Restore critic model/optimizer state from a checkpoint on every rank.

        Args:
            checkpoint_path: Local path of the checkpoint to load.
            hdfs_path: Optional remote (HDFS) location forwarded to the manager.
            del_local_after_load: Whether the local copy is removed after loading.
        """
        # Parameters must be resident on GPU while checkpoint state is applied.
        if self._is_offload_param:
            load_megatron_model_to_gpu(self.critic_module)
        # NOTE(review): "mananager" is a misspelling, but it matches the attribute
        # assigned in init_model — do not "fix" it here alone.
        self.checkpoint_mananager.load_checkpoint(
            local_path=checkpoint_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load
        )
        # Return to the offloaded steady state so GPU memory stays free between calls.
        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.critic_module)
        # Loading presumably materializes optimizer state on GPU; offload it again
        # if configured — TODO confirm against the checkpoint manager's behavior.
        if self._is_offload_optimizer:
            offload_megatron_optimizer(self.critic_optimizer)
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def save_checkpoint(self, checkpoint_path, hdfs_path=None, global_steps=0, max_ckpt_to_keep=None):
        """Persist critic model/optimizer state to ``checkpoint_path``.

        Args:
            checkpoint_path: Local directory to write the checkpoint to.
            hdfs_path: Optional remote (HDFS) destination forwarded to the manager.
            global_steps: Training step recorded with the checkpoint (forwarded as
                ``global_step``, singular, to the manager).
            max_ckpt_to_keep: Presumably a cap on retained checkpoints enforced by
                the manager — confirm against MegatronCheckpointManager.
        """
        # Weights must be on GPU for the checkpoint manager to read them.
        if self._is_offload_param:
            load_megatron_model_to_gpu(self.critic_module)
        self.checkpoint_mananager.save_checkpoint(
            local_path=checkpoint_path, hdfs_path=hdfs_path, global_step=global_steps, max_ckpt_to_keep=max_ckpt_to_keep
        )
        # Restore the offloaded steady state after writing.
        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.critic_module)
| verl__workers__megatron_workers.py |
# Copyright 2023-2025 SGLang Team
# Copyright Amazon.com, Inc. or its affiliates.
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any, Callable
import torch
from verl.protocol import DataProto
# Type of a user-supplied scoring callable: accepts arbitrary arguments and
# returns an arbitrary payload (interpretation is left to the concrete manager).
RawRewardFn = Callable[..., Any]
class AbstractRewardManager(ABC):
    """Interface that all reward managers implement.

    A reward manager is constructed with a tokenizer and an optional raw
    scoring function, and is called on a rollout batch to produce a reward
    tensor (optionally accompanied by extra per-sample info).
    """

    @abstractmethod
    def __init__(
        self,
        tokenizer: Any,
        num_examine: int,
        compute_score: RawRewardFn | None,
        reward_fn_key: str = "data_source",
        **kwargs: Any,
    ):
        pass

    @abstractmethod
    def __call__(
        self,
        data: DataProto,
        return_dict: bool = False,
    ) -> torch.Tensor | dict[str, Any]:
        pass

    def _extract_reward_from_rm_scores(
        self, data: DataProto, return_dict: bool = False
    ) -> torch.Tensor | dict[str, Any] | None:
        """Short-circuit on precomputed ``rm_scores`` if the batch carries them.

        This path has been deprecated.

        Args:
            data: Batch to inspect for an ``rm_scores`` entry.
            return_dict: When True, wrap the scores together with any extra
                info named by ``data.meta_info["reward_extra_keys"]``.

        Returns:
            ``None`` when ``rm_scores`` is absent; otherwise either the raw
            score tensor or a dict with ``reward_tensor`` / ``reward_extra_info``.
        """
        if "rm_scores" not in data.batch.keys():
            return None
        scores = data.batch["rm_scores"]
        if not return_dict:
            return scores
        extra_keys = data.meta_info.get("reward_extra_keys", [])
        extra_info = {key: data.non_tensor_batch[key] for key in extra_keys}
        return {"reward_tensor": scores, "reward_extra_info": extra_info}
| verl__workers__reward_manager__abstract.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.