text
stringlengths
1.03k
82.6k
file_name
stringlengths
8
85
# Copyright 2025 Individual Contributor: Mert Unsal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict
from typing import Any

import torch

from verl import DataProto
from verl.workers.reward_manager import register
from verl.workers.reward_manager.abstract import AbstractRewardManager, RawRewardFn


@register("batch")
class BatchRewardManager(AbstractRewardManager):
    """
    A batch reward manager that computes rewards for a batch of data.

    Unlike the per-sample managers, ``compute_score`` is called once with the
    whole batch (lists of data sources, decoded responses, ground truths and
    extra infos) and must return one score per sample.

    Args:
        tokenizer (Tokenizer): The tokenizer to use for decoding the responses.
        num_examine (int): The number of responses to examine.
        compute_score (callable): The function to compute the rewards.
        reward_fn_key (str): The key to use for the reward function.
        reward_kwargs (dict): The keyword arguments to pass to the reward function.
    """

    def __init__(
        self, tokenizer, num_examine, compute_score: RawRewardFn, reward_fn_key="data_source", **reward_kwargs
    ):
        self.tokenizer = tokenizer
        self.num_examine = num_examine  # max number of samples to print per data source, for debugging
        self.compute_score = compute_score
        self.reward_fn_key = reward_fn_key  # non_tensor_batch key holding each sample's data source
        self.reward_kwargs = reward_kwargs  # forwarded verbatim to compute_score on every call

    def verify(self, data):
        """Decode all responses and score the whole batch with ``compute_score``.

        Returns the raw list of scores (floats or dicts with a "score" key).
        """
        prompt_ids = data.batch["prompts"]
        response_ids = data.batch["responses"]
        attention_mask = data.batch["attention_mask"]
        prompt_len = prompt_ids.shape[-1]
        # attention_mask covers prompt + response; the response slice's sum is
        # each sample's number of valid (non-padding) response tokens.
        valid_response_lengths = attention_mask[:, prompt_len:].sum(dim=-1)

        responses_str = []
        for i in range(len(data)):
            valid_len = valid_response_lengths[i]
            valid_response_ids = response_ids[i][:valid_len]
            response_str = self.tokenizer.decode(valid_response_ids, skip_special_tokens=True)
            responses_str.append(response_str)

        ground_truths = [item.non_tensor_batch["reward_model"].get("ground_truth", None) for item in data]
        data_sources = data.non_tensor_batch[self.reward_fn_key]
        rollout_reward_scores = data.non_tensor_batch.get("reward_scores", [{} for _ in range(len(data))])
        extras = data.non_tensor_batch.get("extra_info", [{} for _ in range(len(data))])
        # Make any rollout-time reward scores visible to the scoring function.
        for i in range(len(data)):
            extras[i]["rollout_reward_scores"] = rollout_reward_scores[i]

        scores = self.compute_score(
            data_sources=data_sources,
            solution_strs=responses_str,
            ground_truths=ground_truths,
            extra_infos=extras,
            **self.reward_kwargs,
        )

        return scores

    def __call__(self, data: DataProto, return_dict: bool = False) -> torch.Tensor | dict[str, Any]:
        # If there is rm score, we directly return rm score. Otherwise, we compute via rm_score_fn
        reward_from_rm_scores = self._extract_reward_from_rm_scores(data, return_dict)
        if reward_from_rm_scores is not None:
            return reward_from_rm_scores

        reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32)
        reward_extra_info = defaultdict(list)
        prompt_ids = data.batch["prompts"]
        prompt_len = prompt_ids.shape[-1]
        attention_mask = data.batch["attention_mask"]
        valid_response_lengths = attention_mask[:, prompt_len:].sum(dim=-1)
        data_sources = data.non_tensor_batch[self.reward_fn_key]

        scores = self.verify(data)
        rewards = []
        already_printed: dict[str, Any] = {}

        for i in range(len(data)):
            length = valid_response_lengths[i].item()
            score = scores[i]

            if isinstance(score, dict):
                reward = score["score"]
                # Keep every key of the dict (including "score") as extra info.
                for key, value in score.items():
                    reward_extra_info[key].append(value)
            else:
                reward = score

            rewards.append(reward)
            # Sparse reward: placed on the last valid response token only.
            reward_tensor[i, length - 1] = reward

            data_source = data_sources[i]
            if already_printed.get(data_source, 0) < self.num_examine:
                response_str = self.tokenizer.decode(data.batch["responses"][i][:length], skip_special_tokens=True)
                prompt_str = self.tokenizer.decode(data.batch["prompts"][i], skip_special_tokens=True)
                ground_truth = data[i].non_tensor_batch["reward_model"].get("ground_truth", None)
                print("[prompt]", prompt_str)
                print("[response]", response_str)
                print("[ground_truth]", ground_truth)
                print("[score]", scores[i])
                already_printed[data_source] = already_printed.get(data_source, 0) + 1

        # Expose per-sample accuracy to downstream consumers of the batch.
        data.batch["acc"] = torch.tensor(rewards, dtype=torch.float32, device=prompt_ids.device)

        if return_dict:
            return {"reward_tensor": reward_tensor, "reward_extra_info": reward_extra_info}
        else:
            return reward_tensor
verl__workers__reward_manager__batch.py
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict

import torch

from verl import DataProto
from verl.utils.reward_score import default_compute_score
from verl.workers.reward_manager import register
from verl.workers.reward_manager.abstract import AbstractRewardManager


@register("dapo")
class DAPORewardManager(AbstractRewardManager):
    """The DAPO reward manager.

    Scores each response with ``compute_score`` and optionally applies a
    linear "overlong" penalty to responses whose length enters the last
    ``overlong_buffer_cfg.len`` tokens of ``max_resp_len``.

    Args:
        tokenizer: Tokenizer used to decode prompt/response token IDs.
        num_examine: Number of decoded samples per data source to print.
        compute_score: Scoring function; ``default_compute_score`` if None.
        reward_fn_key: non_tensor_batch key holding the data source.
        max_resp_len: Maximum response length; required when
            ``overlong_buffer_cfg`` is given.
        overlong_buffer_cfg: Config with ``enable``, ``len``,
            ``penalty_factor`` and ``log`` fields, or None to disable the
            overlong penalty entirely.
    """

    def __init__(
        self,
        tokenizer,
        num_examine,
        compute_score=None,
        reward_fn_key="data_source",
        max_resp_len=None,
        overlong_buffer_cfg=None,
    ) -> None:
        self.tokenizer = tokenizer
        self.num_examine = num_examine  # the number of batches of decoded responses to print to the console
        self.compute_score = compute_score or default_compute_score
        self.reward_fn_key = reward_fn_key
        self.overlong_buffer_cfg = overlong_buffer_cfg
        self.max_resp_len = max_resp_len

        if self.overlong_buffer_cfg is not None:
            assert self.max_resp_len is not None, (
                f"max_resp_len must be provided if {overlong_buffer_cfg=}, but got None"
            )
            assert self.max_resp_len >= self.overlong_buffer_cfg.len, (
                "max_resp_len must be larger than overlong_buffer.len"
            )
            assert not self.overlong_buffer_cfg.enable or self.overlong_buffer_cfg.len > 0, (
                "overlong_buffer.len must be positive when overlong penalty is enabled,"
                f"but got {self.overlong_buffer_cfg.len}."
                "To disable the overlong penalty, set overlong_buffer.enable = False"
            )

    def __call__(self, data: DataProto, return_dict: bool = False):
        """We will expand this function gradually based on the available datasets"""

        # If there is rm score, we directly return rm score. Otherwise, we compute via rm_score_fn
        reward_from_rm_scores = self._extract_reward_from_rm_scores(data, return_dict)
        if reward_from_rm_scores is not None:
            return reward_from_rm_scores

        reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32)
        reward_extra_info = defaultdict(list)

        already_print_data_sources = {}

        for i in range(len(data)):
            data_item = data[i]  # DataProtoItem

            prompt_ids = data_item.batch["prompts"]
            prompt_length = prompt_ids.shape[-1]

            # Prompts are left-padded, so valid tokens sit at the right edge.
            valid_prompt_length = data_item.batch["attention_mask"][:prompt_length].sum()
            valid_prompt_ids = prompt_ids[-valid_prompt_length:]

            response_ids = data_item.batch["responses"]
            valid_response_length = data_item.batch["attention_mask"][prompt_length:].sum()
            valid_response_ids = response_ids[:valid_response_length]

            # decode
            prompt_str = self.tokenizer.decode(valid_prompt_ids, skip_special_tokens=True)
            response_str = self.tokenizer.decode(valid_response_ids, skip_special_tokens=True)
            eos_token = self.tokenizer.eos_token
            if response_str.endswith(eos_token):
                response_str = response_str[: -len(eos_token)]

            ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"]
            data_source = data_item.non_tensor_batch[self.reward_fn_key]
            extra_info = data_item.non_tensor_batch.get("extra_info", {})
            rollout_reward_scores = data_item.non_tensor_batch.get("reward_scores", {})
            extra_info["rollout_reward_scores"] = rollout_reward_scores

            result = self.compute_score(
                data_source=data_source,
                solution_str=response_str,
                ground_truth=ground_truth,
                extra_info=extra_info,
            )

            score: float
            if isinstance(result, dict):
                score = result["score"]
                # Store the information including original reward
                for key, value in result.items():
                    reward_extra_info[key].append(value)
            else:
                score = result
                reward_extra_info["acc"].append(score)

            reward = score

            # BUGFIX: overlong_buffer_cfg defaults to None; guard before
            # dereferencing it so the default configuration does not raise
            # AttributeError here.
            if self.overlong_buffer_cfg is not None and self.overlong_buffer_cfg.enable:
                overlong_buffer_len = self.overlong_buffer_cfg.len
                expected_len = self.max_resp_len - overlong_buffer_len
                exceed_len = valid_response_length - expected_len
                overlong_penalty_factor = self.overlong_buffer_cfg.penalty_factor
                # Penalty grows linearly once the response enters the buffer
                # zone; clamped at 0 so short responses are never rewarded extra.
                overlong_reward = min(-exceed_len / overlong_buffer_len * overlong_penalty_factor, 0)
                reward += overlong_reward
                if self.overlong_buffer_cfg.log:
                    reward_extra_info["overlong_reward"].append(overlong_reward)
                    reward_extra_info["overlong"].append(overlong_reward < 0)

            # Sparse reward on the last valid response token.
            reward_tensor[i, valid_response_length - 1] = reward

            if data_source not in already_print_data_sources:
                already_print_data_sources[data_source] = 0

            if already_print_data_sources[data_source] < self.num_examine:
                already_print_data_sources[data_source] += 1
                print("[prompt]", prompt_str)
                print("[response]", response_str)
                print("[ground_truth]", ground_truth)
                if isinstance(result, dict):
                    for key, value in result.items():
                        print(f"[{key}]", value)
                else:
                    print("[score]", score)

        if return_dict:
            return {
                "reward_tensor": reward_tensor,
                "reward_extra_info": reward_extra_info,
            }
        else:
            return reward_tensor
verl__workers__reward_manager__dapo.py
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict
from typing import Any

import torch

from verl import DataProto
from verl.utils.reward_score import default_compute_score
from verl.workers.reward_manager import register
from verl.workers.reward_manager.abstract import AbstractRewardManager


@register("naive")
class NaiveRewardManager(AbstractRewardManager):
    """The reward manager."""

    def __init__(self, tokenizer, num_examine, compute_score=None, reward_fn_key="data_source") -> None:
        """
        Initialize the NaiveRewardManager instance.

        Args:
            tokenizer: The tokenizer used to decode token IDs into text.
            num_examine: The number of batches of decoded responses to print to the console for debugging purpose.
            compute_score: A function to compute the reward score. If None, `default_compute_score` will be used.
            reward_fn_key: The key used to access the data source in the non-tensor batch data. Defaults to
                "data_source".
        """
        self.tokenizer = tokenizer  # Store the tokenizer for decoding token IDs
        self.num_examine = num_examine  # the number of batches of decoded responses to print to the console
        self.compute_score = compute_score or default_compute_score
        self.reward_fn_key = reward_fn_key  # Store the key for accessing the data source

    def __call__(self, data: DataProto, return_dict: bool = False) -> torch.Tensor | dict[str, Any]:
        """We will expand this function gradually based on the available datasets"""

        # If there is rm score, we directly return rm score. Otherwise, we compute via rm_score_fn
        reward_from_rm_scores = self._extract_reward_from_rm_scores(data, return_dict)
        if reward_from_rm_scores is not None:
            return reward_from_rm_scores

        reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32)
        reward_extra_info = defaultdict(list)

        already_print_data_sources = {}

        for i in range(len(data)):
            data_item = data[i]  # DataProtoItem

            prompt_ids = data_item.batch["prompts"]
            prompt_length = prompt_ids.shape[-1]

            # Prompts are left-padded: valid tokens are the rightmost ones.
            valid_prompt_length = data_item.batch["attention_mask"][:prompt_length].sum()
            valid_prompt_ids = prompt_ids[-valid_prompt_length:]

            response_ids = data_item.batch["responses"]
            valid_response_length = data_item.batch["attention_mask"][prompt_length:].sum()
            valid_response_ids = response_ids[:valid_response_length]

            # decode
            prompt_str = self.tokenizer.decode(valid_prompt_ids, skip_special_tokens=True)
            response_str = self.tokenizer.decode(valid_response_ids, skip_special_tokens=True)

            ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"]
            data_source = data_item.non_tensor_batch[self.reward_fn_key]
            extra_info = data_item.non_tensor_batch.get("extra_info", {})
            num_turns = data_item.non_tensor_batch.get("__num_turns__", None)
            rollout_reward_scores = data_item.non_tensor_batch.get("reward_scores", {})
            # Surface multi-turn count and rollout-time scores to the scorer.
            extra_info["num_turns"] = num_turns
            extra_info["rollout_reward_scores"] = rollout_reward_scores

            score = self.compute_score(
                data_source=data_source,
                solution_str=response_str,
                ground_truth=ground_truth,
                extra_info=extra_info,
            )

            if isinstance(score, dict):
                reward = score["score"]
                # Store the information including original reward
                for key, value in score.items():
                    reward_extra_info[key].append(value)
            else:
                reward = score

            # Sparse reward on the last valid response token.
            reward_tensor[i, valid_response_length - 1] = reward

            if data_source not in already_print_data_sources:
                already_print_data_sources[data_source] = 0

            if already_print_data_sources[data_source] < self.num_examine:
                already_print_data_sources[data_source] += 1
                print("[prompt]", prompt_str)
                print("[response]", response_str)
                print("[ground_truth]", ground_truth)
                if isinstance(score, dict):
                    for key, value in score.items():
                        print(f"[{key}]", value)
                else:
                    print("[score]", score)

        if return_dict:
            return {
                "reward_tensor": reward_tensor,
                "reward_extra_info": reward_extra_info,
            }
        else:
            return reward_tensor
verl__workers__reward_manager__naive.py
# Copyright 2024 PRIME team and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from typing import Any, Callable, Optional

import psutil
import torch
from transformers import PreTrainedTokenizer

from verl import DataProto
from verl.utils.ray_utils import get_event_loop
from verl.utils.reward_score import default_compute_score
from verl.workers.reward_manager import register
from verl.workers.reward_manager.abstract import AbstractRewardManager


async def single_compute_score(evaluation_func, completion, reference, task, task_extra_info, executor, timeout=300.0):
    """Score one completion in the process pool, returning None on timeout or error."""
    loop = get_event_loop()
    try:
        # Ensure process_completion is called properly
        future = loop.run_in_executor(executor, partial(evaluation_func, task, completion, reference, task_extra_info))
        return await asyncio.wait_for(future, timeout=timeout)
    except asyncio.TimeoutError:
        print(f"[Timeout] Task timeout: {completion}")
        return None  # Default value for timed-out rows
    except Exception as e:
        print(f"[Error] Task failed: {e}, completion: {completion[:80]}")
        return None  # Default value for failed rows


async def parallel_compute_score_async(
    evaluation_func, completions, references, tasks, extra_info=None, num_processes=64
):
    """Score all completions concurrently in a process pool and return float scores.

    Failed / timed-out rows score 0.0; numeric results are cast to float;
    other results are assumed to be sequences whose first element is the score.
    """
    if extra_info is None:
        extra_info = [None] * len(tasks)
    scores = []
    with ProcessPoolExecutor(max_workers=num_processes) as executor:
        # to prevent very occasional starvation caused by some anomalous programs ( like infinite loop ), the
        # exceptions in async programs will instantly halt the evaluation, and all summoned processes will be killed.
        try:
            # Create tasks for all rows
            tasks_async = [
                single_compute_score(evaluation_func, c, r, t, ei, executor, timeout=300.0)
                for c, r, t, ei in zip(completions, references, tasks, extra_info, strict=True)
            ]
            results = await asyncio.gather(*tasks_async, return_exceptions=False)
        except Exception as e:
            print(f"[Exception] async gather failed: {e}")
            raise
        finally:
            # Hard-kill every worker so a hung evaluation cannot leak processes.
            # NOTE(review): relies on the private ProcessPoolExecutor._processes
            # attribute — verify against the stdlib version in use.
            terminated_count = 0
            for pid, proc in executor._processes.items():
                try:
                    p = psutil.Process(pid)
                    p.terminate()
                    try:
                        p.wait(timeout=5)
                    except psutil.TimeoutExpired:
                        p.kill()
                    terminated_count += 1
                except Exception:
                    pass
            print(f"[Shutdown] {terminated_count} subprocess(es) terminated.")

    # Process results
    for result, completion, reference, task in zip(results, completions, references, tasks, strict=True):
        if isinstance(result, Exception) or result is None:
            # Handle failed or timed-out tasks
            scores.append(0.0)
        elif isinstance(result, int | float | bool):
            scores.append(float(result))
        else:
            scores.append(float(result[0]))
    return scores


def run_reward_scoring(evaluation_func, completions, references, tasks, extra_info=None, num_processes=64):
    """Synchronous wrapper: run the async scoring on a fresh event loop."""
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(
            parallel_compute_score_async(evaluation_func, completions, references, tasks, extra_info, num_processes)
        )
    finally:
        loop.close()


@register("prime")
class PrimeRewardManager(AbstractRewardManager):
    """
    The Reward Manager used in https://github.com/PRIME-RL/PRIME
    """

    def __init__(
        self,
        tokenizer: PreTrainedTokenizer,
        num_examine: int,
        compute_score: Optional[Callable] = None,
        reward_fn_key: str = "data_source",
    ) -> None:
        self.tokenizer = tokenizer
        self.num_examine = num_examine  # the number of batches of decoded responses to print to the console
        self.compute_score = compute_score or default_compute_score
        self.reward_fn_key = reward_fn_key

    def verify(self, data):
        """
        verify the batch and save as ``acc`` tensor
        """
        # batched scoring
        prompt_ids = data.batch["prompts"]

        response_ids = data.batch["responses"]
        sequences_str = self.tokenizer.batch_decode(response_ids, skip_special_tokens=True)
        ground_truth = [data_item.non_tensor_batch["reward_model"]["ground_truth"] for data_item in data]
        data_sources = data.non_tensor_batch[self.reward_fn_key]
        extra_info = data.non_tensor_batch.get("extra_info", None)

        assert len(sequences_str) == len(ground_truth) == len(data_sources)
        try:
            scores = run_reward_scoring(
                self.compute_score,
                completions=sequences_str,
                references=ground_truth,
                tasks=data_sources,
                extra_info=extra_info,
                num_processes=64,
            )
        except asyncio.TimeoutError:
            # Fail-safe: any scoring failure yields all-zero rewards rather than a crash.
            print("[Timeout] Global reward scoring timed out. Setting all as 0.")
            scores = [0.0 for _ in range(len(sequences_str))]
        except Exception as e:
            print(f"[Error] Unexpected error during scoring. Setting all as 0. {e}")
            scores = [0.0 for _ in range(len(sequences_str))]
        data.batch["acc"] = torch.tensor(scores, dtype=torch.float32, device=prompt_ids.device)
        return scores

    def __call__(self, data: DataProto, return_dict: bool = False) -> torch.Tensor | dict[str, Any]:
        """We will expand this function gradually based on the available datasets"""

        # If there is rm score, we directly return rm score. Otherwise, we compute via rm_score_fn
        reward_from_rm_scores = self._extract_reward_from_rm_scores(data, return_dict)
        if reward_from_rm_scores is not None:
            return reward_from_rm_scores

        reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32)

        already_print_data_sources = {}

        # batched scoring
        prompt_ids = data.batch["prompts"]
        prompt_length = prompt_ids.shape[-1]

        response_ids = data.batch["responses"]
        valid_response_length = data.batch["attention_mask"][:, prompt_length:].sum(dim=-1)
        sequences_str = self.tokenizer.batch_decode(response_ids, skip_special_tokens=True)
        data_sources = data.non_tensor_batch["data_source"]

        scores = self.verify(data)

        for i in range(len(data)):
            data_source = data_sources[i]
            # Sparse reward on the last valid response token.
            reward_tensor[i, valid_response_length[i].item() - 1] = scores[i]

            if data_source not in already_print_data_sources:
                already_print_data_sources[data_source] = 0

            if already_print_data_sources[data_source] < self.num_examine:
                already_print_data_sources[data_source] += 1
                print(sequences_str)

        if return_dict:
            return {"reward_tensor": reward_tensor}
        else:
            return reward_tensor
verl__workers__reward_manager__prime.py
# Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable from verl.workers.reward_manager.abstract import AbstractRewardManager __all__ = ["register", "get_reward_manager_cls"] REWARD_MANAGER_REGISTRY: dict[str, type[AbstractRewardManager]] = {} def register(name: str) -> Callable[[type[AbstractRewardManager]], type[AbstractRewardManager]]: """Decorator to register a reward manager class with a given name. Args: name: `(str)` The name of the reward manager. """ def decorator(cls: type[AbstractRewardManager]) -> type[AbstractRewardManager]: if name in REWARD_MANAGER_REGISTRY and REWARD_MANAGER_REGISTRY[name] != cls: raise ValueError( f"Reward manager {name} has already been registered: {REWARD_MANAGER_REGISTRY[name]} vs {cls}" ) REWARD_MANAGER_REGISTRY[name] = cls return cls return decorator def get_reward_manager_cls(name: str) -> type[AbstractRewardManager]: """Get the reward manager class with a given name. Args: name: `(str)` The name of the reward manager. Returns: `(type)`: The reward manager class. """ if name not in REWARD_MANAGER_REGISTRY: raise ValueError(f"Unknown reward manager: {name}") return REWARD_MANAGER_REGISTRY[name]
verl__workers__reward_manager__registry.py
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import importlib
from abc import ABC, abstractmethod
from typing import Generator

import torch
from torch.distributed.device_mesh import DeviceMesh

from verl import DataProto
from verl.utils.config import omega_conf_to_dataclass
from verl.workers.config import HFModelConfig, RolloutConfig

__all__ = ["BaseRollout"]


class BaseRollout(ABC):
    """Base class for rollout."""

    def __init__(
        self,
        config: RolloutConfig,
        model_config: HFModelConfig,
        device_mesh: DeviceMesh,
    ):
        # Normalize omegaconf configs into their dataclass forms.
        self.config = omega_conf_to_dataclass(config)
        self.model_config: HFModelConfig = omega_conf_to_dataclass(model_config, dataclass_type=HFModelConfig)
        self.device_mesh = device_mesh

    @abstractmethod
    async def resume(self, tags: list[str]):
        """Resume rollout weights or kv cache in GPU memory.

        Args:
            tags: weights or kv_cache.
        """
        pass

    @abstractmethod
    async def update_weights(
        self,
        weights: Generator[tuple[str, torch.Tensor], None, None],
        **kwargs,
    ):
        """Update the weights of the rollout model.

        Args:
            weights: A generator that yields the name of the weight tensor and the tensor itself.
        """
        pass

    @abstractmethod
    async def release(self):
        """Release weights and kv cache in GPU memory."""
        pass

    def generate_sequences(self, prompts: DataProto) -> DataProto:
        """Batch generate sequences in sync mode.

        Args:
            prompts: The input prompts.

        Returns:
            The output sequences.
        """
        # Sync generation is optional; async server-mode rollouts don't implement it.
        raise NotImplementedError


# (engine name, mode) -> fully-qualified class path, imported lazily in
# get_rollout_class so heavy engine dependencies load only when selected.
_ROLLOUT_REGISTRY = {
    ("vllm", "async"): "verl.workers.rollout.vllm_rollout.ServerAdapter",
    ("sglang", "async"): "verl.workers.rollout.sglang_rollout.sglang_rollout.ServerAdapter",
    ("trtllm", "async"): "verl.workers.rollout.trtllm_rollout.trtllm_rollout.ServerAdapter",
}


def get_rollout_class(rollout_name: str, mode: str = "async") -> type[BaseRollout]:
    """Get the rollout class by name.

    Args:
        rollout_name: The name of the rollout.
        mode: The mode of the rollout, async: server mode.

    Returns:
        The rollout class.
    """
    assert (rollout_name, mode) in _ROLLOUT_REGISTRY, f"Rollout {rollout_name} with mode {mode} not found"
    fqdn = _ROLLOUT_REGISTRY[(rollout_name, mode)]
    module_name, class_name = fqdn.rsplit(".", 1)
    rollout_module = importlib.import_module(module_name)
    return getattr(rollout_module, class_name)
verl__workers__rollout__base.py
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Rollout with huggingface models.
TODO: refactor this class. Currently, it will hang when using FSDP HybridShard. We should actually create a single
GPU model. Then, get full state_dict and bind the state_dict to the single GPU model. Then, use the single GPU model
to perform generation.
"""

import contextlib

import torch
import torch.distributed
from tensordict import TensorDict
from torch import nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from transformers import GenerationConfig

from verl import DataProto
from verl.utils.device import get_device_name, get_torch_device
from verl.utils.torch_functional import get_response_mask

from .base import BaseRollout

__all__ = ["HFRollout"]


class HFRollout(BaseRollout):
    """Synchronous rollout that generates with a HuggingFace model's .generate()."""

    def __init__(self, module: nn.Module, config):
        # NOTE(review): BaseRollout.__init__ shown elsewhere takes
        # (config, model_config, device_mesh); this calls it with no args —
        # confirm which base class is actually in effect at runtime.
        super().__init__()
        self.config = config
        self.module = module

    def generate_sequences(self, prompts: DataProto) -> DataProto:
        """Split the batch into micro-batches, generate each, and concat the results."""
        batch_size = prompts.batch.batch_size[0]
        num_chunks = max(batch_size // self.config.get("micro_batch_size", batch_size), 1)
        batch_prompts = prompts.chunk(chunks=num_chunks)
        output = [self._generate_minibatch(p) for p in batch_prompts]
        output = DataProto.concat(output)
        return output

    @torch.no_grad()
    def _generate_minibatch(self, prompts: DataProto) -> DataProto:
        """Generate responses for one micro-batch and build the padded output batch."""
        # make sampling args can be overridden by inputs
        do_sample = prompts.meta_info.get("do_sample", self.config.do_sample)
        is_validate = prompts.meta_info.get("validate", False)

        temperature = prompts.meta_info.get("temperature", self.config.temperature)
        response_length = prompts.meta_info.get("response_length", self.config.response_length)
        top_p = prompts.meta_info.get("top_p", self.config.get("top_p", 1.0))
        top_k = max(0, prompts.meta_info.get("top_k", self.config.get("top_k", 0)))  # to be compatible with vllm

        if not do_sample:
            # do_sample==False -> greedy decoding
            kwargs = {
                "do_sample": False,
                "num_beams": 1,
            }
        elif is_validate:
            # do validate and do sample -> use val_kwargs
            kwargs = {
                "do_sample": True,
                "num_beams": 1,
                "top_k": max(0, self.config.val_kwargs.top_k),  # to be compatible with vllm
                "top_p": self.config.val_kwargs.top_p,
                "temperature": self.config.val_kwargs.temperature,
                "num_return_sequences": 1,  # if validate, already repeat in ray_trainer
            }
        else:
            # do_sample -> use rollout config
            kwargs = {
                "do_sample": True,
                "num_beams": 1,
                "top_p": top_p,
                "top_k": top_k,
                "temperature": temperature,
                # already repeat in ray_trainer
                # https://github.com/volcengine/verl/blob/2fdfbdcba6f2e076f64bc47922d8fe6cf7dc7da5/verl/trainer/ppo/ray_trainer.py#L1117
                "num_return_sequences": 1,
            }

        # make config according to generate mode
        generation_config = GenerationConfig(**kwargs)

        idx = prompts.batch["input_ids"]  # (bs, prompt_length)
        prompt_length = idx.size(1)
        attention_mask = prompts.batch["attention_mask"]  # left-padded attention_mask
        position_ids = prompts.batch["position_ids"]

        # used to construct attention_mask
        eos_token_id = prompts.meta_info["eos_token_id"]
        pad_token_id = prompts.meta_info["pad_token_id"]

        self.module.eval()
        param_ctx = contextlib.nullcontext()

        if isinstance(self.module, FSDP):
            # recurse need to set to False according to https://github.com/pytorch/pytorch/issues/100069
            param_ctx = FSDP.summon_full_params(self.module, writeback=False, recurse=False)
        with param_ctx, torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16):
            output = self.module.generate(
                input_ids=idx,
                attention_mask=attention_mask,
                position_ids=position_ids,
                do_sample=do_sample,
                max_new_tokens=response_length,
                eos_token_id=eos_token_id,
                pad_token_id=pad_token_id,
                generation_config=generation_config,
                output_scores=False,  # this is potentially very large
                return_dict_in_generate=True,
                use_cache=True,
            )
        # TODO: filter out the seq with no answers like ds-chat
        seq = output.sequences
        generated_batch_size = seq.size(0)  # bs * num_return_sequences

        # huggingface generate will stop generating when all the batch reaches [EOS].
        # We have to pad to response_length
        sequence_length = prompt_length + self.config.response_length
        delta_length = sequence_length - seq.shape[1]

        if delta_length > 0:
            delta_tokens = torch.ones(size=(generated_batch_size, delta_length), device=seq.device, dtype=seq.dtype)
            delta_tokens = pad_token_id * delta_tokens
            seq = torch.cat((seq, delta_tokens), dim=1)

        assert seq.shape[1] == sequence_length

        # make necessary reputations if num_return_sequences > 1
        num_return_sequences = kwargs.get("num_return_sequences", 1)
        if num_return_sequences > 1:
            position_ids = position_ids.repeat_interleave(num_return_sequences, dim=0)
            attention_mask = attention_mask.repeat_interleave(num_return_sequences, dim=0)

        prompt = seq[:, :prompt_length]  # (generated_batch_size, prompt_length)
        response = seq[:, prompt_length:]  # (generated_batch_size, response_length)

        # Extend position_ids past the prompt: last prompt position + 1..response_length.
        response_length = response.size(1)
        delta_position_id = torch.arange(1, response_length + 1, device=position_ids.device)
        delta_position_id = delta_position_id.unsqueeze(0).repeat(generated_batch_size, 1)

        response_position_ids = position_ids[:, -1:] + delta_position_id
        position_ids = torch.cat([position_ids, response_position_ids], dim=-1)

        # Mask out everything after the first EOS in each response.
        response_attention_mask = get_response_mask(
            response_id=response, eos_token=eos_token_id, dtype=attention_mask.dtype
        )
        attention_mask = torch.cat((attention_mask, response_attention_mask), dim=-1)

        batch = TensorDict(
            {
                "prompts": prompt,
                "responses": response,
                "input_ids": seq,
                "attention_mask": attention_mask,
                "position_ids": position_ids,
            },
            batch_size=generated_batch_size,
        )

        # empty cache before compute old_log_prob
        get_torch_device().empty_cache()

        self.module.train()
        return DataProto(batch=batch)
verl__workers__rollout__hf_rollout.py
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In single GPU rollout, the sequences are generated directly by sampling from the model.
The output will contain
1. output_ids
2. attention_masks (left padding)
3. eos_masks
4. log_probs
"""

import torch
import torch.nn.functional as F
from tensordict import TensorDict
from torch import nn

from verl import DataProto
from verl.utils.torch_functional import logprobs_from_logits

from ..base import BaseRollout

__all__ = ["NaiveRollout"]


class NaiveRollout(BaseRollout):
    def __init__(self, module: nn.Module, config):
        """A naive rollout. It requires the module to be compatible with huggingface APIs. That is:
        The module should define __call__ to receive input_ids, attention_mask and position_ids.
        It outputs a structure that contains logits field.

        Args:
            module: module here follows huggingface APIs
            config: DictConfig
        """
        super().__init__()
        self.config = config
        self.module = module

    @torch.no_grad()
    def generate_sequences(self, prompts: DataProto) -> DataProto:
        """Generate sequences token-by-token with a plain autoregressive sampling loop.

        Args:
            prompts: DataProto whose batch holds left-padded ``input_ids``,
                ``attention_mask`` and ``position_ids``, and whose meta_info
                carries ``eos_token_id`` (an iterable of stop token ids).

        Returns:
            DataProto with prompts (under ``input_ids``), ``responses``, the full
            ``sequences``, ``old_log_probs``, extended ``attention_mask`` and
            ``position_ids``.
        """
        idx = prompts.batch["input_ids"]  # (bs, prompt_length)
        attention_mask = prompts.batch["attention_mask"]  # left-padded attention_mask
        position_ids = prompts.batch["position_ids"]

        # used to construct attention_mask
        eos_token_id = prompts.meta_info["eos_token_id"]

        batch_size = idx.size(0)
        prompt_length = idx.size(1)

        self.module.eval()

        # Mask of "still generating" rows; appended to attention_mask each step so
        # tokens emitted after EOS are masked out.
        prev_attention_mask = torch.ones(
            size=(batch_size, 1), dtype=attention_mask.dtype, device=attention_mask.device
        )

        logits_lst = []
        for _ in range(self.config.response_length):
            # if the sequence context is growing too long we must crop it at block_size
            # idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:]
            idx_cond = idx
            # forward the model to get the logits for the index in the sequence
            # we use huggingface APIs here
            output = self.module(input_ids=idx_cond, attention_mask=attention_mask, position_ids=position_ids)
            logits = output.logits
            # pluck the logits at the final step and scale by desired temperature
            logits = logits[:, -1, :] / self.config.temperature  # (bs, vocab_size)
            # optionally crop the logits to only the top k options
            if self.config.top_k is not None:
                v, _ = torch.topk(logits, min(self.config.top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float("Inf")
            # apply softmax to convert logits to (normalized) probabilities
            probs = F.softmax(logits, dim=-1)
            # sample from the distribution
            if self.config.do_sample:
                idx_next = torch.multinomial(probs, num_samples=1)
            else:
                idx_next = torch.argmax(probs, dim=-1, keepdim=True)

            attention_mask = torch.cat((attention_mask, prev_attention_mask), dim=-1)

            # A row stops contributing to the mask once it has emitted any EOS token.
            for token_id in eos_token_id:
                prev_attention_mask = torch.logical_and(idx_next != token_id, prev_attention_mask.bool())
            # BUG FIX: Tensor.to() is not in-place; the original discarded its result,
            # leaving prev_attention_mask as bool and mismatching attention_mask's
            # dtype on the next torch.cat. Assign the converted tensor back.
            prev_attention_mask = prev_attention_mask.to(attention_mask.dtype)

            position_ids = torch.cat((position_ids, position_ids[:, -1:] + 1), dim=-1)

            # append sampled index to the running sequence and continue
            idx = torch.cat((idx, idx_next), dim=1)
            logits_lst.append(logits)

        logits = torch.stack(logits_lst, dim=1)  # (bs, response_length, vocab_size)
        prompts = idx[:, :prompt_length]  # (bs, prompt_length)
        response = idx[:, prompt_length:]  # (bs, response_length)
        log_probs = logprobs_from_logits(logits=logits, labels=response)
        batch = TensorDict(
            {
                "input_ids": prompts,
                "responses": response,
                "sequences": idx,
                "old_log_probs": log_probs,
                "attention_mask": attention_mask,
                "position_ids": position_ids,
            },
            batch_size=batch_size,
        )
        self.module.train()
        return DataProto(batch=batch)
verl__workers__rollout__naive__naive_rollout.py
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
from abc import ABC, abstractmethod
from enum import Enum
from typing import Any, Callable, Optional

from omegaconf import DictConfig
from pydantic import BaseModel
from ray.actor import ActorHandle

from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup, ResourcePoolManager
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.device import is_torch_npu_available
from verl.workers.config import HFModelConfig, RolloutConfig

# Use the dotted module name rather than the file path (__file__) so that
# logging configuration by module hierarchy works as expected.
logger = logging.getLogger(__name__)


class TokenOutput(BaseModel):
    token_ids: list[int]
    """response token ids"""
    log_probs: Optional[list[float]] = None
    """logprobs of response token ids"""
    routed_experts: Optional[Any] = None
    """routed experts of response token ids"""
    stop_reason: Optional[str] = None
    """stop reason: 'completed', 'aborted', or None for unknown"""
    num_preempted: Optional[int] = None
    """number of preempted times for metric calculation"""


class RolloutMode(Enum):
    # Rollout engine and training engine(fsdp/megatron) fused in same process
    # Rollout and trainer share GPUs, switch context with weight synchronization.
    # Usage scenarios: on-policy training.
    HYBRID = "hybrid"

    # Rollout engine colocated with hybrid engine in same ray placement group but in separate process.
    # Rollout and hybrid processes share GPUs, switch context without weight synchronization.
    # Usage scenarios: GRM (LLM as a judge).
    COLOCATED = "colocated"

    # Standalone rollout server with separate GPU resource, disaggregated architecture.
    # Usage scenarios: off-policy training.
    STANDALONE = "standalone"


class RolloutReplica(ABC):
    """Rollout replica is an individual server instance, which may be deployed on single or multiple nodes.
    It is equivalent to launch server in each node with command line:

    SGLang:
    ```
    python -m sglang.launch_server --node-rank 0 --nnode 2 ...
    python -m sglang.launch_server --node-rank 1 --nnode 2 ...
    ```

    vLLM:
    ```
    vllm serve --data-parallel-size 16 --data-parallel-size-local 8 --data-parallel-start-rank 0 ...
    vllm serve --data-parallel-size 16 --data-parallel-size-local 8 --data-parallel-start-rank 8 ...
    ```

    Args:
        replica_rank: int, rank of this rollout replica.
        config: RolloutConfig, full config.
        model_config: DictConfig, model config.
        gpus_per_node: int, number of gpus per node.
    """

    def __init__(
        self,
        replica_rank: int,
        config: RolloutConfig,
        model_config: DictConfig,
        gpus_per_node: int = 8,
        is_reward_model: bool = False,
    ) -> None:
        self.replica_rank = replica_rank
        self.config = omega_conf_to_dataclass(config)
        self.model_config: HFModelConfig = model_config
        # Total number of GPUs this replica spans (TP x DP x PP).
        self.world_size = (
            self.config.tensor_model_parallel_size
            * self.config.data_parallel_size
            * self.config.pipeline_model_parallel_size
        )
        self.gpus_per_node = gpus_per_node
        self.gpus_per_replica_node = min(gpus_per_node, self.world_size)
        assert self.world_size % self.gpus_per_replica_node == 0, (
            f"world_size {self.world_size} must be divisible by gpus_per_node {self.gpus_per_replica_node}"
        )
        self.nnodes = self.world_size // self.gpus_per_replica_node
        self.is_reward_model = is_reward_model

        # These fields are populated by one of the init_* methods below; they are
        # annotated Optional because they are None until initialization completes.
        self.rollout_mode: Optional[RolloutMode] = None
        self.workers: list[ActorHandle] = []
        self.resource_pool: Optional[RayResourcePool] = None
        self.bundle_indices: list[int] = []
        self.servers: list[ActorHandle] = []
        self._server_address: Optional[str] = None
        self._server_handle: Optional[ActorHandle] = None

    async def init_hybrid(self, worker_group: RayWorkerGroup):
        """Init hybrid rollout server, rollout engine and training engine(fsdp/megatron) fused in same process.

        Args:
            worker_group: RayWorkerGroup, fused workers where training engine(fsdp/megatron) have been initialized.
        """
        self.rollout_mode = RolloutMode.HYBRID
        self.workers = worker_group.workers[
            self.world_size * self.replica_rank : self.world_size * (self.replica_rank + 1)
        ]
        await self.launch_servers()

    async def init_hybrid_colocated(self, worker_group: RayWorkerGroup, resource_pool: RayResourcePool):
        """Init hybrid rollout server, rollout engine and training engine(fsdp/megatron) fused in same process.

        Args:
            worker_group: RayWorkerGroup, fused workers where training engine(fsdp/megatron) have been initialized.
            resource_pool: RayResourcePool, ray placement group where hybrid engine processes have been launched.
            bundle_indices: list[int], bundle indices for this rollout replica.
        """
        self.rollout_mode = RolloutMode.HYBRID
        self.workers = worker_group.workers[
            self.world_size * self.replica_rank : self.world_size * (self.replica_rank + 1)
        ]
        self.resource_pool = resource_pool
        self.bundle_indices = [self.replica_rank * self.world_size + idx for idx in range(self.world_size)]
        await self.launch_servers()

    # TODO(sgm): this should be the default solution, but need to make the RolloutMode more clear.
    async def init_colocated(self, resource_pool: RayResourcePool):
        """Init colocated rollout server, rollout engine and hybrid engine colocated in same ray placement group
        but in separate processes.

        Args:
            resource_pool: RayResourcePool, ray placement group where hybrid engine processes have been launched.
        """
        self.rollout_mode = RolloutMode.COLOCATED
        self.resource_pool = resource_pool
        use_gpu = self.rollout_worker_use_gpu()
        worker_group = RayWorkerGroup(
            resource_pool=self.resource_pool,
            ray_cls_with_init=self.get_ray_class_with_init_args(),
            bin_pack=False,
            name_prefix=f"rollout_colocate_{self.replica_rank}"
            if not self.is_reward_model
            else f"rollout_reward_colocate_{self.replica_rank}",
            use_gpu=use_gpu,
            device_name="cuda" if not is_torch_npu_available(check_device=False) else "npu",
        )
        self.workers = worker_group.workers
        await self.launch_servers()

    async def init_standalone(self):
        """Init standalone rollout server, create new resource pool for this rollout."""
        # create resource pool for this rollout
        self.rollout_mode = RolloutMode.STANDALONE
        resource_pool_name = (
            f"rollout_pool_{self.replica_rank}"
            if not self.is_reward_model
            else f"rollout_pool_reward_{self.replica_rank}"
        )
        resource_pool_spec = {
            resource_pool_name: [self.gpus_per_replica_node] * self.nnodes,
        }
        resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=None)
        resource_pool_manager.create_resource_pool()
        self.resource_pool = resource_pool_manager.resource_pool_dict[resource_pool_name]

        # create worker group for this rollout
        use_gpu = self.rollout_worker_use_gpu()
        worker_group = RayWorkerGroup(
            resource_pool=self.resource_pool,
            ray_cls_with_init=self.get_ray_class_with_init_args(),
            bin_pack=False,
            name_prefix=f"rollout_standalone_{self.replica_rank}"
            if not self.is_reward_model
            else f"rollout_reward_standalone_{self.replica_rank}",
            use_gpu=use_gpu,
            device_name="cuda" if not is_torch_npu_available(check_device=False) else "npu",
        )
        self.workers = worker_group.workers
        await self.launch_servers()

    @abstractmethod
    def get_ray_class_with_init_args(self) -> RayClassWithInitArgs:
        """Get rollout worker actor class for colocated and standalone mode."""
        raise NotImplementedError

    @abstractmethod
    async def launch_servers(self):
        """Launch http server in each node."""
        raise NotImplementedError

    @property
    def server_address(self) -> str:
        """Get rollout server address for OpenAI chat completion."""
        return self._server_address

    @property
    def server_handle(self) -> ActorHandle:
        """Get rollout server handle for Token-in-token-out generation."""
        return self._server_handle

    def rollout_worker_use_gpu(self) -> bool:
        return True

    async def wake_up(self):
        """Wake up each rollout server."""
        await asyncio.gather(*[server.wake_up.remote() for server in self.servers])

    async def sleep(self):
        """Sleep each rollout server."""
        await asyncio.gather(*[server.sleep.remote() for server in self.servers])

    async def abort_all_requests(self):
        """Partial rollout: abort and save all unfinished requests in each rollout server."""
        # TODO(wuxibin)
        # await asyncio.gather(*[server.abort_all_requests.remote() for server in self.servers])
        print(f"abort all requests in rollout replica {self.replica_rank}")

    async def resume_all_requests(self):
        """Partial rollout: resume all unfinished requests in each rollout server."""
        # TODO(wuxibin)
        # await asyncio.gather(*[server.resume_all_requests.remote() for server in self.servers])
        print(f"resume all requests in rollout replica {self.replica_rank}")

    async def clear_kv_cache(self):
        """reset kv cache in each rollout server."""
        await asyncio.gather(*[server.clear_kv_cache.remote() for server in self.servers])

    async def start_profile(self, **kwargs):
        """Start profiling on the replica."""
        await asyncio.gather(*[server.start_profile.remote(**kwargs) for server in self.servers])

    async def stop_profile(self):
        """Stop profiling on the replica."""
        await asyncio.gather(*[server.stop_profile.remote() for server in self.servers])


class RolloutReplicaRegistry:
    """Factory for managing rollout replica implementations."""

    # Maps backend name -> zero-arg loader returning the replica class; loaders
    # defer heavy imports until the backend is actually requested.
    _registry: dict[str, Callable[[], type[RolloutReplica]]] = {}

    @classmethod
    def register(cls, name: str, loader: Callable[[], type[RolloutReplica]]) -> None:
        """Register a new rollout replica type."""
        cls._registry[name] = loader

    @classmethod
    def get(cls, name: str) -> type[RolloutReplica]:
        """Get a rollout replica class by name."""
        if name not in cls._registry:
            raise ValueError(f"Unknown rollout mode: {name}. Available: {list(cls._registry.keys())}")
        return cls._registry[name]()


# Loader functions for built-in types
def _load_vllm():
    from verl.workers.rollout.vllm_rollout.vllm_async_server import vLLMReplica

    return vLLMReplica


def _load_sglang():
    os.environ["SGLANG_USE_CPU_ENGINE"] = "1"
    try:
        import vllm  # noqa: F401
    except ImportError:
        # SGLang's import path touches a few vllm symbols; stub out just enough
        # of the vllm module tree so the import succeeds without vllm installed.
        import sys
        import types
        from unittest.mock import Mock

        mock_vllm = types.ModuleType("vllm")
        mock_custom_ops = types.ModuleType("vllm._custom_ops")
        mock_custom_ops.scaled_fp8_quant = Mock()
        mock_vllm._custom_ops = mock_custom_ops
        mock_model_executor = types.ModuleType("vllm.model_executor")
        mock_layers = types.ModuleType("vllm.model_executor.layers")
        mock_activation = types.ModuleType("vllm.model_executor.layers.activation")

        class GeluAndMul:  # noqa: N801
            pass

        class SiluAndMul:  # noqa: N801
            pass

        mock_activation.GeluAndMul = GeluAndMul
        mock_activation.SiluAndMul = SiluAndMul
        mock_layers.activation = mock_activation
        mock_model_executor.layers = mock_layers
        mock_vllm.model_executor = mock_model_executor

        sys.modules["vllm"] = mock_vllm
        sys.modules["vllm._custom_ops"] = mock_custom_ops
        sys.modules["vllm.model_executor"] = mock_model_executor
        sys.modules["vllm.model_executor.layers"] = mock_layers
        sys.modules["vllm.model_executor.layers.activation"] = mock_activation

    from verl.workers.rollout.sglang_rollout.async_sglang_server import SGLangReplica

    del os.environ["SGLANG_USE_CPU_ENGINE"]
    return SGLangReplica


def _load_trtllm():
    from verl.workers.rollout.trtllm_rollout.trtllm_async_server import TRTLLMReplica

    return TRTLLMReplica


# Register built-in types
RolloutReplicaRegistry.register("vllm", _load_vllm)
RolloutReplicaRegistry.register("sglang", _load_sglang)
RolloutReplicaRegistry.register("trtllm", _load_trtllm)


# Original function for backward compatibility
def get_rollout_replica_class(rollout: str) -> type[RolloutReplica]:
    return RolloutReplicaRegistry.get(rollout)
verl__workers__rollout__replica.py
# Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import difflib import logging import os from enum import Enum from typing import Any, Optional import torch from pydantic import BaseModel, ConfigDict, model_validator from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast, ProcessorMixin from verl.tools.schemas import OpenAIFunctionToolCall, OpenAIFunctionToolSchema, ToolResponse from verl.utils.model import compute_position_id_with_mask logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) BASE_CHAT_HISTORY = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "I am a user."}, ] class FinishReasonTypeEnum(str, Enum): """The enum for finish reason type.""" LENGTH = "length" STOP = "stop" TOOL_CALL = "tool_calls" @classmethod def from_str(cls, value: str) -> "FinishReasonTypeEnum": if value == "stop": return cls.STOP elif value == "length": return cls.LENGTH elif value == "tool_calls": return cls.TOOL_CALL else: raise ValueError(f"Unsupported finish reason type: {value}") class Message(BaseModel): role: str content: str | dict[str, Any] | list[dict[str, Any]] | ToolResponse tool_calls: Optional[list[OpenAIFunctionToolCall]] = None class AsyncRolloutRequestStateEnum(str, Enum): """The enum for async rollout request state.""" PENDING = "pending" RUNNING = "running" COMPLETED = "completed" FAILED = 
"failed" TOOL_CALLING = "tool_calling" INTERACTING = "interacting" class TokenizationSanityCheckModeEnum(str, Enum): """The enum for tokenization sanity check mode.""" DISABLE = "disable" STRICT = "strict" IGNORE_STRIPPABLE = "ignore_strippable" class AsyncRolloutRequest(BaseModel): """The data model for async rollout.""" model_config = ConfigDict(arbitrary_types_allowed=True) batch_data_id: int = 0 rollout_offset: int = 0 request_id: str state: AsyncRolloutRequestStateEnum messages: list[Message] multi_modal_keys: Optional[list[str]] = None multi_modal_data: Optional[dict[str, Any]] = None multi_modal_inputs: Optional[dict[str, torch.Tensor]] = None tool_schemas: Optional[list[OpenAIFunctionToolSchema]] = None tools_kwargs: dict[str, Any] = {} interaction_kwargs: dict[str, Any] = {} input_ids: Optional[torch.Tensor] = None prompt_ids: Optional[torch.Tensor] = None response_ids: Optional[torch.Tensor] = None attention_mask: Optional[torch.Tensor] = None prompt_attention_mask: Optional[torch.Tensor] = None response_attention_mask: Optional[torch.Tensor] = None position_ids: Optional[torch.Tensor] = None prompt_position_ids: Optional[torch.Tensor] = None response_position_ids: Optional[torch.Tensor] = None loss_mask: Optional[torch.Tensor] = None prompt_loss_mask: Optional[torch.Tensor] = None response_loss_mask: Optional[torch.Tensor] = None reward_scores: dict[str, float] max_prompt_len: int max_response_len: int = 8192 max_model_len: int = 32768 metrics: dict[str, list[Any]] = {} output_token_ids: torch.Tensor | None = None rollout_log_probs: torch.Tensor | None = None use_inference_chat_template: bool tokenization_sanity_check_mode: TokenizationSanityCheckModeEnum generation_prompt_ids: Optional[torch.Tensor] = None base_conv_wo_gen_prompt_end_pos: int base_conv_with_gen_prompt_end_pos: int @model_validator(mode="before") @classmethod def initialize_request(cls, values): if not (messages := values.get("messages")): raise ValueError("messages is required for 
AsyncRolloutRequest initialization") if not (max_prompt_len := values.get("max_prompt_len")): raise ValueError("max_prompt_len is required for AsyncRolloutRequest initialization") if not (processing_class := values.pop("processing_class", None)): raise ValueError("processing_class is required for AsyncRolloutRequest initialization") values["messages"] = [Message.model_validate(msg) for msg in messages] # If there is no multi_modal_keys, we assume the multi-modal data is image and video. if not values.get("multi_modal_keys"): values["multi_modal_keys"] = ["image", "video"] if not values.get("multi_modal_data"): values["multi_modal_data"] = {key: [] for key in values["multi_modal_keys"]} else: # check if all multi_modal_keys are in multi_modal_data for key in values["multi_modal_keys"]: if key not in values["multi_modal_data"]: values["multi_modal_data"][key] = [] if not values.get("multi_modal_inputs"): values["multi_modal_inputs"] = {} tools = ( [tool.model_dump() for tool in tool_schemas] if (tool_schemas := values.get("tool_schemas", [])) else None ) multi_modal_data = values["multi_modal_data"] tokens_without_prompt = cls._handle_apply_chat_template( processing_class, messages, multi_modal_data=multi_modal_data, tools=tools, add_generation_prompt=False, tokenize=True, ) if ( values.get("input_ids") is None or values.get("attention_mask") is None or values.get("position_ids") is None ): tokenization_dict_with_prompt = cls._handle_apply_chat_template( processing_class, messages, multi_modal_data=multi_modal_data, tools=tools, add_generation_prompt=True, tokenize=True, return_dict=True, ) values["input_ids"], values["attention_mask"] = ( tokenization_dict_with_prompt["input_ids"], tokenization_dict_with_prompt["attention_mask"], ) if values["input_ids"].shape[-1] > max_prompt_len: # Only log the warning to avoid truncating in the middle of generation prompt. Consider raising an # error for this case in the future. 
# Ensure batch_data_id exists with default value if not provided if "batch_data_id" not in values: values["batch_data_id"] = cls.model_fields["batch_data_id"].default logger.warning( f"Prompt {values['batch_data_id']} has length {values['input_ids'].shape[-1]} " f"which is greater than max_prompt_len {max_prompt_len} after applied chat template with tools." ) # Process multi_modal_inputs multi_modal_inputs = tokenization_dict_with_prompt.copy() multi_modal_inputs.pop("input_ids", None) multi_modal_inputs.pop("attention_mask", None) values["multi_modal_inputs"] = multi_modal_inputs values["position_ids"] = values["prompt_position_ids"] = cls._get_position_ids( processing_class, values["input_ids"], values["attention_mask"], multi_modal_inputs ) values["prompt_ids"], values["prompt_attention_mask"] = values["input_ids"], values["attention_mask"] values["loss_mask"] = values["prompt_loss_mask"] = torch.zeros_like(values["input_ids"], dtype=torch.bool) values["generation_prompt_ids"] = values["input_ids"][..., tokens_without_prompt.shape[-1] :] values["base_conv_wo_gen_prompt_end_pos"] = cls._handle_apply_chat_template( processing_class, BASE_CHAT_HISTORY, multi_modal_data=multi_modal_data, tools=tools, add_generation_prompt=False, tokenize=True, ).shape[-1] values["base_conv_with_gen_prompt_end_pos"] = cls._handle_apply_chat_template( processing_class, BASE_CHAT_HISTORY, multi_modal_data=multi_modal_data, tools=tools, add_generation_prompt=True, tokenize=True, ).shape[-1] return values @staticmethod def _handle_apply_chat_template( processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, messages: list[Message], multi_modal_data: dict[str, Any], tools: Optional[list[OpenAIFunctionToolSchema]] = None, add_generation_prompt: bool = False, tokenize: bool = False, return_dict: bool = False, ): raw_prompt = processing_class.apply_chat_template( messages, tools=tools, add_generation_prompt=add_generation_prompt, tokenize=False ) if not tokenize: 
return raw_prompt if isinstance(processing_class, PreTrainedTokenizer) or isinstance(processing_class, PreTrainedTokenizerFast): if any(len(values) > 0 for values in multi_modal_data.values()): logger.warning( "There is multi_modal_data but you are not using a processor. Multi-modal data will be ignored." ) model_inputs = processing_class(text=[raw_prompt], return_tensors="pt") elif isinstance(processing_class, ProcessorMixin): # When we update multi_model_keys, we also need to update this logic images = images if len(images := multi_modal_data.get("image", [])) > 0 else None videos = videos if len(videos := multi_modal_data.get("video", [])) > 0 else None model_inputs = processing_class(text=[raw_prompt], images=images, videos=videos, return_tensors="pt") else: raise ValueError(f"Unsupported processing class type: {type(processing_class)}") model_inputs = dict(model_inputs) if return_dict: return model_inputs else: return model_inputs["input_ids"] @staticmethod def _get_position_ids( processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, input_ids: torch.Tensor, attention_mask: torch.Tensor, multi_modal_inputs: Optional[dict[str, torch.Tensor]] = None, ) -> torch.Tensor: # special case for qwen2vl is_qwen2vl = ( hasattr(processing_class, "image_processor") and "Qwen2VLImageProcessor" in processing_class.image_processor.__class__.__name__ ) if is_qwen2vl: from verl.models.transformers.qwen2_vl import get_rope_index image_grid_thw = video_grid_thw = second_per_grid_ts = None if multi_modal_inputs: image_grid_thw = multi_modal_inputs.get("image_grid_thw") video_grid_thw = multi_modal_inputs.get("video_grid_thw") second_per_grid_ts = multi_modal_inputs.get("second_per_grid_ts") assert input_ids.dim() == 2 and input_ids.shape[0] == 1, ( f"input_ids should be 2D with batch size 1, but got shape {input_ids.shape}" ) assert attention_mask.dim() == 2 and attention_mask.shape[0] == 1, ( f"attention_mask should be 2D with batch size 1, but got 
shape {attention_mask.shape}" ) new_position_ids = get_rope_index( processing_class, input_ids=input_ids.squeeze(0), image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, second_per_grid_ts=second_per_grid_ts, attention_mask=attention_mask.squeeze(0), ) return new_position_ids # (3, seq_len) else: return compute_position_id_with_mask(attention_mask) # (1, seq_len) def _update_input_ids( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, new_input_ids: torch.Tensor, attention_mask: bool, loss_mask: bool, new_multi_modal_inputs: Optional[dict[str, torch.Tensor]] = None, ) -> None: """ Update the input_ids, attention_mask, position_ids, and loss_mask of the request in additive manner. """ self.input_ids = torch.cat([self.input_ids, new_input_ids], dim=-1) attention_mask = torch.ones_like(new_input_ids) * int(attention_mask) self.attention_mask = torch.cat([self.attention_mask, attention_mask], dim=-1) loss_mask = torch.ones_like(new_input_ids) * int(loss_mask) self.loss_mask = torch.cat([self.loss_mask, loss_mask], dim=-1) if new_multi_modal_inputs: self._update_multi_modal_inputs(new_multi_modal_inputs) new_position_ids = self._get_position_ids( processing_class, new_input_ids, attention_mask, new_multi_modal_inputs ) last_pos = self.position_ids[..., -1:] new_position_ids = new_position_ids + (last_pos + 1) self.position_ids = torch.cat([self.position_ids, new_position_ids], dim=-1) assert ( self.input_ids.shape[-1] == self.attention_mask.shape[-1] == self.position_ids.shape[-1] == self.loss_mask.shape[-1] ), f"""Request {self.request_id} has different length of {self.input_ids.shape[-1]=}, {self.attention_mask.shape[-1]=}, {self.position_ids.shape[-1]=}, {self.loss_mask.shape[-1]=}""" def _update_multi_modal_inputs(self, new_multi_modal_inputs: dict[str, torch.Tensor]) -> None: """ Update the multi_modal_inputs of the request in additive manner. 
""" for key in new_multi_modal_inputs: input_tensor = new_multi_modal_inputs[key] self.multi_modal_inputs[key] = ( torch.cat([self.multi_modal_inputs[key], input_tensor], dim=0) if key in self.multi_modal_inputs else input_tensor ) def get_generation_prompt_ids( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin ) -> list[int]: """ Get the generation prompt ids for rollout engine. Because rollout engine(SGLang) requires the ids to be a list, we need to convert the tensor to a list. """ generation_prompt_ids = ( None if self.input_ids[..., -self.generation_prompt_ids.shape[-1] :].eq(self.generation_prompt_ids).all() else self.generation_prompt_ids ) if generation_prompt_ids is not None: self._update_input_ids(processing_class, generation_prompt_ids, attention_mask=True, loss_mask=False) if self.use_inference_chat_template: messages = [msg.model_dump() for msg in self.messages] tools = [tool.model_dump() for tool in self.tool_schemas] if self.tool_schemas else None generation_prompt_ids = self._handle_apply_chat_template( processing_class, messages, multi_modal_data=self.multi_modal_data, tools=tools, add_generation_prompt=True, tokenize=True, ) return generation_prompt_ids.squeeze(0).tolist() else: return self.input_ids.squeeze(0).tolist() def add_user_message( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, content: str, ) -> None: self.messages.append(Message(role="user", content=content)) messages = [*BASE_CHAT_HISTORY, self.messages[-1]] tools = [tool.model_dump() for tool in self.tool_schemas] if self.tool_schemas else None # We don't need to pass multi_modal_data here because we don't have any multi-modal data from Engine # Inference, it is pure text. 
content_ids = self._handle_apply_chat_template( processing_class, messages, multi_modal_data={}, tools=tools, add_generation_prompt=False, tokenize=True )[..., self.base_conv_wo_gen_prompt_end_pos :] self._update_input_ids(processing_class, content_ids, attention_mask=True, loss_mask=False) def add_assistant_message( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, content: str, content_ids: Optional[torch.Tensor] = None, tool_calls: Optional[list[OpenAIFunctionToolCall]] = None, ) -> None: self.messages.append(Message(role="assistant", content=content, tool_calls=tool_calls)) if content_ids is None: messages = [*BASE_CHAT_HISTORY, self.messages[-1]] tools = [tool.model_dump() for tool in self.tool_schemas] if self.tool_schemas else None # We don't need to pass multi_modal_data here because we don't have any multi-modal data from Engine # Inference, it is pure text. content_ids = self._handle_apply_chat_template( processing_class, messages, multi_modal_data={}, tools=tools, add_generation_prompt=False, tokenize=True )[..., self.base_conv_with_gen_prompt_end_pos :] self._update_input_ids(processing_class, content_ids, attention_mask=True, loss_mask=True) def add_tool_response_messages( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, contents: list[ToolResponse], ) -> None: if not contents or all(content.is_empty() for content in contents): return # We also handle the case when tool returns image # We require the processing of the image and video to be done at tool.execute() level delta_multi_modal_data = {key: [] for key in self.multi_modal_keys} for content in contents: if content.is_text_only(): self.messages.append(Message(role="tool", content=content.text)) else: content_list = [] # When we update multi_model_keys, we also need to update this logic if content.image: content_list.extend([{"type": "image"} for _ in content.image]) delta_multi_modal_data["image"].extend(content.image) if 
content.video: content_list.extend([{"type": "video"} for _ in content.video]) delta_multi_modal_data["video"].extend(content.video) if content.text: content_list.append({"type": "text", "text": content.text}) self.messages.append(Message(role="tool", content=content_list)) messages = [*BASE_CHAT_HISTORY, *self.messages[-len(contents) :]] tools = [tool.model_dump() for tool in self.tool_schemas] if self.tool_schemas else None for key in self.multi_modal_keys: if len(delta_multi_modal_data[key]) > 0: self.multi_modal_data[key].extend(delta_multi_modal_data[key]) # We just passed the new multi-modal data to the chat template to update the input_ids. content_info = self._handle_apply_chat_template( processing_class, messages, multi_modal_data=delta_multi_modal_data, tools=tools, add_generation_prompt=False, tokenize=True, return_dict=True, ) content_ids = content_info["input_ids"][..., self.base_conv_wo_gen_prompt_end_pos :] # process multi_modal_inputs multi_modal_inputs = content_info.copy() multi_modal_inputs.pop("input_ids", None) multi_modal_inputs.pop("attention_mask", None) # chat templates include generation prompt tokens (e.g., "<im_start>assistant\n") # So when tool response is added, we need to explicitly remove these tokens. self._remove_generation_prompt_ids_if_present() self._update_input_ids( processing_class, content_ids, attention_mask=True, loss_mask=False, new_multi_modal_inputs=multi_modal_inputs, ) def update_metrics(self, metrics: Any, tool_id: str) -> None: """ metrics: should be a dict of tools_name -> Any """ if self.metrics.get(tool_id) is None: self.metrics[tool_id] = [] self.metrics[tool_id].append(metrics) def _get_prompt_diffs( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, full_prompt_ids: torch.Tensor, current_prompt_ids: torch.Tensor, diff_surrounding_chars: int = 10, ) -> list[dict[str, Any]]: """Get differences between full prompt and current prompt with surrounding context. 
This function helps debug tokenization mismatches by showing the differences between full prompt and current prompt with surrounding context. Instead of just showing the exact diff, it includes additional tokens before and after to help locate the issue in the chat template. For example, if the actual diff is a newline change from "\n\n" to "\n", with diff_surrounding_chars the output might look like: full_prompt_chunk: "<|im_start|>assistant\n\nI think..." current_prompt_chunk: "<|im_start|>assistant\nI think..." This context makes it much easier to identify where in the chat template the mismatch occurs. Args: processing_class: The processing class to use for decoding the token IDs full_prompt_ids: Token IDs from applying chat template to all messages at once current_prompt_ids: Token IDs from incremental chat template application diff_surrounding_chars: Number of surrounding characters to include for context (default: 10) Returns: List of dicts containing the differing chunks with context and their indices """ full_prompt_ids = full_prompt_ids.squeeze(0) current_prompt_ids = current_prompt_ids.squeeze(0) full_prompt = processing_class.decode(full_prompt_ids, skip_special_tokens=False) current_prompt = processing_class.decode(current_prompt_ids, skip_special_tokens=False) s = difflib.SequenceMatcher(None, full_prompt, current_prompt, autojunk=False) diffs = [] for tag, i1, i2, j1, j2 in s.get_opcodes(): if tag == "equal": continue # Get the surrounding context for better readability start_i = max(0, i1 - diff_surrounding_chars) end_i = min(len(full_prompt), i2 + diff_surrounding_chars) start_j = max(0, j1 - diff_surrounding_chars) end_j = min(len(current_prompt), j2 + diff_surrounding_chars) diffs.append( { "full_prompt_chunk": full_prompt[start_i:end_i], "current_prompt_chunk": current_prompt[start_j:end_j], "indices": (start_i, end_i, start_j, end_j), } ) return diffs def _remove_generation_prompt_ids_if_present(self) -> None: """ Remove generation prompt IDs 
from input tensors if they are present at the end. """ if self.input_ids[..., -self.generation_prompt_ids.shape[-1] :].eq(self.generation_prompt_ids).all(): self.input_ids = self.input_ids[..., : -self.generation_prompt_ids.shape[-1]] self.attention_mask = self.attention_mask[..., : -self.generation_prompt_ids.shape[-1]] self.position_ids = self.position_ids[..., : -self.generation_prompt_ids.shape[-1]] self.loss_mask = self.loss_mask[..., : -self.generation_prompt_ids.shape[-1]] def finalize( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, reward_scores: dict[str, list[float]], finish_reason_type: FinishReasonTypeEnum = FinishReasonTypeEnum.STOP, ) -> None: self.state = AsyncRolloutRequestStateEnum.COMPLETED self.reward_scores = reward_scores # In case we failed to generate the assistant message and the generation prompt ids were already added to # input_ids, remove them from the end of input_ids self._remove_generation_prompt_ids_if_present() self.response_ids = self.input_ids[..., self.prompt_ids.shape[-1] :] if self.tokenization_sanity_check_mode != TokenizationSanityCheckModeEnum.DISABLE: # When there is a diff, we log the diffs with diff_surrounding_chars context diff_surrounding_chars = 10 messages = [msg.model_dump() for msg in self.messages] tools = [tool.model_dump() for tool in self.tool_schemas] if self.tool_schemas else None full_prompt_info = self._handle_apply_chat_template( processing_class, messages, multi_modal_data=self.multi_modal_data, tools=tools, add_generation_prompt=False, tokenize=True, return_dict=True, ) full_prompt_ids = full_prompt_info["input_ids"] # We must use dict(full_prompt_info) to convert BatchFeature values to a new dict # because np.array() only keeps the keys for BatchFeature. 
full_prompt_multi_modal_inputs = full_prompt_info.copy() full_prompt_multi_modal_inputs.pop("input_ids", None) full_prompt_multi_modal_inputs.pop("attention_mask", None) for multi_modal_inputs_key in self.multi_modal_inputs: if multi_modal_inputs_key in full_prompt_multi_modal_inputs: if ( not self.multi_modal_inputs[multi_modal_inputs_key] .eq(full_prompt_multi_modal_inputs[multi_modal_inputs_key]) .all() ): logger.warning( f"Multi-modal data {multi_modal_inputs_key} is not consistent. " f"This may lead to unexpected behavior during training. " f"Please review your multi_modal_inputs logic." ) else: logger.warning( f"Multi-modal inputs key {multi_modal_inputs_key} is not found in the multi_modal_inputs. " f"This may lead to unexpected behavior during training." f"Please review your multi_modal_inputs logic." ) if diffs := self._get_prompt_diffs( processing_class, full_prompt_ids, self.input_ids, diff_surrounding_chars=diff_surrounding_chars ): log_warning = False if self.tokenization_sanity_check_mode == TokenizationSanityCheckModeEnum.STRICT: log_warning = True elif self.tokenization_sanity_check_mode == TokenizationSanityCheckModeEnum.IGNORE_STRIPPABLE: non_strippable_diffs_exist = any( d["full_prompt_chunk"].strip() or d["current_prompt_chunk"].strip() for d in diffs ) if non_strippable_diffs_exist: log_warning = True if log_warning: mode_str = f" ({self.tokenization_sanity_check_mode.value})" logger.warning( f"Inconsistent training and inference tokenization detected{mode_str}. This may lead to " f"unexpected behavior during training. Please review your chat template to determine if this " f"is intentional. For more information, refer to the multiturn README.md." ) logger.warning( f"Showing {diff_surrounding_chars} characters before and after the diffs for context and " f"better readability." 
) diff_details_list = [] for d in diffs: i1, i2, j1, j2 = d["indices"] diff_details_list.append( f"idx {i1}:{i2} -> {j1}:{j2} | full_prompt_chunk: {repr(d['full_prompt_chunk'])} | " f"current_prompt_chunk: {repr(d['current_prompt_chunk'])}" ) diff_details = "\n".join(diff_details_list) logger.warning(f"Found differences:\n{diff_details}") if finish_reason_type == FinishReasonTypeEnum.STOP: pass elif finish_reason_type == FinishReasonTypeEnum.LENGTH: pass else: raise ValueError(f"Unsupported finalize finish reason type: {finish_reason_type}") self.truncate_output_ids(processing_class) assert ( self.input_ids.shape[-1] == self.attention_mask.shape[-1] == self.position_ids.shape[-1] == self.loss_mask.shape[-1] ), f"""Request {self.request_id} has different length of {self.input_ids.shape[-1]=}, {self.attention_mask.shape[-1]=}, {self.position_ids.shape[-1]=}, {self.loss_mask.shape[-1]=}""" def truncate_output_ids( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin ) -> None: self.input_ids = self.input_ids[..., : self.max_model_len] self.attention_mask = self.attention_mask[..., : self.max_model_len] self.position_ids = self.position_ids[..., : self.max_model_len] self.loss_mask = self.loss_mask[..., : self.max_model_len] self.response_ids = self.input_ids[..., self.prompt_ids.shape[-1] :][..., : self.max_response_len] self.response_attention_mask = self.attention_mask[..., self.prompt_attention_mask.shape[-1] :][ ..., : self.max_response_len ] self.response_position_ids = self.position_ids[..., self.prompt_position_ids.shape[-1] :][ ..., : self.max_response_len ] self.response_loss_mask = self.loss_mask[..., self.prompt_loss_mask.shape[-1] :][..., : self.max_response_len]
verl__workers__rollout__schemas.py
# Copyright 2023-2024 SGLang Team
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import dataclasses
import json
import logging
import os
from typing import Any, Optional

import ray
import sglang
import sglang.srt.entrypoints.engine
import torch
from packaging import version
from ray.actor import ActorHandle
from sglang.srt.entrypoints.http_server import (
    ServerArgs,
    _GlobalState,
    _launch_subprocesses,
    app,
    set_global_state,
)
from sglang.srt.managers.io_struct import (
    GenerateReqInput,
    ReleaseMemoryOccupationReqInput,
    ResumeMemoryOccupationReqInput,
)
from sglang.srt.managers.tokenizer_manager import ServerStatus

from verl.single_controller.ray import RayClassWithInitArgs
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.device import get_visible_devices_keyword
from verl.utils.net_utils import get_free_port, is_valid_ipv6_address
from verl.utils.profiler import DistProfiler, build_sglang_profiler_args
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout.replica import RolloutMode, RolloutReplica, TokenOutput
from verl.workers.rollout.sglang_rollout.sglang_rollout import ServerAdapter, _set_envs_and_config
from verl.workers.rollout.utils import get_max_position_embeddings, run_unvicorn

logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)

visible_devices_keyword = get_visible_devices_keyword()


class SGLangHttpServer:
    """SGLang http server in single node, this is equivalent to launch server with command line:
    ```
    python -m sglang.launch_server --node-rank 0 --nnode 1 ...
    ```

    Args:
        config (RolloutConfig): rollout config.
        model_config (HFModelConfig): model config.
        rollout_mode (RolloutMode): rollout mode.
        workers (list[ActorHandle]): worker actors co-located with this server.
        replica_rank (int): replica rank, a replica may contain multiple nodes.
        node_rank (int): node rank.
        nnodes (int): number of nodes.
        cuda_visible_devices (str): cuda visible devices.
        base_gpu_id (int): first GPU id on this node assigned to the engine.
    """

    def __init__(
        self,
        config: RolloutConfig,
        model_config: HFModelConfig,
        rollout_mode: RolloutMode,
        workers: list[ActorHandle],
        replica_rank: int,
        node_rank: int,
        nnodes: int,
        cuda_visible_devices: str,
        base_gpu_id: int,
    ):
        print(f"SGLang http server: {rollout_mode=}, {replica_rank=}, {node_rank=}, {nnodes=}, {cuda_visible_devices=}")
        os.environ[visible_devices_keyword] = cuda_visible_devices
        self.config: RolloutConfig = omega_conf_to_dataclass(config)
        self.model_config: HFModelConfig = omega_conf_to_dataclass(model_config, dataclass_type=HFModelConfig)

        # max_model_len defaults to the model's max_position_embeddings and must not exceed it.
        max_position_embeddings = get_max_position_embeddings(self.model_config.hf_config)
        if self.config.max_model_len is None:
            self.config.max_model_len = max_position_embeddings
        else:
            if self.config.max_model_len > max_position_embeddings:
                raise ValueError(
                    f"max_model_len ({self.config.max_model_len}) should be less than or equal to "
                    f"max_position_embeddings ({max_position_embeddings})"
                )

        self.rollout_mode = rollout_mode
        self.workers = workers
        self.replica_rank = replica_rank
        self.node_rank = node_rank
        self.nnodes = nnodes
        self.base_gpu_id = base_gpu_id

        # Dummy weights are only meaningful in hybrid mode (weights are synced in later);
        # otherwise load real weights.
        if self.rollout_mode != RolloutMode.HYBRID and self.config.load_format == "dummy":
            logger.warning(f"rollout mode is {self.rollout_mode}, load_format is dummy, set to auto")
            self.config.load_format = "auto"

        # used for http server
        self._server_address = ray.util.get_node_ip_address().strip("[]")
        self._server_port = None

        # used for controlling sglang server profiler
        profiler_config = self.config.profiler
        tool_config = None
        if profiler_config is not None:
            if profiler_config.tool in ["torch", "npu"]:
                tool_config = omega_conf_to_dataclass((profiler_config.tool_config or {}).get(profiler_config.tool))
            else:
                logger.warning(f"agent loop only support torch and npu profiler, got {profiler_config.tool}")
                profiler_config = None
        self.profiler_controller = DistProfiler(self.replica_rank, config=profiler_config, tool_config=tool_config)

        # used for NCCL process group; only the master node (node_rank == 0) binds a port.
        if self.node_rank == 0:
            self._master_address = self._server_address
            # Keep the socket alive so the reserved port cannot be grabbed by another process.
            self._master_port, self._master_sock = get_free_port(self._server_address)
            logger.info(
                f"SGLangHttpServer, replica_rank: {self.replica_rank}, "
                f"master address: {self._master_address}, port: {self._master_port}"
            )
        else:
            self._master_address = None
            self._master_port = None

    def get_master_address(self):
        """Get master address and port for init NCCL process group."""
        return self._master_address, self._master_port

    def get_server_address(self):
        """Get http server address and port."""
        assert self._server_port is not None, "http server is not launched, port is None"
        return self._server_address, self._server_port

    async def launch_server(self, master_address: str = None, master_port: int = None):
        """Launch the SGLang engine subprocesses and, on the master node, the http server.

        Args:
            master_address (str, optional): master node address; required on non-master nodes.
            master_port (int, optional): master node port; required on non-master nodes.
        """
        if self.node_rank != 0:
            assert master_address and master_port, "non-master node should provide master address and port"
            self._master_address = master_address
            self._master_port = master_port

        engine_kwargs = self.config.get("engine_kwargs", {}).get("sglang", {}) or {}
        attention_backend = engine_kwargs.pop("attention_backend", None)

        quantization = self.config.get("quantization", None)
        if quantization is not None:
            if quantization == "fp8":
                assert version.parse(sglang.__version__) >= version.parse("0.5.5"), (
                    "sglang>=0.5.5 is required for FP8 quantization"
                )
                FP8_BLOCK_QUANT_KWARGS = {
                    "activation_scheme": "dynamic",
                    "fmt": "e4m3",
                    "quant_method": "fp8",
                    "weight_block_size": [128, 128],
                }
                fp8_block_quant_kwargs = dict(FP8_BLOCK_QUANT_KWARGS)
            else:
                raise ValueError(f"Currently only support fp8 quantization, got: {quantization}")

        dist_init_addr = (
            f"[{self._master_address}]:{self._master_port}"
            if is_valid_ipv6_address(self._master_address)
            else f"{self._master_address}:{self._master_port}"
        )
        infer_tp = self.config.tensor_model_parallel_size * self.config.data_parallel_size
        args = {
            "model_path": self.model_config.local_path,
            "dtype": self.config.dtype,
            "mem_fraction_static": self.config.gpu_memory_utilization,
            "disable_cuda_graph": self.config.enforce_eager,
            "enable_memory_saver": True,
            "base_gpu_id": self.base_gpu_id,
            "gpu_id_step": 1,
            "tp_size": infer_tp,
            "dp_size": self.config.data_parallel_size,
            "ep_size": self.config.expert_parallel_size,
            "node_rank": self.node_rank,
            "load_format": self.config.load_format,
            "dist_init_addr": dist_init_addr,
            "nnodes": self.nnodes,
            "trust_remote_code": self.model_config.trust_remote_code,
            "max_running_requests": self.config.get("max_num_seqs", None),
            "log_level": "error",
            "mm_attention_backend": "fa3",
            "attention_backend": attention_backend if attention_backend is not None else "fa3",
            "skip_tokenizer_init": self.config.skip_tokenizer_init,
            "skip_server_warmup": True,
            "quantization": quantization,
            "json_model_override_args": json.dumps({"quantization_config": fp8_block_quant_kwargs})
            if quantization == "fp8"
            else json.dumps({}),
            **engine_kwargs,
        }

        if self.config.prometheus.enable:
            if self.config.prometheus.served_model_name:
                # Extract model name from path if it's a full path
                served_model_name = self.config.prometheus.served_model_name
                if "/" in served_model_name:
                    # If it's a full path, extract the last part as model name
                    served_model_name = served_model_name.split("/")[-1]
                args["served_model_name"] = served_model_name
            # start sglang metrics
            args["enable_metrics"] = True

        # enable_weights_cpu_backup is supported in sglang>=0.5.3
        if any(f.name == "enable_weights_cpu_backup" for f in dataclasses.fields(ServerArgs)):
            # Only colocated mode needs CPU backup of weights across sleep/wake cycles.
            args["enable_weights_cpu_backup"] = self.rollout_mode == RolloutMode.COLOCATED

        if self.config.enable_rollout_routing_replay:
            args["enable_return_routed_experts"] = True

        # mtp
        if self.config.mtp.enable and self.config.mtp.enable_rollout:
            # MTP (speculative) rollout requires sglang >= 0.5.6.
            # NOTE: use version.parse — plain string comparison is lexicographic and
            # would e.g. treat "0.5.10" as older than "0.5.6".
            if version.parse(sglang.__version__) < version.parse("0.5.6"):
                raise ValueError(f"sglang version {sglang.__version__} is not supported for MTP rollout")
            args["speculative_algorithm"] = self.config.mtp.speculative_algorithm
            args["speculative_num_steps"] = self.config.mtp.speculative_num_steps
            args["speculative_eagle_topk"] = self.config.mtp.speculative_eagle_topk
            args["speculative_num_draft_tokens"] = self.config.mtp.speculative_num_draft_tokens
            args["enable_weights_cpu_backup"] = True
            args["enable_draft_weights_cpu_backup"] = True

        # NOTE: We can't directly call SGLang's launch_server since it's not an async function.
        # https://github.com/sgl-project/sglang/blob/main/python/sglang/srt/entrypoints/http_server.py
        sglang.srt.entrypoints.engine._set_envs_and_config = _set_envs_and_config
        os.environ["SGLANG_BLOCK_NONZERO_RANK_CHILDREN"] = "0"
        server_args = ServerArgs(**args)
        # sglang >= 0.5.7 changed the _launch_subprocesses signature to take the
        # tokenizer/scheduler/detokenizer entry points explicitly.
        if version.parse(sglang.__version__) >= version.parse("0.5.7"):
            self.tokenizer_manager, self.template_manager, self.scheduler_info, *_ = _launch_subprocesses(
                server_args=server_args,
                init_tokenizer_manager_func=sglang.srt.entrypoints.engine.init_tokenizer_manager,
                run_scheduler_process_func=sglang.srt.entrypoints.engine.run_scheduler_process,
                run_detokenizer_process_func=sglang.srt.entrypoints.engine.run_detokenizer_process,
            )
        else:
            self.tokenizer_manager, self.template_manager, self.scheduler_info, *_ = _launch_subprocesses(
                server_args=server_args
            )

        # In multi-node cases, non-zero rank nodes should not launch http server.
        if self.node_rank > 0:
            return

        set_global_state(
            _GlobalState(
                tokenizer_manager=self.tokenizer_manager,
                template_manager=self.template_manager,
                scheduler_info=self.scheduler_info,
            )
        )
        app.is_single_tokenizer_mode = True

        # Set warmup_thread_{kw}args to avoid AttributeError in lifespan function
        app.server_args = server_args
        app.warmup_thread_kwargs = {"server_args": server_args}
        app.warmup_thread_args = (server_args, None, None)

        # Manually add Prometheus middleware before starting server
        # This ensures /metrics endpoint is available immediately
        if server_args.enable_metrics:
            from sglang.srt.utils.common import add_prometheus_middleware

            add_prometheus_middleware(app)

        self._server_port, self._server_task = await run_unvicorn(app, server_args, self._server_address)
        self.tokenizer_manager.server_status = ServerStatus.Up

    async def wake_up(self):
        """Resume engine memory occupation (KV cache + weights) on the master node."""
        if self.node_rank != 0:
            return
        if self.rollout_mode == RolloutMode.HYBRID:
            # In hybrid mode, rollout is wake up in `update_weights`
            raise ValueError(f"wake_up not support rollout_mode {self.rollout_mode}")
        elif self.rollout_mode == RolloutMode.COLOCATED:
            # Directly call engine to wake up without sync weights.
            obj = ResumeMemoryOccupationReqInput(tags=["kv_cache", "weights"])
            await self.tokenizer_manager.resume_memory_occupation(obj, None)
            await self.tokenizer_manager.flush_cache()
        elif self.rollout_mode == RolloutMode.STANDALONE:
            logger.info("skip wake_up in standalone mode")

    async def sleep(self):
        """Release engine memory occupation (KV cache + weights) on the master node."""
        if self.node_rank != 0 or not self.config.free_cache_engine:
            return
        if self.rollout_mode == RolloutMode.HYBRID:
            obj = ReleaseMemoryOccupationReqInput(tags=["kv_cache", "weights"])
            await self.tokenizer_manager.release_memory_occupation(obj, None)
        elif self.rollout_mode == RolloutMode.COLOCATED:
            obj = ReleaseMemoryOccupationReqInput(tags=["kv_cache", "weights"])
            await self.tokenizer_manager.release_memory_occupation(obj, None)
        elif self.rollout_mode == RolloutMode.STANDALONE:
            logger.info("skip sleep in standalone mode")

    async def clear_kv_cache(self):
        """Flush the engine KV cache (master node only)."""
        if self.node_rank == 0:
            await self.tokenizer_manager.flush_cache()

    async def generate(
        self,
        prompt_ids: torch.Tensor,
        sampling_params: dict[str, Any],
        request_id: str,
        image_data: Optional[list[Any]] = None,
        video_data: Optional[list[Any]] = None,
    ) -> TokenOutput:
        """Generate sequence with token-in-token-out.

        Args:
            prompt_ids: prompt token ids.
            sampling_params: sampling params; `max_new_tokens`/`max_tokens`/`logprobs`
                are popped and handled here, the rest is forwarded to the engine.
            request_id: unique request id.
            image_data: optional multi-modal image inputs.
            video_data: currently unused — video input is not supported by sglang yet.

        Returns:
            TokenOutput with token ids, optional per-token log probs, and
            optional routed-experts tensor for routing replay.
        """
        # TODO(@wuxibin): switch to `/generate` http endpoint once multi-modal support ready.
        max_possible_tokens = self.config.max_model_len - len(prompt_ids)
        if max_possible_tokens < 0:
            raise ValueError(
                f"Prompt length ({len(prompt_ids)}) exceeds the model's maximum context length "
                f"({self.config.max_model_len})."
            )
        if "max_new_tokens" in sampling_params:
            max_new_tokens = sampling_params.pop("max_new_tokens")
        elif "max_tokens" in sampling_params:
            # support vllm-style 'max_tokens' param
            max_new_tokens = sampling_params.pop("max_tokens")
        else:
            max_new_tokens = self.config.response_length + self.config.prompt_length - len(prompt_ids)

        # Clamp max_new_tokens to the valid range [0, max_possible_tokens]
        max_new_tokens = max(0, min(max_new_tokens, max_possible_tokens))
        assert max_new_tokens <= max_possible_tokens, (
            f"max_new_tokens {max_new_tokens} exceeds available context space {max_possible_tokens}"
        )
        sampling_params["max_new_tokens"] = max_new_tokens
        return_logprob = sampling_params.pop("logprobs", False)

        request = {
            "rid": request_id,
            "input_ids": prompt_ids,
            "sampling_params": sampling_params,
            "return_logprob": return_logprob,
            "image_data": image_data,
            # TODO: support video input for sglang
            # video_data=video_data,
        }
        if self.config.enable_rollout_routing_replay:
            request["return_routed_experts"] = True
        generate_request = GenerateReqInput(**request)
        output = await self.tokenizer_manager.generate_request(generate_request, None).__anext__()

        if return_logprob:
            output_token_logprobs = output["meta_info"]["output_token_logprobs"]
            log_probs, token_ids = zip(
                *[(log_prob, token_ids) for log_prob, token_ids, _ in output_token_logprobs], strict=True
            )
        else:
            token_ids = output["output_ids"]
            log_probs = None

        routed_experts = None
        if self.config.enable_rollout_routing_replay:
            if self.config.skip_tokenizer_init:
                routed_experts = output.get("meta_info", {}).get("routed_experts", None)
            else:
                from sglang.srt.layers.moe.routed_experts_capturer import extract_routed_experts_from_meta_info

                hf_config = self.model_config.hf_config
                if not hasattr(hf_config, "num_hidden_layers") or not hasattr(hf_config, "num_experts_per_tok"):
                    raise AttributeError(
                        "enable_rollout_routing_replay is set, but hf_config is missing "
                        "'num_hidden_layers' or 'num_experts_per_tok'. This feature requires an MoE model "
                        "configuration that defines these attributes."
                    )
                routed_experts = extract_routed_experts_from_meta_info(output).reshape(
                    -1, hf_config.num_hidden_layers, hf_config.num_experts_per_tok
                )

        return TokenOutput(token_ids=token_ids, log_probs=log_probs, routed_experts=routed_experts)

    async def start_profile(self, **kwargs):
        """Start the sglang server profiler if this rank is selected and in discrete mode."""
        if (
            self.profiler_controller.check_enable()
            and self.profiler_controller.check_this_rank()
            and self.profiler_controller.is_discrete_mode()
        ):
            profile_args = build_sglang_profiler_args(
                self.profiler_controller.config, self.profiler_controller.tool_config, self.replica_rank
            )
            await self.tokenizer_manager.start_profile(**profile_args)

    async def stop_profile(self):
        """Stop the sglang server profiler if this rank is selected and in discrete mode."""
        if (
            self.profiler_controller.check_enable()
            and self.profiler_controller.check_this_rank()
            and self.profiler_controller.is_discrete_mode()
        ):
            await self.tokenizer_manager.stop_profile()


_rollout_worker_actor_cls = ray.remote(ServerAdapter)


class SGLangReplica(RolloutReplica):
    """A rollout replica backed by SGLang http servers, one server actor per node."""

    def __init__(
        self,
        replica_rank: int,
        config: RolloutConfig,
        model_config: HFModelConfig,
        gpus_per_node: int = 8,
        is_reward_model: bool = False,
    ):
        super().__init__(replica_rank, config, model_config, gpus_per_node, is_reward_model)
        self.server_class = ray.remote(SGLangHttpServer)

    def get_ray_class_with_init_args(self) -> RayClassWithInitArgs:
        """Get rollout worker actor class for colocated and standalone mode."""
        worker_dict_cls = RayClassWithInitArgs(
            cls=_rollout_worker_actor_cls,
            config=self.config,
            model_config=self.model_config,
            device_mesh=None,
        )
        return worker_dict_cls

    async def launch_servers(self):
        """Launch http server in each node."""
        assert len(self.workers) == self.world_size, (
            f"worker number {len(self.workers)} not equal to world size {self.world_size}"
        )

        # get (node_id, CUDA_VISIBLE_DEVICES) of all workers
        worker_infos = await asyncio.gather(
            *[
                worker.__ray_call__.remote(
                    lambda self: (ray.get_runtime_context().get_node_id(), os.environ[visible_devices_keyword])
                )
                for worker in self.workers
            ]
        )
        worker_cuda_visible_devices = [worker_info[1] for worker_info in worker_infos]
        worker_node_ids = [worker_info[0] for worker_info in worker_infos]

        base_gpu_id = 0
        infer_tp = self.config.tensor_model_parallel_size * self.config.data_parallel_size
        replica_world_size = infer_tp * self.config.pipeline_model_parallel_size
        if os.environ.get(f"RAY_EXPERIMENTAL_NOSET_{visible_devices_keyword}", None):
            logger.warning(f"RAY_EXPERIMENTAL_NOSET_{visible_devices_keyword} is set True!")
            # When ray does not mask devices, each replica must offset into the node's GPUs.
            base_gpu_id = (0 + self.replica_rank * replica_world_size) % self.gpus_per_node

        # create server actor in each node with node affinity and cuda visible devices
        for node_rank in range(self.nnodes):
            workers = self.workers[
                node_rank * self.gpus_per_replica_node : (node_rank + 1) * self.gpus_per_replica_node
            ]
            node_cuda_visible_devices_set = worker_cuda_visible_devices[
                node_rank * self.gpus_per_replica_node : (node_rank + 1) * self.gpus_per_replica_node
            ]
            # Union of all workers' device lists on this node, numerically sorted.
            node_cuda_visible_devices = ",".join(
                map(
                    str,
                    sorted(
                        set(
                            int(device)
                            for worker_devices_set in node_cuda_visible_devices_set
                            for device in worker_devices_set.split(",")
                            if device.strip()
                        )
                    ),
                )
            )
            node_id = worker_node_ids[node_rank * self.gpus_per_replica_node]
            name = (
                f"sglang_server_{self.replica_rank}_{node_rank}"
                if not self.is_reward_model
                else f"sglang_server_reward_{self.replica_rank}_{node_rank}"
            )
            server = self.server_class.options(
                scheduling_strategy=ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy(
                    node_id=node_id,
                    soft=False,
                ),
                runtime_env={"env_vars": {f"RAY_EXPERIMENTAL_NOSET_{visible_devices_keyword}": "1"}},
                name=name,
            ).remote(
                config=self.config,
                model_config=self.model_config,
                rollout_mode=self.rollout_mode,
                workers=workers,
                replica_rank=self.replica_rank,
                node_rank=node_rank,
                nnodes=self.nnodes,
                cuda_visible_devices=node_cuda_visible_devices,
                base_gpu_id=base_gpu_id,
            )
            self.servers.append(server)

        # launch http server in each node
        master_address, master_port = await self.servers[0].get_master_address.remote()
        await asyncio.gather(
            *[
                server.launch_server.remote(master_address=master_address, master_port=master_port)
                for server in self.servers
            ]
        )

        # get http server address from first server
        server_address, server_port = await self.servers[0].get_server_address.remote()
        self._server_handle = self.servers[0]
        self._server_address = (
            f"[{server_address}]:{server_port}"
            if is_valid_ipv6_address(server_address)
            else f"{server_address}:{server_port}"
        )
verl__workers__rollout__sglang_rollout__async_sglang_server.py
# Copyright 2025 z.ai # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This file is adapted from multiple sources: # 1. THUDM/slime project # Original source: https://github.com/THUDM/slime/blob/main/slime/backends/sglang_utils/http_server_engine.py # Copyright 2025 z.ai # Licensed under the Apache License, Version 2.0 # 2. SGLang project # Original source: https://github.com/sgl-project/sglang/blob/main/python/sglang/srt/entrypoints/http_server_engine.py # Copyright 2023-2024 SGLang Team # Licensed under the Apache License, Version 2.0 # # Modifications made by z.ai and ModelBest Inc. include but are not limited to: # - Enhanced error handling and retry logic # - Added async support with connection pooling # - Extended functionality for distributed weight updates # - Improved logging and monitoring capabilities # - Additional configuration options and optimizations """HTTP Server Engine Adapter for SGLang. This module provides HTTP-based adapters for SGLang engines, allowing communication with SGLang servers through HTTP requests instead of direct engine calls. 
Classes: HttpServerAdapter: Synchronous HTTP adapter for SGLang engines AsyncHttpServerAdapter: Asynchronous HTTP adapter for SGLang engines Functions: launch_server_process: Launch and initialize an SGLang HTTP server process """ import asyncio import logging import multiprocessing import os import time from contextlib import asynccontextmanager from typing import Any, Callable, Optional import aiohttp import requests from sglang.srt.entrypoints.EngineBase import EngineBase from sglang.srt.entrypoints.http_server import launch_server from sglang.srt.managers.io_struct import ( UpdateWeightsFromTensorReqInput, ) from sglang.srt.server_args import ServerArgs from sglang.srt.utils import kill_process_tree # Configure logger logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) # Default configuration constants DEFAULT_TIMEOUT = 60.0 DEFAULT_MAX_ATTEMPTS = 3 DEFAULT_RETRY_DELAY = 2.0 DEFAULT_MAX_CONNECTIONS = 2000 DEFAULT_MAX_WAIT_TIME = 300.0 def _read_response(response: requests.Response): if response.status_code == 204 or not response.content: return {} try: return response.json() except ValueError: return { "content_type": response.headers.get("Content-Type", ""), "text": response.text, } async def _read_async_response(resp: aiohttp.ClientResponse) -> dict[str, Any]: if resp.status == 204 or (resp.content_length == 0): return {} try: return await resp.json(content_type=None) except Exception: try: text = await resp.text() except Exception: return {} return { "content_type": (resp.headers.get("Content-Type") or ""), "text": text, } def launch_server_process( server_args: ServerArgs, timeout: float = DEFAULT_TIMEOUT, max_wait_time=DEFAULT_MAX_WAIT_TIME, first_rank_in_node=False, ) -> multiprocessing.Process: """Launch an SGLang HTTP server process and wait for it to be ready. This function starts a new process running an SGLang HTTP server, then waits for the server to become ready by polling its health endpoints. 
It ensures the server is fully operational before returning. Args: server_args (ServerArgs): Server configuration arguments including host, port, and other settings timeout (float, optional): Timeout for individual HTTP requests during health checks. Defaults to DEFAULT_TIMEOUT. Returns: multiprocessing.Process: The launched multiprocessing.Process instance Raises: RuntimeError: If the server process terminates unexpectedly during startup or cache flush TimeoutError: If server fails to become ready within reasonable time (300 seconds) requests.RequestException: If health check requests fail repeatedly Note: This function will return immediately for non-master nodes (node_rank != 0), but the process will still be started and returned. This is for consistency; except for the process obtained by node_rank = 0, other processes have no actual effect. """ p = multiprocessing.Process(target=launch_server, args=(server_args,)) if server_args.node_rank != 0 or not first_rank_in_node: logger.info(f"Server process started with PID {p.pid} for node rank {server_args.node_rank}", flush=True) return p p.start() base_url = server_args.url() headers = { "Content-Type": "application/json; charset=utf-8", "Authorization": f"Bearer {server_args.api_key}", } # Health check with overall timeout start_time = time.time() with requests.Session() as session: while time.time() - start_time < max_wait_time: if not p.is_alive(): raise RuntimeError("Server process terminated unexpectedly during startup") try: if server_args.is_embedding: response = session.get(f"{base_url}/health", headers=headers, timeout=timeout) else: response = session.get(f"{base_url}/health_generate", headers=headers, timeout=timeout) if response.status_code == 200: break except requests.RequestException as e: logger.debug(f"Health check failed: {e}") time.sleep(2) else: p.terminate() logger.error(f"Server in {base_url} failed to become healthy within timeout period") raise TimeoutError("Server failed to become healthy 
within timeout period") # Ensure cache is ready while time.time() - start_time < max_wait_time: if not p.is_alive(): raise RuntimeError("Server process terminated unexpectedly during cache flush") try: response = session.get(f"{base_url}/flush_cache", headers=headers, timeout=timeout) if response.status_code == 200: break except requests.RequestException as e: logger.debug(f"Cache flush check failed: {e}") time.sleep(2) else: p.terminate() raise TimeoutError("Server cache flush failed within timeout period") return p class HttpServerAdapter(EngineBase): """HTTP-based adapter for SGLang engines. This adapter allows interaction with SGLang engines through HTTP requests instead of direct engine calls. It launches an HTTP server process and provides methods to communicate with it via REST API calls. You can use this class to launch a server from a HttpServerAdapter instance. We recommend using this class only when you need to use http server. Otherwise, you can use Engine directly. Attributes: router_ip (Optional[str]): IP address of the router for worker registration router_port (Optional[int]): Port of the router for worker registration server_args (ServerArgs): Server configuration arguments node_rank (int): Rank of this node in distributed setup process (multiprocessing.Process): The launched server process timeout (float): HTTP request timeout in seconds max_attempts (int): Maximum number of attempts for requests retry_delay (float): Base delay between retries in seconds """ def __init__( self, router_ip: Optional[str] = None, router_port: Optional[int] = None, timeout: float = DEFAULT_TIMEOUT, max_attempts: int = DEFAULT_MAX_ATTEMPTS, retry_delay: float = DEFAULT_RETRY_DELAY, first_rank_in_node: bool = False, max_start_wait_time: float = DEFAULT_MAX_WAIT_TIME, launch_server: bool = True, **kwargs: Any, ) -> None: """Initialize the HTTP server engine adapter. Args: router_ip (Optional[str], optional): IP address of router for worker registration. Defaults to None. 
router_port (Optional[int], optional): Port of router for worker registration. Defaults to None. timeout (float, optional): HTTP request timeout in seconds. Defaults to DEFAULT_TIMEOUT. max_attempts (int, optional): Maximum number of retry attempts for failed requests. Defaults to DEFAULT_MAX_ATTEMPTS. retry_delay (float, optional): Base delay between retries in seconds. Defaults to DEFAULT_RETRY_DELAY. launch_server (bool, optional): Whether to launch the server process. Defaults to True. **kwargs (Any): Additional arguments passed to ServerArgs Note: TODO: @ChangyiYang Enable SGLang router for this http server engine If both router_ip and router_port are provided and this is the master node (node_rank == 0), the adapter will automatically register with the router. """ self.router_ip: Optional[str] = router_ip self.router_port: Optional[int] = router_port self.timeout: float = timeout self.max_attempts: int = max_attempts self.retry_delay: float = retry_delay self.server_args: ServerArgs = ServerArgs(**kwargs) self.node_rank: int = self.server_args.node_rank self.max_start_wait_time: float = max_start_wait_time logger.info( f"Launch HttpServerAdapter at: {self.server_args.host}:{self.server_args.port} with {first_rank_in_node}" ) if launch_server: self.process: multiprocessing.Process = launch_server_process( self.server_args, self.timeout, self.max_start_wait_time, first_rank_in_node ) if self.node_rank == 0 and self.router_ip and self.router_port: self._register_with_router() def _register_with_router(self) -> None: """Register worker with router with error handling. This method attempts to register the current worker with a router service. If registration fails, it logs an error but does not raise an exception, allowing the server to continue operating without router integration. Raises: Does not raise exceptions - all errors are logged and handled gracefully. 
""" try: url = f"http://{self.router_ip}:{self.router_port}/add_worker" params = {"url": f"http://{self.server_args.host}:{self.server_args.port}"} response = requests.post(url, params=params, timeout=self.timeout) response.raise_for_status() logger.info("Successfully registered with router") except Exception as e: logger.error(f"Failed to register with router: {e}") # Don't raise here - server can still work without router def _make_request( self, endpoint: str, payload: Optional[dict[str, Any]] = None, method: str = "POST", timeout: float = DEFAULT_TIMEOUT, only_master: bool = True, ) -> dict[str, Any]: """Make a HTTP request with retry logic and consistent error handling. Args: endpoint (str): The API endpoint to call (without leading slash) payload (Optional[Dict[str, Any]], optional): The JSON payload to send. Defaults to empty dict if None. method (str, optional): HTTP method to use. Defaults to "POST". Returns: Dict[str, Any]: The JSON response from the server Raises: requests.HTTPError: If the HTTP request fails with a client/server error RuntimeError: If all retry attempts are exhausted Note: - For non-master nodes (node_rank != 0), returns empty dict immediately - Uses exponential backoff for retries - Logs warnings for timeout and connection errors, errors for HTTP errors """ if only_master and self.node_rank != 0: return {} url = f"http://{self.server_args.host}:{self.server_args.port}/{endpoint}" for attempt in range(self.max_attempts): try: if method.upper() == "GET": response = requests.get(url, timeout=self.timeout) else: response = requests.post(url, json=payload or {}, timeout=self.timeout) response.raise_for_status() return _read_response(response) except requests.exceptions.Timeout: logger.warning(f"Request to {endpoint} timed out (attempt {attempt + 1})") except requests.exceptions.ConnectionError: logger.warning(f"Connection error for {endpoint} (attempt {attempt + 1})") except requests.exceptions.HTTPError as e: logger.error(f"HTTP error for 
{endpoint}: {e}") raise except Exception as e: logger.error(f"Unexpected error for {endpoint}: {e}") if attempt == self.max_attempts - 1: raise if attempt < self.max_attempts - 1: time.sleep(self.retry_delay * (2**attempt)) raise RuntimeError(f"Failed to complete request to {endpoint} after {self.max_attempts} attempts") def update_weights_from_tensor(self, req: UpdateWeightsFromTensorReqInput) -> dict[str, Any]: """Update model weights from tensor data. The HTTP server will only post meta data, and the real weights will be copied directly from GPUs. Args: serialized_named_tensors (List[str]): List of serialized tensor data load_format (Optional[str], optional): Format specification for loading weights. Defaults to None. flush_cache (bool, optional): Whether to flush cache after updating weights. Defaults to False. Returns: Dict[str, Any]: Server response containing update status Note: The model should be on GPUs rather than CPU for this functionality to work properly. If you encounter issues, ensure your model is loaded on GPU devices rather than CPU. """ import base64 named_tensors = req.serialized_named_tensors load_format = req.load_format flush_cache = req.flush_cache if named_tensors: serialized_named_tensors = [ base64.b64encode(named_tensor).decode("utf-8") for named_tensor in named_tensors ] else: serialized_named_tensors = [] return self._make_request( "update_weights_from_tensor", { "serialized_named_tensors": serialized_named_tensors, "load_format": load_format, "flush_cache": flush_cache, }, ) def shutdown(self) -> None: """Shutdown the HTTP server and clean up resources. This method performs the following cleanup operations: 1. Unregisters the worker from the router (if configured) 2. Terminates the server process tree All operations are performed with error handling to ensure graceful shutdown even if individual steps fail. Note: This method should be called when the adapter is no longer needed to ensure proper cleanup of resources and processes. 
""" # Unregister from router if self.router_ip and self.router_port: try: url = f"http://{self.router_ip}:{self.router_port}/remove_worker" params = {"url": f"http://{self.server_args.host}:{self.server_args.port}"} requests.post(url, params=params, timeout=5.0) # Short timeout for shutdown logger.info("Successfully unregistered from router") except Exception as e: logger.warning(f"Failed to unregister from router: {e}") # Kill server process if hasattr(self, "process") and self.process is not None: try: kill_process_tree(self.process.pid) logger.info("Server process terminated") except Exception as e: logger.error(f"Failed to terminate server process: {e}") def generate( self, prompt: Optional[str] = None, sampling_params: Optional[dict[str, Any]] = None, input_ids: Optional[list[int]] = None, image_data: Optional[Any] = None, return_logprob: bool = False, logprob_start_len: Optional[int] = None, top_logprobs_num: Optional[int] = None, token_ids_logprob: Optional[list[int]] = None, lora_path: Optional[str] = None, custom_logit_processor: Optional[Callable] = None, ) -> dict[str, Any]: """Generate text using the SGLang server. Args: prompt (Optional[str], optional): Text prompt for generation. Defaults to None. sampling_params (Optional[Dict[str, Any]], optional): Parameters controlling text generation sampling. Defaults to None. input_ids (Optional[List[int]], optional): Alternative to prompt, direct token IDs input. Defaults to None. image_data (Optional[Any], optional): Image data for multimodal generation. Defaults to None. return_logprob (bool, optional): Whether to return log probabilities. Defaults to False. logprob_start_len (Optional[int], optional): Starting length for log probability calculation. Defaults to None. top_logprobs_num (Optional[int], optional): Number of top log probabilities to return. Defaults to None. token_ids_logprob (Optional[List[int]], optional): Specific token IDs for log probability calculation. Defaults to None. 
lora_path (Optional[str], optional): Path to LoRA adapter weights. Defaults to None. custom_logit_processor (Optional[Callable], optional): Custom logit processing function. Defaults to None. Returns: Dict[str, Any]: Generated text and associated metadata from the server Note: Either prompt or input_ids should be provided, but not both. The response format depends on the server configuration and parameters. """ payload = { "text": prompt, "sampling_params": sampling_params, "input_ids": input_ids, "image_data": image_data, "return_logprob": return_logprob, "logprob_start_len": logprob_start_len, "top_logprobs_num": top_logprobs_num, "token_ids_logprob": token_ids_logprob, "lora_path": lora_path, "custom_logit_processor": custom_logit_processor, } # Filter out None values payload = {k: v for k, v in payload.items() if v is not None} return self._make_request("generate", payload, only_master=False) def reward_score( self, prompt: Optional[str] = None, input_ids: Optional[list[int]] = None, image_data: Optional[Any] = None, lora_path: Optional[str] = None, ) -> dict[str, Any]: assert self.server_args.is_embedding, "Score is only supported for embedding models" payload = { "text": prompt, "input_ids": input_ids, "image_data": image_data, "lora_path": lora_path, } # Filter out None values payload = {k: v for k, v in payload.items() if v is not None} return self._make_request("classify", payload, only_master=False) def flush_cache(self) -> dict[str, Any]: """Flush the cache of the server. This method repeatedly attempts to flush the server cache until successful. The flush operation will not return status 200 when there are pending requests. Returns: Dict[str, Any]: Server response indicating cache flush status. For non-master nodes, returns empty dict. Note: Uses retry logic with limited attempts (max_attempts * 2) to avoid infinite loops. Each retry includes a delay to allow pending requests to complete. 
""" if self.node_rank != 0: return {} # Use retry logic with limited attempts to avoid infinite loops for attempt in range(self.max_attempts * 2): # Allow more retries for cache flush try: response = requests.get( f"http://{self.server_args.host}:{self.server_args.port}/flush_cache", timeout=self.timeout ) if response.status_code == 200: return _read_response(response) except Exception as e: logger.warning(f"Error flushing cache (attempt {attempt + 1}): {e}") time.sleep(self.retry_delay) logger.error("Failed to flush cache after maximum attempts") return {} def release_memory_occupation(self, tags: Optional[list[str]] = None) -> dict[str, Any]: """Release GPU memory occupation temporarily. Args: tags (Optional[List[str]], optional): List of tags to specify which memory to release. If None, releases all memory. Defaults to None. ["weights", "kv_cache"] Returns: Dict[str, Any]: Server response indicating memory release status """ return self._make_request("release_memory_occupation", {"tags": tags}) def resume_memory_occupation(self, tags: Optional[list[str]] = None) -> dict[str, Any]: """Resume GPU memory occupation. Args: tags (Optional[List[str]], optional): List of tags to specify which memory to resume. If None, resumes all memory. Defaults to None. ["weights", "kv_cache"] Returns: Dict[str, Any]: Server response indicating memory resume status """ return self._make_request("resume_memory_occupation", {"tags": tags}) def abort_request(self, rid: str = "", abort_all: bool = False) -> dict[str, Any]: """Abort a request. Args: rid (str): The ID of the request to abort abort_all (bool, optional): Whether to abort all requests. Defaults to False. Returns: Dict[str, Any]: Server response indicating abort status """ return self._make_request("abort_request", {"rid": rid, "abort_all": abort_all}) class AsyncHttpServerAdapter(HttpServerAdapter): """Asynchronous HTTP-based adapter for SGLang engines. 
This class inherits from HttpServerAdapter and adds async capabilities for non-blocking HTTP requests to the SGLang server. It provides the same functionality as the synchronous version but with async/await support. The async adapter is useful when you need to make multiple concurrent requests or integrate with async frameworks. It uses aiohttp for efficient async HTTP communication and maintains connection pooling for better performance. Attributes: max_connections (int): Maximum number of connections in the connection pool """ def __init__( self, router_ip: Optional[str] = None, router_port: Optional[int] = None, timeout: float = DEFAULT_TIMEOUT, max_attempts: int = DEFAULT_MAX_ATTEMPTS, retry_delay: float = DEFAULT_RETRY_DELAY, max_connections: int = DEFAULT_MAX_CONNECTIONS, first_rank_in_node: bool = False, launch_server: bool = True, **kwargs: Any, ) -> None: """Initialize the async HTTP server engine adapter. Args: router_ip (Optional[str], optional): IP address of router for worker registration. Defaults to None. router_port (Optional[int], optional): Port of router for worker registration. Defaults to None. timeout (float, optional): HTTP request timeout in seconds. Defaults to DEFAULT_TIMEOUT. max_attempts (int, optional): Maximum number of retry attempts for failed requests. Defaults to DEFAULT_MAX_ATTEMPTS. retry_delay (float, optional): Base delay between retries in seconds. Defaults to DEFAULT_RETRY_DELAY. max_connections (int, optional): Maximum number of connections in the connection pool. Defaults to DEFAULT_MAX_CONNECTIONS. launch_server (bool, optional): Whether to launch the server process. Defaults to True. 
**kwargs (Any): Additional arguments passed to ServerArgs """ super().__init__( router_ip, router_port, timeout, max_attempts, retry_delay, first_rank_in_node, launch_server=launch_server, **kwargs, ) self.max_connections: int = max_connections @asynccontextmanager async def _get_session(self) -> aiohttp.ClientSession: """Context manager for safe session access with proper connection pooling. Yields: aiohttp.ClientSession: Session instance for making HTTP requests Note: This method creates a new session for each request to avoid resource competition while still maintaining proper connection pooling through the shared connector. """ # Create a new session for each request to avoid resource competition connector = aiohttp.TCPConnector( limit=self.max_connections, limit_per_host=self.max_connections // 4, ttl_dns_cache=300, use_dns_cache=True, ) timeout = aiohttp.ClientTimeout(total=self.timeout) session = aiohttp.ClientSession(connector=connector, timeout=timeout) try: yield session finally: # Always close the session to free up resources if not session.closed: await session.close() async def _make_async_request( self, endpoint: str, payload: Optional[dict[str, Any]] = None, method: str = "POST", timeout: float = DEFAULT_TIMEOUT, only_master: bool = True, ) -> dict[str, Any]: """Make an async HTTP request with retry logic and consistent error handling. Args: endpoint (str): The API endpoint to call (without leading slash) payload (Optional[Dict[str, Any]], optional): The JSON payload to send. Defaults to empty dict if None. method (str, optional): HTTP method to use. Defaults to "POST". 
Returns: Dict[str, Any]: The JSON response from the server Raises: aiohttp.ClientResponseError: If the HTTP request fails with a client/server error RuntimeError: If all retry attempts are exhausted Note: - For non-master nodes (node_rank != 0), returns empty dict immediately - Uses exponential backoff for retries - Logs warnings for timeout and connection errors, errors for HTTP errors """ if only_master and self.node_rank != 0: return {} url = f"http://{self.server_args.host}:{self.server_args.port}/{endpoint}" for attempt in range(self.max_attempts): try: async with self._get_session() as session: if method.upper() == "GET": async with session.get(url, timeout=timeout) as response: response.raise_for_status() return await _read_async_response(response) else: async with session.post(url, json=payload or {}, timeout=timeout) as response: response.raise_for_status() return await _read_async_response(response) except asyncio.TimeoutError: logger.warning(f"Async request to {endpoint} timed out (attempt {attempt + 1})") except aiohttp.ClientConnectorError: logger.warning(f"Connection error for {endpoint} (attempt {attempt + 1})") except aiohttp.ClientResponseError as e: logger.error(f"HTTP error for {endpoint}: {e}") raise except Exception as e: logger.error(f"Unexpected error for {endpoint}: {e}") if attempt == self.max_attempts - 1: raise if attempt < self.max_attempts - 1: await asyncio.sleep(self.retry_delay * (2**attempt)) raise RuntimeError(f"Failed to complete async request to {endpoint} after {self.max_attempts} attempts") async def release_memory_occupation(self, tags: Optional[list[str]] = None) -> dict[str, Any]: """Release GPU memory occupation temporarily (async version). Args: tags (Optional[List[str]], optional): List of tags to specify which memory to release. If None, releases all memory. Defaults to None. 
["weights", "kv_cache"] Returns: Dict[str, Any]: Server response indicating memory release status """ return await self._make_async_request("release_memory_occupation", {"tags": tags}) async def resume_memory_occupation(self, tags: Optional[list[str]] = None) -> dict[str, Any]: """Resume GPU memory occupation (async version). Similar to AsyncEngine, this method handles first-time weight reloading by calling release_memory_occupation if needed. Args: tags (Optional[List[str]], optional): List of tags to specify which memory to resume. If None, resumes all memory. Defaults to None. ["weights", "kv_cache"] Returns: Dict[str, Any]: Server response indicating memory resume status """ return await self._make_async_request("resume_memory_occupation", {"tags": tags}) async def update_weights_from_tensor( self, req: UpdateWeightsFromTensorReqInput, ) -> dict[str, Any]: """Update model weights from tensor data asynchronously. Args: serialized_named_tensors (List[str]): List of serialized tensor data load_format (Optional[str], optional): Format specification for loading weights. Defaults to None. flush_cache (bool, optional): Whether to flush cache after updating weights. Defaults to True. Returns: Dict[str, Any]: Server response containing update status """ import base64 named_tensors = req.serialized_named_tensors load_format = req.load_format flush_cache = req.flush_cache serialized_named_tensors = [base64.b64encode(named_tensor).decode("utf-8") for named_tensor in named_tensors] return await self._make_async_request( "update_weights_from_tensor", { "serialized_named_tensors": serialized_named_tensors, "load_format": load_format, "flush_cache": flush_cache, }, ) async def flush_cache(self) -> dict[str, Any]: """Flush the cache of the server asynchronously. Similar to the sync version, this method retries until the cache is successfully flushed. It uses async sleep between retries. Returns: Dict[str, Any]: Server response indicating cache flush status. 
For non-master nodes, returns empty dict. Note: Uses retry logic with limited attempts (max_attempts * 4) to avoid infinite loops. Each retry includes an async delay to allow pending requests to complete. """ if self.node_rank != 0: return {} # Use retry logic with limited attempts to avoid infinite loops for attempt in range(self.max_attempts * 4): # Allow more retries for cache flush try: async with self._get_session() as session: url = f"http://{self.server_args.host}:{self.server_args.port}/flush_cache" async with session.get(url) as response: if response.status == 200: return await _read_async_response(response) except Exception as e: logger.warning(f"Error flushing cache (attempt {attempt + 1}): {e}") await asyncio.sleep(self.retry_delay) logger.error("Failed to flush cache after maximum attempts") return {} async def generate( self, prompt: Optional[str] = None, sampling_params: Optional[dict[str, Any]] = None, input_ids: Optional[list[int]] = None, image_data: Optional[Any] = None, return_logprob: bool = False, logprob_start_len: Optional[int] = None, top_logprobs_num: Optional[int] = None, token_ids_logprob: Optional[list[int]] = None, lora_path: Optional[str] = None, custom_logit_processor: Optional[Callable] = None, ) -> dict[str, Any]: """Generate text using the SGLang server asynchronously.""" logger.info("generate() started") payload = { "text": prompt, "sampling_params": sampling_params, "input_ids": input_ids, "image_data": image_data, "return_logprob": return_logprob, "logprob_start_len": logprob_start_len, "top_logprobs_num": top_logprobs_num, "token_ids_logprob": token_ids_logprob, "lora_path": lora_path, "custom_logit_processor": custom_logit_processor, } # Filter out None values payload = {k: v for k, v in payload.items() if v is not None} # Send request response = await self._make_async_request("generate", payload, timeout=self.timeout, only_master=False) return response async def async_generate( self, prompt: Optional[str] = None, 
sampling_params: Optional[dict[str, Any]] = None, input_ids: Optional[list[int]] = None, image_data: Optional[Any] = None, return_logprob: bool = False, logprob_start_len: Optional[int] = None, top_logprobs_num: Optional[int] = None, token_ids_logprob: Optional[list[int]] = None, lora_path: Optional[str] = None, custom_logit_processor: Optional[Callable] = None, ) -> dict[str, Any]: """Async generate method that mirrors AsyncEngine.async_generate interface. This method provides compatibility with AsyncEngine's async_generate method by forwarding the call to the generate method. It ensures API consistency between direct engine usage and HTTP-based engine usage. Args: prompt (Optional[str], optional): Text prompt for generation. Defaults to None. sampling_params (Optional[Dict[str, Any]], optional): Parameters controlling text generation sampling. Defaults to None. input_ids (Optional[List[int]], optional): Alternative to prompt, direct token IDs input. Defaults to None. image_data (Optional[Any], optional): Image data for multimodal generation. Defaults to None. return_logprob (bool, optional): Whether to return log probabilities. Defaults to False. logprob_start_len (Optional[int], optional): Starting length for log probability calculation. Defaults to None. top_logprobs_num (Optional[int], optional): Number of top log probabilities to return. Defaults to None. token_ids_logprob (Optional[List[int]], optional): Specific token IDs for log probability calculation. Defaults to None. lora_path (Optional[str], optional): Path to LoRA adapter weights. Defaults to None. custom_logit_processor (Optional[Callable], optional): Custom logit processing function. Defaults to None. Returns: Dict[str, Any]: Generated text and associated metadata from the server Note: This method is provided for API compatibility with AsyncEngine. It forwards all calls to the generate method. 
""" return await self.generate( prompt=prompt, sampling_params=sampling_params, input_ids=input_ids, image_data=image_data, return_logprob=return_logprob, logprob_start_len=logprob_start_len, top_logprobs_num=top_logprobs_num, token_ids_logprob=token_ids_logprob, lora_path=lora_path, custom_logit_processor=custom_logit_processor, ) async def reward_score( self, prompt: Optional[str] = None, input_ids: Optional[list[int]] = None, image_data: Optional[Any] = None, lora_path: Optional[str] = None, ) -> dict[str, Any]: logger.info("reward_score() started") payload = { "text": prompt, "input_ids": input_ids, "image_data": image_data, "lora_path": lora_path, } # Filter out None values payload = {k: v for k, v in payload.items() if v is not None} # Send request response = await self._make_async_request("classify", payload, timeout=self.timeout, only_master=False) return response async def async_reward_score( self, prompt: Optional[str] = None, input_ids: Optional[list[int]] = None, image_data: Optional[Any] = None, lora_path: Optional[str] = None, ) -> dict[str, Any]: return await self.reward_score( prompt=prompt, input_ids=input_ids, image_data=image_data, lora_path=lora_path, ) async def abort_request(self, rid: str = "", abort_all: bool = False) -> dict[str, Any]: """Abort a request asynchronously. Args: rid (str): The ID of the request to abort abort_all (bool, optional): Whether to abort all requests. Defaults to False. Returns: Dict[str, Any]: Server response indicating abort status """ return await self._make_async_request("abort_request", {"rid": rid, "abort_all": abort_all})
verl__workers__rollout__sglang_rollout__http_server_engine.py
# Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import logging import multiprocessing as mp import os from typing import Generator import ray import sglang.srt.entrypoints.engine import torch from sglang.srt.server_args import ServerArgs from sglang.srt.utils import ( assert_pkg_version, is_cuda, set_prometheus_multiproc_dir, set_ulimit, ) from sglang.srt.weight_sync.utils import update_weights as sgl_update_weights from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from verl.utils.net_utils import is_valid_ipv6_address from verl.workers.config import HFModelConfig, RolloutConfig from verl.workers.rollout.base import BaseRollout from verl.workers.rollout.sglang_rollout.http_server_engine import AsyncHttpServerAdapter from verl.workers.rollout.sglang_rollout.utils import get_named_tensor_buckets logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) # patch to avoid issue https://github.com/sgl-project/sglang/issues/6723 def _set_envs_and_config(server_args: ServerArgs): # Set global environments os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" os.environ["NCCL_CUMEM_ENABLE"] = "0" os.environ["NCCL_NVLS_ENABLE"] = str(int(server_args.enable_nccl_nvls)) os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1" os.environ["CUDA_DEVICE_MAX_CONNECTIONS"] = "4" 
os.environ["CUDA_MODULE_LOADING"] = "AUTO" # Enable faulthandler in subprocesses os.environ["PYTHONFAULTHANDLER"] = "1" # Set prometheus env vars if server_args.enable_metrics: set_prometheus_multiproc_dir() # Set ulimit set_ulimit() # Check flashinfer version if server_args.attention_backend == "flashinfer": assert_pkg_version( "flashinfer_python", "0.2.5", "Please uninstall the old version and reinstall the latest version by following the instructions at https://docs.flashinfer.ai/installation.html.", ) if is_cuda(): assert_pkg_version( "sgl-kernel", "0.1.1", "Please reinstall the latest version with `pip install sgl-kernel --force-reinstall`", ) # Set mp start method mp.set_start_method("spawn", force=True) sglang.srt.entrypoints.engine._set_envs_and_config = _set_envs_and_config # because chatCompletion is an async method, it makes the whole ray actor be an async actor # which can not call loop.run_until_complete. So we need to make the engine to be an async class class ServerAdapter(BaseRollout): """SGLang server adapter used in native http server mode, serve as http client to request SGLang server to resume/release/update weights and kv_cache. - hybrid mode: reside in each hybrid worker to sync weights between training engine and SGLang server. - standalone/colocated mode: just a dummy placeholder to occupy the GPU to prevent ray scheduling new GPU actor. 
""" def __init__( self, config: RolloutConfig, model_config: HFModelConfig, device_mesh: DeviceMesh, ): if config.get("quantization", None) == "fp8": import sglang from packaging import version assert version.parse(sglang.__version__) >= version.parse("0.5.5"), ( "sglang>=0.5.5 is required for FP8 quantization" ) FP8_BLOCK_QUANT_KWARGS = { "activation_scheme": "dynamic", "fmt": "e4m3", "quant_method": "fp8", "weight_block_size": [128, 128], } fp8_block_quant_kwargs = dict(FP8_BLOCK_QUANT_KWARGS) model_config.hf_config.quantization_config = fp8_block_quant_kwargs super().__init__(config, model_config, device_mesh) self._engine: AsyncHttpServerAdapter = None rank = int(os.environ["RANK"]) local_world_size = int(os.environ["RAY_LOCAL_WORLD_SIZE"]) rollout_world_size = self.config.tensor_model_parallel_size * self.config.data_parallel_size self.replica_rank = rank // rollout_world_size self.rollout_rank = rank % rollout_world_size self.node_rank = self.rollout_rank // local_world_size self.local_rank = self.rollout_rank % local_world_size async def _init_server_adapter(self): if self._engine is not None: return # device_mesh is needed to gather cuda ipc handle to update weights if self.device_mesh is None: assert torch.distributed.is_initialized(), "torch distributed must be initialized" infer_tp = self.config.tensor_model_parallel_size * self.config.data_parallel_size infer_pp = self.config.pipeline_model_parallel_size infer_world_size = infer_tp * infer_pp dp = torch.distributed.get_world_size() // infer_world_size self.device_mesh = init_device_mesh( "cpu", mesh_shape=(dp, infer_tp, infer_pp), mesh_dim_names=["dp", "infer_tp", "infer_pp"] ) # Only init http server adapter in tp rank 0 if self.device_mesh["infer_tp"].get_local_rank() != 0: return # Lazy init http server adapter because http server is launched after hybrid engine. 
self.server_actor = ray.get_actor(f"sglang_server_{self.replica_rank}_{self.node_rank}") server_address, server_port = await self.server_actor.get_server_address.remote() logger.debug( f"replica_rank={self.replica_rank} node_rank={self.node_rank}, " f"server address: {server_address}, port: {server_port}" ) host = f"[{server_address}]" if is_valid_ipv6_address(server_address) else server_address self._engine = AsyncHttpServerAdapter( model_path=self.model_config.local_path, host=host, port=server_port, launch_server=False, trust_remote_code=self.model_config.trust_remote_code, ) async def resume(self, tags: list[str]): """Resume rollout weights or kv cache in GPU memory. Args: tag: weights or kv_cache. """ await self._init_server_adapter() if self.device_mesh["infer_tp"].get_local_rank() == 0 and self.config.free_cache_engine: await self._engine.resume_memory_occupation(tags=tags) async def release(self): """Release weights and kv cache in GPU memory.""" await self._init_server_adapter() if self.device_mesh["infer_tp"].get_local_rank() == 0 and self.config.free_cache_engine: await self._engine.release_memory_occupation(tags=["kv_cache", "weights"]) async def update_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None], **kwargs): """ Update model weights using tensor buckets, similar to THUDM/slime's implementation. Notes: - For the best performance of `rebuild_cuda_tensor`, it is recommended to: 1. Enable `RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES`. 2. Manually set `CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7` when using Tensor Parallelism (TP >= 8). 
- See reference implementations in SLIME: - Main logic: https://github.com/THUDM/slime/blob/fb7605cc5fb09af0f9369d37f7192f12bddee577/slime/ray/ppo_actor.py#L452 - runtime envs: https://github.com/THUDM/slime/blob/fb7605cc5fb09af0f9369d37f7192f12bddee577/slime/ray/ppo_actor.py#L39 """ await self._init_server_adapter() update_weights_bucket_bytes = int(self.config.checkpoint_engine.update_weights_bucket_megabytes) << 20 if self.config.get("quantization", None) == "fp8": from verl.utils.sglang.sglang_fp8_utils import quant_weights_by_name logger.info("Convert bf16 weights to fp8 format before loading") weights = quant_weights_by_name( weights, self.model_config.hf_config.quantization_config, dtype=self.model_config.hf_config.dtype, ) else: weights = weights async for params_batch in get_named_tensor_buckets(weights, update_weights_bucket_bytes): await sgl_update_weights( engine=self._engine, params_batch=params_batch, device_mesh_key="infer_tp", device_mesh=self.device_mesh, ) if self.device_mesh["infer_tp"].get_local_rank() == 0: await self._engine.flush_cache()
verl__workers__rollout__sglang_rollout__sglang_rollout.py
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from typing import Any, AsyncIterator, Iterator, Optional

import numpy as np
import torch
import torch.distributed as dist

from verl.utils.device import get_device_name
from verl.workers.rollout.utils import ensure_async_iterator


def broadcast_pyobj(
    data: list[Any],
    rank: int,
    dist_group: Optional[torch.distributed.ProcessGroup] = None,
    src: int = 0,
    force_cpu_device: bool = False,
):
    """from https://github.com/sgl-project/sglang/blob/844e2f227ab0cce6ef818a719170ce37b9eb1e1b/python/sglang/srt/utils.py#L905

    Broadcast inputs from src rank to all other ranks with torch.dist backend.
    The `rank` here refers to the source rank on the global process group
    (regardless of the dist_group argument).
    """
    device = torch.device(get_device_name() if not force_cpu_device else "cpu")

    if rank == src:
        if len(data) == 0:
            # Size 0 tells receivers there is no payload to follow.
            tensor_size = torch.tensor([0], dtype=torch.long, device=device)
            dist.broadcast(tensor_size, src=src, group=dist_group)
        else:
            serialized_data = pickle.dumps(data)
            size = len(serialized_data)
            # NOTE(review): np.frombuffer returns a read-only view; ByteTensor
            # appears to copy it, but confirm if this ever shares memory on CPU.
            tensor_data = torch.ByteTensor(np.frombuffer(serialized_data, dtype=np.uint8)).to(device)
            tensor_size = torch.tensor([size], dtype=torch.long, device=device)

            # Size first so receivers can allocate the payload buffer.
            dist.broadcast(tensor_size, src=src, group=dist_group)
            dist.broadcast(tensor_data, src=src, group=dist_group)
        return data
    else:
        tensor_size = torch.tensor([0], dtype=torch.long, device=device)
        dist.broadcast(tensor_size, src=src, group=dist_group)
        size = tensor_size.item()

        if size == 0:
            return []

        tensor_data = torch.empty(size, dtype=torch.uint8, device=device)
        dist.broadcast(tensor_data, src=src, group=dist_group)

        serialized_data = bytes(tensor_data.cpu().numpy())
        data = pickle.loads(serialized_data)
        return data


async def get_named_tensor_buckets(
    iterable: Iterator[tuple[str, torch.Tensor]], bucket_bytes: int
) -> AsyncIterator[list[tuple[str, torch.Tensor]]]:
    """Group named tensors into buckets bounded by a byte budget.

    Args:
        iterable: An (async or sync) iterator of (name, tensor) pairs.
        bucket_bytes: The maximum size of each bucket in bytes. A single tensor
            larger than the budget still forms its own one-element bucket.

    Yields:
        Lists of (name, tensor) pairs; tensors are cloned before bucketing.

    Raises:
        ValueError: If bucket_bytes is not positive.

    Example:
        >>> tensors = [('tensor1', torch.randn(1000, 1000)), ('tensor2', torch.randn(2000, 2000))]
        >>> async for bucket in get_named_tensor_buckets(tensors, bucket_bytes=10 * 1024 * 1024):
        ...     print(bucket)
        [('tensor1', tensor(...)), ('tensor2', tensor(...))]
    """
    if bucket_bytes <= 0:
        raise ValueError(f"bucket_bytes must be greater than 0, got {bucket_bytes}")

    current_bucket = []
    current_size = 0
    async for name, tensor in ensure_async_iterator(iterable):
        tensor_size = tensor.element_size() * tensor.numel()
        if current_size + tensor_size > bucket_bytes:
            # Flush the full bucket (if any) and start a new one with this tensor.
            if current_bucket:
                yield current_bucket
            current_bucket = [(name, tensor.clone())]
            current_size = tensor_size
        else:
            current_bucket.append((name, tensor.clone()))
            current_size += tensor_size

    if current_bucket:
        yield current_bucket
verl__workers__rollout__sglang_rollout__utils.py
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The base tokenizer class, required for any hybrid engine based rollout or inference with vLLM.
"""

from abc import ABC, abstractmethod

import numpy as np
import torch

__all__ = ["HybridEngineBaseTokenizer"]


class HybridEngineBaseTokenizer(ABC):
    """the tokenizer property and function name should align with HF's to meet vllm requirement"""

    @property
    @abstractmethod
    def vocab_size(self):
        """
        `int`: Size of the base vocabulary (without the added tokens).
        """
        pass

    @property
    @abstractmethod
    def pad_token_id(self):
        """
        `Optional[int]`: Id of the padding token in the vocabulary. Returns `None` if the token has not been set.
        """
        pass

    @property
    @abstractmethod
    def eos_token_id(self):
        """
        `Optional[int]`: Id of the end of sentence token in the vocabulary. Returns `None` if the token has
        not been set.
        """
        pass

    @property
    @abstractmethod
    def all_special_ids(self) -> list[int]:
        """
        `List[int]`: List the ids of the special tokens(`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes.
        """
        pass

    @property
    @abstractmethod
    def all_special_tokens(self) -> list[str]:
        """
        `List[str]`: A list of the unique special tokens (`'<unk>'`, `'<cls>'`, ..., etc.).

        Convert tokens of `tokenizers.AddedToken` type to string.
        """
        pass

    @abstractmethod
    def encode(self, text):
        """
        Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.

        Args:
            text (`str`, `List[str]` or `List[int]`):
                The sequence to be encoded. This can be a string, a list of strings (tokenized string using
                the `tokenize` method) or a list of integers.
        """
        pass

    @abstractmethod
    def decode(
        self,
        token_ids: int | list[int] | np.ndarray | torch.Tensor,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool | None = None,
        **kwargs,
    ) -> str:
        """
        Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special
        tokens and clean up tokenization spaces.

        Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.

        Args:
            token_ids (`Union[int, List[int], np.ndarray, torch.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces. If `None`, will default to
                `self.clean_up_tokenization_spaces`.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific decode method.

        Returns:
            `str`: The decoded sentence.
        """
        pass

    @abstractmethod
    def convert_ids_to_tokens(self, ids: int | list[int], skip_special_tokens: bool = False) -> str | list[str]:
        """
        Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and
        added tokens.

        Args:
            ids (`int` or `List[int]`):
                The token id (or token ids) to convert to tokens.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.

        Returns:
            `str` or `List[str]`: The decoded token(s).
        """
        pass

    @abstractmethod
    def get_added_vocab(self) -> dict[str, int]:
        """
        Returns the added tokens in the vocabulary as a dictionary of token to index. Results might be different from
        the fast call because for now we always add the tokens even if they are already in the vocabulary. This is
        something we should change.

        Returns:
            `Dict[str, int]`: The added tokens.
        """
        pass

    @abstractmethod
    def convert_tokens_to_string(self, tokens: list[str]) -> str:
        """
        Converts a sequence of tokens in a single string. The most simple way to do it is `" ".join(tokens)` but we
        often want to remove sub-word tokenization artifacts at the same time.

        Args:
            tokens (`List[str]`): The token to join in a string.

        Returns:
            `str`: The joined tokens.
        """
        pass

    @property
    def is_fast(self):
        # This base tokenizer is never a HF "fast" (Rust-backed) tokenizer.
        return False
verl__workers__rollout__tokenizer.py
# Copyright 2026 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
from typing import Any, Optional

import ray
import torch
from omegaconf import DictConfig
from ray.actor import ActorHandle
from ray.util import placement_group_table
from ray.util.placement_group import PlacementGroup

from verl.single_controller.ray import RayClassWithInitArgs, SubRayResourcePool
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.net_utils import is_valid_ipv6_address
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout.replica import RolloutMode, RolloutReplica, TokenOutput
from verl.workers.rollout.trtllm_rollout.trtllm_rollout import ServerAdapter
from verl.workers.rollout.utils import get_max_position_embeddings, run_unvicorn

logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)


@ray.remote
class TRTLLMHttpServer:
    """TensorRT LLM HTTP server in single node.

    Args:
        config (DictConfig): full config.
        model_config (HFModelConfig): model config.
        is_reward_model (bool): whether this is a reward model.
        rollout_mode (RolloutMode): rollout mode.
        workers (list[ActorHandle]): list of rollout workers.
        replica_rank (int): replica rank, a replica may contain multiple nodes.
        max_colocate_count (int): max colocate count.
        pgs (list[PlacementGroup]): placement groups.
        bundle_indices (list[list[int]]): bundle indices.
    """

    def __init__(
        self,
        config: RolloutConfig,
        model_config: HFModelConfig,
        is_reward_model: bool,
        rollout_mode: RolloutMode,
        workers: list[ActorHandle],
        replica_rank: int,
        max_colocate_count: int,
        pgs: list[PlacementGroup] = None,
        bundle_indices: list[list[int]] = None,
    ):
        os.environ["TRT_LLM_DISABLE_LOAD_WEIGHTS_IN_PARALLEL"] = "1"
        assert torch.cuda.is_available(), "TRTLLM http server should run on GPU node"
        self.config: RolloutConfig = omega_conf_to_dataclass(config)
        self.model_config: HFModelConfig = omega_conf_to_dataclass(model_config, dataclass_type=HFModelConfig)
        self.is_reward_model = is_reward_model

        # Clamp/derive max_model_len from the HF config's max_position_embeddings.
        max_position_embeddings = get_max_position_embeddings(self.model_config.hf_config)
        if self.config.max_model_len is None:
            self.config.max_model_len = max_position_embeddings
        else:
            if self.config.max_model_len > max_position_embeddings:
                raise ValueError(
                    f"max_model_len ({self.config.max_model_len}) should be less than or equal to "
                    f"max_position_embeddings ({max_position_embeddings})"
                )

        self.rollout_mode = rollout_mode
        self.workers = workers
        self.replica_rank = replica_rank
        self.max_colocate_count = max_colocate_count
        self.pgs = pgs
        self.bundle_indices = bundle_indices

        # Dummy weights only make sense in hybrid mode (weights come from the trainer).
        if self.rollout_mode != RolloutMode.HYBRID and self.config.load_format == "dummy":
            logger.warning(f"rollout mode is {self.rollout_mode}, load_format is dummy, set to auto")
            self.config.load_format = "auto"

        # used for http server
        self._server_address = ray.util.get_node_ip_address().strip("[]")
        self._server_port = None

        logger.info(f"TRTLLMHttpServer, replica_rank: {self.replica_rank}")

        # Fixed sampling arguments merged into every generate() call.
        self.sampling_args = {
            "detokenize": False,
            "end_id": -1,
            "pad_id": self.model_config.hf_config.pad_token_id,
            "stop_token_ids": [self.model_config.hf_config.eos_token_id],
            "include_stop_str_in_output": True,
        }

    def get_server_address(self):
        """Get http server address and port."""
        assert self._server_port is not None, "http server is not launched, port is None"
        return self._server_address, self._server_port

    async def launch_server(self):
        """Build the TRT-LLM AsyncLLM engine and expose it via an OpenAI-compatible HTTP server."""
        # Imported lazily so the actor can be defined on nodes without tensorrt_llm installed.
        from tensorrt_llm import AsyncLLM
        from tensorrt_llm.llmapi import CapacitySchedulerPolicy, CudaGraphConfig, KvCacheConfig, SchedulerConfig
        from tensorrt_llm.serve import OpenAIServer

        assert self.config.pipeline_model_parallel_size == 1, "pipeline_model_parallel_size > 1 is not supported yet"

        engine_kwargs = self.config.get("engine_kwargs", {}).get("trtllm", {}) or {}
        kv_cache_config = KvCacheConfig(
            enable_block_reuse=self.config.enable_prefix_caching,
            free_gpu_memory_fraction=self.config.gpu_memory_utilization,
        )
        # Each colocated worker gets an equal share of the GPU.
        per_worker_gpu_share = 1.0 / self.max_colocate_count
        llm_kwargs = {
            "model": self.model_config.local_path,
            "backend": "pytorch",
            "dtype": self.config.dtype,
            "enable_chunked_prefill": self.config.enable_chunked_prefill,
            "skip_tokenizer_init": self.config.skip_tokenizer_init,
            "orchestrator_type": "ray",
            "ray_worker_extension_cls": "tensorrt_llm.llmapi.rlhf_utils.WorkerExtension",
            "kv_cache_config": kv_cache_config,
            "max_seq_len": self.config.max_model_len,
            "max_batch_size": self.config.max_num_seqs,
            "max_num_tokens": self.config.max_num_batched_tokens,
            "tensor_parallel_size": self.config.tensor_model_parallel_size,
            "pipeline_parallel_size": self.config.pipeline_model_parallel_size,
            "moe_expert_parallel_size": self.config.expert_parallel_size,
            "trust_remote_code": self.model_config.trust_remote_code,
            "placement_groups": self.pgs,
            "placement_bundle_indices": self.bundle_indices,
            "per_worker_gpu_share": per_worker_gpu_share,
            "enable_sleep": self.config.enable_sleep_mode,
            "allreduce_strategy": "NCCL",
            "sampler_type": "TRTLLMSampler",
            **engine_kwargs,
        }
        if self.is_reward_model:
            # Reward models do single forward passes; CUDA graphs / overlap scheduling
            # provide no benefit there.
            llm_kwargs.update(
                {
                    "cuda_graph_config": None,
                    "disable_overlap_scheduler": True,
                }
            )
        else:
            llm_kwargs.update(
                {
                    "cuda_graph_config": CudaGraphConfig(
                        enable_padding=True,
                        batch_sizes=self.config.cudagraph_capture_sizes,
                        max_batch_size=0 if self.config.cudagraph_capture_sizes else self.config.max_num_seqs,
                    ),
                    "scheduler_config": SchedulerConfig(
                        capacity_scheduler_policy=CapacitySchedulerPolicy.MAX_UTILIZATION,
                    ),
                }
            )
        self.llm = await AsyncLLM(**llm_kwargs)

        trtllm_server = OpenAIServer(
            llm=self.llm,
            model=self.model_config.local_path,
            tool_parser=None,
            server_role=None,
            metadata_server_cfg=None,
        )
        app = trtllm_server.app
        self._server_port, self._server_task = await run_unvicorn(app, None, self._server_address)

    async def generate(
        self,
        prompt_ids: list[int],
        sampling_params: dict[str, Any],
        request_id: str,
        image_data: Optional[list[Any]] = None,
        video_data: Optional[list[Any]] = None,
    ) -> TokenOutput:
        """Generate sequence with token-in-token-out."""
        assert image_data is None and video_data is None, "Multimodality is not yet supported in TRTLLMHttpServer."
        from tensorrt_llm.llmapi import SamplingParams

        # Cap the response so prompt + response never exceeds the engine's max_model_len.
        max_tokens = min(self.config.response_length, self.config.max_model_len - len(prompt_ids))
        sampling_params["max_tokens"] = max_tokens
        # TRT-LLM expects an int count (or None); callers pass a boolean flag.
        sampling_params["logprobs"] = 1 if sampling_params.pop("logprobs", False) else None
        # TRT-LLM uses top_k=0 (disabled) where vLLM-style callers use -1.
        # NOTE(review): assumes callers always supply "top_k" — confirm.
        if sampling_params["top_k"] == -1:
            sampling_params["top_k"] = 0
        sampling_params.update(self.sampling_args)
        trt_llm_sampling_params = SamplingParams(**sampling_params)

        outputs = await self.llm.generate_async(
            inputs=prompt_ids,
            sampling_params=trt_llm_sampling_params,
        )
        token_ids = outputs.outputs[0].token_ids
        log_probs = None
        if trt_llm_sampling_params.logprobs is not None:
            # Each entry maps token_id -> Logprob; with logprobs=1 there is a single entry.
            log_probs = [list(d.values())[0].logprob for d in outputs.outputs[0].logprobs]
        return TokenOutput(token_ids=token_ids, log_probs=log_probs)

    async def wake_up(self):
        """Resume engine memory; only valid in colocated/standalone modes."""
        if self.rollout_mode == RolloutMode.HYBRID:
            # In hybrid mode, rollout is wake up in `update_weights`
            raise ValueError(f"wake_up not support rollout_mode {self.rollout_mode}")
        if self.rollout_mode == RolloutMode.COLOCATED:
            await self.llm.resume(tags=ServerAdapter.get_full_tags())
        elif self.rollout_mode == RolloutMode.STANDALONE:
            logger.info("skip wake_up in standalone mode")

    async def sleep(self):
        """Release engine memory (weights + kv cache) if free_cache_engine is enabled."""
        if not self.config.free_cache_engine:
            return
        if self.rollout_mode == RolloutMode.HYBRID:
            await self.llm.release(tags=ServerAdapter.get_full_tags())
        elif self.rollout_mode == RolloutMode.COLOCATED:
            await self.llm.release(tags=ServerAdapter.get_full_tags())
        elif self.rollout_mode == RolloutMode.STANDALONE:
            logger.info("skip sleep in standalone mode")

    async def report_device_ids(self) -> list[str]:
        """Report GPU device UUIDs from TRT-LLM workers."""
        return await self.llm.collective_rpc(
            "report_device_id",
            unique_reply_rank=0,
        )


_rollout_worker_actor_cls = ray.remote(ServerAdapter)


class TRTLLMReplica(RolloutReplica):
    def __init__(
        self,
        replica_rank: int,
        config: RolloutConfig,
        model_config: DictConfig,
        gpus_per_node: int = 8,
        is_reward_model: bool = False,
    ) -> None:
        super().__init__(replica_rank, config, model_config, gpus_per_node, is_reward_model)
        self.node_ip = ray.util.get_node_ip_address().strip("[]")

    def get_ray_class_with_init_args(self) -> RayClassWithInitArgs:
        """Get rollout worker actor class for colocated and standalone mode."""
        worker_dict_cls = RayClassWithInitArgs(
            cls=_rollout_worker_actor_cls,
            config=self.config,
            model_config=self.model_config,
            device_mesh=None,
            replica_rank=self.replica_rank,
        )
        return worker_dict_cls

    def rollout_worker_use_gpu(self) -> bool:
        # The server adapter itself holds no GPU; TRT-LLM workers own the GPUs.
        return False

    def get_pgs_and_bundle_indices(self) -> tuple[list[PlacementGroup], list[list[int]]]:
        """Get placement groups and bundle indices for the replica.

        Returns:
            A pair (pgs, bundle_indices): the placement groups this replica spans and,
            per placement group, the bundle indices assigned to this replica.
        """
        start_pg_index = 0
        local_bundle_index = 0

        # For SubRayResourcePool, the replica is assigned sub pool specific for this replica.
        if isinstance(self.resource_pool, SubRayResourcePool):
            assert self.resource_pool.subgroup_world_size == self.world_size, (
                "Subgroup world size must be equal to world size"
            )
            local_bundle_index = self.resource_pool.start_bundle_index
        # For RayResourcePool, the replica is assigned to entire resource pool.
        # We need to find start pg index and local bundle index based on replica rank.
        else:
            local_bundle_index = self.world_size * self.replica_rank
            # Skip whole placement groups that precede this replica's first bundle.
            # BUGFIX: subtract the bundle count of the group being skipped BEFORE
            # advancing the index; the previous order subtracted the NEXT group's
            # count, yielding a wrong (possibly negative) local index whenever
            # placement groups have different bundle counts.
            while local_bundle_index >= self.resource_pool.pgs[start_pg_index].bundle_count:
                local_bundle_index -= self.resource_pool.pgs[start_pg_index].bundle_count
                start_pg_index += 1

        assert (
            start_pg_index < len(self.resource_pool.pgs)
            and local_bundle_index < self.resource_pool.pgs[start_pg_index].bundle_count
        ), "Start pg index or local bundle index out of range"

        # Global Bundle View for Replica x 2 & TP=4:
        # ┌───────────────────┬───────────────────┐
        # │ Placement Group 0 │ Placement Group 1 │
        # ├────┬────┬────┬────┼────┬────┬────┬────┤
        # │ 0  │ 1  │ 2  │ 3  │ 0  │ 1  │ 2  │ 3  │
        # └────┴────┴────┴────┴────┴────┴────┴────┘
        #   └───────────────┘    └───────────────┘
        #       Replica 0            Replica 1
        #       (4 GPUs)             (4 GPUs)
        left_bundle_count = self.world_size
        pgs = []
        bundle_indices = []
        for pg in self.resource_pool.pgs[start_pg_index:]:
            if left_bundle_count == 0:
                break
            # Take as many bundles from this pg as still needed (and available).
            left_bundle_count_in_pg = min(left_bundle_count, pg.bundle_count - local_bundle_index)
            pg_bundle_indices = [local_bundle_index + idx for idx in range(left_bundle_count_in_pg)]
            pgs.append(pg)
            bundle_indices.append(pg_bundle_indices)
            left_bundle_count -= left_bundle_count_in_pg
            # Subsequent pgs are consumed from their first bundle.
            local_bundle_index = 0
        assert left_bundle_count == 0, "all bundle indices should be assigned"
        return pgs, bundle_indices

    async def launch_servers(self):
        """Launch the per-replica TRTLLMHttpServer actor and record its address."""
        assert self.nnodes == 1, "TRTLLMReplica doesn't support multiple nodes for single replica yet."
        assert self.resource_pool.pgs is not None, "placement groups are not initialized"
        pgs, bundle_indices = self.get_pgs_and_bundle_indices()

        # Check server process should be launched on the same node as first bundle of first pg.
        first_pg_data = placement_group_table(pgs[0])
        node_id = first_pg_data["bundles_to_node_id"][bundle_indices[0][0]]

        print(f"TRTLLMReplica: {self.replica_rank}")
        print(f"pg node_id: {node_id}")
        print(f"pgs: {pgs}")
        print(f"bundle_indices: {bundle_indices}")

        # TRTLLMReplica is a 1:1 map from replica to TRTLLMHttpServer.
        name = (
            f"trtllm_server_{self.replica_rank}"
            if not self.is_reward_model
            else f"trtllm_server_reward_{self.replica_rank}"
        )
        server = TRTLLMHttpServer.options(
            scheduling_strategy=ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy(
                node_id=node_id,
                soft=False,
            ),
            runtime_env={"env_vars": {"RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES": "1"}},
            name=name,
        ).remote(
            config=self.config,
            model_config=self.model_config,
            is_reward_model=self.is_reward_model,
            rollout_mode=self.rollout_mode,
            workers=self.workers,
            replica_rank=self.replica_rank,
            max_colocate_count=self.resource_pool.max_colocate_count,
            pgs=pgs,
            bundle_indices=bundle_indices,
        )
        self.servers.append(server)

        # launch http server in each node
        await asyncio.gather(*[server.launch_server.remote() for server in self.servers])

        # get http server address from first server
        server_address, server_port = await self.servers[0].get_server_address.remote()
        self._server_handle = self.servers[0]
        self._server_address = (
            f"[{server_address}]:{server_port}"
            if is_valid_ipv6_address(server_address)
            else f"{server_address}:{server_port}"
        )
verl__workers__rollout__trtllm_rollout__trtllm_async_server.py
# Copyright 2026 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import asyncio
import base64
import contextlib
import gc
import logging
import os
import pickle
import threading
from contextlib import asynccontextmanager
from typing import Any, Generator, Optional

import aiohttp
import pynvml
import ray
import torch
import torch.distributed as dist
from torch.distributed.device_mesh import DeviceMesh, init_device_mesh
from torch.multiprocessing.reductions import reduce_tensor

from verl.utils.device import get_torch_device
from verl.utils.net_utils import is_valid_ipv6_address
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout.base import BaseRollout

logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))

# Default configuration constants
DEFAULT_TIMEOUT = 60.0
DEFAULT_MAX_ATTEMPTS = 3
DEFAULT_RETRY_DELAY = 2.0
DEFAULT_MAX_CONNECTIONS = 2000
DEFAULT_MAX_WAIT_TIME = 300.0


@contextlib.contextmanager
def nvml_context():
    """Context manager for NVML initialization and shutdown.

    Raises:
        RuntimeError: If NVML initialization fails
    """
    try:
        pynvml.nvmlInit()
        yield
    except pynvml.NVMLError as e:
        raise RuntimeError(f"Failed to initialize NVML: {e}") from e
    finally:
        # Shutdown failures are ignored: NVML may already be torn down elsewhere.
        try:
            pynvml.nvmlShutdown()
        except pynvml.NVMLError:
            pass


# Process-wide NVML init guard for get_device_uuid (initialized once, never shut
# down, so UUID handles stay valid for the life of the process).
_NVML_INITIALIZED = False
_NVML_LOCK = threading.Lock()


def get_device_uuid(id: int) -> str:
    """Get the UUID of a CUDA device using NVML.

    Args:
        id: Global CUDA device index as seen by NVML.

    Returns:
        The device UUID string (decoded from bytes if necessary).

    Raises:
        RuntimeError: If NVML init fails, the query fails, or the UUID has an
            unexpected type.
    """
    global _NVML_INITIALIZED
    with _NVML_LOCK:
        if not _NVML_INITIALIZED:
            try:
                pynvml.nvmlInit()
                _NVML_INITIALIZED = True
            except pynvml.NVMLError as e:
                raise RuntimeError(f"Failed to initialize NVML: {e}") from e

    # Get the device handle and UUID
    try:
        handle = pynvml.nvmlDeviceGetHandleByIndex(id)
        uuid = pynvml.nvmlDeviceGetUUID(handle)
        # Ensure the UUID is returned as a string, not bytes
        if isinstance(uuid, bytes):
            return uuid.decode("utf-8")
        elif isinstance(uuid, str):
            return uuid
        else:
            raise RuntimeError(f"Unexpected UUID type: {type(uuid)} for device {id} (global index: {id})")
    except pynvml.NVMLError as e:
        raise RuntimeError(f"Failed to get device UUID for device {id} (global index: {id}): {e}") from e


async def _read_async_response(resp: aiohttp.ClientResponse) -> dict[str, Any]:
    """Best-effort read of an HTTP response body as JSON.

    Returns {} for empty bodies; on non-JSON bodies, returns the raw text with
    its content type so callers can still log something useful.
    """
    if resp.status == 204 or (resp.content_length == 0):
        return {}
    try:
        # content_type=None disables aiohttp's strict content-type check.
        return await resp.json(content_type=None)
    except Exception:
        try:
            text = await resp.text()
        except Exception:
            return {}
        return {
            "content_type": (resp.headers.get("Content-Type") or ""),
            "text": text,
        }


class AsyncTRTLLMHttpAdapter:
    """Thin async HTTP client for the TRT-LLM server's memory/weights endpoints."""

    def __init__(
        self,
        host: str,
        port: int,
        timeout: float = DEFAULT_TIMEOUT,
        max_attempts: int = DEFAULT_MAX_ATTEMPTS,
        retry_delay: float = DEFAULT_RETRY_DELAY,
        max_connections: int = DEFAULT_MAX_CONNECTIONS,
    ):
        self.host = host
        self.port = port
        self.timeout = timeout
        self.max_attempts = max_attempts
        self.retry_delay = retry_delay
        self.max_connections = max_connections

    @asynccontextmanager
    async def _get_session(self) -> aiohttp.ClientSession:
        """Context manager for safe session access with proper connection pooling.

        Yields:
            aiohttp.ClientSession: Session instance for making HTTP requests

        Note:
            This method creates a new session for each request to avoid resource
            competition while still maintaining proper connection pooling through
            the shared connector.
        """
        # Create a new session for each request to avoid resource competition
        connector = aiohttp.TCPConnector(
            limit=self.max_connections,
            limit_per_host=self.max_connections // 4,
            ttl_dns_cache=300,
            use_dns_cache=True,
        )
        timeout = aiohttp.ClientTimeout(total=self.timeout)
        session = aiohttp.ClientSession(connector=connector, timeout=timeout)
        try:
            yield session
        finally:
            # Always close the session to free up resources
            if not session.closed:
                await session.close()

    async def _make_async_request(
        self,
        endpoint: str,
        payload: Optional[dict[str, Any]] = None,
        timeout: float = DEFAULT_TIMEOUT,
        method: str = "POST",
        return_status: bool = False,
    ) -> dict[str, Any] | int:
        """Make an async HTTP request with retry logic and consistent error handling.

        Args:
            endpoint (str): The API endpoint to call (without leading slash)
            payload (Optional[Dict[str, Any]], optional): The JSON payload to send.
                Defaults to empty dict if None.
            timeout (float, optional): Per-request timeout in seconds.
            method (str, optional): HTTP method to use. Defaults to "POST".
            return_status (bool, optional): If True, return the HTTP status code
                instead of the parsed body.

        Returns:
            Dict[str, Any]: The JSON response from the server

        Raises:
            aiohttp.ClientResponseError: If the HTTP request fails with a client/server error
            RuntimeError: If all retry attempts are exhausted

        Note:
            - Uses exponential backoff for retries
            - Logs warnings for timeout and connection errors, errors for HTTP errors
        """
        url = f"http://{self.host}:{self.port}/{endpoint}"

        for attempt in range(self.max_attempts):
            try:
                async with self._get_session() as session:
                    if method.upper() == "GET":
                        async with session.get(url, timeout=timeout) as response:
                            response.raise_for_status()
                            return response.status if return_status else await _read_async_response(response)
                    else:
                        async with session.post(url, json=payload or {}, timeout=timeout) as response:
                            response.raise_for_status()
                            return response.status if return_status else await _read_async_response(response)
            except asyncio.TimeoutError:
                logger.warning(f"Async request to {endpoint} timed out (attempt {attempt + 1})")
            except aiohttp.ClientConnectorError:
                logger.warning(f"Connection error for {endpoint} (attempt {attempt + 1})")
            except aiohttp.ClientResponseError as e:
                # HTTP-level errors are not retried: the server answered definitively.
                logger.error(f"HTTP error for {endpoint}: {e}")
                raise
            except Exception as e:
                logger.error(f"Unexpected error for {endpoint}: {e}")
                if attempt == self.max_attempts - 1:
                    raise
            if attempt < self.max_attempts - 1:
                # Exponential backoff between retries.
                await asyncio.sleep(self.retry_delay * (2**attempt))

        raise RuntimeError(f"Failed to complete async request to {endpoint} after {self.max_attempts} attempts")

    async def resume_memory_occupation(self, tags: list[str]):
        """Resume GPU memory occupation (async version).

        Args:
            tags (List[str]): List of tags to specify which memory to resume,
                e.g. ["weights", "kv_cache"].

        Returns:
            Dict[str, Any]: Server response indicating memory resume status
        """
        return await self._make_async_request("resume_memory", {"tags": tags})

    async def release_memory_occupation(self, tags: list[str]):
        """Release GPU memory occupation temporarily (async version).

        Args:
            tags (List[str]): List of tags to specify which memory to release,
                e.g. ["weights", "kv_cache"].

        Returns:
            Dict[str, Any]: Server response indicating memory release status
        """
        return await self._make_async_request("release_memory", {"tags": tags})

    async def update_weights(self, weights: dict[str, str]):
        """Update model weights from tensor data asynchronously.

        Args:
            weights: A dictionary that maps the device uuid of the weight handles.

        Returns:
            Dict[str, Any]: Server response containing update status
        """
        return await self._make_async_request("update_weights", {"weights": weights})


class ServerAdapter(BaseRollout):
    """Per-GPU adapter that bridges trainer ranks to the TRT-LLM HTTP server.

    Only the leader rank of each replica talks to the server; the other ranks
    participate via gather/barrier collectives on a CPU device mesh.
    """

    # Memory tags covering all weight-related allocations in the TRT-LLM engine.
    _WEIGHTS_TAGS = [
        "sampler",
        "drafter",
        "guided_decoder",
        "spec_resource_manager",
        "model_extra",
        "executor_extra",
        "model",
        "draft_model",
    ]

    @staticmethod
    def get_full_tags() -> list[str]:
        """All releasable memory tags: every weights tag plus the kv cache."""
        return ServerAdapter._WEIGHTS_TAGS + ["kv_cache"]

    def __init__(
        self, config: RolloutConfig, model_config: HFModelConfig, device_mesh: DeviceMesh, replica_rank: int = -1
    ):
        super().__init__(config, model_config, device_mesh)
        self._adapter = None
        self.hybrid_device_mesh = None
        self.gpu_id = None
        self.is_leader_rank = None
        self.replica_rank = None
        self.is_dp_rank = None

        # hybrid mode
        if self.device_mesh is not None:
            assert device_mesh.mesh_dim_names.index("dp") == 0, "DP dim should always be the first dimension"
            # Clone a new device mesh for CPU backend only (used for internal ranks communication)
            device_mesh_kwargs = dict(
                mesh_shape=device_mesh.mesh.shape,
                mesh_dim_names=device_mesh.mesh_dim_names,
            )
            self.hybrid_device_mesh = init_device_mesh("cpu", **device_mesh_kwargs)
            # Flatten all non-DP dims into a single "exclude_dp" dim; rank 0 of that
            # flattened group is the leader that talks to the HTTP server.
            self.hybrid_device_mesh[self.hybrid_device_mesh.mesh_dim_names[1:]]._flatten(mesh_dim_name="exclude_dp")
            self.is_leader_rank = self.hybrid_device_mesh["exclude_dp"].get_local_rank() == 0

            logger.info(f"is_dp_leader: {self.is_leader_rank}")
            logger.info(f"exclude_dp_rank = {self.hybrid_device_mesh['exclude_dp'].get_local_rank()}")
            logger.info(f"exclude_dp_size = {self.hybrid_device_mesh['exclude_dp'].size()}")

            self.gpu_id = ray.get_gpu_ids()[0]
            self.replica_rank = self.hybrid_device_mesh["dp"].get_local_rank()
            assert len(ray.get_gpu_ids()) == 1, "ServerAdapter should run on a single GPU node"
        else:
            # Non-hybrid mode: replica_rank is passed in; global rank 0 is the leader.
            rank = int(os.environ["RANK"])
            self.replica_rank = replica_rank
            self.is_leader_rank = rank == 0

        # Below is required for all modes.
        assert self.replica_rank >= 0, "replica_rank is not set"
        assert self.is_leader_rank is not None, "is_leader_rank is not set"
        self.node_ip = ray.util.get_node_ip_address().strip("[]")

    async def _init_server_adapter(self):
        """Lazily connect to this replica's TRTLLMHttpServer actor (idempotent)."""
        if self._adapter is not None:
            return

        # Lazy init http server adapter because http server is launched after hybrid engine.
        self.server_actor = ray.get_actor(f"trtllm_server_{self.replica_rank}")
        server_address, server_port = await self.server_actor.get_server_address.remote()
        assert server_address == self.node_ip, f"server address: {server_address} != node_ip: {self.node_ip}"
        logger.debug(f"replica_rank={self.replica_rank}, server address: {server_address}, port: {server_port}")
        host = f"[{server_address}]" if is_valid_ipv6_address(server_address) else server_address
        self._adapter = AsyncTRTLLMHttpAdapter(
            host=host,
            port=server_port,
            timeout=self.config.server.timeout,
            max_attempts=self.config.server.max_attempts,
            retry_delay=self.config.server.retry_delay,
            max_connections=self.config.server.max_connections,
        )

    async def resume(self, tags: list[str]):
        """Resume rollout weights or kv cache in GPU memory.

        Args:
            tags: ["weights"] or ["kv_cache"]; expanded to the full server-side
                tag list before the HTTP call.
        """
        # Synchronize all ranks before resuming KV cache to ensure non-leader ranks
        # have completed actor offloading to CPU, preventing OOM issue.
        if "kv_cache" in tags and self.config.free_cache_engine:
            await asyncio.to_thread(dist.barrier, group=self.hybrid_device_mesh["exclude_dp"].get_group())

        if self.is_leader_rank and self.config.free_cache_engine:
            if "weights" in tags:
                tags = self._WEIGHTS_TAGS
            elif "kv_cache" in tags:
                tags = ["kv_cache"]
            else:
                raise ValueError(f"Invalid tag: {tags}")
            await self._init_server_adapter()
            await self._adapter.resume_memory_occupation(tags=tags)

    async def release(self):
        """Release weights and kv cache in GPU memory."""
        if self.is_leader_rank and self.config.free_cache_engine:
            await self._init_server_adapter()
            tags = self._WEIGHTS_TAGS + ["kv_cache"]
            await self._adapter.release_memory_occupation(tags=tags)

    async def update_weights_from_ipc_handles(self, device_handles):
        """Gather per-rank IPC weight handles to the leader and push them to the server.

        Args:
            device_handles: This rank's {device_uuid: serialized_handles} mapping.
        """
        assert self.hybrid_device_mesh is not None, "hybrid_device_mesh is not set"
        if self.is_leader_rank:
            gathered_handles = [None for _ in range(self.hybrid_device_mesh["exclude_dp"].size())]
        else:
            gathered_handles = None

        await asyncio.to_thread(
            dist.gather_object,
            obj=device_handles,
            object_gather_list=gathered_handles,
            group_dst=0,
            group=self.hybrid_device_mesh["exclude_dp"].get_group(),
        )

        if self.is_leader_rank:
            # Merge per-rank dicts into one {device_uuid: handles} mapping.
            all_handles = {k: v for d in gathered_handles for k, v in d.items()}
            await self._adapter.update_weights(all_handles)
        await asyncio.to_thread(dist.barrier, group=self.hybrid_device_mesh["exclude_dp"].get_group())

    async def update_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None], **kwargs):
        """Update the weights of the rollout model.

        Streams (name, tensor) pairs into size-bounded buckets of CUDA IPC handles
        and flushes each bucket to the server via `update_weights_from_ipc_handles`.

        Args:
            weights: A generator that yields the name of the weight tensor and the tensor itself.
        """
        assert self.hybrid_device_mesh is not None, "hybrid_device_mesh is not set"
        if self.is_leader_rank:
            await self._init_server_adapter()

        total_available_bytes = int(self.config.checkpoint_engine.update_weights_bucket_megabytes) * 1024 * 1024
        try:
            device_uuid = get_device_uuid(self.gpu_id)
        except Exception as e:
            logger.error(f"Failed to get device UUID in update_weights(): {e}")
            device_uuid = None
            raise e

        cur_available_bytes = total_available_bytes
        cur_handles = []

        async def flush():
            # Ship the accumulated IPC handles for this rank and reset the bucket.
            nonlocal cur_available_bytes, cur_handles
            if not cur_handles:
                return
            serialized_device_handles = {device_uuid: base64.b64encode(pickle.dumps(cur_handles)).decode("utf-8")}
            await self.update_weights_from_ipc_handles(serialized_device_handles)
            cur_available_bytes = total_available_bytes
            cur_handles = []

        for name, param in weights:
            size_in_bytes = param.element_size() * param.numel()
            if size_in_bytes > cur_available_bytes:
                await flush()
            # A single tensor must fit within one bucket.
            assert cur_available_bytes >= size_in_bytes, (
                f"cur_available_bytes: {cur_available_bytes:,} size_in_bytes: {size_in_bytes:,} name: {name}"
            )
            cur_available_bytes -= size_in_bytes
            # reduce_tensor produces a CUDA IPC handle shareable across processes.
            handle = reduce_tensor(param.detach())
            cur_handles.append((name, handle))
        await flush()

        if self.is_leader_rank:
            # Finalize update weights
            await self._adapter.update_weights(None)
        await asyncio.to_thread(dist.barrier, group=self.hybrid_device_mesh["exclude_dp"].get_group())
        gc.collect()
        get_torch_device().empty_cache()

    def _get_attribute(self, name: str):
        # Generic attribute accessor, used for remote introspection of this actor.
        return getattr(self, name)
verl__workers__rollout__trtllm_rollout__trtllm_rollout.py
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import logging
import os

import uvicorn
from fastapi import FastAPI

from verl.utils.net_utils import get_free_port

logger = logging.getLogger(__file__)


def get_max_position_embeddings(hf_config) -> int:
    """Return the model's max context length from a HF config.

    Falls back to the nested ``text_config`` (multi-modal configs) when the
    top-level attribute is absent; raises ``ValueError`` if neither has it.
    """
    max_len = getattr(hf_config, "max_position_embeddings", None)
    if max_len is None:
        text_config = getattr(hf_config, "text_config", None)
        if text_config is not None:
            max_len = getattr(text_config, "max_position_embeddings", None)
    if max_len is None:
        raise ValueError("max_position_embeddings not found in HFModelConfig!")
    return int(max_len)


async def run_unvicorn(app: FastAPI, server_args, server_address, max_retries=5) -> tuple[int, asyncio.Task]:
    """Start a uvicorn server for *app* on a free port, retrying on bind failure.

    Returns (bound_port, background_task running the server's main loop).
    NOTE(review): the name keeps the historical "unvicorn" typo because callers
    import it by this name.
    """
    server_port, server_task = None, None
    for i in range(max_retries):
        try:
            server_port, sock = get_free_port(server_address)
            # Stash CLI-style args on the app so request handlers can read them.
            app.server_args = server_args
            config = uvicorn.Config(app, host=server_address, port=server_port, log_level="warning")
            server = uvicorn.Server(config)
            # Setting should_exit before serve() makes serve() return right after
            # startup (its internal loop sees the flag); the real serving loop is
            # then detached below as a background task we control.
            server.should_exit = True
            await server.serve()
            server_task = asyncio.create_task(server.main_loop())
            break
        except (OSError, SystemExit) as e:
            # Port may have been grabbed between the free-port probe and uvicorn's
            # bind (NOTE(review): inherent race with get_free_port) — retry.
            logger.error(f"Failed to start HTTP server on port {server_port} at try {i}, error: {e}")
    else:
        # for/else: all retries failed — hard-exit the process.
        logger.error(f"Failed to start HTTP server after {max_retries} retries, exiting...")
        os._exit(-1)

    logger.info(f"HTTP server started on port {server_port}")
    return server_port, server_task


async def ensure_async_iterator(iterable):
    """Convert an iterable to an async iterator.

    Async iterables are delegated to directly; plain iterables are re-yielded
    item by item (without yielding control between items).
    """
    if hasattr(iterable, "__aiter__"):
        async for item in iterable:
            yield item
    else:
        for item in iterable:
            yield item
verl__workers__rollout__utils.py
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ctypes
import gc
import json
import logging
import os
import platform
import signal
import threading
from multiprocessing import shared_memory
from types import MethodType
from typing import Any, Callable, Literal, TypedDict, get_args

import torch
import zmq

from verl.utils.device import get_torch_device, is_npu_available
from verl.utils.vllm import TensorLoRARequest, VLLMHijack
from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader
from verl.utils.vllm.vllm_fp8_utils import apply_vllm_fp8_patches, is_fp8_model, load_quanted_weights

logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))

# magic numbers that ensure we are using the same LoRA adapter during the rollout and training process
VLLM_LORA_INT_ID = 123
VLLM_LORA_NAME = "123"
VLLM_LORA_PATH = "simon_lora_path"

# Env defaults required by vllm-ascend < v0.13.0 (see __new__ below).
VLLM_ASCEND_REQUIRED_ENV_VARS = {"VLLM_ALL2ALL_BACKEND": "flashinfer_all2allv", "VLLM_ASCEND_ENABLE_NZ": "0"}


def set_death_signal():
    """Kill the current process when the parent process exits."""
    # prctl is Linux-only; no-op elsewhere.
    if platform.system() != "Linux":
        return
    libc = ctypes.CDLL("libc.so.6")
    # prctl option 1 is PR_SET_PDEATHSIG: deliver SIGKILL to us when the parent dies.
    libc.prctl(1, signal.SIGKILL)
    # If the parent already exited (we were re-parented to PID 1), honor the
    # contract immediately instead of lingering.
    if os.getppid() == 1:
        os.kill(os.getpid(), signal.SIGKILL)


def get_device_uuid(device_id: int) -> str:
    """Return a stable identifier for the given local device index.

    On NPU this is derived from ASCEND_RT_VISIBLE_DEVICES (or the raw index);
    otherwise vLLM's platform-specific device UUID is used.
    """
    from vllm.platforms import current_platform

    # Convert torch.npu.current_device to its corresponding ASCEND_RT_VISIBLE_DEVICES.
    if is_npu_available:
        if os.getenv("ASCEND_RT_VISIBLE_DEVICES") is not None:
            npu_visible_devices = os.environ["ASCEND_RT_VISIBLE_DEVICES"].split(",")
            assert device_id < len(npu_visible_devices), f"device_id {device_id} must less than {npu_visible_devices}"
            return "NPU-" + npu_visible_devices[device_id]
        else:
            return f"NPU-{device_id}"
    else:
        return current_platform.get_device_uuid(device_id)


def get_vllm_max_lora_rank(lora_rank: int):
    """
    For vLLM, automatically adjusts the `max_lora_rank` to the nearest allowed value.
    The allowed values are retrieved from vLLM's MaxLoRARanks type definition.
    """
    assert lora_rank > 0, f"lora_rank must be greater than 0, get {lora_rank}"
    try:
        from vllm.config.lora import MaxLoRARanks
    except Exception:
        # FIXME: migrate vllm version https://github.com/vllm-project/vllm/blob/main/vllm/config/lora.py#L25
        MaxLoRARanks = Literal[1, 8, 16, 32, 64, 128, 256, 320, 512]

    vllm_max_lora_ranks = sorted(get_args(MaxLoRARanks))
    if lora_rank > vllm_max_lora_ranks[-1]:
        raise ValueError(f"lora_rank must be less than or equal to {vllm_max_lora_ranks[-1]}, but got {lora_rank}")
    # Round up to the smallest allowed rank that can hold lora_rank.
    for rank in vllm_max_lora_ranks:
        if lora_rank <= rank:
            return rank


# https://github.com/vllm-project/vllm/issues/13175
def monkey_patch_compute_logits(model, vocab_size: int):
    """Wrap model.compute_logits so logits beyond vocab_size can never be sampled."""
    original_compute_logits = model.compute_logits

    def compute_logits(
        self,
        *args,
        **kwargs,
    ) -> torch.Tensor:
        logits = original_compute_logits(*args, **kwargs)
        # Mask padded / out-of-vocabulary positions.
        logits[..., vocab_size:] = float("-inf")
        return logits

    model.compute_logits = MethodType(compute_logits, model)


# copy from https://github.com/vllm-project/vllm/blob/main/examples/offline_inference/rlhf_utils.py
def rebuild_ipc(handle: tuple[Callable, tuple], device_id: int | None = None) -> torch.Tensor:
    """Rebuild a CUDA-IPC shared tensor from a (rebuild_fn, args) handle."""
    func, args = handle
    list_args = list(args)
    if device_id is not None:
        # the key is to change device id to the current device id
        # in case two processes have different CUDA_VISIBLE_DEVICES
        list_args[6] = device_id
    buffer = func(*list_args)
    return buffer
def create_shared_memory(size: int, name: str):
    """Create shared memory for weight transfer. If already exists, attach to it."""
    try:
        shm = shared_memory.SharedMemory(name=name, create=True, size=size)
    except FileExistsError:
        # Another process created the segment first — attach instead of failing.
        shm = shared_memory.SharedMemory(name=name)
    return shm


def rebuild_shared_memory(name: str, size: int, dtype=torch.uint8):
    """Rebuild tensor from shared memory.

    Returns (tensor, shm); the caller must keep ``shm`` alive while the tensor
    is in use, since the tensor is a zero-copy view over the segment's buffer.
    """
    shm = shared_memory.SharedMemory(name=name)
    tensor = torch.frombuffer(shm.buf[:size], dtype=dtype)
    return tensor, shm


class TensorMetadata(TypedDict):
    # Per-tensor entry describing where a weight lives inside a transfer bucket.
    name: str
    shape: torch.Size
    dtype: torch.dtype
    offset: int


class vLLMColocateWorkerExtension:
    """
    The class for vLLM's worker to inherit from, in the colocate setting.
    By defining an extension class, the code can work no matter what is the
    underlying worker class. This way, the code can be compatible with both
    vLLM V0 and V1.
    NOTE: we define this class in a separate module, and the main module should
    pass the full qualified name as `worker_extension_cls` argument.

    Feature support:
    1. LoRA
    2. Online FP8 quantization
    """

    def __new__(cls, **kwargs):
        # Runs in the vLLM worker subprocess; die if the parent trainer dies.
        set_death_signal()
        # 1. patch for Lora
        VLLMHijack.hijack()
        # 2. patch online fp8 quant
        if os.environ.get("VERL_VLLM_FP8_QUANT_ENABLED", "0") == "1":
            apply_vllm_fp8_patches()
        # 3. patch QAT (compressed-tensors NVFP4) for dynamic weight loading
        vllm_config = kwargs.get("vllm_config")
        quant_config = getattr(vllm_config, "quant_config", None) if vllm_config else None
        _is_qat_model = getattr(quant_config, "quant_format", None) == "nvfp4-pack-quantized"
        if _is_qat_model:
            from verl.utils.qat import apply_qat_patches

            apply_qat_patches()
            logger.info("Applied QAT patches in vLLM worker subprocess")

        # TODO: For ascend NPU, when the corresponding vllm-ascend version is upgraded to v0.13.0,
        # please remove the VLLM_ASCEND_REQUIRED_ENV_VARS variable replacement action.
        # This is only a fix for vllm version < v0.13.0.
        if is_npu_available:
            for k in VLLM_ASCEND_REQUIRED_ENV_VARS:
                if k not in os.environ:
                    os.environ[k] = VLLM_ASCEND_REQUIRED_ENV_VARS[k]

        instance = super().__new__(cls)
        instance._is_qat_model = _is_qat_model
        return instance

    def monkey_patch_model(self, vocab_size: int):
        # patch compute_logits to avoid sampling OOV token
        monkey_patch_compute_logits(self.model_runner.model, vocab_size)
        # patch weight loader to support MoE model
        patch_vllm_moe_model_weight_loader(self.model_runner.model)

    def update_weights_from_ipc(self, peft_config: dict = None, base_sync_done: bool = False, use_shm: bool = False):
        """Update the weights of the rollout model.

        Receives weight buckets from the trainer over a ZMQ REP socket, either
        through CUDA IPC (default) or a shared-memory segment (``use_shm``).
        """
        from vllm.platforms import current_platform

        if current_platform.device_type == "npu" and self.device is None:
            self.device = torch.device(f"npu:{self.local_rank}")

        # In async mode, make sure the old lora is removed before adding the new one
        if peft_config and base_sync_done:
            self.remove_lora(VLLM_LORA_INT_ID)

        # build communication buffer
        assert self.device is not None
        if not hasattr(self, "_zmq_ctx") or self._zmq_ctx is None:
            self._zmq_ctx = zmq.Context()
        socket = self._zmq_ctx.socket(zmq.REP)
        socket.connect(self._get_zmq_handle())
        comm_metadata = socket.recv_pyobj()
        buffer, shm = None, None
        if not use_shm:
            # CUDA IPC: metadata IS the rebuild handle.
            handle = comm_metadata
            buffer = rebuild_ipc(handle, self.device.index)
            assert buffer.dtype == torch.uint8
        else:
            shm_name = comm_metadata["name"]
            shm_size = comm_metadata["size"]
            buffer, shm = rebuild_shared_memory(shm_name, shm_size, dtype=torch.uint8)
        # Ack buffer setup (REP socket must reply before the next recv).
        socket.send(b"")

        use_standard_weight_load = not (peft_config and base_sync_done) and not is_fp8_model(
            self.model_runner.vllm_config
        )
        if self._is_qat_model:
            # QAT: Prepare for weight loading BEFORE receiving any buckets
            from verl.utils.qat import prepare_qat_for_load_weights

            prepare_qat_for_load_weights(self.model_runner.model, device=self.device)
            logger.info("QAT: prepare_qat_for_load_weights completed")
        elif use_standard_weight_load:
            # Re-apply here because async IPC weight sync can happen long after init and lose MoE weight_loader attrs.
            patch_vllm_moe_model_weight_loader(self.model_runner.model)

        # receive bucket and update weights
        while True:
            metadata = socket.recv_pyobj()
            weights, tensor = [], None
            for name, meta in metadata["bucket_meta"].items():
                shape, dtype, offset = meta["shape"], meta["dtype"], meta["offset"]
                size = dtype.itemsize * shape.numel()
                # NOTE: we need to clone the tensor to release CUDA IPC memory
                # but for shared memory, it's not necessary and if we do clone,
                # it will cause extra memory copy overhead and slow down the process.
                tensor = buffer[offset : offset + size].view(dtype=dtype).view(shape)
                if not use_shm:
                    tensor = tensor.clone()
                else:
                    tensor = tensor.to(self.device)
                weights.append((name, tensor))
            get_torch_device().synchronize()
            # Ack this bucket so the sender can start refilling the buffer.
            socket.send(b"")
            self._update_weights(weights, peft_config=peft_config, base_sync_done=base_sync_done)
            del weights, tensor
            if metadata["is_last"]:
                break

        if self._is_qat_model:
            # QAT: call process_weights_after_loading AFTER all buckets are received
            from verl.utils.qat import manual_process_weights_after_loading

            manual_process_weights_after_loading(self.model_runner.model)
            logger.info("QAT: process_weights_after_loading completed")
        elif use_standard_weight_load:
            # Some post-load transforms are non-idempotent; run once after all buckets.
            from vllm.model_executor.model_loader.utils import process_weights_after_loading

            model = self.model_runner.model
            model_config = self.model_runner.vllm_config.model_config
            process_weights_after_loading(model, model_config, self.device)

        # clean up
        socket.close()
        del buffer
        if shm is not None:
            shm.close()
            del shm
        get_torch_device().synchronize()
        gc.collect()
        get_torch_device().ipc_collect()
        get_torch_device().empty_cache()

    def _update_weights(self, weights: list[tuple[str, torch.Tensor]], peft_config: dict, base_sync_done: bool):
        """Load one bucket of weights, as a LoRA adapter, FP8 weights, or plain weights."""
        if peft_config and base_sync_done:
            # LoRA path: package the tensors as an adapter and (re)register it.
            weights = dict(weights)
            lora_request = TensorLoRARequest(
                lora_name=VLLM_LORA_NAME,
                lora_int_id=VLLM_LORA_INT_ID,
                lora_path=VLLM_LORA_PATH,
                peft_config=peft_config,
                lora_tensors=weights,
            )
            self.add_lora(lora_request)
            logger.info(f"vLLM load weights, loaded_params: {len(weights)}")
        else:
            # Add the FP8 related logic here as sharding manager has been deprecated.
            # Check if FP8 quantization is enabled and apply appropriate weight loading
            if is_fp8_model(self.model_runner.vllm_config):
                logger.info(f"FP8 model detected (async): {self.model_runner.vllm_config.quant_config}")
                # Convert bf16 weights to fp8 format before loading
                loaded_params = load_quanted_weights(weights, self.model_runner)
                logger.info(f"FP8 weights loaded (async), loaded_params: {len(loaded_params)}")
            else:
                logger.info("Loading standard weights (non-FP8, async)")
                self.model_runner.model.load_weights(weights)

    def _get_zmq_handle(self) -> str:
        """Get ZMQ handle for communication."""
        # One IPC endpoint per physical device, keyed by its UUID (cached).
        if not hasattr(self, "device_uuid") or not self.device_uuid:
            self.device_uuid = get_device_uuid(self.device.index)
        return f"ipc:///tmp/rl-colocate-zmq-{self.device_uuid}.sock"


class SuppressSignalInThread:
    """Context manager that makes signal.signal a no-op outside the main thread.

    CPython only allows signal handler registration from the main thread; this
    lets code that registers handlers (e.g. a server) run in a worker thread.
    """

    def __enter__(self):
        self.original_signal = signal.signal

        def no_op_signal(sig, action):
            if threading.current_thread() is not threading.main_thread():
                print(f"Ignored signal {sig} in thread {threading.current_thread().name}")
                return
            # Main thread keeps normal behavior.
            return self.original_signal(sig, action)

        signal.signal = no_op_signal
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the real signal.signal regardless of exceptions.
        signal.signal = self.original_signal


def build_cli_args_from_config(config: dict[str, Any]) -> list[str]:
    """
    Convert a config dictionary to CLI arguments for vLLM server.

    Handles different value types appropriately:
    - None: skipped
    - bool True: adds '--key'
    - bool False: skipped
    - list: expands to '--key item1 item2 ...'
    - empty list: skipped (vLLM uses nargs="+" which requires at least one value)
    - dict: JSON serialized
    - other: string converted

    Args:
        config: Dictionary of configuration key-value pairs

    Returns:
        List of CLI argument strings
    """
    cli_args = []
    for k, v in config.items():
        if v is None:
            continue
        if isinstance(v, bool):
            # Boolean flags: present when True, omitted when False.
            if v:
                cli_args.append(f"--{k}")
        elif isinstance(v, list):
            if not v:
                # Skip empty lists - vLLM uses nargs="+" which requires at least one value
                continue
            # Lists need to be expanded as multiple separate arguments
            # e.g., --cuda-graph-sizes 1 2 4 8 becomes ['--cuda-graph-sizes', '1', '2', '4', '8']
            cli_args.append(f"--{k}")
            cli_args.extend([str(item) for item in v])
        else:
            cli_args.append(f"--{k}")
            # Use json.dumps for dict to ensure valid JSON format
            cli_args.append(json.dumps(v) if isinstance(v, dict) else str(v))
    return cli_args
verl__workers__rollout__vllm_rollout__utils.py
# Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import asyncio import inspect import json import logging import os from pprint import pprint from typing import Any, Callable, Optional import numpy as np import ray import vllm.entrypoints.cli.serve from packaging import version from ray.actor import ActorHandle from vllm import SamplingParams from vllm.engine.arg_utils import AsyncEngineArgs from vllm.entrypoints.cli.serve import run_headless from vllm.entrypoints.openai.api_server import build_app, init_app_state from vllm.inputs import TokensPrompt from vllm.lora.request import LoRARequest from vllm.outputs import RequestOutput from vllm.usage.usage_lib import UsageContext from vllm.v1.engine.async_llm import AsyncLLM from verl.single_controller.ray import RayClassWithInitArgs from verl.utils.config import omega_conf_to_dataclass from verl.utils.device import get_resource_name, get_visible_devices_keyword from verl.utils.net_utils import get_free_port, is_valid_ipv6_address from verl.utils.profiler import DistProfiler, build_vllm_profiler_args from verl.utils.vllm.vllm_fp8_utils import apply_vllm_fp8_patches from verl.workers.config import HFModelConfig, RolloutConfig from verl.workers.rollout.replica import RolloutMode, RolloutReplica, TokenOutput from verl.workers.rollout.utils import get_max_position_embeddings, run_unvicorn from verl.workers.rollout.vllm_rollout import ServerAdapter from 
verl.workers.rollout.vllm_rollout.utils import ( VLLM_LORA_INT_ID, VLLM_LORA_NAME, VLLM_LORA_PATH, SuppressSignalInThread, build_cli_args_from_config, get_vllm_max_lora_rank, ) _VLLM_VERSION = version.parse(vllm.__version__) if _VLLM_VERSION > version.parse("0.11.0"): from vllm.utils.argparse_utils import FlexibleArgumentParser if _VLLM_VERSION == version.parse("0.12.0"): from vllm.entrypoints.harmony_utils import get_encoding elif _VLLM_VERSION >= version.parse("0.13.0"): from vllm.entrypoints.openai.parser.harmony_utils import get_encoding else: get_encoding = None if get_encoding is not None and os.getenv("VERL_USE_GPT_OSS", "0") == "1": get_encoding() else: from vllm.utils import FlexibleArgumentParser logger = logging.getLogger(__file__) logger.setLevel(logging.INFO) class vLLMHttpServer: """vLLM http server in single node, this is equivalent to launch server with command line: ``` vllm serve --tensor-parallel-size=8 ... ``` """ def __init__( self, config: RolloutConfig, model_config: HFModelConfig, rollout_mode: RolloutMode, workers: list[ActorHandle], replica_rank: int, node_rank: int, gpus_per_node: int, nnodes: int, cuda_visible_devices: str, ): """ Args: config (RolloutConfig): full config. model_config (HFModelConfig): model config. rollout_mode (RolloutMode): rollout mode. replica_rank (int): replica rank, a replica may contain multiple nodes. node_rank (int): node rank. gpus_per_node (int): number of gpus per node. nnodes (int): number of nodes. cuda_visible_devices (str): cuda visible devices. 
""" os.environ[get_visible_devices_keyword()] = cuda_visible_devices self.config: RolloutConfig = omega_conf_to_dataclass(config) self.model_config: HFModelConfig = omega_conf_to_dataclass(model_config, dataclass_type=HFModelConfig) max_position_embeddings = get_max_position_embeddings(self.model_config.hf_config) if self.config.max_model_len is None: self.config.max_model_len = max_position_embeddings else: if self.config.max_model_len > max_position_embeddings: raise ValueError( f"max_model_len ({self.config.max_model_len}) should be less than or equal to " f"max_position_embeddings ({max_position_embeddings})" ) self.rollout_mode = rollout_mode self.workers = workers self.replica_rank = replica_rank self.node_rank = node_rank self.gpus_per_node = gpus_per_node self.nnodes = nnodes if self.rollout_mode != RolloutMode.HYBRID and self.config.load_format == "dummy": logger.warning(f"rollout mode is {self.rollout_mode}, load_format is dummy, set to auto") self.config.load_format = "auto" # used for http server self._server_address = ray.util.get_node_ip_address().strip("[]") self._server_port = None # used for controlling vllm server profiler profiler_config = self.config.profiler tool_config = None if profiler_config is not None: if profiler_config.tool in ["torch", "npu"]: tool_config = omega_conf_to_dataclass((profiler_config.tool_config or {}).get(profiler_config.tool)) else: logger.warning(f"agent loop only support torch and npu profiler, got {profiler_config.tool}") profiler_config = None self.profiler_controller = DistProfiler(self.replica_rank, config=profiler_config, tool_config=tool_config) # used for data parallel: --data-parallel-address, --data-parallel-rpc-port if self.node_rank == 0: self._master_address = self._server_address # used for torch.distributed.init_process_group self._master_port, self._master_sock = get_free_port(self._server_address) # used for data parallel: --data-parallel-address, --data-parallel-rpc-port self._dp_rpc_port, 
self._dp_rpc_sock = get_free_port(self._server_address) self._dp_master_port, self._dp_master_sock = get_free_port(self._server_address) else: self._master_address = None self._master_port = None self._dp_rpc_port = None self._dp_master_port = None logger.info( f"vLLMHttpServer, replica_rank: {self.replica_rank}, node_rank: {self.node_rank}, " f"{get_visible_devices_keyword()}: {cuda_visible_devices}, " f"master_address: {self._master_address}, master_port: {self._master_port}, " f"data_parallel_rpc_port: {self._dp_rpc_port}, data_parallel_master_port: {self._dp_master_port}" ) def get_master_address(self): """Get master address and port for data parallel. Returns: tuple: (master_address, master_port, dp_rpc_port) """ return self._master_address, self._master_port, self._dp_rpc_port def get_server_address(self): """Get http server address and port.""" assert self._server_port is not None, "http server is not launched, port is None" return self._server_address, self._server_port async def collective_rpc( self, method: str | Callable, timeout: float | None = None, args: tuple = (), kwargs: dict[str, Any] | None = None, ): await self.engine.collective_rpc( method=method, timeout=timeout, args=args, kwargs=kwargs, ) async def launch_server(self, master_address: str = None, master_port: int = None, dp_rpc_port: int = None): if self.node_rank != 0: assert master_address and master_port and dp_rpc_port, ( "non-master node should provide master_address, master_port and dp_rpc_port" ) self._master_address = master_address self._master_port = master_port self._dp_rpc_port = dp_rpc_port # 1. 
setup vllm serve cli args engine_kwargs = self.config.get("engine_kwargs", {}).get("vllm", {}) or {} engine_kwargs = {key: val for key, val in engine_kwargs.items() if val is not None} if self.config.get("limit_images", None): # support for multi-image data engine_kwargs["limit_mm_per_prompt"] = {"image": self.config.get("limit_images")} if self.config.cudagraph_capture_sizes: engine_kwargs["cuda_graph_sizes"] = self.config.cudagraph_capture_sizes # Override default generation config from hugging face model config, # user can still override them by passing kwargs in each request. override_generation_config = dict( temperature=self.config.temperature, top_k=self.config.top_k, top_p=self.config.top_p, repetition_penalty=1.0, max_new_tokens=self.config.response_length, ) logger.info(f"override_generation_config: {override_generation_config}") logger.info(f"enable_sleep_mode: {self.config.enable_sleep_mode}") if not self.config.enable_sleep_mode: from verl.utils.device import set_expandable_segments set_expandable_segments(True) quantization = self.config.quantization hf_overrides = {} # Handle QAT (Quantization-Aware Training) configuration qat_config_dict = getattr(self.config, "qat", {}) or {} if qat_config_dict.get("enable", False): # QAT uses compressed-tensors quantization, apply patches for dynamic weight loading from verl.utils.qat import QATConfig, apply_qat_patches, load_quantization_config apply_qat_patches() # Load quantization config from JSON file qat_config = QATConfig(**qat_config_dict) quantization_config_dict = load_quantization_config(qat_config) hf_overrides["quantization_config"] = quantization_config_dict quantization = "compressed-tensors" logger.info("QAT quantization config injected to vLLM async server") elif quantization is not None: # Handle other quantization methods (fp8, torchao) _SUPPORTED_QUANTIZATION = ["fp8", "torchao"] if quantization not in _SUPPORTED_QUANTIZATION: raise ValueError(f"Currently only support {_SUPPORTED_QUANTIZATION} 
quantization, got: {quantization}") if quantization == "fp8": # Ignore MoE router layers for FP8 quantization all_mlp_gate_layers = [] for layer in range(self.model_config.hf_config.num_hidden_layers): all_mlp_gate_layers.append(f"model.layers.{layer}.mlp.gate") FP8_BLOCK_QUANT_KWARGS = { "activation_scheme": "dynamic", "fmt": "e4m3", "quant_method": "fp8", "weight_block_size": [128, 128], "ignored_layers": all_mlp_gate_layers, } hf_overrides["quantization_config"] = dict(FP8_BLOCK_QUANT_KWARGS) # Apply vllm fp8 patches # Will remove the patch after vllm support on-the-fly quant for rollout natively. apply_vllm_fp8_patches() # for subprocesses patching os.environ["VERL_VLLM_FP8_QUANT_ENABLED"] = "1" if quantization is not None and self.config.quantization_config_file is not None: hf_overrides["quantization_config_file"] = self.config.quantization_config_file compilation_config = engine_kwargs.pop("compilation_config", None) or {} if isinstance(compilation_config, str): compilation_config = json.loads(compilation_config) compilation_config.setdefault("cudagraph_mode", "FULL_AND_PIECEWISE") # FULL cuda graph is not yet supported with DCP, downgrade to PIECEWISE dcp_size = engine_kwargs.get("decode_context_parallel_size", 1) or 1 if dcp_size > 1 and compilation_config["cudagraph_mode"] == "FULL_AND_PIECEWISE": logger.warning( "FULL cuda graph is not supported with DCP (decode_context_parallel_size=%d), " "downgrading cudagraph_mode to PIECEWISE.", dcp_size, ) compilation_config["cudagraph_mode"] = "PIECEWISE" compilation_config = json.dumps(compilation_config) args = { "dtype": self.config.dtype, "load_format": self.config.load_format, "skip_tokenizer_init": False, "distributed_executor_backend": "mp", "worker_extension_cls": "verl.workers.rollout.vllm_rollout.utils.vLLMColocateWorkerExtension", "trust_remote_code": self.model_config.trust_remote_code, "max_model_len": self.config.max_model_len, "max_num_seqs": self.config.max_num_seqs, "enable_chunked_prefill": 
self.config.enable_chunked_prefill, "max_num_batched_tokens": self.config.max_num_batched_tokens, "enable_prefix_caching": self.config.enable_prefix_caching, "enable_sleep_mode": self.config.enable_sleep_mode, "logprobs_mode": self.config.logprobs_mode, "enforce_eager": self.config.enforce_eager, "gpu_memory_utilization": self.config.gpu_memory_utilization, "disable_log_stats": self.config.disable_log_stats, "tensor_parallel_size": self.config.tensor_model_parallel_size, "seed": self.replica_rank + self.config.get("seed", 0), "override_generation_config": json.dumps(override_generation_config), "quantization": quantization, "hf_overrides": hf_overrides, "scheduling_policy": self.config.scheduling_policy, "compilation_config": compilation_config, **engine_kwargs, } # update profiler args profiler_args = build_vllm_profiler_args( self.profiler_controller.config, self.profiler_controller.tool_config, self.replica_rank ) if _VLLM_VERSION >= version.parse("0.13.0"): # vLLM >= 0.13.0 supports profiler config via CLI args; env vars still work but will be deprecated args.update(profiler_args) if self.config.prometheus.enable: if self.config.prometheus.served_model_name: # Extract model name from path if it's a full path served_model_name = self.config.prometheus.served_model_name if "/" in served_model_name: # If it's a full path, extract the last part as model name served_model_name = served_model_name.split("/")[-1] args["served_model_name"] = served_model_name # mtp if self.config.mtp.enable and self.config.mtp.enable_rollout: speculative_config = { "method": self.config.mtp.method, "num_speculative_tokens": self.config.mtp.num_speculative_tokens, } args["speculative_config"] = speculative_config if self.config.expert_parallel_size > 1: assert self.gpus_per_node % self.config.tensor_model_parallel_size == 0, ( "gpus_per_node should be divisible by tensor_model_parallel_size" ) data_parallel_size_local = self.gpus_per_node // self.config.tensor_model_parallel_size assert 
len(self.workers) == data_parallel_size_local * self.config.tensor_model_parallel_size, ( f"num workers ({len(self.workers)}) should be equal to dp_size_local " ) f"({data_parallel_size_local}) * tp_size ({self.config.tensor_model_parallel_size})" args.update( { "enable_expert_parallel": self.config.expert_parallel_size > 1, "data_parallel_size": self.config.data_parallel_size, "data_parallel_size_local": data_parallel_size_local, "data_parallel_start_rank": self.node_rank * data_parallel_size_local, "data_parallel_address": self._master_address, "data_parallel_rpc_port": self._dp_rpc_port, } ) # used for torch.distributed.init_process_group if self.nnodes > 1: args.update( { "master_addr": self._master_address, "master_port": self._master_port, "node_rank": self.node_rank, "nnodes": self.nnodes, "data_parallel_address": self._master_address, "data_parallel_rpc_port": self._dp_rpc_port, } ) # update lora-related args lora_rank = self.model_config.lora.get("rank", 0) if lora_rank <= 0: lora_rank = ( self.model_config.lora_rank ) # FIXME: fallback to lora_rank for now, we should unify lora settings. 
if self.model_config.lora.get("merge", False): lora_rank = 0 if lora_rank > 0: lora_args = { "enable_lora": True, "max_loras": 1, "max_lora_rank": get_vllm_max_lora_rank(lora_rank), } if self.model_config.lora.get("fully_sharded_loras", False): lora_args["fully_sharded_loras"] = True args.update(lora_args) if self.config.enable_rollout_routing_replay: args.update({"enable_return_routed_experts": True}) server_args = ["serve", self.model_config.local_path] + build_cli_args_from_config(args) if self.replica_rank == 0: pprint(server_args) CMD_MODULES = [vllm.entrypoints.cli.serve] parser = FlexibleArgumentParser(description="vLLM CLI") subparsers = parser.add_subparsers(required=False, dest="subparser") cmds = {} for cmd_module in CMD_MODULES: new_cmds = cmd_module.cmd_init() for cmd in new_cmds: cmd.subparser_init(subparsers).set_defaults(dispatch_function=cmd.cmd) cmds[cmd.name] = cmd server_args = parser.parse_args(args=server_args) server_args.model = server_args.model_tag if server_args.subparser in cmds: cmds[server_args.subparser].validate(server_args) # 3. 
launch server if self.node_rank == 0: self._master_sock.close() await self.run_server(server_args) else: # TODO: avoid connect before master_sock close await asyncio.sleep(3) await self.run_headless(server_args) async def run_server(self, args: argparse.Namespace): engine_args = AsyncEngineArgs.from_cli_args(args) usage_context = UsageContext.OPENAI_API_SERVER vllm_config = engine_args.create_engine_config(usage_context=usage_context) vllm_config.parallel_config.data_parallel_master_port = self._dp_master_port fn_args = set(dict(inspect.signature(AsyncLLM.from_vllm_config).parameters).keys()) kwargs = {} if "enable_log_requests" in fn_args: kwargs["enable_log_requests"] = engine_args.enable_log_requests if "disable_log_stats" in fn_args: kwargs["disable_log_stats"] = engine_args.disable_log_stats engine_client = AsyncLLM.from_vllm_config(vllm_config=vllm_config, usage_context=usage_context, **kwargs) # Don't keep the dummy data in memory await engine_client.reset_mm_cache() await engine_client.collective_rpc( method="monkey_patch_model", kwargs={"vocab_size": len(self.model_config.tokenizer)} ) build_app_sig = inspect.signature(build_app) supported_tasks: tuple[Any, ...] 
= () if "supported_tasks" in build_app_sig.parameters: supported_tasks = await engine_client.get_supported_tasks() app = build_app(args, supported_tasks) else: app = build_app(args) init_app_sig = inspect.signature(init_app_state) if "vllm_config" in init_app_sig.parameters: await init_app_state(engine_client, vllm_config, app.state, args) elif "supported_tasks" in init_app_sig.parameters: await init_app_state(engine_client, app.state, args, supported_tasks) else: await init_app_state(engine_client, app.state, args) if self.replica_rank == 0 and self.node_rank == 0: logger.info(f"Initializing a V1 LLM engine with config: {vllm_config}") self.engine = engine_client self._server_port, self._server_task = await run_unvicorn(app, args, self._server_address) async def run_headless(self, args: argparse.Namespace): """Run headless server in a separate thread.""" def run_headless_wrapper(): with SuppressSignalInThread(): run_headless(args) def on_run_headless_done(future: asyncio.Future): try: exc = future.exception() if exc: logger.exception(f"run_headless failed with exception: {exc}") else: logger.warning("run_headless completed successfully, but it's not expected.") except Exception as e: logger.exception(f"get result from run_headless failed: {e}") finally: os._exit(1) self.task = asyncio.create_task(asyncio.to_thread(run_headless_wrapper)) self.task.add_done_callback(on_run_headless_done) async def generate( self, prompt_ids: list[int], sampling_params: dict[str, Any], request_id: str, image_data: Optional[list[Any]] = None, video_data: Optional[list[Any]] = None, priority: int = 0, ) -> TokenOutput: """Generate sequence with token-in-token-out.""" # Calculate the maximum possible new tokens based on available context space # This serves as a safety upper bound max_possible_tokens = self.config.max_model_len - len(prompt_ids) if max_possible_tokens < 0: raise ValueError( f"Prompt length ({len(prompt_ids)}) exceeds the model's maximum context length " 
f"({self.config.max_model_len})." ) # Determine max_tokens from sampling_params or use configured response_length as default if "max_tokens" in sampling_params: max_tokens = sampling_params.pop("max_tokens") elif "max_new_tokens" in sampling_params: # support sglang-style 'max_new_tokens' param max_tokens = sampling_params.pop("max_new_tokens") else: # Default to a calculation that considers configured lengths max_tokens = self.config.response_length + self.config.prompt_length - len(prompt_ids) # Clamp max_tokens to the valid range [0, max_possible_tokens] max_tokens = max(0, min(max_tokens, max_possible_tokens)) assert max_tokens <= max_possible_tokens, ( f"max_tokens {max_tokens} exceeds available context space {max_possible_tokens}" ) sampling_params["logprobs"] = 0 if sampling_params.pop("logprobs", False) else None sampling_params.setdefault("repetition_penalty", self.config.get("repetition_penalty", 1.0)) sampling_params = SamplingParams(max_tokens=max_tokens, **sampling_params) prompt_ids = _qwen2_5_vl_dedup_image_tokens(prompt_ids, self.model_config.processor) multi_modal_data = {} if image_data is not None: multi_modal_data["image"] = image_data if video_data is not None: multi_modal_data["video"] = video_data prompt = TokensPrompt(prompt_token_ids=prompt_ids, multi_modal_data=multi_modal_data) # Add lora request lora_request = None if ( self.model_config.lora_rank > 0 or self.model_config.lora.get("rank", 0) > 0 ) and not self.model_config.lora.get("merge", False): # Make sure we also check that the lora is already loaded in the engine lora_loaded = VLLM_LORA_INT_ID in await self.engine.list_loras() if lora_loaded: lora_request = LoRARequest( lora_name=VLLM_LORA_NAME, lora_int_id=VLLM_LORA_INT_ID, lora_path=VLLM_LORA_PATH ) generator = self.engine.generate( prompt=prompt, sampling_params=sampling_params, request_id=request_id, lora_request=lora_request, priority=priority, ) # Get final response final_res: Optional[RequestOutput] = None async for output 
in generator: final_res = output assert final_res is not None token_ids = final_res.outputs[0].token_ids log_probs = None if sampling_params.logprobs is not None: log_probs = [logprobs[token_ids[i]].logprob for i, logprobs in enumerate(final_res.outputs[0].logprobs)] routed_experts = None if self.config.enable_rollout_routing_replay: routed_experts = final_res.outputs[0].routed_experts # Determine stop reason from finish_reason finish_reason = final_res.outputs[0].finish_reason if finish_reason == "abort": stop_reason = "aborted" elif finish_reason in ("stop", "length"): stop_reason = "completed" else: stop_reason = finish_reason # for more stop reason in the future num_preempted = None if hasattr(final_res.outputs[0], "num_preempted"): num_preempted = final_res.outputs[0].num_preempted return TokenOutput( token_ids=token_ids, log_probs=log_probs, routed_experts=routed_experts, stop_reason=stop_reason, num_preempted=num_preempted, ) async def wake_up(self): if self.node_rank != 0: return if self.rollout_mode == RolloutMode.HYBRID: # In hybrid mode, rollout is wake up in `update_weights` raise ValueError(f"wake_up not support rollout_mode {self.rollout_mode}") elif self.rollout_mode == RolloutMode.COLOCATED: # Directly call engine to wake up without sync weights. 
await self.engine.wake_up(tags=["kv_cache", "weights"]) await self.engine.reset_prefix_cache() elif self.rollout_mode == RolloutMode.STANDALONE: logger.info("skip wake_up in standalone mode") async def sleep(self): if self.node_rank != 0 or not self.config.free_cache_engine: return if self.rollout_mode == RolloutMode.HYBRID: # Don't use engine.sleep(level=2) here await self.engine.collective_rpc("sleep", kwargs={"level": 2}) # clear encoder cache: https://github.com/vllm-project/vllm/pull/33452 # await self.engine.reset_encoder_cache() elif self.rollout_mode == RolloutMode.COLOCATED: await self.engine.sleep(level=1) elif self.rollout_mode == RolloutMode.STANDALONE: logger.info("skip sleep in standalone mode") async def start_profile(self, **kwargs): if ( self.profiler_controller.check_enable() and self.profiler_controller.check_this_rank() and self.profiler_controller.is_discrete_mode() ): await self.engine.start_profile(**kwargs) async def stop_profile(self): if ( self.profiler_controller.check_enable() and self.profiler_controller.check_this_rank() and self.profiler_controller.is_discrete_mode() ): await self.engine.stop_profile() async def clear_kv_cache(self): if self.node_rank == 0: await self.engine.reset_prefix_cache() async def wait_for_requests_to_drain(self): await self.engine.wait_for_requests_to_drain() async def abort_all_requests(self, reset_prefix_cache: bool = True) -> dict[str, Any]: """Abort all ongoing generation requests. On vLLM >= 0.12.0, uses AsyncLLM.pause_generation() to abort in-flight requests, drain, and clear caches. The engine remains paused after this call — use resume_generation() to accept new requests (e.g. before validation). On vLLM < 0.12.0, manually aborts each request and resets prefix cache. 
Returns: dict[str, Any]: Dictionary containing: - aborted_count: Number of requests aborted - request_ids: List of aborted request IDs """ try: if _VLLM_VERSION >= version.parse("0.12.0"): # Snapshot request IDs before pausing for reporting request_ids = list(self.engine.output_processor.request_states.keys()) # pause_generation with wait_for_inflight_requests=False will: # 1. Set engine to paused state (blocks new generate calls) # 2. Abort all in-flight requests # 3. Wait for requests to drain # 4. Clear prefix and mm caches if clear_cache=True await self.engine.pause_generation( wait_for_inflight_requests=False, clear_cache=reset_prefix_cache, ) else: # Take an atomic snapshot to avoid race conditions with the vLLM engine thread request_states_snapshot = list(self.engine.output_processor.request_states.items()) request_ids = [req_id for req_id, _ in request_states_snapshot] if not request_ids: return {"aborted_count": 0, "request_ids": []} # For each request, create an abort output and put it to its queue # This allows the generator to receive the aborted result from vllm.v1.engine import FinishReason for _, req_state in request_states_snapshot: request_output = req_state.make_request_output( [], pooling_output=None, finish_reason=FinishReason.ABORT, stop_reason=None ) req_state.queue.put(request_output) # Abort requests in the output processor and engine core self.engine.output_processor.abort_requests(request_ids) await self.engine.engine_core.abort_requests_async(request_ids) # Try to reset prefix cache to ensure clean state if reset_prefix_cache: await self.clear_kv_cache() logger.info("Prefix cache reset after abort") logger.info(f"Aborted {len(request_ids)} requests: {request_ids}") return {"aborted_count": len(request_ids), "request_ids": request_ids} except Exception as e: logger.error(f"Error aborting requests: {e}") return {"aborted_count": 0, "request_ids": [], "error": str(e)} async def resume_generation(self): """Resume generation after 
abort_all_requests (pause_generation). Only effective on vLLM >= 0.12.0 where pause_generation is used. No-op on older versions. """ if self.node_rank != 0: return if _VLLM_VERSION >= version.parse("0.12.0"): await self.engine.resume_generation() async def abort_request(self, request_id: str, reset_prefix_cache: bool = True) -> dict[str, Any]: """Abort a specific generation request. Args: request_id: The ID of the request to abort. Returns: dict[str, Any]: Dictionary containing abort result. """ try: request_states = self.engine.output_processor.request_states req_state = request_states.get(request_id) if req_state is None: return {"aborted": False, "error": f"Request {request_id} not found"} # Create abort output and put it to the queue from vllm.v1.engine import FinishReason request_output = req_state.make_request_output( [], pooling_output=None, finish_reason=FinishReason.ABORT, stop_reason=None ) req_state.queue.put(request_output) # Abort in output processor and engine core self.engine.output_processor.abort_requests([request_id]) await self.engine.engine_core.abort_requests_async([request_id]) # Try to reset prefix cache to ensure clean state if reset_prefix_cache: await self.clear_kv_cache() logger.info(f"Prefix cache reset after abort request {request_id}") logger.info(f"Aborted request: {request_id}") return {"aborted": True, "request_id": request_id} except Exception as e: logger.error(f"Error aborting request {request_id}: {e}") return {"aborted": False, "request_id": request_id, "error": str(e)} _rollout_worker_actor_cls = ray.remote(ServerAdapter) class vLLMReplica(RolloutReplica): def __init__( self, replica_rank: int, config: RolloutConfig, model_config: HFModelConfig, gpus_per_node: int = 8, is_reward_model: bool = False, ): super().__init__(replica_rank, config, model_config, gpus_per_node, is_reward_model) self.server_class = ray.remote(vLLMHttpServer) def get_ray_class_with_init_args(self) -> RayClassWithInitArgs: """Get rollout worker actor 
class for colocated and standalone mode.""" worker_dict_cls = RayClassWithInitArgs( cls=_rollout_worker_actor_cls, config=self.config, model_config=self.model_config, device_mesh=None, ) return worker_dict_cls async def launch_servers(self): """Launch http server in each node.""" assert len(self.workers) == self.world_size, ( f"worker number {len(self.workers)} not equal to world size {self.world_size}" ) # NOTE: We always use MP Executor backend whether it's single-node or multi-node. # For multi-node without DP (e.g TP=16), need vllm>=0.11.1, https://github.com/vllm-project/vllm/pull/23691 if self.config.data_parallel_size == 1 and self.nnodes > 1: assert _VLLM_VERSION >= version.parse("0.11.1"), ( "For multi-node MP Executor, either (1) set data_parallel_size > 1 or (2) upgrade vLLM to >= 0.11.1" ) # get (node_id, CUDA_VISIBLE_DEVICES) of all workers worker_infos = await asyncio.gather( *[ worker.__ray_call__.remote( lambda self: ( ray.get_runtime_context().get_node_id(), ray.get_runtime_context().get_accelerator_ids()[get_resource_name()][0], ) ) for worker in self.workers ] ) worker_cuda_visible_devices = [worker_info[1] for worker_info in worker_infos] worker_node_ids = [worker_info[0] for worker_info in worker_infos] # create server actor in each node with node affinity and cuda visible devices nnodes, gpus_per_replica_node = self.nnodes, self.gpus_per_replica_node for node_rank in range(nnodes): workers = self.workers[node_rank * gpus_per_replica_node : (node_rank + 1) * gpus_per_replica_node] node_cuda_visible_devices = ",".join( worker_cuda_visible_devices[node_rank * gpus_per_replica_node : (node_rank + 1) * gpus_per_replica_node] ) node_id = worker_node_ids[node_rank * gpus_per_replica_node] name = ( f"vllm_server_{self.replica_rank}_{node_rank}" if not self.is_reward_model else f"vllm_server_reward_{self.replica_rank}_{node_rank}" ) server = self.server_class.options( scheduling_strategy=ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy( 
node_id=node_id, soft=False, ), runtime_env={"env_vars": {"RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES": "1"}}, name=name, ).remote( config=self.config, model_config=self.model_config, rollout_mode=self.rollout_mode, workers=workers, replica_rank=self.replica_rank, node_rank=node_rank, gpus_per_node=gpus_per_replica_node, nnodes=nnodes, cuda_visible_devices=node_cuda_visible_devices, ) self.servers.append(server) # launch http server in each node master_address, master_port, dp_rpc_port = await self.servers[0].get_master_address.remote() await asyncio.gather( *[ server.launch_server.remote( master_address=master_address, master_port=master_port, dp_rpc_port=dp_rpc_port ) for server in self.servers ] ) # get http server address from first server server_address, server_port = await self.servers[0].get_server_address.remote() self._server_handle = self.servers[0] self._server_address = ( f"[{server_address}]:{server_port}" if is_valid_ipv6_address(server_address) else f"{server_address}:{server_port}" ) async def sleep(self): """Sleep each rollout server.""" # Drain DP engines for safe sleep. await self.servers[0].wait_for_requests_to_drain.remote() await asyncio.gather(*[server.sleep.remote() for server in self.servers]) async def abort_all_requests(self) -> dict[str, Any]: """Abort all ongoing generation requests across all servers. Returns: dict[str, Any]: Combined abort results from all servers. 
""" results = await asyncio.gather(*[server.abort_all_requests.remote() for server in self.servers]) total_aborted = sum(r.get("aborted_count", 0) for r in results) all_request_ids = [] for r in results: all_request_ids.extend(r.get("request_ids", [])) return { "aborted_count": total_aborted, "request_ids": all_request_ids, "server_results": results, } async def resume_generation(self): """Resume generation on all servers after abort_all_requests.""" await asyncio.gather(*[server.resume_generation.remote() for server in self.servers]) # TODO(petersh6): refact the checkpoint engine's update_weights and rename this method async def resume_all_requests(self): """Resume all requests on all servers.""" await asyncio.gather(*[server.resume_generation.remote() for server in self.servers]) async def abort_request(self, request_id: str) -> dict[str, Any]: """Abort a specific request. Tries all servers since we don't know which one has it. Args: request_id: The ID of the request to abort. Returns: dict[str, Any]: Abort result. """ # TODO(petersh6): we should only abort on the server that has the request. results = await asyncio.gather(*[server.abort_request.remote(request_id) for server in self.servers]) for r in results: if r.get("aborted", False): return r return {"aborted": False, "request_id": request_id, "error": "Request not found on any server"} def _qwen2_5_vl_dedup_image_tokens(prompt_ids: list[int], processor): """Deduplicate consecutive image tokens in prompt_ids for Qwen2.5-VL, since vLLM will replicate the <|image_pad|> and <|video_pad|> token by image_data. 
For example, ``` <|vision_start|><|image_pad|><|image_pad|>...<|image_pad|><|vision_end|> => <|vision_start|><|image_pad|><|vision_end|> ``` """ if processor is not None and "Qwen2VLImageProcessor" in processor.image_processor.__class__.__name__: prompt_ids = np.array(prompt_ids) # Create a mask where True indicates elements to keep mask = np.ones(len(prompt_ids), dtype=bool) # Find where the array equals the value is_value = (prompt_ids == processor.image_token_id) | (prompt_ids == processor.video_token_id) # Find consecutive duplicates by checking if previous element is also the value mask[1:] &= ~(is_value[1:] & is_value[:-1]) return prompt_ids[mask].tolist() else: return prompt_ids
verl__workers__rollout__vllm_rollout__vllm_async_server.py
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The vllm_rollout that can be applied in different backend
When working with FSDP:
- Use DTensor weight loader (recommended) or HF weight loader
- Utilize state_dict from the FSDP to synchronize the weights among tp ranks in vLLM
When working with Megatron:
- Use Megatron weight loader
- During training, only the current pp stage holds the parameters
- Before inference, broadcast the parameters of the current pp rank
  to all other pp ranks (all pp ranks holds all the parameters)
- Bind the parameters to the inference engine
- Do inference in tp. pp is treated as additional dp
- After inference, all the parameters that doesn't belong to this pp rank is freed.
"""

import gc
import logging
import os
import time
from typing import Any, Generator, Optional

import ray
import torch
import zmq
from packaging import version as vs
from torch.distributed.device_mesh import DeviceMesh
from torch.multiprocessing.reductions import reduce_tensor

from verl import DataProto
from verl.third_party.vllm import VLLM_SLEEP_LEVEL, get_version
from verl.utils.device import get_device_id, get_device_name, get_torch_device, is_support_ipc
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout.base import BaseRollout
from verl.workers.rollout.utils import ensure_async_iterator
from verl.workers.rollout.vllm_rollout.utils import TensorMetadata, get_device_uuid

logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "INFO"))


def _check_vllm_version_for_sleep_level():
    """Return True if the installed vLLM is new enough (>= 0.11.0) to use the
    configured sleep level safely; False (conservative fallback) if the version
    cannot be determined.

    See https://github.com/vllm-project/vllm/issues/25171
    """
    minver = "0.11.0"
    current_version = get_version("vllm")
    if not current_version:
        logger.warning("Could not determine vLLM version, assuming an older version for sleep_level configuration.")
        return False
    return vs.parse(current_version) >= vs.parse(minver)


class ServerAdapter(BaseRollout):
    """
    vLLM server adapter used in native async mode, serve as a client to request vLLM server
    to resume/release/update weights and kv_cache.
    """

    def __init__(
        self,
        config: RolloutConfig,
        model_config: HFModelConfig,
        device_mesh: DeviceMesh,
    ):
        super().__init__(config, model_config, device_mesh)
        # Resolved lazily in _execute_method: the http server actor is created
        # after the hybrid engine, so it does not exist yet at construction time.
        self.server_handle: ray.actor.ActorHandle = None

        rank = int(os.environ["RANK"])
        local_world_size = int(os.environ["RAY_LOCAL_WORLD_SIZE"])
        # Number of training ranks that share one rollout replica (tp * dp * pp).
        rollout_world_size = (
            self.config.tensor_model_parallel_size
            * self.config.data_parallel_size
            * self.config.pipeline_model_parallel_size
        )
        self.replica_rank = rank // rollout_world_size
        self.rollout_rank = rank % rollout_world_size
        self.node_rank = self.rollout_rank // local_world_size
        # Sleep level 1 keeps weights in host memory; level 2 (VLLM_SLEEP_LEVEL default)
        # requires a recent-enough vLLM when expert parallelism is enabled.
        if config.layered_summon or (config.expert_parallel_size > 1 and not _check_vllm_version_for_sleep_level()):
            logger.warning("Setting the sleep level to 1 may cause a memory overflow.")
            self.sleep_level = 1
        else:
            self.sleep_level = VLLM_SLEEP_LEVEL
        self.device_uuid = get_device_uuid(get_device_id())
        # ZMQ REQ socket over a per-device IPC endpoint is used to stream weight
        # buckets to the co-located inference worker (see update_weights).
        self.zmq_context = zmq.Context()
        self.zmq_handle = f"ipc:///tmp/rl-colocate-zmq-{self.device_uuid}.sock"
        self.use_shm = not is_support_ipc()
        if self.use_shm:
            logger.warning(
                "IPC is not supported on your devices. Falling back to shared memory for weight transfer, "
                "which may cause performance degradation. If you are using Ascend NPUs, please ensure that "
                "your software and CANN toolkit versions meet the requirements for IPC support. (Ascend HDK version "
                ">= 25.3.rc1 and CANN toolkit version >= 8.3.RC1)"
            )

    async def _execute_method(
        self,
        method: str,
        non_block: bool = False,
        timeout: Optional[float] = None,
        args: tuple = (),
        kwargs: Optional[dict] = None,
    ) -> Any:
        """Execute method on inference engine via ray.

        Args:
            method: The method name to execute on the server.
            non_block: If True, execute the method asynchronously and return immediately.
            timeout: Timeout for the collective_rpc call.
            args: Positional arguments for the method.
            kwargs: Keyword arguments for the method.

        Returns:
            The result of the method execution, or None if non_block=True.
        """
        # Only rollout rank 0 talks to the server; other ranks are no-ops.
        if self.rollout_rank != 0:
            return None

        # Lazy init http server adapter because http server is launched after hybrid engine.
        if self.server_handle is None:
            self.server_handle = ray.get_actor(f"vllm_server_{self.replica_rank}_{self.node_rank}")
        future = self.server_handle.collective_rpc.remote(method, timeout=timeout, args=args, kwargs=kwargs)
        # NOTE: with non_block=True the raw ray ObjectRef is returned for the
        # caller to await later (see update_weights).
        return future if non_block else await future

    async def resume(self, tags: list[str]):
        """Resume rollout weights or kv cache in GPU memory.

        Args:
            tags: weights or kv_cache.
        """
        if self.config.free_cache_engine:
            await self._execute_method("wake_up", kwargs={"tags": tags})

    async def release(self):
        """Release weights and kv cache in GPU memory."""
        if self.config.free_cache_engine:
            await self._execute_method("sleep", kwargs={"level": self.sleep_level})

    @torch.no_grad()
    async def update_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None], **kwargs):
        """Update model weights via CUDA IPC (fallback to shared memory if IPC not supported) to inference workers.

        Protocol: kick off `update_weights_from_ipc` on the server (non-blocking),
        hand the server a shared buffer (CUDA IPC handle or POSIX shared memory),
        then stream weights through that buffer in fixed-size buckets over a ZMQ
        REQ/REP handshake. Each send_pyobj/recv pair is one bucket round-trip.
        """
        start_time = time.time()
        # Start the receiving side first; awaited at the end after all buckets are sent.
        future = await self._execute_method(
            "update_weights_from_ipc",
            non_block=True,
            kwargs={**kwargs, "use_shm": self.use_shm},
        )

        # build communication buffer
        bucket_size_mb = self.config.checkpoint_engine.update_weights_bucket_megabytes
        bucket_size = int(bucket_size_mb) << 20
        s = self.zmq_context.socket(zmq.REQ)
        s.bind(self.zmq_handle)
        buffer, shm = None, None
        if not self.use_shm:
            # CUDA IPC path: share a device buffer with the server via a reduced tensor handle.
            buffer = torch.empty(bucket_size, dtype=torch.uint8, device=f"{get_device_name()}:0")
            handle = reduce_tensor(buffer)
            s.send_pyobj(handle)
        else:
            import uuid
            from multiprocessing import shared_memory

            # Create unique name for shared memory
            shm_name = f"verl_weights_{uuid.uuid4().hex}"
            shm = shared_memory.SharedMemory(name=shm_name, create=True, size=bucket_size)
            buffer = torch.frombuffer(shm.buf, dtype=torch.uint8)
            comm_metadata = {"name": shm_name, "size": bucket_size}
            s.send_pyobj(comm_metadata)
        # Wait for the server to acknowledge the buffer before streaming weights.
        s.recv()

        # send bucket weights
        offset = 0
        bucket_meta: dict[str, TensorMetadata] = {}
        # dtype = PrecisionType.to_dtype(self.config.dtype)
        async for name, weight in ensure_async_iterator(weights):
            # model parameters are in fp32 full precision
            # (vermouth1992) we should not force cast weight here because some parameters
            # (such as moe gate) have to keep fp32 precision. If a weight is bf16 in the rollout side,
            # the rollout should automatically cast on demand. However, this would incur a higher weight
            # transfer volume.
            # weight = weight.to(dtype, non_blocking=True)

            # fill the tensor bucket; flush the current bucket first if this weight won't fit
            if offset + weight.nbytes > bucket_size:
                get_torch_device().synchronize()
                s.send_pyobj({"bucket_meta": bucket_meta, "is_last": False})
                s.recv()
                bucket_meta = {}
                offset = 0

            # TODO: slice embedding layer weight into chunks
            assert offset + weight.nbytes <= bucket_size, (
                f"Weight {name}({weight.shape}, {weight.dtype}) is too large to fit in the bucket."
                f"Please increase rollout.update_weights_bucket_megabytes({bucket_size_mb} MB)."
            )
            bucket_meta[name] = {
                "name": name,
                "shape": weight.shape,
                "dtype": weight.dtype,
                "offset": offset,
            }
            # Copy the raw bytes of the weight into the shared buffer at `offset`.
            buffer[offset : offset + weight.nbytes].copy_(weight.view(-1).view(torch.uint8), non_blocking=True)
            offset += weight.nbytes

        # send the last bucket
        get_torch_device().synchronize()
        s.send_pyobj({"bucket_meta": bucket_meta, "is_last": True})
        s.recv()

        # clean up
        s.close()
        del buffer
        if shm is not None:
            shm.close()
            shm.unlink()
            del shm
        gc.collect()
        get_torch_device().ipc_collect()
        get_torch_device().empty_cache()
        if future is not None:
            await future

        # reset prefix cache after updating weights
        if self.rollout_rank == 0:
            await self.server_handle.clear_kv_cache.remote()

        if self.replica_rank == 0 and self.rollout_rank == 0:
            logger.info(f"update_weights done, time cost: {time.time() - start_time:.2f}s")

    def generate_sequences(self, prompts: DataProto) -> DataProto:
        """Batch generate sequences in sync mode.

        Note:
            ServerAdapter uses async server mode and does not support synchronous generation.
            Since SPMD mode was retired (PR #4411), the generation workflow should use
            the async server interface instead.

        Raises:
            NotImplementedError: Always raised as sync generation is not supported.
        """
        raise NotImplementedError(
            "ServerAdapter does not support synchronous generate_sequences(). "
            "The vLLM SPMD mode was retired in PR #4411. For batch generation, "
            "please use the async server interface via vLLMReplica and AsyncLLMServerManager, "
            "or use HFRollout for synchronous generation. "
            "See https://github.com/volcengine/verl/issues/4682 for more details."
        )
verl__workers__rollout__vllm_rollout__vllm_rollout.py
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sharding manager to implement HybridEngine
"""

from verl import DataProto


class BaseShardingManager:
    """No-op base class for resharding managers.

    Subclasses act as context managers around generation and may reshape data
    on the way in (``preprocess_data``) and out (``postprocess_data``). The base
    implementation does nothing and passes data through unchanged.
    """

    def __init__(self):
        # Subclasses may record per-phase timings here.
        self.timing = {}

    def __enter__(self):
        """Enter the sharding context; the base class has nothing to set up."""

    def __exit__(self, exc_type, exc_value, traceback):
        """Leave the sharding context; the base class has nothing to tear down."""

    def preprocess_data(self, data: DataProto) -> DataProto:
        """Return ``data`` unchanged; subclasses may reshard it first."""
        return data

    def postprocess_data(self, data: DataProto) -> DataProto:
        """Return ``data`` unchanged; subclasses may reshard it first."""
        return data
verl__workers__sharding_manager__base.py
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains a resharding manager that binds weights from FSDP zero3 to XPerfGPT
"""

from torch.distributed.device_mesh import DeviceMesh

from verl import DataProto
from verl.protocol import all_gather_data_proto
from verl.utils.ulysses import get_ulysses_sequence_parallel_group, set_ulysses_sequence_parallel_group

from .base import BaseShardingManager


class FSDPUlyssesShardingManager(BaseShardingManager):
    """
    Sharding manager to support data resharding when using FSDP + Ulysses
    """

    def __init__(self, device_mesh: DeviceMesh):
        super().__init__()
        self.device_mesh = device_mesh
        # NOTE(review): unused in this class as written — presumably reserved
        # for per-model seeding; confirm before removing.
        self.seed_offset = 12345

    def __enter__(self):
        """Swap the global Ulysses SP group for this model's own SP group."""
        # TODO: check how to set seed for each model
        if self.device_mesh is None:
            return
        # Remember the global SP group so __exit__ can restore it.
        self.prev_sp_group = get_ulysses_sequence_parallel_group()
        set_ulysses_sequence_parallel_group(self.device_mesh["sp"].get_group())

    def __exit__(self, exc_type, exc_value, traceback):
        """Restore the SP group that was active before __enter__."""
        # TODO: check how to set seed for each model
        if self.device_mesh is None:
            return
        set_ulysses_sequence_parallel_group(self.prev_sp_group)

    def preprocess_data(self, data: DataProto) -> DataProto:
        """
        AllGather data from sp region
        This is because the data is first sharded along the FSDP dimension
        as we utilize the DP_COMPUTE
        In Ulysses, we need to make sure the same data is used across a SP group
        """
        if self.device_mesh is None:
            return data
        sp_group = self.device_mesh["sp"].get_group()
        all_gather_data_proto(data=data, process_group=sp_group)
        return data

    def postprocess_data(self, data: DataProto) -> DataProto:
        """
        Split the data to follow FSDP partition
        """
        if self.device_mesh is None:
            return data
        size = self.device_mesh["sp"].size()
        local_rank = self.device_mesh["sp"].get_local_rank()
        # Keep only this rank's shard of the SP-replicated batch.
        return data.chunk(chunks=size)[local_rank]
verl__workers__sharding_manager__fsdp_ulysses.py
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import torch.nn.functional as F
from tensordict import TensorDict

from verl.trainer.ppo.core_algos import agg_loss, compute_value_loss, get_policy_loss_fn, kl_penalty
from verl.utils import tensordict_utils as tu
from verl.utils.dataset.dataset_utils import DatasetPadMode
from verl.utils.metric import AggregationType, Metric
from verl.utils.torch_functional import masked_mean, masked_sum
from verl.workers.config import ActorConfig, CriticConfig
from verl.workers.utils.padding import no_padding_2_padding


def sft_loss(config: ActorConfig, model_output, data: TensorDict, dp_group=None):
    """Negative log-likelihood loss for SFT, normalized by the global token count.

    Returns:
        (loss, metrics): scalar loss tensor and an empty metrics dict.
    """
    pad_mode = tu.get_non_tensor_data(data=data, key="pad_mode", default=DatasetPadMode.NO_PADDING)
    dp_size = data["dp_size"]
    batch_num_tokens = data["batch_num_tokens"]

    log_prob = model_output["log_probs"]
    if pad_mode == DatasetPadMode.NO_PADDING:
        # log_prob and loss mask are nested tensors of shape [bsz, j1]
        # for each sample, loss mask shape is [1, prompt_length + response_length]
        loss_mask = data["loss_mask"]
        log_prob_flatten = log_prob.values()
        loss_mask_flatten = loss_mask.values()
        # left-shift the loss mask by one token to align with log_prob
        loss_mask_flatten = torch.roll(loss_mask_flatten, shifts=-1, dims=0)
        # NOTE: loss is averaged over all tokens in the batch across all data parallel groups,
        # For FSDP backend, the loss is directly used for backward; while for Megatron backend,
        # the loss should be scaled by `num_microbatches` for pp schedule.
        loss = -masked_sum(log_prob_flatten, loss_mask_flatten) / batch_num_tokens * dp_size
    else:
        # Padded mode: mask selects response tokens directly.
        response_mask = data["response_mask"].to(bool)
        loss = -masked_sum(log_prob, response_mask) / batch_num_tokens * dp_size

    return loss, {}


def _slice_response_from_unpad_output(tensor: torch.Tensor, data: TensorDict) -> torch.Tensor:
    """Slice response from unpad model output.

    Args:
        tensor: model output over packed (unpadded) tokens; either a nested tensor
            or a flat tensor whose leading dimension equals the total number of
            tokens in the batch (the code asserts sequence_offsets[-1] == values.shape[0]).
        data: TensorDict with "prompts", "responses", "attention_mask"

    Returns:
        tensor: sliced response tensor of shape [bsz, max_response_len], right-padded
            with zeros; each row is left-shifted by one token so position i holds the
            model output predicting response token i (log_probs/values convention).
    """
    values = tensor.values() if tensor.is_nested else tensor
    prompt_ids = data["prompts"]
    response_ids = data["responses"]
    attention_mask = data["attention_mask"]
    if prompt_ids.is_nested:
        # Nested tensors carry exact per-sample lengths in their offsets.
        prompt_lens = prompt_ids.offsets().diff()
        response_lens = response_ids.offsets().diff()
        max_response_len = response_ids.offsets().max().item()
    else:
        assert not attention_mask.is_nested
        # Padded layout: recover true lengths from the attention mask.
        prompt_lens = attention_mask[:, : prompt_ids.shape[1]].sum(dim=1)
        response_lens = attention_mask[:, prompt_ids.shape[1] :].sum(dim=1)
        max_response_len = response_ids.shape[1]
    sequence_lens = prompt_lens + response_lens
    sequence_offsets = sequence_lens.cumsum(dim=0)
    assert sequence_offsets[-1].item() == values.shape[0]

    response_list = []
    for resp_len, seq_offset in zip(response_lens, sequence_offsets, strict=True):
        pad_size = max_response_len - resp_len
        # left-shift model output by one token for log_probs/values
        response_list.append(F.pad(values[seq_offset - resp_len - 1 : seq_offset - 1], (0, pad_size)))
    output = torch.stack(response_list, dim=0)
    return output


def ppo_loss(config: ActorConfig, model_output, data: TensorDict, dp_group=None):
    """Computes ppo loss from model output (log_prob, entropy, values, etc.) and old_log_probs from data.

    Returns:
        (policy_loss, metrics): the total actor loss (policy gradient, minus
        entropy bonus, plus optional KL penalty) and a metrics dict of Metric
        objects (plus the raw ``kl_coef`` float when KL loss is enabled).
    """
    # Convert packed (no-padding) model outputs into padded [bsz, response_len] layout.
    log_prob = no_padding_2_padding(model_output["log_probs"], data)
    entropy = model_output.get("entropy", None)
    if entropy is not None:
        entropy = no_padding_2_padding(entropy, data)

    # global batch info for loss aggregation
    config.global_batch_info["dp_size"] = data["dp_size"]
    config.global_batch_info["batch_num_tokens"] = data["batch_num_tokens"]
    config.global_batch_info["global_batch_size"] = data["global_batch_size"]
    config.global_batch_info["loss_scale_factor"] = config.loss_scale_factor

    # assumes that if any of the global batch info is set, the policy_loss_fn will
    # normalize using dp_size/global_bsz/global_token; in this case, metric aggregation should be SUM
    # to reflect the mean loss over the global batch
    if (
        data["dp_size"] > 1
        or data["batch_num_tokens"] is not None
        or data["global_batch_size"] is not None
        or config.loss_scale_factor is not None
    ):
        metric_aggregation = AggregationType.SUM
    else:
        metric_aggregation = AggregationType.MEAN

    metrics = {}
    response_mask = data["response_mask"].to(bool)

    # compute policy loss
    old_log_prob = data["old_log_probs"]
    advantages = data["advantages"]
    rollout_is_weights = data.get("rollout_is_weights", None)

    loss_agg_mode = config.loss_agg_mode
    loss_mode = config.policy_loss.get("loss_mode", "vanilla")
    policy_loss_fn = get_policy_loss_fn(loss_mode)
    pg_loss, pg_metrics = policy_loss_fn(
        old_log_prob=old_log_prob,
        log_prob=log_prob,
        advantages=advantages,
        response_mask=response_mask,
        loss_agg_mode=loss_agg_mode,
        config=config,
        rollout_is_weights=rollout_is_weights,
    )
    # AggregationType.MEAN for pg metrics: assumes policy_loss_fn normalizes by local_bsz/local_tokens
    # Ex: in compute_policy_loss_vanilla, pg_metrics are pg_clipfrac, ppo_kl, pg_clipfrac_lower
    pg_metrics = Metric.from_dict(pg_metrics, aggregation=AggregationType.MEAN)
    metrics.update(pg_metrics)
    metrics["actor/pg_loss"] = Metric(value=pg_loss, aggregation=metric_aggregation)
    policy_loss = pg_loss

    # add entropy loss (subtracted: higher entropy is rewarded)
    if entropy is not None:
        entropy_loss = agg_loss(
            loss_mat=entropy, loss_mask=response_mask, loss_agg_mode=loss_agg_mode, **config.global_batch_info
        )
        entropy_coeff = config.entropy_coeff
        policy_loss -= entropy_coeff * entropy_loss
        metrics["actor/entropy_loss"] = Metric(value=entropy_loss, aggregation=metric_aggregation)

    # add kl loss
    if config.use_kl_loss:
        ref_log_prob = data["ref_log_prob"]
        # compute kl loss
        kld = kl_penalty(logprob=log_prob, ref_logprob=ref_log_prob, kl_penalty=config.kl_loss_type)
        kl_loss = agg_loss(
            loss_mat=kld, loss_mask=response_mask, loss_agg_mode=config.loss_agg_mode, **config.global_batch_info
        )
        policy_loss += kl_loss * config.kl_loss_coef
        metrics["kl_loss"] = Metric(value=kl_loss, aggregation=metric_aggregation)
        # NOTE(review): stored as a raw float, unlike the other Metric entries —
        # presumably intentional since a coefficient needs no aggregation; confirm.
        metrics["kl_coef"] = config.kl_loss_coef

    return policy_loss, metrics


def value_loss(config: CriticConfig, model_output, data: TensorDict, dp_group=None):
    """value loss

    Args:
        config: CriticConfig
        model_output: model output from the model
        data: the input to the model
        dp_group: data paralle group

    Returns:
        value loss
    """
    # Re-align packed value predictions to padded [bsz, response_length] layout.
    vpreds = _slice_response_from_unpad_output(model_output["values"], data)  # (bsz, response_length)
    values = data["values"]
    returns = data["returns"]
    response_mask = data["response_mask"].to(bool)

    vf_loss, vf_clipfrac = compute_value_loss(
        vpreds=vpreds,
        values=values,
        returns=returns,
        response_mask=response_mask,
        cliprange_value=config.cliprange_value,
        loss_agg_mode=config.loss_agg_mode,
    )

    metrics = {}
    metrics.update(
        {
            "critic/vf_loss": vf_loss.detach().item(),
            "critic/vf_clipfrac": vf_clipfrac.detach().item(),
            "critic/vpred_mean": masked_mean(vpreds, response_mask).detach().item(),
        }
    )
    return vf_loss, metrics
verl__workers__utils__losses.py
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import torch.nn.functional as F
from tensordict import TensorDict

from verl.utils import tensordict_utils as tu
from verl.utils.attention_utils import index_first_axis, unpad_input


def left_right_2_no_padding(data: TensorDict) -> TensorDict:
    """
    Convert TensorDict from left-right padding to no-padding format.

    Args:
        data: TensorDict with "input_ids", "attention_mask", "response_mask", "position_ids"

    Returns:
        data: TensorDict with
            - Tensor includes NestedTensors like "input_ids", "loss_mask", "position_ids"
            - NonTensorData includes "max_seq_len", "max_response_len", "indices"

    Note:
        1. the return input_ids/position_ids/loss_mask are nested tensor.
        2. we will remove "attention_mask", "response" in the return data, but "response_mask" is kept.
    """
    # The four required keys are validated up front so a malformed batch fails
    # loudly here rather than deep inside the unpad/nested-tensor calls.
    assert "input_ids" in data, "input_ids is required in left-right padding data"
    assert "attention_mask" in data, "attention_mask is required in left-right padding data"
    assert "response_mask" in data, "response_mask is required in left-right padding data"
    assert "position_ids" in data, "position_ids is required in left-right padding data"

    # NOTE(review): only "input_ids" is popped here; "attention_mask" is still
    # present in `data` when this function returns — confirm the removal
    # promised in the docstring happens in a downstream step.
    input_ids = data.pop("input_ids")
    attention_mask = data["attention_mask"]
    response_mask = data["response_mask"]
    position_ids = data["position_ids"]  # (bs, seq_len) or # (bs, 4, seq_len)

    # Record the padded widths as non-tensor metadata so the inverse transform
    # (no_padding_2_padding) can restore the original response width.
    max_seq_len, max_response_len = input_ids.shape[1], response_mask.shape[1]
    tu.assign_non_tensor_data(data, "max_seq_len", max_seq_len)
    tu.assign_non_tensor_data(data, "max_response_len", max_response_len)

    # Pack all valid tokens into one flat (total_nnz,) tensor; `indices` maps
    # packed positions back into the padded (bs * seq_len) layout and
    # `cu_seqlens` are the cumulative sequence offsets of the packed layout.
    input_ids_rmpad, indices, cu_seqlens, *_ = unpad_input(input_ids.unsqueeze(-1), attention_mask)
    tu.assign_non_tensor_data(data, "indices", indices)
    input_ids_nested = torch.nested.nested_tensor_from_jagged(input_ids_rmpad.squeeze(-1), offsets=cu_seqlens)

    # position_ids may carry an extra leading dim per sample (e.g. multi-channel
    # rope positions), so mask per-sample instead of going through unpad_input.
    position_ids_list = []
    for i in range(attention_mask.shape[0]):
        curr_mask = attention_mask[i].bool()
        curr_pos_ids = position_ids[i]
        if curr_pos_ids.dim() == 1:  # (seq_len,)
            valid_ids = curr_pos_ids[curr_mask]
        else:  # (4, seq_len)
            valid_ids = curr_pos_ids[:, curr_mask]
        position_ids_list.append(valid_ids)
    position_ids_nested = torch.nested.as_nested_tensor(position_ids_list, layout=torch.jagged)

    data["input_ids"] = input_ids_nested
    data["position_ids"] = position_ids_nested
    # loss_mask aliases response_mask: only response tokens contribute to loss.
    data["loss_mask"] = data["response_mask"]

    # Optional MoE routing info rides along in the same packed layout.
    routed_experts = data.get("routed_experts", None)
    if routed_experts is not None and not routed_experts.is_nested:
        if routed_experts.max() <= 255:
            # presumably a memory optimization: expert ids fitting in a byte
            # are downcast before packing — TODO confirm consumers accept uint8
            routed_experts = routed_experts.to(torch.uint8)
        routed_experts_rmpad = index_first_axis(routed_experts.unsqueeze(-1).flatten(0, 1), indices)
        routed_experts_nested = torch.nested.nested_tensor_from_jagged(
            routed_experts_rmpad.squeeze(-1), offsets=cu_seqlens
        )
        data["routed_experts"] = routed_experts_nested
    return data


def no_padding_2_padding(tensor: torch.Tensor, data: TensorDict) -> torch.Tensor:
    """Slice response from unpad model output.

    Inverse of the packing above: extracts each sample's response slice from a
    packed model output and right-pads it back to a rectangular tensor.

    Args:
        tensor: a nested tensor or a 1D tensor in shape (total_nnz,), total_nnz
            is the total number of tokens across all sequences in the batch
        data: TensorDict with "prompts", "responses", "attention_mask"

    Returns:
        tensor: sliced response tensor of shape [bsz, max_response_len]
    """
    values = tensor.values() if tensor.is_nested else tensor
    prompt_ids = data["prompts"]
    response_ids = data["responses"]
    attention_mask = data["attention_mask"]
    # Prefer the width recorded by left_right_2_no_padding; -1 means "unknown".
    max_response_len = tu.get_non_tensor_data(data=data, key="max_response_len", default=-1)

    if prompt_ids.is_nested:
        # Jagged layout: per-sequence lengths are the diffs of the offsets.
        prompt_lens = prompt_ids.offsets().diff()
        response_lens = response_ids.offsets().diff()
        if max_response_len < 0:
            max_response_len = response_ids.offsets().diff().max().item()
    else:
        assert not attention_mask.is_nested
        # Left-right padded layout: valid lengths come from the attention mask.
        prompt_lens = attention_mask[:, : prompt_ids.shape[1]].sum(dim=1)
        response_lens = attention_mask[:, prompt_ids.shape[1] :].sum(dim=1)
        max_response_len = response_ids.shape[1]

    sequence_lens = prompt_lens + response_lens
    sequence_offsets = sequence_lens.cumsum(dim=0)
    # every packed token must be accounted for by the per-sequence lengths
    assert sequence_offsets[-1].item() == values.shape[0]

    response_list = []
    for resp_len, seq_offset in zip(response_lens, sequence_offsets, strict=True):
        pad_size = max_response_len - resp_len
        # left-shift model output by one token for log_probs/values
        response_list.append(F.pad(values[seq_offset - resp_len - 1 : seq_offset - 1], (0, pad_size)))
    output = torch.stack(response_list, dim=0)
    return output
verl__workers__utils__padding.py