"""A GPU worker class."""
import gc
import os
from typing import Any, Dict, List, Optional, Set, Tuple

import torch
import torch.distributed

from vllm.config import (CacheConfig, DeviceConfig, LoadConfig, LoRAConfig,
                         ModelConfig, ParallelConfig, SchedulerConfig,
                         VisionLanguageConfig)
from vllm.distributed import (broadcast_tensor_dict,
                              ensure_model_parallel_initialized,
                              init_distributed_environment)
from vllm.distributed.device_communicators import pynccl_utils
from vllm.distributed.device_communicators.custom_all_reduce import (
    init_custom_ar)
from vllm.lora.request import LoRARequest
from vllm.model_executor import set_random_seed
from vllm.sequence import SamplerOutput, SequenceGroupMetadata
from vllm.worker.cache_engine import CacheEngine
from vllm.worker.worker_base import WorkerBase

from serve.model_runner import ModelRunner


class Worker(WorkerBase):
    """A worker class that executes (a partition of) the model on a GPU.

    Each worker is associated with a single GPU. The worker is responsible for
    maintaining the KV cache and executing the model on the GPU. In case of
    distributed inference, each worker is assigned a partition of the model.
    """
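
    # Typical lifecycle, shown as an illustrative sketch only (the surrounding
    # executor normally drives these calls, and the argument values below are
    # placeholders, not defaults):
    #
    #   worker = Worker(model_config, parallel_config, scheduler_config,
    #                   device_config, cache_config, load_config,
    #                   local_rank=0, rank=0,
    #                   distributed_init_method="tcp://127.0.0.1:29500",
    #                   is_driver_worker=True)
    #   worker.init_device()
    #   worker.load_model(args)
    #   num_gpu_blocks, num_cpu_blocks = worker.determine_num_available_blocks()
    #   worker.initialize_cache(num_gpu_blocks, num_cpu_blocks)
    #   outputs = worker.execute_model(seq_group_metadata_list, {}, {}, {})
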

    def __init__(
        self,
        model_config: ModelConfig,
        parallel_config: ParallelConfig,
        scheduler_config: SchedulerConfig,
        device_config: DeviceConfig,
        cache_config: CacheConfig,
        load_config: LoadConfig,
        local_rank: int,
        rank: int,
        distributed_init_method: str,
        lora_config: Optional[LoRAConfig] = None,
        vision_language_config: Optional[VisionLanguageConfig] = None,
        is_driver_worker: bool = False,
    ) -> None:
        self.model_config = model_config
        self.parallel_config = parallel_config
        self.scheduler_config = scheduler_config
        self.device_config = device_config
        self.cache_config = cache_config
        self.local_rank = local_rank
        self.rank = rank
        self.distributed_init_method = distributed_init_method
        self.lora_config = lora_config
        self.load_config = load_config
        self.is_driver_worker = is_driver_worker
        if self.is_driver_worker:
            assert self.rank == 0, "The driver worker must have rank 0."

        if self.model_config.trust_remote_code:
            # Only needed when the model requires `trust_remote_code`.
            from vllm.utils import init_cached_hf_modules
            init_cached_hf_modules()
        self.vision_language_config = vision_language_config
        if self.vision_language_config:
            assert not self.lora_config, (
                "To be tested: vision language model with LoRA settings.")

        self.model_runner = ModelRunner(
            model_config,
            parallel_config,
            scheduler_config,
            device_config,
            load_config=load_config,
            lora_config=self.lora_config,
            kv_cache_dtype=self.cache_config.cache_dtype,
            is_driver_worker=is_driver_worker,
            vision_language_config=vision_language_config,
        )
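        # Uninitialized cache engine; populated later by initialize_cache().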
        self.cache_engine: CacheEngine
        self.gpu_cache: List[torch.Tensor]

    def init_device(self) -> None:
        if self.device_config.device.type == "cuda":
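            # torch.distributed.all_reduce does not free its input tensor until
            # the next synchronization point, so memory usage can grow with the
            # number of all_reduce calls; this env var disables that behavior.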
            os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"

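            # This env var (sometimes set by Ray) can cause exceptions during
            # CUDA graph capture, so drop it if present.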
            os.environ.pop("NCCL_ASYNC_ERROR_HANDLING", None)
            self.device = torch.device(f"cuda:{self.local_rank}")
            torch.cuda.set_device(self.device)

            _check_if_gpu_supports_dtype(self.model_config.dtype)
            torch.cuda.empty_cache()
            self.init_gpu_memory = torch.cuda.mem_get_info()[0]
        else:
            raise RuntimeError(
                f"Unsupported device type: {self.device_config.device}")
        # Initialize the distributed environment.
        init_worker_distributed_environment(self.parallel_config, self.rank,
                                            self.distributed_init_method,
                                            self.local_rank)
        # Set the random seed for reproducible sampling.
        set_random_seed(self.model_config.seed)

    def load_model(self, args):
        self.model_runner.load_model(args)

    @torch.inference_mode()
    def determine_num_available_blocks(self) -> Tuple[int, int]:
        """Profiles the peak memory usage of the model to determine how many
        KV blocks may be allocated without OOMs.

        The engine will first conduct a profiling of the existing memory usage.
        Then, it calculates the maximum possible number of GPU and CPU blocks
        that can be allocated with the remaining free memory.

        .. tip::
            You may limit the usage of GPU memory
            by adjusting the `gpu_memory_utilization` parameter.
        """
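        # Profile the memory usage of the model and get the maximum number of
        # cache blocks that can be allocated with the remaining free memory.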
        torch.cuda.empty_cache()

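        # Run a forward pass with dummy inputs to profile the peak memory usage
        # of the model.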
        self.model_runner.profile_run()

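        # Read back the memory statistics and derive how many KV cache blocks
        # fit in the remaining space.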
        torch.cuda.synchronize()
        free_gpu_memory, total_gpu_memory = torch.cuda.mem_get_info()
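        # NOTE: this assumes that other processes sharing the GPU did not
        # change their memory usage during the profiling run.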
        peak_memory = self.init_gpu_memory - free_gpu_memory
        assert peak_memory > 0, (
            "Error in memory profiling. This happens when the GPU memory was "
            "not properly cleaned up before initializing the vLLM instance.")

        cache_block_size = self.get_cache_block_size_bytes()
        num_gpu_blocks = int(
            (total_gpu_memory * self.cache_config.gpu_memory_utilization -
             peak_memory) // cache_block_size)
        num_cpu_blocks = int(self.cache_config.swap_space_bytes //
                             cache_block_size)
        num_gpu_blocks = max(num_gpu_blocks, 0)
        num_cpu_blocks = max(num_cpu_blocks, 0)
        if self.model_runner.lora_manager:
            self.model_runner.remove_all_loras()
        gc.collect()
        torch.cuda.empty_cache()
        return num_gpu_blocks, num_cpu_blocks

    def initialize_cache(self, num_gpu_blocks: int,
                         num_cpu_blocks: int) -> None:
        """Allocate GPU and CPU KV cache with the specified number of blocks.

        This also warms up the model, which may record CUDA graphs.
        """
        raise_if_cache_size_invalid(num_gpu_blocks,
                                    self.cache_config.block_size,
                                    self.model_config.max_model_len)

        self.cache_config.num_gpu_blocks = num_gpu_blocks
        self.cache_config.num_cpu_blocks = num_cpu_blocks

        self._init_cache_engine()
        self._warm_up_model()

    def _init_cache_engine(self):
        assert self.cache_config.num_gpu_blocks is not None
        self.cache_engine = CacheEngine(self.cache_config, self.model_config,
                                        self.parallel_config)
        self.gpu_cache = self.cache_engine.gpu_cache
        self.model_runner.set_block_size(self.cache_engine.block_size)

    def _warm_up_model(self) -> None:
        if not self.model_config.enforce_eager:
            self.model_runner.capture_model(self.gpu_cache)
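        # Reset the seed so that the random state is not affected by the model
        # warm-up above and stays consistent across runs.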
        set_random_seed(self.model_config.seed)

    def cache_swap(
        self,
        blocks_to_swap_in: Dict[int, int],
        blocks_to_swap_out: Dict[int, int],
        blocks_to_copy: Dict[int, List[int]],
    ) -> None:
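        # Issue the requested cache operations on the cache engine.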
        if blocks_to_swap_in:
            self.cache_engine.swap_in(blocks_to_swap_in)
        if blocks_to_swap_out:
            self.cache_engine.swap_out(blocks_to_swap_out)
        if blocks_to_copy:
            self.cache_engine.copy(blocks_to_copy)

    @torch.inference_mode()
    def execute_model(
        self,
        seq_group_metadata_list: Optional[List[SequenceGroupMetadata]] = None,
        blocks_to_swap_in: Optional[Dict[int, int]] = None,
        blocks_to_swap_out: Optional[Dict[int, int]] = None,
        blocks_to_copy: Optional[Dict[int, List[int]]] = None,
        num_lookahead_slots: int = 0,
    ) -> List[SamplerOutput]:
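        # The driver worker broadcasts the swap/copy block tables and the
        # number of sequence groups; non-driver workers receive them here.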
        if self.is_driver_worker:
            assert seq_group_metadata_list is not None
            num_seq_groups = len(seq_group_metadata_list)
            assert blocks_to_swap_in is not None
            assert blocks_to_swap_out is not None
            assert blocks_to_copy is not None
            data: Dict[str, Any] = {
                "num_seq_groups": num_seq_groups,
                "blocks_to_swap_in": blocks_to_swap_in,
                "blocks_to_swap_out": blocks_to_swap_out,
                "blocks_to_copy": blocks_to_copy,
            }
            broadcast_tensor_dict(data, src=0)
        else:
            data = broadcast_tensor_dict(src=0)
            num_seq_groups = data["num_seq_groups"]
            blocks_to_swap_in = data["blocks_to_swap_in"]
            blocks_to_swap_out = data["blocks_to_swap_out"]
            blocks_to_copy = data["blocks_to_copy"]

        assert blocks_to_swap_in is not None
        assert blocks_to_swap_out is not None
        assert blocks_to_copy is not None
        self.cache_swap(blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy)

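        # If there is no input, there is nothing to run.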
        if num_seq_groups == 0:
            return []

        output = self.model_runner.execute_model(seq_group_metadata_list,
                                                 self.gpu_cache)
        return [output]

    def add_lora(self, lora_request: LoRARequest) -> bool:
        return self.model_runner.add_lora(lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        return self.model_runner.remove_lora(lora_id)

    def list_loras(self) -> Set[int]:
        return self.model_runner.list_loras()

    @property
    def max_model_len(self) -> int:
        return self.model_config.max_model_len

    @property
    def vocab_size(self) -> int:
        return self.model_runner.vocab_size

    def get_cache_block_size_bytes(self) -> int:
        """Get the size of a single KV cache block in bytes."""
        return CacheEngine.get_cache_block_size(self.cache_config,
                                                self.model_config,
                                                self.parallel_config)


def init_worker_distributed_environment(
    parallel_config: ParallelConfig,
    rank: int,
    distributed_init_method: Optional[str] = None,
    local_rank: int = -1,
) -> None:
    """Initialize the distributed environment."""
    init_distributed_environment(parallel_config.world_size, rank,
                                 distributed_init_method, local_rank)

    if pynccl_utils.is_initialized():
        pynccl_world_size = pynccl_utils.get_world_size()
        if pynccl_world_size != parallel_config.world_size:
            raise RuntimeError(
                "pynccl is already initialized but the pynccl world "
                "size does not match parallel_config.world_size "
                f"({pynccl_world_size} vs. {parallel_config.world_size}).")
    elif parallel_config.world_size > 1:
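        # NOTE: the pynccl process group is only set up for multi-GPU runs;
        # it is left uninitialized when world_size == 1.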
        pynccl_utils.init_process_group(
            world_size=parallel_config.world_size,
            local_rank=local_rank,
            rank=rank,
            init_method=distributed_init_method,
        )

    ensure_model_parallel_initialized(parallel_config.tensor_parallel_size,
                                      parallel_config.pipeline_parallel_size)
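
    # Initialize the custom all-reduce kernels unless they have been disabled
    # via the parallel config.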
    if not parallel_config.disable_custom_all_reduce:
        init_custom_ar()
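
    # Warm up the communication path with a small all-reduce.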
    torch.distributed.all_reduce(torch.zeros(1).cuda())
    if pynccl_utils.is_initialized():
        pynccl_utils.all_reduce(torch.zeros(1).cuda())


def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype):
    # Check if the GPU supports the requested dtype.
    if torch_dtype == torch.bfloat16:
        compute_capability = torch.cuda.get_device_capability()
        if compute_capability[0] < 8:
            gpu_name = torch.cuda.get_device_name()
            raise ValueError(
                "Bfloat16 is only supported on GPUs with compute capability "
                f"of at least 8.0. Your {gpu_name} GPU has compute capability "
                f"{compute_capability[0]}.{compute_capability[1]}. "
                "You can use float16 instead by explicitly setting the "
                "`dtype` flag in the CLI, for example: --dtype=half.")


def raise_if_cache_size_invalid(num_gpu_blocks, block_size,
                                max_model_len) -> None:
    if num_gpu_blocks <= 0:
        raise ValueError("No available memory for the cache blocks. "
                         "Try increasing `gpu_memory_utilization` when "
                         "initializing the engine.")
    max_seq_len = block_size * num_gpu_blocks
    if max_model_len > max_seq_len:
        raise ValueError(
            f"The model's max seq len ({max_model_len}) "
            "is larger than the maximum number of tokens that can be "
            f"stored in KV cache ({max_seq_len}). Try increasing "
            "`gpu_memory_utilization` or decreasing `max_model_len` when "
            "initializing the engine.")