Columns: method_name (string, 3-45 chars), method_body (string, 9-6.25k chars), full_code (string, 35-7.02k chars), docstring (string, 18-4.7k chars; null when the method has no docstring)
forward
hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) return hidden_states
null
_init_workers_ray
if self.parallel_config.tensor_parallel_size == 1: num_gpus = self.cache_config.gpu_memory_utilization else: num_gpus = 1 self.driver_dummy_worker: RayWorkerVllm = None self.workers: List[RayWorkerVllm] = [] driver_ip = get_ip() for bundle_id, bundle in enumerate(placement_group.bundle_specs): if not bundle.get('GPU', 0): continue scheduling_strategy = PlacementGroupSchedulingStrategy(placement_group= placement_group, placement_group_capture_child_tasks=True, placement_group_bundle_index=bundle_id) worker = ray.remote(num_cpus=0, num_gpus=num_gpus, scheduling_strategy= scheduling_strategy, **ray_remote_kwargs)(RayWorkerVllm).remote(self .model_config.trust_remote_code) worker_ip = ray.get(worker.get_node_ip.remote()) if worker_ip == driver_ip and self.driver_dummy_worker is None: self.driver_dummy_worker = worker else: self.workers.append(worker) if self.driver_dummy_worker is None: raise ValueError( 'Ray does not allocate any GPUs on the driver node. Consider adjusting the Ray placement group or running the driver on a GPU node.' ) driver_node_id, driver_gpu_ids = ray.get(self.driver_dummy_worker. get_node_and_gpu_ids.remote()) worker_node_and_gpu_ids = ray.get([worker.get_node_and_gpu_ids.remote() for worker in self.workers]) node_workers = defaultdict(list) node_gpus = defaultdict(list) node_workers[driver_node_id].append(0) node_gpus[driver_node_id].extend(driver_gpu_ids) for i, (node_id, gpu_ids) in enumerate(worker_node_and_gpu_ids, start=1): node_workers[node_id].append(i) node_gpus[node_id].extend(gpu_ids) for node_id, gpu_ids in node_gpus.items(): node_gpus[node_id] = sorted(gpu_ids) set_cuda_visible_devices(node_gpus[driver_node_id]) for worker, (node_id, _) in zip(self.workers, worker_node_and_gpu_ids): worker.set_cuda_visible_devices.remote(node_gpus[node_id]) distributed_init_method = f'tcp://{driver_ip}:{get_open_port()}' from vllm.worker.worker import Worker model_config = copy.deepcopy(self.model_config) parallel_config = copy.deepcopy(self.parallel_config) scheduler_config = copy.deepcopy(self.scheduler_config) for rank, (worker, (node_id, _)) in enumerate(zip(self.workers, worker_node_and_gpu_ids), start=1): local_rank = node_workers[node_id].index(rank) worker.init_worker.remote(lambda rank=rank, local_rank=local_rank: Worker(model_config, parallel_config, scheduler_config, local_rank, rank, distributed_init_method)) driver_rank = 0 driver_local_rank = node_workers[driver_node_id].index(driver_rank) self.driver_worker = Worker(model_config, parallel_config, scheduler_config, driver_local_rank, driver_rank, distributed_init_method, is_driver_worker=True) self._run_workers('init_model') self._run_workers('load_model', max_concurrent_workers=self.parallel_config .max_parallel_loading_workers)
def _init_workers_ray(self, placement_group: 'PlacementGroup', ** ray_remote_kwargs): if self.parallel_config.tensor_parallel_size == 1: num_gpus = self.cache_config.gpu_memory_utilization else: num_gpus = 1 self.driver_dummy_worker: RayWorkerVllm = None self.workers: List[RayWorkerVllm] = [] driver_ip = get_ip() for bundle_id, bundle in enumerate(placement_group.bundle_specs): if not bundle.get('GPU', 0): continue scheduling_strategy = PlacementGroupSchedulingStrategy(placement_group =placement_group, placement_group_capture_child_tasks=True, placement_group_bundle_index=bundle_id) worker = ray.remote(num_cpus=0, num_gpus=num_gpus, scheduling_strategy=scheduling_strategy, **ray_remote_kwargs)( RayWorkerVllm).remote(self.model_config.trust_remote_code) worker_ip = ray.get(worker.get_node_ip.remote()) if worker_ip == driver_ip and self.driver_dummy_worker is None: self.driver_dummy_worker = worker else: self.workers.append(worker) if self.driver_dummy_worker is None: raise ValueError( 'Ray does not allocate any GPUs on the driver node. Consider adjusting the Ray placement group or running the driver on a GPU node.' ) driver_node_id, driver_gpu_ids = ray.get(self.driver_dummy_worker. get_node_and_gpu_ids.remote()) worker_node_and_gpu_ids = ray.get([worker.get_node_and_gpu_ids.remote() for worker in self.workers]) node_workers = defaultdict(list) node_gpus = defaultdict(list) node_workers[driver_node_id].append(0) node_gpus[driver_node_id].extend(driver_gpu_ids) for i, (node_id, gpu_ids) in enumerate(worker_node_and_gpu_ids, start=1): node_workers[node_id].append(i) node_gpus[node_id].extend(gpu_ids) for node_id, gpu_ids in node_gpus.items(): node_gpus[node_id] = sorted(gpu_ids) set_cuda_visible_devices(node_gpus[driver_node_id]) for worker, (node_id, _) in zip(self.workers, worker_node_and_gpu_ids): worker.set_cuda_visible_devices.remote(node_gpus[node_id]) distributed_init_method = f'tcp://{driver_ip}:{get_open_port()}' from vllm.worker.worker import Worker model_config = copy.deepcopy(self.model_config) parallel_config = copy.deepcopy(self.parallel_config) scheduler_config = copy.deepcopy(self.scheduler_config) for rank, (worker, (node_id, _)) in enumerate(zip(self.workers, worker_node_and_gpu_ids), start=1): local_rank = node_workers[node_id].index(rank) worker.init_worker.remote(lambda rank=rank, local_rank=local_rank: Worker(model_config, parallel_config, scheduler_config, local_rank, rank, distributed_init_method)) driver_rank = 0 driver_local_rank = node_workers[driver_node_id].index(driver_rank) self.driver_worker = Worker(model_config, parallel_config, scheduler_config, driver_local_rank, driver_rank, distributed_init_method, is_driver_worker=True) self._run_workers('init_model') self._run_workers('load_model', max_concurrent_workers=self. parallel_config.max_parallel_loading_workers)
null
_check_if_gpu_supports_dtype
if torch_dtype == torch.bfloat16: compute_capability = torch.cuda.get_device_capability() if compute_capability[0] < 8: gpu_name = torch.cuda.get_device_name() raise ValueError( f'Bfloat16 is only supported on GPUs with compute capability of at least 8.0. Your {gpu_name} GPU has compute capability {compute_capability[0]}.{compute_capability[1]}.' )
def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype): if torch_dtype == torch.bfloat16: compute_capability = torch.cuda.get_device_capability() if compute_capability[0] < 8: gpu_name = torch.cuda.get_device_name() raise ValueError( f'Bfloat16 is only supported on GPUs with compute capability of at least 8.0. Your {gpu_name} GPU has compute capability {compute_capability[0]}.{compute_capability[1]}.' )
null
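A minimal, self-contained sketch that reuses the same capability check to pick a default dtype; pick_default_dtype is an illustrative helper, not a vLLM API.

import torch

def pick_default_dtype() -> torch.dtype:
    # Illustrative helper: prefer bfloat16 on compute capability >= 8.0 (Ampere or newer),
    # fall back to float16 on older GPUs and float32 on CPU.
    if not torch.cuda.is_available():
        return torch.float32
    major, _minor = torch.cuda.get_device_capability()
    return torch.bfloat16 if major >= 8 else torch.float16

print(pick_default_dtype())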
initialize_engine
"""Initialize the LLMEngine from the command line arguments.""" engine_args = EngineArgs.from_cli_args(args) return LLMEngine.from_engine_args(engine_args)
def initialize_engine(args: argparse.Namespace) ->LLMEngine: """Initialize the LLMEngine from the command line arguments.""" engine_args = EngineArgs.from_cli_args(args) return LLMEngine.from_engine_args(engine_args)
Initialize the LLMEngine from the command line arguments.
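A hedged sketch of how initialize_engine is typically wired up with the add_cli_args helper shown later in this table; the import path and the argv values are assumptions about the vLLM release these rows were extracted from.

import argparse
from vllm import EngineArgs, LLMEngine  # assumed top-level exports for this vLLM release

parser = argparse.ArgumentParser()
parser = EngineArgs.add_cli_args(parser)                       # registers the shared engine flags (see add_cli_args below)
args = parser.parse_args(['--model', 'facebook/opt-125m'])     # illustrative argv
engine: LLMEngine = LLMEngine.from_engine_args(EngineArgs.from_cli_args(args))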
__init__
super().__init__() hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.self_attention = FalconAttention(config, linear_method) self.mlp = FalconMLP(config, linear_method) self.config = config if config.new_decoder_architecture: self.ln_attn = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.ln_mlp = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) else: self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon ) if not config.parallel_attn: self.post_attention_layernorm = LayerNorm(hidden_size, eps=config. layer_norm_epsilon) self.reduce_row_parallel_results = not (config.new_decoder_architecture or config.parallel_attn)
def __init__(self, config: FalconConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.self_attention = FalconAttention(config, linear_method) self.mlp = FalconMLP(config, linear_method) self.config = config if config.new_decoder_architecture: self.ln_attn = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.ln_mlp = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) else: self.input_layernorm = LayerNorm(hidden_size, eps=config. layer_norm_epsilon) if not config.parallel_attn: self.post_attention_layernorm = LayerNorm(hidden_size, eps= config.layer_norm_epsilon) self.reduce_row_parallel_results = not (config.new_decoder_architecture or config.parallel_attn)
null
_init_engine
return MockEngine()
def _init_engine(self, *args, **kwargs): return MockEngine()
null
sample
head = self.lm_head.linear next_tokens = self.sampler(head.weight, hidden_states, sampling_metadata, head.bias) return next_tokens
def sample(self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) ->Optional[SamplerOutput]: head = self.lm_head.linear next_tokens = self.sampler(head.weight, hidden_states, sampling_metadata, head.bias) return next_tokens
null
test_reshape_and_cache
random.seed(seed) torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' num_slots = block_size * num_blocks slot_mapping = random.sample(range(num_slots), num_tokens) slot_mapping = torch.tensor(slot_mapping, dtype=torch.long, device=gpu_id) qkv = torch.randn(num_tokens, 3, num_heads, head_size, dtype=dtype, device= gpu_id) _, key, value = qkv.unbind(dim=1) key_caches, value_caches = kv_cache_factory(num_blocks, block_size, 1, num_heads, head_size, dtype, seed, gpu_id) key_cache, value_cache = key_caches[0], value_caches[0] cloned_key_cache = key_cache.clone() cloned_value_cache = value_cache.clone() cache_ops.reshape_and_cache(key, value, key_cache, value_cache, slot_mapping) reshaped_key = key.reshape(num_tokens, *key_cache[0, :, :, 0, :].shape) block_indicies = torch.div(slot_mapping, block_size, rounding_mode='floor') block_indicies = block_indicies.cpu().tolist() block_offsets = slot_mapping % block_size block_offsets = block_offsets.cpu().tolist() for i in range(num_tokens): block_idx = block_indicies[i] block_offset = block_offsets[i] cloned_key_cache[block_idx, :, :, block_offset, :] = reshaped_key[i] cloned_value_cache[block_idx, :, :, block_offset] = value[i] assert torch.allclose(key_cache, cloned_key_cache) assert torch.allclose(value_cache, cloned_value_cache)
@pytest.mark.parametrize('num_tokens', NUM_TOKENS) @pytest.mark.parametrize('num_heads', NUM_HEADS) @pytest.mark.parametrize('head_size', HEAD_SIZES) @pytest.mark.parametrize('block_size', BLOCK_SIZES) @pytest.mark.parametrize('num_blocks', NUM_BLOCKS) @pytest.mark.parametrize('dtype', DTYPES) @pytest.mark.parametrize('seed', SEEDS) @pytest.mark.parametrize('device', DEVICES) @torch.inference_mode() def test_reshape_and_cache(kv_cache_factory, num_tokens: int, num_heads: int, head_size: int, block_size: int, num_blocks: int, dtype: torch. dtype, seed: int, device: int) ->None: random.seed(seed) torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' num_slots = block_size * num_blocks slot_mapping = random.sample(range(num_slots), num_tokens) slot_mapping = torch.tensor(slot_mapping, dtype=torch.long, device=gpu_id) qkv = torch.randn(num_tokens, 3, num_heads, head_size, dtype=dtype, device=gpu_id) _, key, value = qkv.unbind(dim=1) key_caches, value_caches = kv_cache_factory(num_blocks, block_size, 1, num_heads, head_size, dtype, seed, gpu_id) key_cache, value_cache = key_caches[0], value_caches[0] cloned_key_cache = key_cache.clone() cloned_value_cache = value_cache.clone() cache_ops.reshape_and_cache(key, value, key_cache, value_cache, slot_mapping) reshaped_key = key.reshape(num_tokens, *key_cache[0, :, :, 0, :].shape) block_indicies = torch.div(slot_mapping, block_size, rounding_mode='floor') block_indicies = block_indicies.cpu().tolist() block_offsets = slot_mapping % block_size block_offsets = block_offsets.cpu().tolist() for i in range(num_tokens): block_idx = block_indicies[i] block_offset = block_offsets[i] cloned_key_cache[block_idx, :, :, block_offset, :] = reshaped_key[i] cloned_value_cache[block_idx, :, :, block_offset] = value[i] assert torch.allclose(key_cache, cloned_key_cache) assert torch.allclose(value_cache, cloned_value_cache)
null
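The reference update in this test relies on simple slot arithmetic: a flat slot id maps to a (block index, block offset) pair. A tiny self-contained sketch of that mapping, assuming the same paged-KV layout:

import torch

block_size = 16
slot_mapping = torch.tensor([0, 5, 16, 33])   # illustrative flat slot ids
block_indices = torch.div(slot_mapping, block_size, rounding_mode='floor')
block_offsets = slot_mapping % block_size
print(list(zip(block_indices.tolist(), block_offsets.tolist())))
# [(0, 0), (0, 5), (1, 0), (2, 1)]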
get_len
return self.data.get_len()
def get_len(self) ->int: return self.data.get_len()
null
get_special_tokens_mask
""" Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) bos_token_id = [1] if self.add_bos_token else [] eos_token_id = [1] if self.add_eos_token else [] if token_ids_1 is None: return bos_token_id + [0] * len(token_ids_0) + eos_token_id return bos_token_id + [0] * len(token_ids_0) + eos_token_id + bos_token_id + [0 ] * len(token_ids_1) + eos_token_id
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) ->List[ int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) bos_token_id = [1] if self.add_bos_token else [] eos_token_id = [1] if self.add_eos_token else [] if token_ids_1 is None: return bos_token_id + [0] * len(token_ids_0) + eos_token_id return bos_token_id + [0] * len(token_ids_0 ) + eos_token_id + bos_token_id + [0] * len(token_ids_1) + eos_token_id
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
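A plain-Python illustration of the mask layout returned for a sequence pair, assuming both add_bos_token and add_eos_token are enabled:

# Illustrative only: a pair (a, b) is laid out as <bos> a... <eos> <bos> b... <eos>,
# so the mask marks 1 at the special-token positions and 0 at sequence tokens.
token_ids_0 = [101, 102, 103]
token_ids_1 = [201, 202]
mask = [1] + [0] * len(token_ids_0) + [1] + [1] + [0] * len(token_ids_1) + [1]
print(mask)  # [1, 0, 0, 0, 1, 1, 0, 0, 1]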
add_global_metrics_labels
labels.update(kwargs)
def add_global_metrics_labels(**kwargs): labels.update(kwargs)
null
forward
qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) q, k = self.rotary_emb(positions, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.o_proj(attn_output) return output
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) q, k = self.rotary_emb(positions, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.o_proj(attn_output) return output
null
model_parallel_is_initialized
"""Check if tensor and pipeline parallel groups are initialized.""" return _TENSOR_MODEL_PARALLEL_GROUP is not None and _PIPELINE_MODEL_PARALLEL_GROUP is not None
def model_parallel_is_initialized(): """Check if tensor and pipeline parallel groups are initialized.""" return (_TENSOR_MODEL_PARALLEL_GROUP is not None and _PIPELINE_MODEL_PARALLEL_GROUP is not None)
Check if tensor and pipeline parallel groups are initialized.
swap_in
self._swap(self.cpu_cache, self.gpu_cache, src_to_dst)
def swap_in(self, src_to_dst: Dict[int, int]) ->None: self._swap(self.cpu_cache, self.gpu_cache, src_to_dst)
null
__init__
self.is_prompt = is_prompt self.max_context_len = max_context_len self.slot_mapping = slot_mapping self.context_lens = context_lens self.block_tables = block_tables self.use_cuda_graph = use_cuda_graph self.attn_bias = None
def __init__(self, is_prompt: bool, slot_mapping: torch.Tensor, max_context_len: Optional[int], context_lens: Optional[torch.Tensor], block_tables: Optional[torch.Tensor], use_cuda_graph: bool) ->None: self.is_prompt = is_prompt self.max_context_len = max_context_len self.slot_mapping = slot_mapping self.context_lens = context_lens self.block_tables = block_tables self.use_cuda_graph = use_cuda_graph self.attn_bias = None
null
_append_logical_block
block = LogicalTokenBlock(block_number=len(self.logical_token_blocks), block_size=self.block_size) self.logical_token_blocks.append(block)
def _append_logical_block(self) ->None: block = LogicalTokenBlock(block_number=len(self.logical_token_blocks), block_size=self.block_size) self.logical_token_blocks.append(block)
null
__init__
self.step_calls = 0 self.add_request_calls = 0 self.abort_request_calls = 0 self.request_id = None
def __init__(self): self.step_calls = 0 self.add_request_calls = 0 self.abort_request_calls = 0 self.request_id = None
null
forward
qkv, _ = self.query_key_value(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) q, k = self.rotary_emb(position_ids, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.dense(attn_output) return output
def forward(self, position_ids: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: qkv, _ = self.query_key_value(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) q, k = self.rotary_emb(position_ids, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.dense(attn_output) return output
null
from_engine_args
"""Creates an LLM engine from the engine arguments.""" engine_configs = engine_args.create_engine_configs() parallel_config = engine_configs[2] placement_group = initialize_cluster(parallel_config) engine = cls(*engine_configs, placement_group, log_stats=not engine_args. disable_log_stats) return engine
@classmethod def from_engine_args(cls, engine_args: EngineArgs) ->'LLMEngine': """Creates an LLM engine from the engine arguments.""" engine_configs = engine_args.create_engine_configs() parallel_config = engine_configs[2] placement_group = initialize_cluster(parallel_config) engine = cls(*engine_configs, placement_group, log_stats=not engine_args.disable_log_stats) return engine
Creates an LLM engine from the engine arguments.
abort_seq_group
if isinstance(request_id, str): request_id = request_id, request_ids = set(request_id) for state_queue in [self.waiting, self.running, self.swapped]: for seq_group in reversed(state_queue): if seq_group.request_id in request_ids: state_queue.remove(seq_group) for seq in seq_group.get_seqs(): if seq.is_finished(): continue seq.status = SequenceStatus.FINISHED_ABORTED self.free_seq(seq) request_ids.remove(seq_group.request_id) if not request_ids: return
def abort_seq_group(self, request_id: Union[str, Iterable[str]]) ->None: if isinstance(request_id, str): request_id = request_id, request_ids = set(request_id) for state_queue in [self.waiting, self.running, self.swapped]: for seq_group in reversed(state_queue): if seq_group.request_id in request_ids: state_queue.remove(seq_group) for seq in seq_group.get_seqs(): if seq.is_finished(): continue seq.status = SequenceStatus.FINISHED_ABORTED self.free_seq(seq) request_ids.remove(seq_group.request_id) if not request_ids: return
null
forward
hidden_states = self.embed_in(input_ids) for i in range(len(self.layers)): layer = self.layers[i] hidden_states = layer(position_ids, hidden_states, kv_caches[i], input_metadata) hidden_states = self.final_layer_norm(hidden_states) return hidden_states
def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.embed_in(input_ids) for i in range(len(self.layers)): layer = self.layers[i] hidden_states = layer(position_ids, hidden_states, kv_caches[i], input_metadata) hidden_states = self.final_layer_norm(hidden_states) return hidden_states
null
get_vocab
"""Returns vocab as a dict""" vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab
def get_vocab(self): """Returns vocab as a dict""" vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab
Returns vocab as a dict
forward
del kv_caches self.input_buffers['input_ids'].copy_(input_ids, non_blocking=True) self.input_buffers['positions'].copy_(positions, non_blocking=True) self.input_buffers['slot_mapping'].copy_(input_metadata.slot_mapping, non_blocking=True) self.input_buffers['context_lens'].copy_(input_metadata.context_lens, non_blocking=True) self.input_buffers['block_tables'].copy_(input_metadata.block_tables, non_blocking=True) self.graph.replay() return self.output_buffers['hidden_states']
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[Tuple[torch.Tensor, torch.Tensor]], input_metadata: InputMetadata) ->torch.Tensor: del kv_caches self.input_buffers['input_ids'].copy_(input_ids, non_blocking=True) self.input_buffers['positions'].copy_(positions, non_blocking=True) self.input_buffers['slot_mapping'].copy_(input_metadata.slot_mapping, non_blocking=True) self.input_buffers['context_lens'].copy_(input_metadata.context_lens, non_blocking=True) self.input_buffers['block_tables'].copy_(input_metadata.block_tables, non_blocking=True) self.graph.replay() return self.output_buffers['hidden_states']
null
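The forward above only refreshes pre-allocated input buffers and replays a captured graph. A minimal, self-contained PyTorch sketch of that capture-and-replay pattern (not vLLM's own capture code; it assumes a CUDA device and follows the warm-up recipe from the PyTorch CUDA-graphs documentation):

import torch

if torch.cuda.is_available():
    model = torch.nn.Linear(16, 16).cuda()
    static_in = torch.zeros(8, 16, device='cuda')       # persistent input buffer
    # Warm up on a side stream before capture, as the PyTorch docs recommend.
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        static_out = model(static_in)
    torch.cuda.current_stream().wait_stream(s)

    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph):
        static_out = model(static_in)                    # record the kernels; outputs land in static_out

    static_in.copy_(torch.randn(8, 16, device='cuda'))   # refill the static buffer with real data...
    graph.replay()                                       # ...and replay the recorded kernels
    print(static_out.sum().item())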
get_sliding_window
return getattr(self.hf_config, 'sliding_window', None)
def get_sliding_window(self) ->Optional[int]: return getattr(self.hf_config, 'sliding_window', None)
null
step
"""Performs one decoding iteration and returns newly generated results. This function performs one decoding iteration of the engine. It first schedules the sequences to be executed in the next iteration and the token blocks to be swapped in/out/copy. Then, it executes the model and updates the scheduler with the model outputs. Finally, it decodes the sequences and returns the newly generated results. """ seq_group_metadata_list, scheduler_outputs = self.scheduler.schedule() if not scheduler_outputs.is_empty(): all_outputs = self._run_workers('execute_model', driver_kwargs={ 'seq_group_metadata_list': seq_group_metadata_list, 'blocks_to_swap_in': scheduler_outputs.blocks_to_swap_in, 'blocks_to_swap_out': scheduler_outputs.blocks_to_swap_out, 'blocks_to_copy': scheduler_outputs.blocks_to_copy}) output = all_outputs[0] else: output = [] return self._process_model_outputs(output, scheduler_outputs)
def step(self) ->List[RequestOutput]: """Performs one decoding iteration and returns newly generated results. This function performs one decoding iteration of the engine. It first schedules the sequences to be executed in the next iteration and the token blocks to be swapped in/out/copy. Then, it executes the model and updates the scheduler with the model outputs. Finally, it decodes the sequences and returns the newly generated results. """ seq_group_metadata_list, scheduler_outputs = self.scheduler.schedule() if not scheduler_outputs.is_empty(): all_outputs = self._run_workers('execute_model', driver_kwargs={ 'seq_group_metadata_list': seq_group_metadata_list, 'blocks_to_swap_in': scheduler_outputs.blocks_to_swap_in, 'blocks_to_swap_out': scheduler_outputs.blocks_to_swap_out, 'blocks_to_copy': scheduler_outputs.blocks_to_copy}) output = all_outputs[0] else: output = [] return self._process_model_outputs(output, scheduler_outputs)
Performs one decoding iteration and returns newly generated results. This function performs one decoding iteration of the engine. It first schedules the sequences to be executed in the next iteration and the token blocks to be swapped in/out/copy. Then, it executes the model and updates the scheduler with the model outputs. Finally, it decodes the sequences and returns the newly generated results.
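A hedged sketch of the loop that usually drives step(); add_request, has_unfinished_requests, SamplingParams, and the RequestOutput fields are assumed to match the vLLM release these rows were extracted from.

from vllm import EngineArgs, LLMEngine, SamplingParams  # assumed vLLM-era API

engine = LLMEngine.from_engine_args(EngineArgs(model='facebook/opt-125m'))
engine.add_request(request_id='0', prompt='Hello, my name is',
                   sampling_params=SamplingParams(max_tokens=16))

# Drive the engine one iteration at a time until every request has finished.
while engine.has_unfinished_requests():
    for request_output in engine.step():
        if request_output.finished:
            print(request_output.outputs[0].text)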
get_pipeline_model_parallel_prev_rank
"""Return the global rank that preceeds the caller in the pipeline""" assert _PIPELINE_GLOBAL_RANKS is not None, 'Pipeline parallel group is not initialized' rank_in_pipeline = get_pipeline_model_parallel_rank() world_size = get_pipeline_model_parallel_world_size() return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline - 1) % world_size]
def get_pipeline_model_parallel_prev_rank(): """Return the global rank that preceeds the caller in the pipeline""" assert _PIPELINE_GLOBAL_RANKS is not None, 'Pipeline parallel group is not initialized' rank_in_pipeline = get_pipeline_model_parallel_rank() world_size = get_pipeline_model_parallel_world_size() return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline - 1) % world_size]
Return the global rank that preceeds the caller in the pipeline
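The previous/next lookups are plain modular arithmetic over the list of global ranks, one per pipeline stage. A standalone illustration, assuming a 4-stage pipeline whose global ranks are [0, 4, 8, 12]:

pipeline_global_ranks = [0, 4, 8, 12]   # illustrative global ranks, one per stage
world_size = len(pipeline_global_ranks)

for rank_in_pipeline in range(world_size):
    prev_rank = pipeline_global_ranks[(rank_in_pipeline - 1) % world_size]
    next_rank = pipeline_global_ranks[(rank_in_pipeline + 1) % world_size]
    print(rank_in_pipeline, prev_rank, next_rank)
# Stage 0 wraps around: its "previous" global rank is 12.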
create_weights
"""Create weights for a linear layer.""" raise NotImplementedError
@abstractmethod def create_weights(self, input_size_per_partition: int, output_size_per_partition: int, input_size: int, output_size: int, params_dtype: torch.dtype) ->Dict[str, Any]: """Create weights for a linear layer.""" raise NotImplementedError
Create weights for a linear layer.
get_num_free_gpu_blocks
return self.gpu_allocator.get_num_free_blocks()
def get_num_free_gpu_blocks(self) ->int: return self.gpu_allocator.get_num_free_blocks()
null
_schedule
blocks_to_swap_in: Dict[int, int] = {} blocks_to_swap_out: Dict[int, int] = {} blocks_to_copy: Dict[int, List[int]] = {} now = time.monotonic() if not self.swapped: ignored_seq_groups: List[SequenceGroup] = [] scheduled: List[SequenceGroup] = [] num_curr_seqs = sum(seq_group.get_max_num_running_seqs() for seq_group in self.running) seq_lens: List[int] = [] while self.waiting: seq_group = self.waiting[0] waiting_seqs = seq_group.get_seqs(status=SequenceStatus.WAITING) assert len(waiting_seqs) == 1, 'Waiting sequence group should have only one prompt sequence.' num_prompt_tokens = waiting_seqs[0].get_len() if num_prompt_tokens > self.prompt_limit: logger.warning(f'Input prompt ({num_prompt_tokens} tokens) is too long and exceeds limit of {self.prompt_limit}') for seq in waiting_seqs: seq.status = SequenceStatus.FINISHED_IGNORED ignored_seq_groups.append(seq_group) self.waiting.pop(0) continue can_allocate = self.block_manager.can_allocate(seq_group) if can_allocate == AllocStatus.LATER: break elif can_allocate == AllocStatus.NEVER: logger.warning(f'Input prompt ({num_prompt_tokens} tokens) is too long and exceeds the capacity of block_manager') for seq in waiting_seqs: seq.status = SequenceStatus.FINISHED_IGNORED ignored_seq_groups.append(seq_group) self.waiting.pop(0) continue new_seq_lens = seq_lens + [num_prompt_tokens] num_batched_tokens = len(new_seq_lens) * max(new_seq_lens) if num_batched_tokens > self.scheduler_config.max_num_batched_tokens: break num_new_seqs = seq_group.get_max_num_running_seqs() if num_curr_seqs + num_new_seqs > self.scheduler_config.max_num_seqs: break num_paddings = num_batched_tokens - sum(new_seq_lens) if num_paddings > self.scheduler_config.max_paddings: break seq_lens = new_seq_lens seq_group = self.waiting.pop(0) self._allocate(seq_group) self.running.append(seq_group) num_curr_seqs += num_new_seqs scheduled.append(seq_group) if scheduled or ignored_seq_groups: scheduler_outputs = SchedulerOutputs(scheduled_seq_groups=scheduled, prompt_run=True, num_batched_tokens=len(seq_lens) * max(seq_lens) if seq_lens else 0, blocks_to_swap_in=blocks_to_swap_in, blocks_to_swap_out=blocks_to_swap_out, blocks_to_copy=blocks_to_copy, ignored_seq_groups=ignored_seq_groups) return scheduler_outputs self.running = self.policy.sort_by_priority(now, self.running) running: List[SequenceGroup] = [] preempted: List[SequenceGroup] = [] while self.running: seq_group = self.running.pop(0) while not self.block_manager.can_append_slot(seq_group): if self.running: victim_seq_group = self.running.pop(-1) self._preempt(victim_seq_group, blocks_to_swap_out) preempted.append(victim_seq_group) else: self._preempt(seq_group, blocks_to_swap_out) preempted.append(seq_group) break else: self._append_slot(seq_group, blocks_to_copy) running.append(seq_group) self.running = running self.swapped = self.policy.sort_by_priority(now, self.swapped) if not preempted: num_curr_seqs = sum(seq_group.get_max_num_running_seqs() for seq_group in self.running) while self.swapped: seq_group = self.swapped[0] if not self.block_manager.can_swap_in(seq_group): break num_new_seqs = seq_group.get_max_num_running_seqs() if num_curr_seqs + num_new_seqs > self.scheduler_config.max_num_seqs: break seq_group = self.swapped.pop(0) self._swap_in(seq_group, blocks_to_swap_in) self._append_slot(seq_group, blocks_to_copy) num_curr_seqs += num_new_seqs self.running.append(seq_group) num_batched_tokens = sum(seq_group.num_seqs(status=SequenceStatus.RUNNING) for seq_group in self.running) scheduler_outputs = SchedulerOutputs(scheduled_seq_groups=self.running, prompt_run=False, num_batched_tokens=num_batched_tokens, blocks_to_swap_in=blocks_to_swap_in, blocks_to_swap_out=blocks_to_swap_out, blocks_to_copy=blocks_to_copy, ignored_seq_groups=[]) return scheduler_outputs
def _schedule(self) ->SchedulerOutputs: blocks_to_swap_in: Dict[int, int] = {} blocks_to_swap_out: Dict[int, int] = {} blocks_to_copy: Dict[int, List[int]] = {} now = time.monotonic() if not self.swapped: ignored_seq_groups: List[SequenceGroup] = [] scheduled: List[SequenceGroup] = [] num_curr_seqs = sum(seq_group.get_max_num_running_seqs() for seq_group in self.running) seq_lens: List[int] = [] while self.waiting: seq_group = self.waiting[0] waiting_seqs = seq_group.get_seqs(status=SequenceStatus.WAITING) assert len(waiting_seqs) == 1, 'Waiting sequence group should have only one prompt sequence.' num_prompt_tokens = waiting_seqs[0].get_len() if num_prompt_tokens > self.prompt_limit: logger.warning(f'Input prompt ({num_prompt_tokens} tokens) is too long and exceeds limit of {self.prompt_limit}') for seq in waiting_seqs: seq.status = SequenceStatus.FINISHED_IGNORED ignored_seq_groups.append(seq_group) self.waiting.pop(0) continue can_allocate = self.block_manager.can_allocate(seq_group) if can_allocate == AllocStatus.LATER: break elif can_allocate == AllocStatus.NEVER: logger.warning(f'Input prompt ({num_prompt_tokens} tokens) is too long and exceeds the capacity of block_manager') for seq in waiting_seqs: seq.status = SequenceStatus.FINISHED_IGNORED ignored_seq_groups.append(seq_group) self.waiting.pop(0) continue new_seq_lens = seq_lens + [num_prompt_tokens] num_batched_tokens = len(new_seq_lens) * max(new_seq_lens) if num_batched_tokens > self.scheduler_config.max_num_batched_tokens: break num_new_seqs = seq_group.get_max_num_running_seqs() if num_curr_seqs + num_new_seqs > self.scheduler_config.max_num_seqs: break num_paddings = num_batched_tokens - sum(new_seq_lens) if num_paddings > self.scheduler_config.max_paddings: break seq_lens = new_seq_lens seq_group = self.waiting.pop(0) self._allocate(seq_group) self.running.append(seq_group) num_curr_seqs += num_new_seqs scheduled.append(seq_group) if scheduled or ignored_seq_groups: scheduler_outputs = SchedulerOutputs(scheduled_seq_groups=scheduled, prompt_run=True, num_batched_tokens=len(seq_lens) * max(seq_lens) if seq_lens else 0, blocks_to_swap_in=blocks_to_swap_in, blocks_to_swap_out=blocks_to_swap_out, blocks_to_copy=blocks_to_copy, ignored_seq_groups=ignored_seq_groups) return scheduler_outputs self.running = self.policy.sort_by_priority(now, self.running) running: List[SequenceGroup] = [] preempted: List[SequenceGroup] = [] while self.running: seq_group = self.running.pop(0) while not self.block_manager.can_append_slot(seq_group): if self.running: victim_seq_group = self.running.pop(-1) self._preempt(victim_seq_group, blocks_to_swap_out) preempted.append(victim_seq_group) else: self._preempt(seq_group, blocks_to_swap_out) preempted.append(seq_group) break else: self._append_slot(seq_group, blocks_to_copy) running.append(seq_group) self.running = running self.swapped = self.policy.sort_by_priority(now, self.swapped) if not preempted: num_curr_seqs = sum(seq_group.get_max_num_running_seqs() for seq_group in self.running) while self.swapped: seq_group = self.swapped[0] if not self.block_manager.can_swap_in(seq_group): break num_new_seqs = seq_group.get_max_num_running_seqs() if num_curr_seqs + num_new_seqs > self.scheduler_config.max_num_seqs: break seq_group = self.swapped.pop(0) self._swap_in(seq_group, blocks_to_swap_in) self._append_slot(seq_group, blocks_to_copy) num_curr_seqs += num_new_seqs self.running.append(seq_group) num_batched_tokens = sum(seq_group.num_seqs(status=SequenceStatus.RUNNING) for seq_group in self.running) scheduler_outputs = SchedulerOutputs(scheduled_seq_groups=self.running, prompt_run=False, num_batched_tokens=num_batched_tokens, blocks_to_swap_in=blocks_to_swap_in, blocks_to_swap_out=blocks_to_swap_out, blocks_to_copy=blocks_to_copy, ignored_seq_groups=[]) return scheduler_outputs
null
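The prompt-admission loop in _schedule pads every prompt in a batch to the longest one, so admitting a long prompt inflates both the padded token count and the wasted padding slots. A short standalone sketch of that accounting, with illustrative limits:

seq_lens = [7, 12, 30]                     # illustrative prompt lengths already admitted
candidate = 45                             # next waiting prompt

new_seq_lens = seq_lens + [candidate]
num_batched_tokens = len(new_seq_lens) * max(new_seq_lens)   # padded batch size
num_paddings = num_batched_tokens - sum(new_seq_lens)        # wasted padding slots

max_num_batched_tokens, max_paddings = 2560, 256             # illustrative limits
admit = (num_batched_tokens <= max_num_batched_tokens
         and num_paddings <= max_paddings)
print(num_batched_tokens, num_paddings, admit)               # 180 86 True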
get_last_token_id
assert self.num_tokens > 0 return self.token_ids[self.num_tokens - 1]
def get_last_token_id(self) ->int: assert self.num_tokens > 0 return self.token_ids[self.num_tokens - 1]
null
forward
if residual is not None: ops.fused_add_rms_norm(x, residual, self.weight.data, self.variance_epsilon ) return x, residual out = torch.empty_like(x) ops.rms_norm(out, x, self.weight.data, self.variance_epsilon) return out
def forward(self, x: torch.Tensor, residual: Optional[torch.Tensor]=None ) ->Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: if residual is not None: ops.fused_add_rms_norm(x, residual, self.weight.data, self. variance_epsilon) return x, residual out = torch.empty_like(x) ops.rms_norm(out, x, self.weight.data, self.variance_epsilon) return out
null
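A pure-PyTorch reference for what the fused rms_norm / fused_add_rms_norm kernels compute, handy for checking outputs; a sketch of the intended semantics, not the kernels themselves:

import torch

def rms_norm_ref(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6,
                 residual: torch.Tensor = None):
    # Reference semantics: optionally add the residual first, then scale by the
    # reciprocal RMS and the learned weight (no mean subtraction, unlike LayerNorm).
    if residual is not None:
        x = x + residual
        residual = x
    variance = x.pow(2).mean(dim=-1, keepdim=True)
    out = x * torch.rsqrt(variance + eps) * weight
    return out if residual is None else (out, residual)

x, w = torch.randn(2, 8), torch.ones(8)
print(rms_norm_ref(x, w).shape)  # torch.Size([2, 8])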
get_supported_act_dtypes
return [torch.half]
@classmethod def get_supported_act_dtypes(cls) ->List[torch.dtype]: return [torch.half]
null
weight_loader
param_data = param.data output_dim = getattr(param, 'output_dim', None) if loaded_shard_id is None: if output_dim is None: assert param_data.shape == loaded_weight.shape param_data.copy_(loaded_weight) return current_shard_offset = 0 shard_offsets = [] for i, output_size in enumerate(self.output_sizes): shard_offsets.append((i, current_shard_offset, output_size)) current_shard_offset += output_size packed_dim = getattr(param, 'packed_dim', None) for shard_id, shard_offset, shard_size in shard_offsets: if packed_dim == output_dim: shard_size = shard_size // param.pack_factor shard_offset = shard_offset // param.pack_factor loaded_weight_shard = loaded_weight.narrow(output_dim, shard_offset, shard_size) self.weight_loader(param, loaded_weight_shard, shard_id) return assert loaded_shard_id < len(self.output_sizes) tp_rank = get_tensor_model_parallel_rank() tp_size = get_tensor_model_parallel_world_size() if output_dim is not None: shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size shard_size = self.output_sizes[loaded_shard_id] // tp_size packed_dim = getattr(param, 'packed_dim', None) if packed_dim == output_dim: shard_size = shard_size // param.pack_factor shard_offset = shard_offset // param.pack_factor param_data = param_data.narrow(output_dim, shard_offset, shard_size) start_idx = tp_rank * shard_size loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size) else: ignore_warning = getattr(param, 'ignore_warning', False) if not ignore_warning: logger.warning( 'Loading a weight without `output_dim` attribute in MergedColumnParallelLinear, assume the weight is the same for all partitions.' ) assert param_data.shape == loaded_weight.shape param_data.copy_(loaded_weight)
def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor, loaded_shard_id: Optional[int]=None): param_data = param.data output_dim = getattr(param, 'output_dim', None) if loaded_shard_id is None: if output_dim is None: assert param_data.shape == loaded_weight.shape param_data.copy_(loaded_weight) return current_shard_offset = 0 shard_offsets = [] for i, output_size in enumerate(self.output_sizes): shard_offsets.append((i, current_shard_offset, output_size)) current_shard_offset += output_size packed_dim = getattr(param, 'packed_dim', None) for shard_id, shard_offset, shard_size in shard_offsets: if packed_dim == output_dim: shard_size = shard_size // param.pack_factor shard_offset = shard_offset // param.pack_factor loaded_weight_shard = loaded_weight.narrow(output_dim, shard_offset, shard_size) self.weight_loader(param, loaded_weight_shard, shard_id) return assert loaded_shard_id < len(self.output_sizes) tp_rank = get_tensor_model_parallel_rank() tp_size = get_tensor_model_parallel_world_size() if output_dim is not None: shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size shard_size = self.output_sizes[loaded_shard_id] // tp_size packed_dim = getattr(param, 'packed_dim', None) if packed_dim == output_dim: shard_size = shard_size // param.pack_factor shard_offset = shard_offset // param.pack_factor param_data = param_data.narrow(output_dim, shard_offset, shard_size) start_idx = tp_rank * shard_size loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size) else: ignore_warning = getattr(param, 'ignore_warning', False) if not ignore_warning: logger.warning( 'Loading a weight without `output_dim` attribute in MergedColumnParallelLinear, assume the weight is the same for all partitions.' ) assert param_data.shape == loaded_weight.shape param_data.copy_(loaded_weight)
null
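For the non-packed sharded path above, the core operation is a single narrow along the output dimension: each tensor-parallel rank copies only its slice of the checkpoint tensor. A standalone toy example with illustrative sizes, not tied to any particular layer:

import torch

tp_size, tp_rank = 2, 1                 # illustrative tensor-parallel layout
output_dim = 0                          # column-parallel weights shard along the output dim

loaded_weight = torch.arange(8 * 3, dtype=torch.float32).reshape(8, 3)  # full checkpoint weight
shard_size = loaded_weight.shape[output_dim] // tp_size
start_idx = tp_rank * shard_size
local_shard = loaded_weight.narrow(output_dim, start_idx, shard_size)
print(local_shard.shape)                # torch.Size([4, 3]); rank 1 keeps rows 4..7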
propagate_exception
"""Propagate an exception to request streams (all if request_id is None).""" if request_id is not None: self._request_streams[request_id].put(exc) else: for stream in self._request_streams.values(): stream.put(exc)
def propagate_exception(self, exc: Exception, request_id: Optional[str]=None ) ->None: """Propagate an exception to request streams (all if request_id is None).""" if request_id is not None: self._request_streams[request_id].put(exc) else: for stream in self._request_streams.values(): stream.put(exc)
Propagate an exception to request streams (all if request_id is None).
get_token_ids
return self.data.get_token_ids()
def get_token_ids(self) ->List[int]: return self.data.get_token_ids()
null
add_cli_args
"""Shared CLI arguments for vLLM engine.""" parser.add_argument('--model', type=str, default='facebook/opt-125m', help= 'name or path of the huggingface model to use') parser.add_argument('--tokenizer', type=str, default=EngineArgs.tokenizer, help='name or path of the huggingface tokenizer to use') parser.add_argument('--revision', type=str, default=None, help= 'the specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.' ) parser.add_argument('--tokenizer-revision', type=str, default=None, help= 'the specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.' ) parser.add_argument('--tokenizer-mode', type=str, default=EngineArgs. tokenizer_mode, choices=['auto', 'slow'], help= 'tokenizer mode. "auto" will use the fast tokenizer if available, and "slow" will always use the slow tokenizer.' ) parser.add_argument('--trust-remote-code', action='store_true', help= 'trust remote code from huggingface') parser.add_argument('--download-dir', type=str, default=EngineArgs. download_dir, help= 'directory to download and load the weights, default to the default cache dir of huggingface' ) parser.add_argument('--load-format', type=str, default=EngineArgs. load_format, choices=['auto', 'pt', 'safetensors', 'npcache', 'dummy'], help= 'The format of the model weights to load. "auto" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available. "pt" will load the weights in the pytorch bin format. "safetensors" will load the weights in the safetensors format. "npcache" will load the weights in pytorch format and store a numpy cache to speed up the loading. "dummy" will initialize the weights with random values, which is mainly for profiling.' ) parser.add_argument('--dtype', type=str, default=EngineArgs.dtype, choices= ['auto', 'half', 'float16', 'bfloat16', 'float', 'float32'], help= 'data type for model weights and activations. The "auto" option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models.' ) parser.add_argument('--max-model-len', type=int, default=None, help= 'model context length. If unspecified, will be automatically derived from the model.' ) parser.add_argument('--worker-use-ray', action='store_true', help= 'use Ray for distributed serving, will be automatically set when using more than 1 GPU' ) parser.add_argument('--pipeline-parallel-size', '-pp', type=int, default= EngineArgs.pipeline_parallel_size, help='number of pipeline stages') parser.add_argument('--tensor-parallel-size', '-tp', type=int, default= EngineArgs.tensor_parallel_size, help='number of tensor parallel replicas') parser.add_argument('--max-parallel-loading-workers', type=int, help= 'load model sequentially in multiple batches, to avoid RAM OOM when using tensor parallel and large models' ) parser.add_argument('--block-size', type=int, default=EngineArgs.block_size, choices=[8, 16, 32], help='token block size') parser.add_argument('--seed', type=int, default=EngineArgs.seed, help= 'random seed') parser.add_argument('--swap-space', type=int, default=EngineArgs.swap_space, help='CPU swap space size (GiB) per GPU') parser.add_argument('--gpu-memory-utilization', type=float, default= EngineArgs.gpu_memory_utilization, help= 'the fraction of GPU memory to be used for the model executor, which can range from 0 to 1.If unspecified, will use the default value of 0.9.' 
) parser.add_argument('--max-num-batched-tokens', type=int, default= EngineArgs.max_num_batched_tokens, help= 'maximum number of batched tokens per iteration') parser.add_argument('--max-num-seqs', type=int, default=EngineArgs. max_num_seqs, help='maximum number of sequences per iteration') parser.add_argument('--max-paddings', type=int, default=EngineArgs. max_paddings, help='maximum number of paddings in a batch') parser.add_argument('--disable-log-stats', action='store_true', help= 'disable logging statistics') parser.add_argument('--quantization', '-q', type=str, choices=['awq', 'gptq', 'squeezellm', None], default=None, help= 'Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights.' ) parser.add_argument('--enforce-eager', action='store_true', help= 'Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal performance and flexibility.' ) parser.add_argument('--max-context-len-to-capture', type=int, default= EngineArgs.max_context_len_to_capture, help= 'maximum context length covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode.' ) return parser
@staticmethod def add_cli_args(parser: argparse.ArgumentParser) ->argparse.ArgumentParser: """Shared CLI arguments for vLLM engine.""" parser.add_argument('--model', type=str, default='facebook/opt-125m', help='name or path of the huggingface model to use') parser.add_argument('--tokenizer', type=str, default=EngineArgs. tokenizer, help='name or path of the huggingface tokenizer to use') parser.add_argument('--revision', type=str, default=None, help= 'the specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.' ) parser.add_argument('--tokenizer-revision', type=str, default=None, help= 'the specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.' ) parser.add_argument('--tokenizer-mode', type=str, default=EngineArgs. tokenizer_mode, choices=['auto', 'slow'], help= 'tokenizer mode. "auto" will use the fast tokenizer if available, and "slow" will always use the slow tokenizer.' ) parser.add_argument('--trust-remote-code', action='store_true', help= 'trust remote code from huggingface') parser.add_argument('--download-dir', type=str, default=EngineArgs. download_dir, help= 'directory to download and load the weights, default to the default cache dir of huggingface' ) parser.add_argument('--load-format', type=str, default=EngineArgs. load_format, choices=['auto', 'pt', 'safetensors', 'npcache', 'dummy'], help= 'The format of the model weights to load. "auto" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available. "pt" will load the weights in the pytorch bin format. "safetensors" will load the weights in the safetensors format. "npcache" will load the weights in pytorch format and store a numpy cache to speed up the loading. "dummy" will initialize the weights with random values, which is mainly for profiling.' ) parser.add_argument('--dtype', type=str, default=EngineArgs.dtype, choices=['auto', 'half', 'float16', 'bfloat16', 'float', 'float32'], help= 'data type for model weights and activations. The "auto" option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models.' ) parser.add_argument('--max-model-len', type=int, default=None, help= 'model context length. If unspecified, will be automatically derived from the model.' ) parser.add_argument('--worker-use-ray', action='store_true', help= 'use Ray for distributed serving, will be automatically set when using more than 1 GPU' ) parser.add_argument('--pipeline-parallel-size', '-pp', type=int, default=EngineArgs.pipeline_parallel_size, help= 'number of pipeline stages') parser.add_argument('--tensor-parallel-size', '-tp', type=int, default= EngineArgs.tensor_parallel_size, help= 'number of tensor parallel replicas') parser.add_argument('--max-parallel-loading-workers', type=int, help= 'load model sequentially in multiple batches, to avoid RAM OOM when using tensor parallel and large models' ) parser.add_argument('--block-size', type=int, default=EngineArgs. block_size, choices=[8, 16, 32], help='token block size') parser.add_argument('--seed', type=int, default=EngineArgs.seed, help= 'random seed') parser.add_argument('--swap-space', type=int, default=EngineArgs. 
swap_space, help='CPU swap space size (GiB) per GPU') parser.add_argument('--gpu-memory-utilization', type=float, default= EngineArgs.gpu_memory_utilization, help= 'the fraction of GPU memory to be used for the model executor, which can range from 0 to 1.If unspecified, will use the default value of 0.9.' ) parser.add_argument('--max-num-batched-tokens', type=int, default= EngineArgs.max_num_batched_tokens, help= 'maximum number of batched tokens per iteration') parser.add_argument('--max-num-seqs', type=int, default=EngineArgs. max_num_seqs, help='maximum number of sequences per iteration') parser.add_argument('--max-paddings', type=int, default=EngineArgs. max_paddings, help='maximum number of paddings in a batch') parser.add_argument('--disable-log-stats', action='store_true', help= 'disable logging statistics') parser.add_argument('--quantization', '-q', type=str, choices=['awq', 'gptq', 'squeezellm', None], default=None, help= 'Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights.' ) parser.add_argument('--enforce-eager', action='store_true', help= 'Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal performance and flexibility.' ) parser.add_argument('--max-context-len-to-capture', type=int, default= EngineArgs.max_context_len_to_capture, help= 'maximum context length covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode.' ) return parser
Shared CLI arguments for vLLM engine.
__repr__
return f'SqueezeLLMConfig(weight_bits={self.weight_bits})'
def __repr__(self) ->str: return f'SqueezeLLMConfig(weight_bits={self.weight_bits})'
null
__init__
self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, ** kwargs)
def __init__(self, vocab_size=64000, hidden_size=4096, intermediate_size= 11008, num_hidden_layers=32, num_attention_heads=32, hidden_act='silu', max_position_embeddings=4096, initializer_range=0.02, rms_norm_eps= 1e-06, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
null
test_gelu_new
torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' x = torch.randn(num_tokens, d, dtype=dtype, device=gpu_id) layer = NewGELU() out = layer(x) ref_out = layer._forward(x) assert torch.allclose(out, ref_out, atol=1e-05, rtol=1e-05)
@pytest.mark.parametrize('num_tokens', NUM_TOKENS) @pytest.mark.parametrize('d', D) @pytest.mark.parametrize('dtype', DTYPES) @pytest.mark.parametrize('seed', SEEDS) @pytest.mark.parametrize('device', DEVICES) @torch.inference_mode() def test_gelu_new(num_tokens: int, d: int, dtype: torch.dtype, seed: int, device: int) ->None: torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' x = torch.randn(num_tokens, d, dtype=dtype, device=gpu_id) layer = NewGELU() out = layer(x) ref_out = layer._forward(x) assert torch.allclose(out, ref_out, atol=1e-05, rtol=1e-05)
null
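The NewGELU under test is the GPT-2 style tanh approximation (as its _forward reference suggests). A CPU-only sketch of that formula, compared against PyTorch's built-in tanh-approximate GELU under that assumption:

import math
import torch

def gelu_new_ref(x: torch.Tensor) -> torch.Tensor:
    # GPT-2 "new" GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x.pow(3))))

x = torch.randn(16, 64)
assert torch.allclose(gelu_new_ref(x),
                      torch.nn.functional.gelu(x, approximate='tanh'),
                      atol=1e-5, rtol=1e-5)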
prepare_input_tensors
if self.is_driver_worker: is_prompt = seq_group_metadata_list[0].is_prompt if is_prompt: input_tokens, input_positions, input_metadata, prompt_lens = (self. _prepare_prompt(seq_group_metadata_list)) else: input_tokens, input_positions, input_metadata = self._prepare_decode( seq_group_metadata_list) prompt_lens = [] sampling_metadata = self._prepare_sample(seq_group_metadata_list, prompt_lens) def get_size_or_none(x: Optional[torch.Tensor]): return x.size() if x is not None else None py_data = {'input_tokens_size': input_tokens.size(), 'input_positions_size': input_positions.size(), 'is_prompt': input_metadata.is_prompt, 'slot_mapping_size': get_size_or_none( input_metadata.slot_mapping), 'max_context_len': input_metadata. max_context_len, 'context_lens_size': get_size_or_none( input_metadata.context_lens), 'block_tables_size': get_size_or_none (input_metadata.block_tables), 'use_cuda_graph': input_metadata. use_cuda_graph, 'selected_token_indices_size': sampling_metadata. selected_token_indices.size()} broadcast_object_list([py_data], src=0) broadcast(input_tokens, src=0) broadcast(input_positions, src=0) if input_metadata.slot_mapping is not None: broadcast(input_metadata.slot_mapping, src=0) if input_metadata.context_lens is not None: broadcast(input_metadata.context_lens, src=0) if input_metadata.block_tables is not None: broadcast(input_metadata.block_tables, src=0) broadcast(sampling_metadata.selected_token_indices, src=0) else: receving_list = [None] broadcast_object_list(receving_list, src=0) py_data = receving_list[0] input_tokens = torch.empty(*py_data['input_tokens_size'], dtype=torch. long, device='cuda') broadcast(input_tokens, src=0) input_positions = torch.empty(*py_data['input_positions_size'], dtype= torch.long, device='cuda') broadcast(input_positions, src=0) if py_data['slot_mapping_size'] is not None: slot_mapping = torch.empty(*py_data['slot_mapping_size'], dtype= torch.long, device='cuda') broadcast(slot_mapping, src=0) else: slot_mapping = None if py_data['context_lens_size'] is not None: context_lens = torch.empty(*py_data['context_lens_size'], dtype= torch.int, device='cuda') broadcast(context_lens, src=0) else: context_lens = None if py_data['block_tables_size'] is not None: block_tables = torch.empty(*py_data['block_tables_size'], dtype= torch.int, device='cuda') broadcast(block_tables, src=0) else: block_tables = None selected_token_indices = torch.empty(*py_data[ 'selected_token_indices_size'], dtype=torch.long, device='cuda') broadcast(selected_token_indices, src=0) input_metadata = InputMetadata(is_prompt=py_data['is_prompt'], slot_mapping=slot_mapping, max_context_len=py_data[ 'max_context_len'], context_lens=context_lens, block_tables= block_tables, use_cuda_graph=py_data['use_cuda_graph']) sampling_metadata = SamplingMetadata(seq_groups=None, seq_data=None, prompt_lens=None, selected_token_indices=selected_token_indices, categorized_sample_indices=None, perform_sampling=False) return input_tokens, input_positions, input_metadata, sampling_metadata
def prepare_input_tensors(self, seq_group_metadata_list: Optional[List[ SequenceGroupMetadata]]) ->Tuple[torch.Tensor, torch.Tensor, InputMetadata, SamplingMetadata]: if self.is_driver_worker: is_prompt = seq_group_metadata_list[0].is_prompt if is_prompt: input_tokens, input_positions, input_metadata, prompt_lens = (self ._prepare_prompt(seq_group_metadata_list)) else: input_tokens, input_positions, input_metadata = (self. _prepare_decode(seq_group_metadata_list)) prompt_lens = [] sampling_metadata = self._prepare_sample(seq_group_metadata_list, prompt_lens) def get_size_or_none(x: Optional[torch.Tensor]): return x.size() if x is not None else None py_data = {'input_tokens_size': input_tokens.size(), 'input_positions_size': input_positions.size(), 'is_prompt': input_metadata.is_prompt, 'slot_mapping_size': get_size_or_none (input_metadata.slot_mapping), 'max_context_len': input_metadata.max_context_len, 'context_lens_size': get_size_or_none(input_metadata.context_lens), 'block_tables_size': get_size_or_none(input_metadata. block_tables), 'use_cuda_graph': input_metadata.use_cuda_graph, 'selected_token_indices_size': sampling_metadata. selected_token_indices.size()} broadcast_object_list([py_data], src=0) broadcast(input_tokens, src=0) broadcast(input_positions, src=0) if input_metadata.slot_mapping is not None: broadcast(input_metadata.slot_mapping, src=0) if input_metadata.context_lens is not None: broadcast(input_metadata.context_lens, src=0) if input_metadata.block_tables is not None: broadcast(input_metadata.block_tables, src=0) broadcast(sampling_metadata.selected_token_indices, src=0) else: receving_list = [None] broadcast_object_list(receving_list, src=0) py_data = receving_list[0] input_tokens = torch.empty(*py_data['input_tokens_size'], dtype= torch.long, device='cuda') broadcast(input_tokens, src=0) input_positions = torch.empty(*py_data['input_positions_size'], dtype=torch.long, device='cuda') broadcast(input_positions, src=0) if py_data['slot_mapping_size'] is not None: slot_mapping = torch.empty(*py_data['slot_mapping_size'], dtype =torch.long, device='cuda') broadcast(slot_mapping, src=0) else: slot_mapping = None if py_data['context_lens_size'] is not None: context_lens = torch.empty(*py_data['context_lens_size'], dtype =torch.int, device='cuda') broadcast(context_lens, src=0) else: context_lens = None if py_data['block_tables_size'] is not None: block_tables = torch.empty(*py_data['block_tables_size'], dtype =torch.int, device='cuda') broadcast(block_tables, src=0) else: block_tables = None selected_token_indices = torch.empty(*py_data[ 'selected_token_indices_size'], dtype=torch.long, device='cuda') broadcast(selected_token_indices, src=0) input_metadata = InputMetadata(is_prompt=py_data['is_prompt'], slot_mapping=slot_mapping, max_context_len=py_data[ 'max_context_len'], context_lens=context_lens, block_tables= block_tables, use_cuda_graph=py_data['use_cuda_graph']) sampling_metadata = SamplingMetadata(seq_groups=None, seq_data=None, prompt_lens=None, selected_token_indices=selected_token_indices, categorized_sample_indices=None, perform_sampling=False) return input_tokens, input_positions, input_metadata, sampling_metadata
null
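The driver branch above broadcasts a small dict of tensor shapes first, so every non-driver rank can allocate matching empty buffers before the in-place tensor broadcasts arrive. A minimal, self-contained sketch of that two-step pattern with plain torch.distributed, assuming a process group has already been initialized; the function name and payload below are illustrative, not vLLM APIs:

import torch
import torch.distributed as dist

def exchange_tensor(rank: int) -> torch.Tensor:
    if rank == 0:
        payload = torch.arange(6, dtype=torch.long)
        meta = [{'size': tuple(payload.size())}]
    else:
        meta = [None]
    # Step 1: broadcast picklable metadata (sizes, flags) from the driver.
    dist.broadcast_object_list(meta, src=0)
    # Step 2: receivers allocate a buffer of the advertised size, then the
    # broadcast fills it in place; the driver simply sends its own tensor.
    if rank != 0:
        payload = torch.empty(*meta[0]['size'], dtype=torch.long)
    dist.broadcast(payload, src=0)
    return payload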
__init__
super().__init__() self.config = config self.embed_dim = config.n_embd self.wte = VocabParallelEmbedding(config.vocab_size, self.embed_dim) self.h = nn.ModuleList([GPTJBlock(config, linear_method) for _ in range( config.n_layer)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
def __init__(self, config: GPTJConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.config = config self.embed_dim = config.n_embd self.wte = VocabParallelEmbedding(config.vocab_size, self.embed_dim) self.h = nn.ModuleList([GPTJBlock(config, linear_method) for _ in range (config.n_layer)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
null
rotary
return not self.alibi
@property def rotary(self): return not self.alibi
null
forward
qkv, _ = self.Wqkv(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) q, k = self.rotary_emb(position_ids, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.out_proj(attn_output) return output
def forward(self, position_ids: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: qkv, _ = self.Wqkv(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) q, k = self.rotary_emb(position_ids, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.out_proj(attn_output) return output
null
forward
residual = hidden_states if self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states = self.self_attn(hidden_states=hidden_states, kv_cache= kv_cache, input_metadata=input_metadata) hidden_states = residual + hidden_states if not self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states if self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) hidden_states, _ = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states, _ = self.fc2(hidden_states) hidden_states = residual + hidden_states if not self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) return hidden_states
def forward(self, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: residual = hidden_states if self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states = self.self_attn(hidden_states=hidden_states, kv_cache= kv_cache, input_metadata=input_metadata) hidden_states = residual + hidden_states if not self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states if self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) hidden_states, _ = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states, _ = self.fc2(hidden_states) hidden_states = residual + hidden_states if not self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) return hidden_states
null
_beam_search_sample
sample_idx = 0 results = [] for seq_group, is_prompt in zip(selected_seq_groups, is_prompts): seq_ids, sampling_params = seq_group num_parent_seqs = len(seq_ids) beam_width = sampling_params.best_of seq_group_logprobs = logprobs[sample_idx:sample_idx + num_parent_seqs] if is_prompt: assert num_parent_seqs == 1, 'Prompt input should have only one seq.' parent_ids = [0] * (2 * beam_width) _, next_token_ids = torch.topk(seq_group_logprobs[0], 2 * beam_width) next_token_ids = next_token_ids.tolist() else: cumulative_logprobs = [seq_data[seq_id].cumulative_logprob for seq_id in seq_ids] cumulative_logprobs = torch.tensor(cumulative_logprobs, dtype=torch .float, device=seq_group_logprobs.device) seq_group_logprobs = (seq_group_logprobs + cumulative_logprobs. unsqueeze(dim=1)) _, topk_ids = torch.topk(seq_group_logprobs.flatten(), 2 * beam_width) topk_ids = topk_ids.tolist() vocab_size = seq_group_logprobs.size(-1) parent_ids = [(i // vocab_size) for i in topk_ids] next_token_ids = [(i % vocab_size) for i in topk_ids] results.append((next_token_ids, parent_ids)) sample_idx += num_parent_seqs assert sample_idx == logprobs.size(0) return results
def _beam_search_sample(selected_seq_groups: List[Tuple[List[int], SamplingParams]], is_prompts: List[bool], seq_data: Dict[int, SequenceData], logprobs: torch.Tensor) ->List[Tuple[List[int], List[int]]]: sample_idx = 0 results = [] for seq_group, is_prompt in zip(selected_seq_groups, is_prompts): seq_ids, sampling_params = seq_group num_parent_seqs = len(seq_ids) beam_width = sampling_params.best_of seq_group_logprobs = logprobs[sample_idx:sample_idx + num_parent_seqs] if is_prompt: assert num_parent_seqs == 1, 'Prompt input should have only one seq.' parent_ids = [0] * (2 * beam_width) _, next_token_ids = torch.topk(seq_group_logprobs[0], 2 * beam_width) next_token_ids = next_token_ids.tolist() else: cumulative_logprobs = [seq_data[seq_id].cumulative_logprob for seq_id in seq_ids] cumulative_logprobs = torch.tensor(cumulative_logprobs, dtype= torch.float, device=seq_group_logprobs.device) seq_group_logprobs = (seq_group_logprobs + cumulative_logprobs. unsqueeze(dim=1)) _, topk_ids = torch.topk(seq_group_logprobs.flatten(), 2 * beam_width) topk_ids = topk_ids.tolist() vocab_size = seq_group_logprobs.size(-1) parent_ids = [(i // vocab_size) for i in topk_ids] next_token_ids = [(i % vocab_size) for i in topk_ids] results.append((next_token_ids, parent_ids)) sample_idx += num_parent_seqs assert sample_idx == logprobs.size(0) return results
null
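In the decode branch of _beam_search_sample, topk runs over the flattened (num_parents x vocab_size) logprobs, so integer division by the vocabulary size recovers the parent beam and the remainder recovers the token id. A tiny check of that index arithmetic with made-up numbers:

import torch

vocab_size = 5
seq_group_logprobs = torch.tensor([
    [0.1, 0.9, 0.2, 0.3, 0.4],   # parent beam 0
    [0.8, 0.0, 0.7, 0.1, 0.2],   # parent beam 1
])
_, topk_ids = torch.topk(seq_group_logprobs.flatten(), 4)
parent_ids = [i // vocab_size for i in topk_ids.tolist()]
next_token_ids = [i % vocab_size for i in topk_ids.tolist()]
# Picks 0.9 (beam 0, token 1), 0.8 (beam 1, token 0), 0.7 (beam 1, token 2), 0.4 (beam 0, token 4).
assert parent_ids == [0, 1, 1, 0]
assert next_token_ids == [1, 0, 2, 4]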
get_from_keys
"""Get a value from the model's quantization config.""" for key in keys: if key in config: return config[key] raise ValueError( f"Cannot find any of {keys} in the model's quantization config.")
@staticmethod def get_from_keys(config: Dict[str, Any], keys: List[str]) ->Any: """Get a value from the model's quantization config.""" for key in keys: if key in config: return config[key] raise ValueError( f"Cannot find any of {keys} in the model's quantization config.")
Get a value from the model's quantization config.
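A quick illustration of the fallback lookup, using a made-up quantization config; SomeQuantConfig is a placeholder for whichever config class owns this staticmethod:

config = {'bits': 4, 'group_size': 128}
assert SomeQuantConfig.get_from_keys(config, ['wbits', 'bits']) == 4   # first key present wins
# SomeQuantConfig.get_from_keys(config, ['zero_point']) would raise ValueError.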
get_pipeline_model_parallel_group
"""Get the pipeline model parallel group the caller rank belongs to.""" assert _PIPELINE_MODEL_PARALLEL_GROUP is not None, 'pipeline model parallel group is not initialized' return _PIPELINE_MODEL_PARALLEL_GROUP
def get_pipeline_model_parallel_group(): """Get the pipeline model parallel group the caller rank belongs to.""" assert _PIPELINE_MODEL_PARALLEL_GROUP is not None, 'pipeline model parallel group is not initialized' return _PIPELINE_MODEL_PARALLEL_GROUP
Get the pipeline model parallel group the caller rank belongs to.
__init__
super().__init__(*args, **kwargs, disable=True)
def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs, disable=True)
null
__init__
self.cache_config = cache_config self.model_config = model_config self.parallel_config = parallel_config self.head_size = model_config.get_head_size() self.num_layers = model_config.get_num_layers(parallel_config) self.num_heads = model_config.get_num_kv_heads(parallel_config) self.dtype = model_config.dtype self.block_size = cache_config.block_size self.num_gpu_blocks = cache_config.num_gpu_blocks self.num_cpu_blocks = cache_config.num_cpu_blocks self.gpu_cache = self.allocate_gpu_cache() self.cpu_cache = self.allocate_cpu_cache() self.cache_stream = torch.cuda.Stream() assert self.cache_stream != torch.cuda.current_stream() self.events = [torch.cuda.Event() for _ in range(self.num_layers)]
def __init__(self, cache_config: CacheConfig, model_config: ModelConfig, parallel_config: ParallelConfig) ->None: self.cache_config = cache_config self.model_config = model_config self.parallel_config = parallel_config self.head_size = model_config.get_head_size() self.num_layers = model_config.get_num_layers(parallel_config) self.num_heads = model_config.get_num_kv_heads(parallel_config) self.dtype = model_config.dtype self.block_size = cache_config.block_size self.num_gpu_blocks = cache_config.num_gpu_blocks self.num_cpu_blocks = cache_config.num_cpu_blocks self.gpu_cache = self.allocate_gpu_cache() self.cpu_cache = self.allocate_cpu_cache() self.cache_stream = torch.cuda.Stream() assert self.cache_stream != torch.cuda.current_stream() self.events = [torch.cuda.Event() for _ in range(self.num_layers)]
null
_prune_hidden_states
hidden_states = hidden_states.view(-1, hidden_states.shape[-1]) return hidden_states.index_select(0, sampling_metadata.selected_token_indices)
def _prune_hidden_states(hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) ->torch.Tensor: hidden_states = hidden_states.view(-1, hidden_states.shape[-1]) return hidden_states.index_select(0, sampling_metadata. selected_token_indices)
null
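The prune step reshapes the hidden states to (num_tokens, hidden_size) and keeps only the rows that will actually be fed to the sampler, typically the last token of each sequence. A toy shape check of the same two operations:

import torch

hidden_states = torch.randn(2, 4, 8)                      # (batch, seq_len, hidden_size)
selected_token_indices = torch.tensor([3, 7])             # last position of each sequence, flattened
flat = hidden_states.view(-1, hidden_states.shape[-1])    # (8, 8)
pruned = flat.index_select(0, selected_token_indices)
assert pruned.shape == (2, 8)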
__init__
self.model_config = model_config self.parallel_config = parallel_config self.scheduler_config = scheduler_config self.local_rank = local_rank self.rank = rank self.distributed_init_method = distributed_init_method self.is_driver_worker = is_driver_worker if self.is_driver_worker: assert self.rank == 0, 'The driver worker must have rank 0.' self.model_runner = ModelRunner(model_config, parallel_config, scheduler_config, is_driver_worker) self.cache_config = None self.cache_engine = None self.cache_events = None self.gpu_cache = None
def __init__(self, model_config: ModelConfig, parallel_config: ParallelConfig, scheduler_config: SchedulerConfig, local_rank: int, rank: int, distributed_init_method: str, is_driver_worker: bool=False ) ->None: self.model_config = model_config self.parallel_config = parallel_config self.scheduler_config = scheduler_config self.local_rank = local_rank self.rank = rank self.distributed_init_method = distributed_init_method self.is_driver_worker = is_driver_worker if self.is_driver_worker: assert self.rank == 0, 'The driver worker must have rank 0.' self.model_runner = ModelRunner(model_config, parallel_config, scheduler_config, is_driver_worker) self.cache_config = None self.cache_engine = None self.cache_events = None self.gpu_cache = None
null
__init__
logging.Formatter.__init__(self, fmt, datefmt)
def __init__(self, fmt, datefmt=None): logging.Formatter.__init__(self, fmt, datefmt)
null
__init__
self.pipeline_parallel_size = pipeline_parallel_size self.tensor_parallel_size = tensor_parallel_size self.worker_use_ray = worker_use_ray self.max_parallel_loading_workers = max_parallel_loading_workers self.world_size = pipeline_parallel_size * tensor_parallel_size if self.world_size > 1: self.worker_use_ray = True self._verify_args()
def __init__(self, pipeline_parallel_size: int, tensor_parallel_size: int, worker_use_ray: bool, max_parallel_loading_workers: Optional[int]=None ) ->None: self.pipeline_parallel_size = pipeline_parallel_size self.tensor_parallel_size = tensor_parallel_size self.worker_use_ray = worker_use_ray self.max_parallel_loading_workers = max_parallel_loading_workers self.world_size = pipeline_parallel_size * tensor_parallel_size if self.world_size > 1: self.worker_use_ray = True self._verify_args()
null
has_unfinished_seqs
return self.waiting or self.running or self.swapped
def has_unfinished_seqs(self) ->bool: return self.waiting or self.running or self.swapped
null
_verify_quantization
supported_quantization = ['awq', 'gptq', 'squeezellm'] rocm_not_supported_quantization = ['awq'] if self.quantization is not None: self.quantization = self.quantization.lower() hf_quant_config = getattr(self.hf_config, 'quantization_config', None) if hf_quant_config is not None: hf_quant_method = str(hf_quant_config['quant_method']).lower() if self.quantization is None: self.quantization = hf_quant_method elif self.quantization != hf_quant_method: raise ValueError( f'Quantization method specified in the model config ({hf_quant_method}) does not match the quantization method specified in the `quantization` argument ({self.quantization}).' ) if self.quantization is not None: if self.quantization not in supported_quantization: raise ValueError( f'Unknown quantization method: {self.quantization}. Must be one of {supported_quantization}.' ) if is_hip() and self.quantization in rocm_not_supported_quantization: raise ValueError( f'{self.quantization} quantization is currently not supported in ROCm.' ) logger.warning( f'{self.quantization} quantization is not fully optimized yet. The speed can be slower than non-quantized models.' )
def _verify_quantization(self) ->None: supported_quantization = ['awq', 'gptq', 'squeezellm'] rocm_not_supported_quantization = ['awq'] if self.quantization is not None: self.quantization = self.quantization.lower() hf_quant_config = getattr(self.hf_config, 'quantization_config', None) if hf_quant_config is not None: hf_quant_method = str(hf_quant_config['quant_method']).lower() if self.quantization is None: self.quantization = hf_quant_method elif self.quantization != hf_quant_method: raise ValueError( f'Quantization method specified in the model config ({hf_quant_method}) does not match the quantization method specified in the `quantization` argument ({self.quantization}).' ) if self.quantization is not None: if self.quantization not in supported_quantization: raise ValueError( f'Unknown quantization method: {self.quantization}. Must be one of {supported_quantization}.' ) if is_hip() and self.quantization in rocm_not_supported_quantization: raise ValueError( f'{self.quantization} quantization is currently not supported in ROCm.' ) logger.warning( f'{self.quantization} quantization is not fully optimized yet. The speed can be slower than non-quantized models.' )
null
__init__
super().__init__() self.hidden_size = hidden_size tensor_model_parallel_world_size = get_tensor_model_parallel_world_size() self.total_num_heads = num_heads assert self.total_num_heads % tensor_model_parallel_world_size == 0 self.num_heads = self.total_num_heads // tensor_model_parallel_world_size self.head_dim = hidden_size // self.total_num_heads self.c_attn = QKVParallelLinear(hidden_size, self.head_dim, self. total_num_heads, bias=True, linear_method=linear_method) self.c_proj = RowParallelLinear(self.total_num_heads * self.head_dim, hidden_size, bias=False, linear_method=linear_method) self.scaling = self.head_dim ** -0.5 self.rotary_emb = get_rope(self.head_dim, rotary_dim=self.head_dim, max_position=max_position_embeddings, base=rope_theta, rope_scaling= rope_scaling) self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling)
def __init__(self, hidden_size: int, num_heads: int, max_position_embeddings: int, rope_theta: float=10000, rope_scaling: Optional[Dict[str, Any]]=None, linear_method: Optional[LinearMethodBase ]=None): super().__init__() self.hidden_size = hidden_size tensor_model_parallel_world_size = get_tensor_model_parallel_world_size() self.total_num_heads = num_heads assert self.total_num_heads % tensor_model_parallel_world_size == 0 self.num_heads = self.total_num_heads // tensor_model_parallel_world_size self.head_dim = hidden_size // self.total_num_heads self.c_attn = QKVParallelLinear(hidden_size, self.head_dim, self. total_num_heads, bias=True, linear_method=linear_method) self.c_proj = RowParallelLinear(self.total_num_heads * self.head_dim, hidden_size, bias=False, linear_method=linear_method) self.scaling = self.head_dim ** -0.5 self.rotary_emb = get_rope(self.head_dim, rotary_dim=self.head_dim, max_position=max_position_embeddings, base=rope_theta, rope_scaling =rope_scaling) self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling)
null
_compute_inv_freq
"""Compute the inverse frequency.""" inv_freq = 1.0 / base ** (torch.arange(0, self.rotary_dim, 2, dtype=torch. float, device='cuda') / self.rotary_dim) return inv_freq
def _compute_inv_freq(self, base: Union[int, float]) ->torch.Tensor: """Compute the inverse frequency.""" inv_freq = 1.0 / base ** (torch.arange(0, self.rotary_dim, 2, dtype= torch.float, device='cuda') / self.rotary_dim) return inv_freq
Compute the inverse frequency.
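These are the standard RoPE inverse frequencies, inv_freq[i] = 1 / base ** (2 * i / rotary_dim). A CPU-only restatement of the same expression with made-up sizes, just to show the resulting shape and scale:

import torch

rotary_dim, base = 8, 10000.0
inv_freq = 1.0 / base ** (torch.arange(0, rotary_dim, 2, dtype=torch.float) / rotary_dim)
# Four frequencies for an 8-dim rotary embedding: 1, base**-0.25, base**-0.5, base**-0.75.
assert inv_freq.shape == (4,)
assert torch.isclose(inv_freq[0], torch.tensor(1.0))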
parse_args
parser = argparse.ArgumentParser(description= 'vLLM OpenAI-Compatible RESTful API server.') parser.add_argument('--host', type=str, default=None, help='host name') parser.add_argument('--port', type=int, default=8000, help='port number') parser.add_argument('--allow-credentials', action='store_true', help= 'allow credentials') parser.add_argument('--allowed-origins', type=json.loads, default=['*'], help='allowed origins') parser.add_argument('--allowed-methods', type=json.loads, default=['*'], help='allowed methods') parser.add_argument('--allowed-headers', type=json.loads, default=['*'], help='allowed headers') parser.add_argument('--served-model-name', type=str, default=None, help= 'The model name used in the API. If not specified, the model name will be the same as the huggingface name.' ) parser.add_argument('--chat-template', type=str, default=None, help= 'The file path to the chat template, or the template in single-line form for the specified model' ) parser.add_argument('--response-role', type=str, default='assistant', help= 'The role name to return if `request.add_generation_prompt=true`.') parser.add_argument('--ssl-keyfile', type=str, default=None, help= 'The file path to the SSL key file') parser.add_argument('--ssl-certfile', type=str, default=None, help= 'The file path to the SSL cert file') parser = AsyncEngineArgs.add_cli_args(parser) return parser.parse_args()
def parse_args(): parser = argparse.ArgumentParser(description= 'vLLM OpenAI-Compatible RESTful API server.') parser.add_argument('--host', type=str, default=None, help='host name') parser.add_argument('--port', type=int, default=8000, help='port number') parser.add_argument('--allow-credentials', action='store_true', help= 'allow credentials') parser.add_argument('--allowed-origins', type=json.loads, default=['*'], help='allowed origins') parser.add_argument('--allowed-methods', type=json.loads, default=['*'], help='allowed methods') parser.add_argument('--allowed-headers', type=json.loads, default=['*'], help='allowed headers') parser.add_argument('--served-model-name', type=str, default=None, help = 'The model name used in the API. If not specified, the model name will be the same as the huggingface name.' ) parser.add_argument('--chat-template', type=str, default=None, help= 'The file path to the chat template, or the template in single-line form for the specified model' ) parser.add_argument('--response-role', type=str, default='assistant', help='The role name to return if `request.add_generation_prompt=true`.' ) parser.add_argument('--ssl-keyfile', type=str, default=None, help= 'The file path to the SSL key file') parser.add_argument('--ssl-certfile', type=str, default=None, help= 'The file path to the SSL cert file') parser = AsyncEngineArgs.add_cli_args(parser) return parser.parse_args()
null
generate_greedy
greedy_params = SamplingParams(temperature=0.0, max_tokens=max_tokens) outputs = self.generate(prompts, greedy_params) return [(output_ids[0], output_str[0]) for output_ids, output_str in outputs]
def generate_greedy(self, prompts: List[str], max_tokens: int) ->List[Tuple [List[int], str]]: greedy_params = SamplingParams(temperature=0.0, max_tokens=max_tokens) outputs = self.generate(prompts, greedy_params) return [(output_ids[0], output_str[0]) for output_ids, output_str in outputs]
null
__repr__
return f'SequenceOutput(parent_seq_id={self.parent_seq_id}, output_token={self.output_token}, logprobs={self.logprobs})'
def __repr__(self) ->str: return ( f'SequenceOutput(parent_seq_id={self.parent_seq_id}, output_token={self.output_token}, logprobs={self.logprobs})' )
null
prepare_hf_model_weights
is_local = os.path.isdir(model_name_or_path) use_safetensors = False if load_format == 'auto': allow_patterns = ['*.safetensors', '*.bin'] elif load_format == 'safetensors': use_safetensors = True allow_patterns = ['*.safetensors'] elif load_format == 'pt': allow_patterns = ['*.pt'] elif load_format == 'npcache': allow_patterns = ['*.bin'] else: raise ValueError(f'Unknown load_format: {load_format}') if fall_back_to_pt: allow_patterns += ['*.pt'] if not is_local: with get_lock(model_name_or_path, cache_dir): hf_folder = snapshot_download(model_name_or_path, allow_patterns= allow_patterns, cache_dir=cache_dir, tqdm_class=Disabledtqdm, revision=revision) else: hf_folder = model_name_or_path hf_weights_files: List[str] = [] for pattern in allow_patterns: hf_weights_files += glob.glob(os.path.join(hf_folder, pattern)) if len(hf_weights_files) > 0: if pattern == '*.safetensors': use_safetensors = True break if not use_safetensors: blacklist = ['training_args.bin', 'optimizer.bin', 'optimizer.pt', 'scheduler.pt', 'scaler.pt'] hf_weights_files = [f for f in hf_weights_files if not any(f.endswith(x ) for x in blacklist)] if len(hf_weights_files) == 0: raise RuntimeError( f'Cannot find any model weights with `{model_name_or_path}`') return hf_folder, hf_weights_files, use_safetensors
def prepare_hf_model_weights(model_name_or_path: str, cache_dir: Optional[ str]=None, load_format: str='auto', fall_back_to_pt: bool=True, revision: Optional[str]=None) ->Tuple[str, List[str], bool]: is_local = os.path.isdir(model_name_or_path) use_safetensors = False if load_format == 'auto': allow_patterns = ['*.safetensors', '*.bin'] elif load_format == 'safetensors': use_safetensors = True allow_patterns = ['*.safetensors'] elif load_format == 'pt': allow_patterns = ['*.pt'] elif load_format == 'npcache': allow_patterns = ['*.bin'] else: raise ValueError(f'Unknown load_format: {load_format}') if fall_back_to_pt: allow_patterns += ['*.pt'] if not is_local: with get_lock(model_name_or_path, cache_dir): hf_folder = snapshot_download(model_name_or_path, allow_patterns=allow_patterns, cache_dir=cache_dir, tqdm_class=Disabledtqdm, revision=revision) else: hf_folder = model_name_or_path hf_weights_files: List[str] = [] for pattern in allow_patterns: hf_weights_files += glob.glob(os.path.join(hf_folder, pattern)) if len(hf_weights_files) > 0: if pattern == '*.safetensors': use_safetensors = True break if not use_safetensors: blacklist = ['training_args.bin', 'optimizer.bin', 'optimizer.pt', 'scheduler.pt', 'scaler.pt'] hf_weights_files = [f for f in hf_weights_files if not any(f. endswith(x) for x in blacklist)] if len(hf_weights_files) == 0: raise RuntimeError( f'Cannot find any model weights with `{model_name_or_path}`') return hf_folder, hf_weights_files, use_safetensors
null
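A hedged example of calling the helper on a local checkpoint directory (the path is a placeholder); with load_format='auto' it prefers *.safetensors files and falls back to *.bin, plus *.pt because fall_back_to_pt defaults to True:

hf_folder, hf_weights_files, use_safetensors = prepare_hf_model_weights(
    '/path/to/local/model',   # hypothetical directory containing the weights
    load_format='auto',
)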
__init__
super().__init__() self.total_num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.total_num_heads self.bias = getattr(config, 'attention_bias', True) tensor_model_parallel_world_size = get_tensor_model_parallel_world_size() assert self.total_num_heads % tensor_model_parallel_world_size == 0 self.num_heads = self.total_num_heads // tensor_model_parallel_world_size self.query_key_value = QKVParallelLinear(config.hidden_size, self.head_size, self.total_num_heads, bias=self.bias, linear_method=linear_method) self.dense = RowParallelLinear(config.hidden_size, config.hidden_size, bias =self.bias, linear_method=linear_method) scaling = self.head_size ** -0.5 rotary_dim = int(self.head_size * config.rotary_pct) assert rotary_dim % 2 == 0 rope_theta = getattr(config, 'rope_theta', 10000) max_position_embeddings = getattr(config, 'max_position_embeddings', 8192) self.rotary_emb = get_rope(self.head_size, rotary_dim=rotary_dim, max_position=max_position_embeddings, base=rope_theta) self.attn = PagedAttention(self.num_heads, self.head_size, scaling)
def __init__(self, config: GPTNeoXConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.total_num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.total_num_heads self.bias = getattr(config, 'attention_bias', True) tensor_model_parallel_world_size = get_tensor_model_parallel_world_size() assert self.total_num_heads % tensor_model_parallel_world_size == 0 self.num_heads = self.total_num_heads // tensor_model_parallel_world_size self.query_key_value = QKVParallelLinear(config.hidden_size, self. head_size, self.total_num_heads, bias=self.bias, linear_method= linear_method) self.dense = RowParallelLinear(config.hidden_size, config.hidden_size, bias=self.bias, linear_method=linear_method) scaling = self.head_size ** -0.5 rotary_dim = int(self.head_size * config.rotary_pct) assert rotary_dim % 2 == 0 rope_theta = getattr(config, 'rope_theta', 10000) max_position_embeddings = getattr(config, 'max_position_embeddings', 8192) self.rotary_emb = get_rope(self.head_size, rotary_dim=rotary_dim, max_position=max_position_embeddings, base=rope_theta) self.attn = PagedAttention(self.num_heads, self.head_size, scaling)
null
_get_alibi_slopes
next_power_of_2 = 2 ** math.ceil(math.log2(total_num_heads)) m = torch.arange(1, next_power_of_2 + 1, dtype=torch.float32) m = m.mul(alibi_bias_max / next_power_of_2) slopes = 1.0 / torch.pow(2, m) if next_power_of_2 != total_num_heads: slopes = torch.concat([slopes[1::2], slopes[::2]])[:total_num_heads] return slopes
def _get_alibi_slopes(total_num_heads: int, alibi_bias_max: int ) ->torch.Tensor: next_power_of_2 = 2 ** math.ceil(math.log2(total_num_heads)) m = torch.arange(1, next_power_of_2 + 1, dtype=torch.float32) m = m.mul(alibi_bias_max / next_power_of_2) slopes = 1.0 / torch.pow(2, m) if next_power_of_2 != total_num_heads: slopes = torch.concat([slopes[1::2], slopes[::2]])[:total_num_heads] return slopes
null
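When total_num_heads is already a power of two, the exponents are simply k * alibi_bias_max / total_num_heads for k = 1..total_num_heads, so with 8 heads and alibi_bias_max=8 the slopes are 1/2, 1/4, ..., 1/256. A quick check against the two-argument variant defined above (values chosen only for illustration):

import torch

slopes = _get_alibi_slopes(total_num_heads=8, alibi_bias_max=8)
assert torch.allclose(slopes, 0.5 ** torch.arange(1, 9, dtype=torch.float32))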
__init__
super().__init__() self.hidden_size = config.hidden_size rope_theta = getattr(config, 'rope_theta', 10000) self.self_attn = MixtralAttention(hidden_size=self.hidden_size, num_heads= config.num_attention_heads, max_position=config.max_position_embeddings, num_kv_heads=config.num_key_value_heads, rope_theta=rope_theta, sliding_window=config.sliding_window, linear_method=linear_method) self.block_sparse_moe = MixtralMoE(config=config, linear_method=linear_method) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config. rms_norm_eps)
def __init__(self, config: MixtralConfig, linear_method: Optional[ LinearMethodBase]=None) ->None: super().__init__() self.hidden_size = config.hidden_size rope_theta = getattr(config, 'rope_theta', 10000) self.self_attn = MixtralAttention(hidden_size=self.hidden_size, num_heads=config.num_attention_heads, max_position=config. max_position_embeddings, num_kv_heads=config.num_key_value_heads, rope_theta=rope_theta, sliding_window=config.sliding_window, linear_method=linear_method) self.block_sparse_moe = MixtralMoE(config=config, linear_method= linear_method) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config. rms_norm_eps)
null
__init__
""" AquilaRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps
def __init__(self, hidden_size, eps=1e-06): """ AquilaRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps
AquilaRMSNorm is equivalent to T5LayerNorm
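This record only shows the constructor; the forward pass of an RMS norm of this kind scales by the reciprocal root mean square of the features, without the mean subtraction LayerNorm performs. A minimal standalone sketch of that computation, offered as an illustration of the technique rather than the class's actual method:

import torch

def rms_norm(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-06) -> torch.Tensor:
    variance = x.pow(2).mean(-1, keepdim=True)   # mean of squares over the hidden dim
    return weight * (x * torch.rsqrt(variance + eps))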
__init__
super().__init__() self.hidden_size = config.hidden_size rope_theta = getattr(config, 'rope_theta', 10000) max_position_embeddings = getattr(config, 'max_position_embeddings', 8192) self.self_attn = InternLMAttention(hidden_size=self.hidden_size, num_heads= config.num_attention_heads, bias=config.bias, rope_theta=rope_theta, max_position_embeddings=max_position_embeddings, linear_method= linear_method, rope_scaling=getattr(config, 'rope_scaling', None)) self.mlp = InternLMMLP(hidden_size=self.hidden_size, intermediate_size= config.intermediate_size, hidden_act=config.hidden_act, linear_method= linear_method) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config. rms_norm_eps)
def __init__(self, config: LlamaConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.hidden_size = config.hidden_size rope_theta = getattr(config, 'rope_theta', 10000) max_position_embeddings = getattr(config, 'max_position_embeddings', 8192) self.self_attn = InternLMAttention(hidden_size=self.hidden_size, num_heads=config.num_attention_heads, bias=config.bias, rope_theta= rope_theta, max_position_embeddings=max_position_embeddings, linear_method=linear_method, rope_scaling=getattr(config, 'rope_scaling', None)) self.mlp = InternLMMLP(hidden_size=self.hidden_size, intermediate_size= config.intermediate_size, hidden_act=config.hidden_act, linear_method=linear_method) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config. rms_norm_eps)
null
num_finished_seqs
return len(self.get_finished_seqs())
def num_finished_seqs(self) ->int: return len(self.get_finished_seqs())
null
__init__
super().__init__() self.hidden_size = config.hidden_size self.total_num_heads = config.n_head self.head_dim = self.hidden_size // self.total_num_heads assert self.head_dim * self.total_num_heads == self.hidden_size tp_world_size = get_tensor_model_parallel_world_size() assert self.total_num_heads % tp_world_size == 0 self.num_heads = self.total_num_heads // tp_world_size self.query_key_value = QKVParallelLinear(self.hidden_size, self.head_dim, self.total_num_heads, bias=True, linear_method=linear_method) self.dense = RowParallelLinear(self.hidden_size, self.hidden_size, bias= True, linear_method=linear_method) tp_rank = get_tensor_model_parallel_rank() head_start = tp_rank * self.num_heads head_end = (tp_rank + 1) * self.num_heads alibi_slopes = _get_alibi_slopes(self.total_num_heads) alibi_slopes = alibi_slopes[head_start:head_end].tolist() scaling = self.head_dim ** -0.5 self.attn = PagedAttention(self.num_heads, self.head_dim, scaling, alibi_slopes=alibi_slopes)
def __init__(self, config: BloomConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.hidden_size = config.hidden_size self.total_num_heads = config.n_head self.head_dim = self.hidden_size // self.total_num_heads assert self.head_dim * self.total_num_heads == self.hidden_size tp_world_size = get_tensor_model_parallel_world_size() assert self.total_num_heads % tp_world_size == 0 self.num_heads = self.total_num_heads // tp_world_size self.query_key_value = QKVParallelLinear(self.hidden_size, self. head_dim, self.total_num_heads, bias=True, linear_method=linear_method) self.dense = RowParallelLinear(self.hidden_size, self.hidden_size, bias =True, linear_method=linear_method) tp_rank = get_tensor_model_parallel_rank() head_start = tp_rank * self.num_heads head_end = (tp_rank + 1) * self.num_heads alibi_slopes = _get_alibi_slopes(self.total_num_heads) alibi_slopes = alibi_slopes[head_start:head_end].tolist() scaling = self.head_dim ** -0.5 self.attn = PagedAttention(self.num_heads, self.head_dim, scaling, alibi_slopes=alibi_slopes)
null
__init__
super().__init__() hidden_size = config.hidden_size self.c_fc = ColumnParallelLinear(hidden_size, intermediate_size, bias=True, linear_method=linear_method) self.c_proj = RowParallelLinear(intermediate_size, hidden_size, bias=True, linear_method=linear_method) quant_config = getattr(linear_method, 'quant_config', None) self.act = get_act_fn(config.activation_function, quant_config, intermediate_size)
def __init__(self, intermediate_size: int, config: GPTBigCodeConfig, linear_method: Optional[LinearMethodBase]=None): super().__init__() hidden_size = config.hidden_size self.c_fc = ColumnParallelLinear(hidden_size, intermediate_size, bias= True, linear_method=linear_method) self.c_proj = RowParallelLinear(intermediate_size, hidden_size, bias= True, linear_method=linear_method) quant_config = getattr(linear_method, 'quant_config', None) self.act = get_act_fn(config.activation_function, quant_config, intermediate_size)
null
forward
hidden_states, _ = self.dense_h_to_4h(hidden_states) hidden_states = self.act(hidden_states) hidden_states, _ = self.dense_4h_to_h(hidden_states) return hidden_states
def forward(self, hidden_states): hidden_states, _ = self.dense_h_to_4h(hidden_states) hidden_states = self.act(hidden_states) hidden_states, _ = self.dense_4h_to_h(hidden_states) return hidden_states
null
free_seq
self.block_manager.free(seq)
def free_seq(self, seq: Sequence) ->None: self.block_manager.free(seq)
null
get_vllm_version
version = find_version(get_path('vllm', '__init__.py')) if _is_hip(): hipcc_version = get_hipcc_rocm_version() if hipcc_version != MAIN_CUDA_VERSION: rocm_version_str = hipcc_version.replace('.', '')[:3] version += f'+rocm{rocm_version_str}' else: cuda_version = str(nvcc_cuda_version) if cuda_version != MAIN_CUDA_VERSION: cuda_version_str = cuda_version.replace('.', '')[:3] version += f'+cu{cuda_version_str}' return version
def get_vllm_version() ->str: version = find_version(get_path('vllm', '__init__.py')) if _is_hip(): hipcc_version = get_hipcc_rocm_version() if hipcc_version != MAIN_CUDA_VERSION: rocm_version_str = hipcc_version.replace('.', '')[:3] version += f'+rocm{rocm_version_str}' else: cuda_version = str(nvcc_cuda_version) if cuda_version != MAIN_CUDA_VERSION: cuda_version_str = cuda_version.replace('.', '')[:3] version += f'+cu{cuda_version_str}' return version
null
forward
inputs_embeds = self.embed_tokens(input_ids) pos_embeds = self.embed_positions(positions) if self.project_in is not None: inputs_embeds, _ = self.project_in(inputs_embeds) hidden_states = inputs_embeds + pos_embeds for i in range(len(self.layers)): layer = self.layers[i] hidden_states = layer(hidden_states, kv_caches[i], input_metadata) if self.final_layer_norm is not None: hidden_states = self.final_layer_norm(hidden_states) if self.project_out is not None: hidden_states, _ = self.project_out(hidden_states) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: inputs_embeds = self.embed_tokens(input_ids) pos_embeds = self.embed_positions(positions) if self.project_in is not None: inputs_embeds, _ = self.project_in(inputs_embeds) hidden_states = inputs_embeds + pos_embeds for i in range(len(self.layers)): layer = self.layers[i] hidden_states = layer(hidden_states, kv_caches[i], input_metadata) if self.final_layer_norm is not None: hidden_states = self.final_layer_norm(hidden_states) if self.project_out is not None: hidden_states, _ = self.project_out(hidden_states) return hidden_states
null
convert_bin_to_safetensor_file
loaded = torch.load(pt_filename, map_location='cpu')
if 'state_dict' in loaded:
    loaded = loaded['state_dict']
shared = _shared_pointers(loaded)
for shared_weights in shared:
    for name in shared_weights[1:]:
        loaded.pop(name)
loaded = {k: v.contiguous() for k, v in loaded.items()}
dirname = os.path.dirname(sf_filename)
os.makedirs(dirname, exist_ok=True)
save_file(loaded, sf_filename, metadata={'format': 'pt'})
sf_size = os.stat(sf_filename).st_size
pt_size = os.stat(pt_filename).st_size
if (sf_size - pt_size) / pt_size > 0.01:
    raise RuntimeError(
        f"""The file size difference is more than 1%:
 - {sf_filename}: {sf_size}
 - {pt_filename}: {pt_size}
""")
reloaded = load_file(sf_filename)
for k in loaded:
    pt_tensor = loaded[k]
    sf_tensor = reloaded[k]
    if not torch.equal(pt_tensor, sf_tensor):
        raise RuntimeError(f'The output tensors do not match for key {k}')
def convert_bin_to_safetensor_file(pt_filename: str, sf_filename: str) -> None:
    loaded = torch.load(pt_filename, map_location='cpu')
    if 'state_dict' in loaded:
        loaded = loaded['state_dict']
    shared = _shared_pointers(loaded)
    for shared_weights in shared:
        for name in shared_weights[1:]:
            loaded.pop(name)
    loaded = {k: v.contiguous() for k, v in loaded.items()}
    dirname = os.path.dirname(sf_filename)
    os.makedirs(dirname, exist_ok=True)
    save_file(loaded, sf_filename, metadata={'format': 'pt'})
    sf_size = os.stat(sf_filename).st_size
    pt_size = os.stat(pt_filename).st_size
    if (sf_size - pt_size) / pt_size > 0.01:
        raise RuntimeError(
            f"""The file size difference is more than 1%:
 - {sf_filename}: {sf_size}
 - {pt_filename}: {pt_size}
""")
    reloaded = load_file(sf_filename)
    for k in loaded:
        pt_tensor = loaded[k]
        sf_tensor = reloaded[k]
        if not torch.equal(pt_tensor, sf_tensor):
            raise RuntimeError(f'The output tensors do not match for key {k}')
null
set_tokenizer
self.llm_engine.tokenizer = tokenizer
def set_tokenizer(self, tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast]) ->None: self.llm_engine.tokenizer = tokenizer
null
get_total_num_kv_heads
"""Returns the total number of KV heads.""" falcon_model_types = ['falcon', 'RefinedWeb', 'RefinedWebModel'] new_decoder_arch_falcon = (self.hf_config.model_type in falcon_model_types and getattr(self.hf_config, 'new_decoder_architecture', False)) if not new_decoder_arch_falcon and getattr(self.hf_config, 'multi_query', False ): return 1 attributes = ['n_head_kv', 'num_kv_heads', 'num_key_value_heads', 'multi_query_group_num'] for attr in attributes: num_kv_heads = getattr(self.hf_config, attr, None) if num_kv_heads is not None: return num_kv_heads return self.hf_config.num_attention_heads
def get_total_num_kv_heads(self) ->int: """Returns the total number of KV heads.""" falcon_model_types = ['falcon', 'RefinedWeb', 'RefinedWebModel'] new_decoder_arch_falcon = (self.hf_config.model_type in falcon_model_types and getattr(self.hf_config, 'new_decoder_architecture', False)) if not new_decoder_arch_falcon and getattr(self.hf_config, 'multi_query', False): return 1 attributes = ['n_head_kv', 'num_kv_heads', 'num_key_value_heads', 'multi_query_group_num'] for attr in attributes: num_kv_heads = getattr(self.hf_config, attr, None) if num_kv_heads is not None: return num_kv_heads return self.hf_config.num_attention_heads
Returns the total number of KV heads.
__init__
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance( bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance( eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance( unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance( pad_token, str) else pad_token self.vocab_file = vocab_file self.add_bos_token = add_bos_token self.add_eos_token = add_eos_token self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token= unk_token, pad_token=pad_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, sp_model_kwargs=self.sp_model_kwargs, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
def __init__(self, vocab_file, unk_token='<unk>', bos_token='<s>', eos_token='</s>', pad_token=None, sp_model_kwargs: Optional[Dict[str, Any]]=None, add_bos_token=True, add_eos_token=False, clean_up_tokenization_spaces=False, **kwargs): self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs bos_token = AddedToken(bos_token, lstrip=False, rstrip=False ) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False ) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False ) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False ) if isinstance(pad_token, str) else pad_token self.vocab_file = vocab_file self.add_bos_token = add_bos_token self.add_eos_token = add_eos_token self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token= unk_token, pad_token=pad_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, sp_model_kwargs=self.sp_model_kwargs, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
null
main
"""Main function that sets up and runs the prompt processing.""" engine = initialize_engine(args) test_prompts = create_test_prompts() process_requests(engine, test_prompts)
def main(args: argparse.Namespace): """Main function that sets up and runs the prompt processing.""" engine = initialize_engine(args) test_prompts = create_test_prompts() process_requests(engine, test_prompts)
Main function that sets up and runs the prompt processing.
_raise_exception_on_finish
msg = ( 'Task finished unexpectedly. This should never happen! Please open an issue on Github.' ) try: try: task.result() except asyncio.CancelledError: return except Exception as exc: raise AsyncEngineDeadError(msg + ' See stack trace above for the actual cause.') from exc raise AsyncEngineDeadError(msg) except Exception as exc: request_tracker.propagate_exception(exc) raise exc
def _raise_exception_on_finish(task: asyncio.Task, request_tracker: 'RequestTracker') ->None: msg = ( 'Task finished unexpectedly. This should never happen! Please open an issue on Github.' ) try: try: task.result() except asyncio.CancelledError: return except Exception as exc: raise AsyncEngineDeadError(msg + ' See stack trace above for the actual cause.') from exc raise AsyncEngineDeadError(msg) except Exception as exc: request_tracker.propagate_exception(exc) raise exc
null
initialize_dummy_weights
"""Initialize model weights with random values. The model weights must be randomly initialized for accurate performance measurements. Additionally, the model weights should not cause NaNs in the forward pass. We empirically found that initializing the weights with values between -1e-3 and 1e-3 works well for most models. """ for param in model.state_dict().values(): if torch.is_floating_point(param): param.data.uniform_(low, high)
def initialize_dummy_weights(model: torch.nn.Module, low: float=-0.001, high: float=0.001) ->None: """Initialize model weights with random values. The model weights must be randomly initialized for accurate performance measurements. Additionally, the model weights should not cause NaNs in the forward pass. We empirically found that initializing the weights with values between -1e-3 and 1e-3 works well for most models. """ for param in model.state_dict().values(): if torch.is_floating_point(param): param.data.uniform_(low, high)
Initialize model weights with random values. The model weights must be randomly initialized for accurate performance measurements. Additionally, the model weights should not cause NaNs in the forward pass. We empirically found that initializing the weights with values between -1e-3 and 1e-3 works well for most models.
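Usage is a single call on any torch module; here a throwaway linear layer stands in for a real model:

import torch.nn as nn

model = nn.Linear(16, 16)
initialize_dummy_weights(model)
assert model.weight.abs().max() <= 1e-3   # every floating-point tensor now lies in [-1e-3, 1e-3]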
get_policy
return cls._POLICY_REGISTRY[policy_name](**kwargs)
@classmethod def get_policy(cls, policy_name: str, **kwargs) ->Policy: return cls._POLICY_REGISTRY[policy_name](**kwargs)
null
from_cli_args
attrs = [attr.name for attr in dataclasses.fields(cls)] engine_args = cls(**{attr: getattr(args, attr) for attr in attrs}) return engine_args
@classmethod def from_cli_args(cls, args: argparse.Namespace) ->'EngineArgs': attrs = [attr.name for attr in dataclasses.fields(cls)] engine_args = cls(**{attr: getattr(args, attr) for attr in attrs}) return engine_args
null
create_test_prompts
"""Create a list of test prompts with their sampling parameters.""" return [('A robot may not injure a human being', SamplingParams(temperature =0.0, logprobs=1, prompt_logprobs=1)), ('To be or not to be,', SamplingParams(temperature=0.8, top_k=5, presence_penalty=0.2)), ( 'What is the meaning of life?', SamplingParams(n=2, best_of=5, temperature=0.8, top_p=0.95, frequency_penalty=0.1)), ( 'It is only with the heart that one can see rightly', SamplingParams(n= 3, best_of=3, use_beam_search=True, temperature=0.0))]
def create_test_prompts() ->List[Tuple[str, SamplingParams]]: """Create a list of test prompts with their sampling parameters.""" return [('A robot may not injure a human being', SamplingParams( temperature=0.0, logprobs=1, prompt_logprobs=1)), ( 'To be or not to be,', SamplingParams(temperature=0.8, top_k=5, presence_penalty=0.2)), ('What is the meaning of life?', SamplingParams(n=2, best_of=5, temperature=0.8, top_p=0.95, frequency_penalty=0.1)), ( 'It is only with the heart that one can see rightly', SamplingParams(n=3, best_of=3, use_beam_search=True, temperature=0.0))]
Create a list of test prompts with their sampling parameters.
_process_model_outputs
scheduled_seq_groups = scheduler_outputs.scheduled_seq_groups for seq_group, outputs in zip(scheduled_seq_groups, output): self._process_sequence_group_outputs(seq_group, outputs) self.scheduler.free_finished_seq_groups() request_outputs: List[RequestOutput] = [] for seq_group in (scheduled_seq_groups + scheduler_outputs.ignored_seq_groups): request_output = RequestOutput.from_seq_group(seq_group) request_outputs.append(request_output) if self.log_stats: self._log_system_stats(scheduler_outputs.prompt_run, scheduler_outputs. num_batched_tokens) return request_outputs
def _process_model_outputs(self, output: SamplerOutput, scheduler_outputs: SchedulerOutputs) ->List[RequestOutput]: scheduled_seq_groups = scheduler_outputs.scheduled_seq_groups for seq_group, outputs in zip(scheduled_seq_groups, output): self._process_sequence_group_outputs(seq_group, outputs) self.scheduler.free_finished_seq_groups() request_outputs: List[RequestOutput] = [] for seq_group in (scheduled_seq_groups + scheduler_outputs. ignored_seq_groups): request_output = RequestOutput.from_seq_group(seq_group) request_outputs.append(request_output) if self.log_stats: self._log_system_stats(scheduler_outputs.prompt_run, scheduler_outputs.num_batched_tokens) return request_outputs
null
__init__
super().__init__() self.config = config self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size vocab_size = (config.vocab_size + 63) // 64 * 64 self.embed_tokens = VocabParallelEmbedding(vocab_size, config.hidden_size) self.layers = nn.ModuleList([InternLMDecoderLayer(config, linear_method) for _ in range(config.num_hidden_layers)]) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def __init__(self, config: LlamaConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.config = config self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size vocab_size = (config.vocab_size + 63) // 64 * 64 self.embed_tokens = VocabParallelEmbedding(vocab_size, config.hidden_size) self.layers = nn.ModuleList([InternLMDecoderLayer(config, linear_method ) for _ in range(config.num_hidden_layers)]) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
null
sample
next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
def sample(self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) ->Optional[SamplerOutput]: next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
null
get_cpu_memory
"""Returns the total CPU memory of the node in bytes.""" return psutil.virtual_memory().total
def get_cpu_memory() ->int: """Returns the total CPU memory of the node in bytes.""" return psutil.virtual_memory().total
Returns the total CPU memory of the node in bytes.
load_model
self.model = get_model(self.model_config)
def load_model(self) ->None: self.model = get_model(self.model_config)
null
__init__
super().__init__() self.embed_dim = config.hidden_size self.word_embeddings = VocabParallelEmbedding(config.vocab_size, self.embed_dim ) self.word_embeddings_layernorm = nn.LayerNorm(self.embed_dim, eps=config. layer_norm_epsilon) self.h = nn.ModuleList([BloomBlock(config, linear_method) for _ in range( config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
def __init__(self, config: BloomConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.embed_dim = config.hidden_size self.word_embeddings = VocabParallelEmbedding(config.vocab_size, self. embed_dim) self.word_embeddings_layernorm = nn.LayerNorm(self.embed_dim, eps= config.layer_norm_epsilon) self.h = nn.ModuleList([BloomBlock(config, linear_method) for _ in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
null
_compute_cos_sin_cache
max_len = self.max_position_embeddings * self.scaling_factor base = self.base * (self.scaling_factor * max_len / self. max_position_embeddings - (self.scaling_factor - 1)) ** (self. rotary_dim / (self.rotary_dim - 2)) inv_freq = self._compute_inv_freq(base) t = torch.arange(max_len, dtype=torch.float, device='cuda') freqs = torch.einsum('i,j -> ij', t, inv_freq) cos = freqs.cos() sin = freqs.sin() cache = torch.cat((cos, sin), dim=-1) return cache
def _compute_cos_sin_cache(self) ->torch.Tensor: max_len = self.max_position_embeddings * self.scaling_factor base = self.base * (self.scaling_factor * max_len / self. max_position_embeddings - (self.scaling_factor - 1)) ** (self. rotary_dim / (self.rotary_dim - 2)) inv_freq = self._compute_inv_freq(base) t = torch.arange(max_len, dtype=torch.float, device='cuda') freqs = torch.einsum('i,j -> ij', t, inv_freq) cos = freqs.cos() sin = freqs.sin() cache = torch.cat((cos, sin), dim=-1) return cache
null
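This cache implements the dynamic NTK-aware variant of RoPE scaling: the rotary base is inflated before the inverse frequencies are recomputed. Restating the expression in the code (s = scaling_factor, L = max_position_embeddings, d = rotary_dim, and L_max = s * L):

base' = base * (s * L_max / L - (s - 1)) ** (d / (d - 2))

The cache is then the usual outer product of positions t = 0, ..., L_max - 1 with the new inverse frequencies, with cos and sin concatenated along the last dimension.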
random_uuid
return str(uuid.uuid4().hex)
def random_uuid() ->str: return str(uuid.uuid4().hex)
null
__init__
self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.output_attentions = output_attentions self.rope_theta = rope_theta super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, ** kwargs)
def __init__(self, vocab_size=64000, hidden_size=4096, intermediate_size= 11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=4, hidden_act='silu', max_position_embeddings=4096, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings= False, output_attentions=False, rope_theta=5000000.0, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.output_attentions = output_attentions self.rope_theta = rope_theta super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
null
copy
key_caches = [key_cache for key_cache, _ in self.gpu_cache] value_caches = [value_cache for _, value_cache in self.gpu_cache] cache_ops.copy_blocks(key_caches, value_caches, src_to_dsts)
def copy(self, src_to_dsts: Dict[int, List[int]]) ->None: key_caches = [key_cache for key_cache, _ in self.gpu_cache] value_caches = [value_cache for _, value_cache in self.gpu_cache] cache_ops.copy_blocks(key_caches, value_caches, src_to_dsts)
null
forward
hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) return hidden_states
null
__init__
self.model = LLM(model=model_name, tokenizer=tokenizer_name, trust_remote_code=True, dtype=dtype, swap_space=0)
def __init__(self, model_name: str, tokenizer_name: Optional[str]=None, dtype: str='half') ->None: self.model = LLM(model=model_name, tokenizer=tokenizer_name, trust_remote_code=True, dtype=dtype, swap_space=0)
null
forward
hidden_states = self.embed_tokens(input_ids) residual = None for i in range(len(self.layers)): layer = self.layers[i] hidden_states, residual = layer(positions, hidden_states, kv_caches[i], input_metadata, residual) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.embed_tokens(input_ids) residual = None for i in range(len(self.layers)): layer = self.layers[i] hidden_states, residual = layer(positions, hidden_states, kv_caches [i], input_metadata, residual) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states
null
forward
x, bias = self.dense_h_to_4h(x) if bias is not None: x += bias x = self.act(x) x, bias = self.dense_4h_to_h(x) return x, bias
def forward(self, x: torch.Tensor) ->torch.Tensor: x, bias = self.dense_h_to_4h(x) if bias is not None: x += bias x = self.act(x) x, bias = self.dense_4h_to_h(x) return x, bias
null
divide
"""Ensure that numerator is divisible by the denominator and return the division value.""" ensure_divisibility(numerator, denominator) return numerator // denominator
def divide(numerator, denominator): """Ensure that numerator is divisible by the denominator and return the division value.""" ensure_divisibility(numerator, denominator) return numerator // denominator
Ensure that numerator is divisible by the denominator and return the division value.
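A short example of the contract, assuming ensure_divisibility raises when the division leaves a remainder:

assert divide(12, 4) == 3
# divide(10, 4) would raise instead of returning 2.5, because the inputs must divide evenly.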
test_multi_process_tensor_parallel
set_start_method('spawn', force=True) distributed_init_port = get_open_port() processes = [] for rank in range(tensor_parallel_size): p = Process(target=test_target, args=(tensor_parallel_size, rank, distributed_init_port)) p.start() processes.append(p) for p in processes: p.join() assert all(p.exitcode == 0 for p in processes)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason= 'Need at least 2 GPUs to run the test.') @pytest.mark.parametrize('tensor_parallel_size', [2]) @pytest.mark.parametrize('test_target', [all_reduce_test_worker, all_gather_test_worker]) def test_multi_process_tensor_parallel(tensor_parallel_size, test_target): set_start_method('spawn', force=True) distributed_init_port = get_open_port() processes = [] for rank in range(tensor_parallel_size): p = Process(target=test_target, args=(tensor_parallel_size, rank, distributed_init_port)) p.start() processes.append(p) for p in processes: p.join() assert all(p.exitcode == 0 for p in processes)
null
forward
hidden_states = self.embed_tokens(input_ids) residual = None for i in range(len(self.layers)): layer = self.layers[i] hidden_states, residual = layer(positions, hidden_states, kv_caches[i], input_metadata, residual) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.embed_tokens(input_ids) residual = None for i in range(len(self.layers)): layer = self.layers[i] hidden_states, residual = layer(positions, hidden_states, kv_caches [i], input_metadata, residual) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states
null