Columns:
method_name: string, length 3 to 45
method_body: string, length 9 to 6.25k
full_code: string, length 35 to 7.02k
docstring: string, length 18 to 4.7k (null when the method has no docstring)
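The records below pair each method name with its body, its full source (signature plus body), and its docstring (null when absent). As a rough illustration of how such a dataset can be consumed, here is a minimal sketch using the Hugging Face `datasets` library; the repository id `your-org/vllm-methods` is a hypothetical placeholder, and the column names follow the schema listed above.

```python
# Minimal sketch, assuming the dataset is published on the Hugging Face Hub.
# "your-org/vllm-methods" is a hypothetical repository id; substitute the real one.
from datasets import load_dataset

ds = load_dataset("your-org/vllm-methods", split="train")

for row in ds.select(range(3)):
    print(row["method_name"])
    # `docstring` is null (None) for rows whose method has no docstring.
    if row["docstring"] is not None:
        print("  doc:", row["docstring"].splitlines()[0])
    print("  code:", row["full_code"][:80], "...")
```

Filtering on the nullable column is a convenient way to keep only documented methods, e.g. `ds.filter(lambda r: r["docstring"] is not None)`.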
__init__
self.quant_config = quant_config
def __init__(self, quant_config: GPTQConfig):
    self.quant_config = quant_config
null
__init__
super().__init__()
n_inner = getattr(config, 'n_inner', None)
n_inner = n_inner if n_inner is not None else 4 * config.hidden_size
self.fc1 = ColumnParallelLinear(config.hidden_size, n_inner, linear_method=linear_method)
self.fc2 = RowParallelLinear(n_inner, config.hidden_size, linear_method=linear_method)
quant_config = getattr(linear_method, 'quant_config', None)
self.act = get_act_fn(config.activation_function, quant_config, n_inner)
def __init__(self, config: PretrainedConfig, linear_method: Optional[LinearMethodBase] = None):
    super().__init__()
    n_inner = getattr(config, 'n_inner', None)
    n_inner = n_inner if n_inner is not None else 4 * config.hidden_size
    self.fc1 = ColumnParallelLinear(config.hidden_size, n_inner, linear_method=linear_method)
    self.fc2 = RowParallelLinear(n_inner, config.hidden_size, linear_method=linear_method)
    quant_config = getattr(linear_method, 'quant_config', None)
    self.act = get_act_fn(config.activation_function, quant_config, n_inner)
null
is_finished
return status in [SequenceStatus.FINISHED_STOPPED, SequenceStatus.FINISHED_LENGTH_CAPPED, SequenceStatus.FINISHED_ABORTED, SequenceStatus.FINISHED_IGNORED]
@staticmethod
def is_finished(status: 'SequenceStatus') -> bool:
    return status in [SequenceStatus.FINISHED_STOPPED, SequenceStatus.FINISHED_LENGTH_CAPPED, SequenceStatus.FINISHED_ABORTED, SequenceStatus.FINISHED_IGNORED]
null
_abort
"""Abort a request. Abort a submitted request. If the request is finished or not found, this method will be a no-op. Args: request_id: The unique id of the request. """ self._request_tracker.abort_request(request_id, verbose=self.log_requests)
def _abort(self, request_id: str) ->None: """Abort a request. Abort a submitted request. If the request is finished or not found, this method will be a no-op. Args: request_id: The unique id of the request. """ self._request_tracker.abort_request(request_id, verbose=self.log_requests)
Abort a request. Abort a submitted request. If the request is finished or not found, this method will be a no-op. Args: request_id: The unique id of the request.
_query_server
response = requests.post('http://localhost:8000/generate', json={'prompt': prompt, 'max_tokens': max_tokens, 'temperature': 0, 'ignore_eos': True})
response.raise_for_status()
return response.json()
def _query_server(prompt: str, max_tokens: int = 5) -> dict:
    response = requests.post('http://localhost:8000/generate', json={'prompt': prompt, 'max_tokens': max_tokens, 'temperature': 0, 'ignore_eos': True})
    response.raise_for_status()
    return response.json()
null
from_lists
pin_memory = not in_wsl() prompt_max_len = max(len(tokens) for tokens in prompt_tokens) prompt_padded_tokens = [(tokens + [vocab_size] * (prompt_max_len - len( tokens))) for tokens in prompt_tokens] output_max_len = max(len(tokens) for tokens in output_tokens) output_padded_tokens = [(tokens + [vocab_size] * (output_max_len - len( tokens))) for tokens in output_tokens] temperatures_t = torch.tensor(temperatures, device='cpu', dtype=dtype, pin_memory=pin_memory) top_ps_t = torch.tensor(top_ps, device='cpu', dtype=dtype, pin_memory= pin_memory) min_ps_t = torch.tensor(min_ps, device='cpu', dtype=dtype, pin_memory= pin_memory) presence_penalties_t = torch.tensor(presence_penalties, device='cpu', dtype =dtype, pin_memory=pin_memory) frequency_penalties_t = torch.tensor(frequency_penalties, device='cpu', dtype=dtype, pin_memory=pin_memory) repetition_penalties_t = torch.tensor(repetition_penalties, device='cpu', dtype=dtype, pin_memory=pin_memory) top_ks_t = torch.tensor(top_ks, device='cpu', dtype=torch.int, pin_memory= pin_memory) prompt_tensor = torch.tensor(prompt_padded_tokens, device='cpu', dtype= torch.long, pin_memory=pin_memory) output_tensor = torch.tensor(output_padded_tokens, device='cpu', dtype= torch.long, pin_memory=pin_memory) return cls(temperatures=temperatures_t.to(device=device, non_blocking=True), top_ps=top_ps_t.to(device=device, non_blocking=True), top_ks=top_ks_t. to(device=device, non_blocking=True), min_ps=min_ps_t.to(device=device, non_blocking=True), presence_penalties=presence_penalties_t.to(device= device, non_blocking=True), frequency_penalties=frequency_penalties_t. to(device=device, non_blocking=True), repetition_penalties= repetition_penalties_t.to(device=device, non_blocking=True), prompt_tokens=prompt_tensor.to(device=device, non_blocking=True), output_tokens=output_tensor.to(device=device, non_blocking=True))
@classmethod def from_lists(cls, temperatures: List[float], top_ps: List[float], top_ks: List[int], min_ps: List[float], presence_penalties: List[float], frequency_penalties: List[float], repetition_penalties: List[float], prompt_tokens: List[List[int]], output_tokens: List[List[int]], vocab_size: int, device: torch.device, dtype: torch.dtype ) ->'SamplingTensors': pin_memory = not in_wsl() prompt_max_len = max(len(tokens) for tokens in prompt_tokens) prompt_padded_tokens = [(tokens + [vocab_size] * (prompt_max_len - len( tokens))) for tokens in prompt_tokens] output_max_len = max(len(tokens) for tokens in output_tokens) output_padded_tokens = [(tokens + [vocab_size] * (output_max_len - len( tokens))) for tokens in output_tokens] temperatures_t = torch.tensor(temperatures, device='cpu', dtype=dtype, pin_memory=pin_memory) top_ps_t = torch.tensor(top_ps, device='cpu', dtype=dtype, pin_memory= pin_memory) min_ps_t = torch.tensor(min_ps, device='cpu', dtype=dtype, pin_memory= pin_memory) presence_penalties_t = torch.tensor(presence_penalties, device='cpu', dtype=dtype, pin_memory=pin_memory) frequency_penalties_t = torch.tensor(frequency_penalties, device='cpu', dtype=dtype, pin_memory=pin_memory) repetition_penalties_t = torch.tensor(repetition_penalties, device= 'cpu', dtype=dtype, pin_memory=pin_memory) top_ks_t = torch.tensor(top_ks, device='cpu', dtype=torch.int, pin_memory=pin_memory) prompt_tensor = torch.tensor(prompt_padded_tokens, device='cpu', dtype= torch.long, pin_memory=pin_memory) output_tensor = torch.tensor(output_padded_tokens, device='cpu', dtype= torch.long, pin_memory=pin_memory) return cls(temperatures=temperatures_t.to(device=device, non_blocking= True), top_ps=top_ps_t.to(device=device, non_blocking=True), top_ks =top_ks_t.to(device=device, non_blocking=True), min_ps=min_ps_t.to( device=device, non_blocking=True), presence_penalties= presence_penalties_t.to(device=device, non_blocking=True), frequency_penalties=frequency_penalties_t.to(device=device, non_blocking=True), repetition_penalties=repetition_penalties_t.to( device=device, non_blocking=True), prompt_tokens=prompt_tensor.to( device=device, non_blocking=True), output_tokens=output_tensor.to( device=device, non_blocking=True))
null
test_sampler_mixed
set_random_seed(seed) batch_size = random.randint(1, 256) input_tensor, fake_logits, sampler, model_runner = _prepare_test(batch_size) seq_group_metadata_list = [] expected_tokens = [] prompt_lens = [] for i in range(batch_size): n = 1 sampling_type = random.randint(0, 2) if sampling_type == 0: sampling_params = SamplingParams(temperature=0) elif sampling_type == 1: n = random.randint(1, 10) sampling_params = SamplingParams(temperature=random.random() + 0.1, top_p=min(random.random() + 0.1, 1), top_k=random.randint(0, 10 ) or -1, n=n, presence_penalty=random.randint(0, 1)) else: sampling_params = SamplingParams(temperature=0, use_beam_search= True, best_of=2) for idx in range(n): fake_logits[i, i + idx] = 100.0 expected_tokens.append(i + idx) seq_group_metadata_list.append(SequenceGroupMetadata(request_id= f'test_{i}', is_prompt=True, seq_data={(0): SequenceData([1, 2, 3]) }, sampling_params=sampling_params, block_tables={(0): [1]})) prompt_lens.append(seq_group_metadata_list[-1].seq_data[0].get_len()) sampling_metadata = model_runner._prepare_sample(seq_group_metadata_list, prompt_lens) sampler_output = sampler(embedding=None, hidden_states=input_tensor, sampling_metadata=sampling_metadata) for i, sequence_output in enumerate(sampler_output): if seq_group_metadata_list[i].sampling_params.use_beam_search: continue for nth_output in sequence_output.samples: assert nth_output.output_token in expected_tokens
@pytest.mark.parametrize('seed', RANDOM_SEEDS) def test_sampler_mixed(seed: int): set_random_seed(seed) batch_size = random.randint(1, 256) input_tensor, fake_logits, sampler, model_runner = _prepare_test(batch_size ) seq_group_metadata_list = [] expected_tokens = [] prompt_lens = [] for i in range(batch_size): n = 1 sampling_type = random.randint(0, 2) if sampling_type == 0: sampling_params = SamplingParams(temperature=0) elif sampling_type == 1: n = random.randint(1, 10) sampling_params = SamplingParams(temperature=random.random() + 0.1, top_p=min(random.random() + 0.1, 1), top_k=random. randint(0, 10) or -1, n=n, presence_penalty=random.randint( 0, 1)) else: sampling_params = SamplingParams(temperature=0, use_beam_search =True, best_of=2) for idx in range(n): fake_logits[i, i + idx] = 100.0 expected_tokens.append(i + idx) seq_group_metadata_list.append(SequenceGroupMetadata(request_id= f'test_{i}', is_prompt=True, seq_data={(0): SequenceData([1, 2, 3])}, sampling_params=sampling_params, block_tables={(0): [1]})) prompt_lens.append(seq_group_metadata_list[-1].seq_data[0].get_len()) sampling_metadata = model_runner._prepare_sample(seq_group_metadata_list, prompt_lens) sampler_output = sampler(embedding=None, hidden_states=input_tensor, sampling_metadata=sampling_metadata) for i, sequence_output in enumerate(sampler_output): if seq_group_metadata_list[i].sampling_params.use_beam_search: continue for nth_output in sequence_output.samples: assert nth_output.output_token in expected_tokens
null
forward
hidden_states = self.model(input_ids, positions, kv_caches, input_metadata)
return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) -> torch.Tensor:
    hidden_states = self.model(input_ids, positions, kv_caches, input_metadata)
    return hidden_states
null
__init__
super().__init__()
inner_dim = 4 * config.n_embd if config.n_inner is None else config.n_inner
self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.attn = GPTJAttention(config, linear_method)
self.mlp = GPTJMLP(inner_dim, config, linear_method)
def __init__(self, config: GPTJConfig, linear_method: Optional[LinearMethodBase] = None):
    super().__init__()
    inner_dim = 4 * config.n_embd if config.n_inner is None else config.n_inner
    self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
    self.attn = GPTJAttention(config, linear_method)
    self.mlp = GPTJMLP(inner_dim, config, linear_method)
null
get_rope
key = head_size, rotary_dim, max_position, base, is_neox_style, tuple( rope_scaling.items()) if rope_scaling is not None else None if key in _ROPE_DICT: return _ROPE_DICT[key] if rope_scaling is None: rotary_emb = RotaryEmbedding(head_size, rotary_dim, max_position, base, is_neox_style) else: scaling_type = rope_scaling['type'] scaling_factor = rope_scaling['factor'] if scaling_type == 'linear': rotary_emb = LinearScalingRotaryEmbedding(head_size, rotary_dim, max_position, base, is_neox_style, scaling_factor) elif scaling_type == 'dynamic': rotary_emb = DynamicNTKScalingRotaryEmbedding(head_size, rotary_dim, max_position, base, is_neox_style, scaling_factor) elif scaling_type == 'yarn': original_max_position = rope_scaling['original_max_position_embeddings' ] assert max_position == original_max_position * scaling_factor extra_kwargs = {k: v for k, v in rope_scaling.items() if k in ( 'extrapolation_factor', 'attn_factor', 'beta_fast', 'beta_slow')} rotary_emb = YaRNScalingRotaryEmbedding(head_size, rotary_dim, original_max_position, base, is_neox_style, scaling_factor, ** extra_kwargs) else: raise ValueError(f'Unknown RoPE scaling type {scaling_type}') _ROPE_DICT[key] = rotary_emb return rotary_emb
def get_rope(head_size: int, rotary_dim: int, max_position: int, base: int, is_neox_style: bool=True, rope_scaling: Optional[Dict[str, Any]]=None ) ->RotaryEmbedding: key = head_size, rotary_dim, max_position, base, is_neox_style, tuple( rope_scaling.items()) if rope_scaling is not None else None if key in _ROPE_DICT: return _ROPE_DICT[key] if rope_scaling is None: rotary_emb = RotaryEmbedding(head_size, rotary_dim, max_position, base, is_neox_style) else: scaling_type = rope_scaling['type'] scaling_factor = rope_scaling['factor'] if scaling_type == 'linear': rotary_emb = LinearScalingRotaryEmbedding(head_size, rotary_dim, max_position, base, is_neox_style, scaling_factor) elif scaling_type == 'dynamic': rotary_emb = DynamicNTKScalingRotaryEmbedding(head_size, rotary_dim, max_position, base, is_neox_style, scaling_factor) elif scaling_type == 'yarn': original_max_position = rope_scaling[ 'original_max_position_embeddings'] assert max_position == original_max_position * scaling_factor extra_kwargs = {k: v for k, v in rope_scaling.items() if k in ( 'extrapolation_factor', 'attn_factor', 'beta_fast', 'beta_slow')} rotary_emb = YaRNScalingRotaryEmbedding(head_size, rotary_dim, original_max_position, base, is_neox_style, scaling_factor, **extra_kwargs) else: raise ValueError(f'Unknown RoPE scaling type {scaling_type}') _ROPE_DICT[key] = rotary_emb return rotary_emb
null
_run_workers
"""Runs the given method on all workers.""" if max_concurrent_workers: raise NotImplementedError('max_concurrent_workers is not supported yet.') ray_worker_outputs = [worker.execute_method.remote(method, *args, **kwargs) for worker in self.workers] if driver_args is None: driver_args = args if driver_kwargs is None: driver_kwargs = kwargs driver_worker_output = getattr(self.driver_worker, method)(*driver_args, ** driver_kwargs) if self.workers: ray_worker_outputs = ray.get(ray_worker_outputs) return [driver_worker_output] + ray_worker_outputs
def _run_workers(self, method: str, *args, driver_args: Optional[List[Any]] =None, driver_kwargs: Optional[Dict[str, Any]]=None, max_concurrent_workers: Optional[int]=None, **kwargs) ->Any: """Runs the given method on all workers.""" if max_concurrent_workers: raise NotImplementedError( 'max_concurrent_workers is not supported yet.') ray_worker_outputs = [worker.execute_method.remote(method, *args, ** kwargs) for worker in self.workers] if driver_args is None: driver_args = args if driver_kwargs is None: driver_kwargs = kwargs driver_worker_output = getattr(self.driver_worker, method)(*driver_args, **driver_kwargs) if self.workers: ray_worker_outputs = ray.get(ray_worker_outputs) return [driver_worker_output] + ray_worker_outputs
Runs the given method on all workers.
_forward
"""PyTorch-native implementation equivalent to forward().""" query = query.view(*query.shape[:-1], -1, self.head_size) key = key.view(*key.shape[:-1], -1, self.head_size) query_rot = query[..., :self.rotary_dim] key_rot = key[..., :self.rotary_dim] if self.rotary_dim < self.head_size: query_pass = query[..., self.rotary_dim:] key_pass = key[..., self.rotary_dim:] cos_sin = self.cos_sin_cache[positions] cos, sin = cos_sin.chunk(2, dim=-1) if self.is_neox_style: cos = cos.repeat(1, 1, 2).unsqueeze(-2) sin = sin.repeat(1, 1, 2).unsqueeze(-2) else: cos = cos.repeat_interleave(2, dim=-1).unsqueeze(-2) sin = sin.repeat_interleave(2, dim=-1).unsqueeze(-2) rotate_fn = _rotate_neox if self.is_neox_style else _rotate_gptj query_rot = query_rot * cos + rotate_fn(query_rot) * sin key_rot = key_rot * cos + rotate_fn(key_rot) * sin if self.rotary_dim < self.head_size: query = torch.cat((query_rot, query_pass), dim=-1) key = torch.cat((key_rot, key_pass), dim=-1) else: query = query_rot key = key_rot query = query.flatten(-2) key = key.flatten(-2) return query, key
def _forward(self, positions: torch.Tensor, query: torch.Tensor, key: torch .Tensor) ->Tuple[torch.Tensor, torch.Tensor]: """PyTorch-native implementation equivalent to forward().""" query = query.view(*query.shape[:-1], -1, self.head_size) key = key.view(*key.shape[:-1], -1, self.head_size) query_rot = query[..., :self.rotary_dim] key_rot = key[..., :self.rotary_dim] if self.rotary_dim < self.head_size: query_pass = query[..., self.rotary_dim:] key_pass = key[..., self.rotary_dim:] cos_sin = self.cos_sin_cache[positions] cos, sin = cos_sin.chunk(2, dim=-1) if self.is_neox_style: cos = cos.repeat(1, 1, 2).unsqueeze(-2) sin = sin.repeat(1, 1, 2).unsqueeze(-2) else: cos = cos.repeat_interleave(2, dim=-1).unsqueeze(-2) sin = sin.repeat_interleave(2, dim=-1).unsqueeze(-2) rotate_fn = _rotate_neox if self.is_neox_style else _rotate_gptj query_rot = query_rot * cos + rotate_fn(query_rot) * sin key_rot = key_rot * cos + rotate_fn(key_rot) * sin if self.rotary_dim < self.head_size: query = torch.cat((query_rot, query_pass), dim=-1) key = torch.cat((key_rot, key_pass), dim=-1) else: query = query_rot key = key_rot query = query.flatten(-2) key = key.flatten(-2) return query, key
PyTorch-native implementation equivalent to forward().
__aiter__
return self
def __aiter__(self): return self
null
process_request_output
"""Process a request output from the engine.""" request_id = request_output.request_id self._request_streams[request_id].put(request_output) if request_output.finished: if verbose: logger.info(f'Finished request {request_id}.') self.abort_request(request_id)
def process_request_output(self, request_output: RequestOutput, *, verbose: bool=False) ->None: """Process a request output from the engine.""" request_id = request_output.request_id self._request_streams[request_id].put(request_output) if request_output.finished: if verbose: logger.info(f'Finished request {request_id}.') self.abort_request(request_id)
Process a request output from the engine.
__repr__
return f'SequenceData(prompt_token_ids={self.prompt_token_ids}, output_token_ids={self.output_token_ids}, cumulative_logprob={self.cumulative_logprob})'
def __repr__(self) ->str: return ( f'SequenceData(prompt_token_ids={self.prompt_token_ids}, output_token_ids={self.output_token_ids}, cumulative_logprob={self.cumulative_logprob})' )
null
sampling_type
if self.use_beam_search:
    return SamplingType.BEAM
if self.temperature < _SAMPLING_EPS:
    return SamplingType.GREEDY
return SamplingType.RANDOM
@cached_property
def sampling_type(self) -> SamplingType:
    if self.use_beam_search:
        return SamplingType.BEAM
    if self.temperature < _SAMPLING_EPS:
        return SamplingType.GREEDY
    return SamplingType.RANDOM
null
forward
d = x.shape[-1] // 2
output_shape = x.shape[:-1] + (d,)
out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
ops.silu_and_mul(out, x)
return out
def forward(self, x: torch.Tensor) -> torch.Tensor:
    d = x.shape[-1] // 2
    output_shape = x.shape[:-1] + (d,)
    out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
    ops.silu_and_mul(out, x)
    return out
null
forward
qkv, _ = self.qkv_proj(hidden_states)
q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
q, k = self.rotary_emb(positions, q, k)
k_cache, v_cache = kv_cache
attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata)
output, _ = self.o_proj(attn_output)
return output
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) -> torch.Tensor:
    qkv, _ = self.qkv_proj(hidden_states)
    q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
    q, k = self.rotary_emb(positions, q, k)
    k_cache, v_cache = kv_cache
    attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata)
    output, _ = self.o_proj(attn_output)
    return output
null
prompt
return next(iter(self.seqs_dict.values())).prompt
@property
def prompt(self) -> str:
    return next(iter(self.seqs_dict.values())).prompt
null
__init__
super().__init__() self.num_embeddings = num_embeddings self.num_embeddings_padded = pad_vocab_size(num_embeddings) self.embedding_dim = embedding_dim if params_dtype is None: params_dtype = torch.get_default_dtype() self.tp_size = get_tensor_model_parallel_world_size() self.vocab_start_index, self.vocab_end_index = ( vocab_range_from_global_vocab_size(self.num_embeddings_padded, get_tensor_model_parallel_rank(), self.tp_size)) self.num_embeddings_per_partition = (self.vocab_end_index - self. vocab_start_index) self.weight = Parameter(torch.empty(self.num_embeddings_per_partition, self .embedding_dim, device=torch.cuda.current_device(), dtype=params_dtype)) set_weight_attrs(self.weight, {'parallel_dim': 0, 'weight_loader': self. weight_loader})
def __init__(self, num_embeddings: int, embedding_dim: int, params_dtype: Optional[torch.dtype]=None): super().__init__() self.num_embeddings = num_embeddings self.num_embeddings_padded = pad_vocab_size(num_embeddings) self.embedding_dim = embedding_dim if params_dtype is None: params_dtype = torch.get_default_dtype() self.tp_size = get_tensor_model_parallel_world_size() self.vocab_start_index, self.vocab_end_index = ( vocab_range_from_global_vocab_size(self.num_embeddings_padded, get_tensor_model_parallel_rank(), self.tp_size)) self.num_embeddings_per_partition = (self.vocab_end_index - self. vocab_start_index) self.weight = Parameter(torch.empty(self.num_embeddings_per_partition, self.embedding_dim, device=torch.cuda.current_device(), dtype= params_dtype)) set_weight_attrs(self.weight, {'parallel_dim': 0, 'weight_loader': self .weight_loader})
null
sample
next_tokens = self.sampler(self.lm_head_weight, hidden_states, sampling_metadata)
return next_tokens
def sample(self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]:
    next_tokens = self.sampler(self.lm_head_weight, hidden_states, sampling_metadata)
    return next_tokens
null
remove
if seq_id not in self.seqs_dict:
    raise ValueError(f'Sequence {seq_id} not found.')
del self.seqs_dict[seq_id]
def remove(self, seq_id: int) -> None:
    if seq_id not in self.seqs_dict:
        raise ValueError(f'Sequence {seq_id} not found.')
    del self.seqs_dict[seq_id]
null
initialize_cluster
"""Initialize the distributed cluster probably with Ray. Args: parallel_config: The configurations for parallel execution. engine_use_ray: Whether to use Ray for async engine. ray_address: The address of the Ray cluster. If None, uses the default Ray cluster address. Returns: A tuple of (`distributed_init_method`, `placement_group`). The `distributed_init_method` is the address for initializing the distributed backend. `placement_group` includes the specification of the resources for each distributed worker. """ if parallel_config.worker_use_ray or engine_use_ray: if ray is None: raise ImportError( 'Ray is not installed. Please install Ray to use distributed serving.' ) if is_hip(): ray.init(address=ray_address, ignore_reinit_error=True, num_gpus= parallel_config.world_size) else: ray.init(address=ray_address, ignore_reinit_error=True) if not parallel_config.worker_use_ray: assert parallel_config.world_size == 1, 'Ray is required if parallel_config.world_size > 1.' return None current_placement_group = ray.util.get_current_placement_group() if current_placement_group: bundles = current_placement_group.bundle_specs gpu_bundles = 0 for bundle in bundles: bundle_gpus = bundle.get('GPU', 0) if bundle_gpus > 1: raise ValueError( 'Placement group bundle cannot have more than 1 GPU.') if bundle_gpus: gpu_bundles += 1 if parallel_config.world_size > gpu_bundles: raise ValueError( 'The number of required GPUs exceeds the total number of available GPUs in the placement group.' ) else: num_gpus_in_cluster = ray.cluster_resources().get('GPU', 0) if parallel_config.world_size > num_gpus_in_cluster: raise ValueError( 'The number of required GPUs exceeds the total number of available GPUs in the cluster.' ) placement_group_specs = [{'GPU': 1}] * parallel_config.world_size current_placement_group = ray.util.placement_group(placement_group_specs) ray.get(current_placement_group.ready(), timeout=1800) return current_placement_group
def initialize_cluster(parallel_config: ParallelConfig, engine_use_ray: bool=False, ray_address: Optional[str]=None) ->Optional['PlacementGroup']: """Initialize the distributed cluster probably with Ray. Args: parallel_config: The configurations for parallel execution. engine_use_ray: Whether to use Ray for async engine. ray_address: The address of the Ray cluster. If None, uses the default Ray cluster address. Returns: A tuple of (`distributed_init_method`, `placement_group`). The `distributed_init_method` is the address for initializing the distributed backend. `placement_group` includes the specification of the resources for each distributed worker. """ if parallel_config.worker_use_ray or engine_use_ray: if ray is None: raise ImportError( 'Ray is not installed. Please install Ray to use distributed serving.' ) if is_hip(): ray.init(address=ray_address, ignore_reinit_error=True, num_gpus=parallel_config.world_size) else: ray.init(address=ray_address, ignore_reinit_error=True) if not parallel_config.worker_use_ray: assert parallel_config.world_size == 1, 'Ray is required if parallel_config.world_size > 1.' return None current_placement_group = ray.util.get_current_placement_group() if current_placement_group: bundles = current_placement_group.bundle_specs gpu_bundles = 0 for bundle in bundles: bundle_gpus = bundle.get('GPU', 0) if bundle_gpus > 1: raise ValueError( 'Placement group bundle cannot have more than 1 GPU.') if bundle_gpus: gpu_bundles += 1 if parallel_config.world_size > gpu_bundles: raise ValueError( 'The number of required GPUs exceeds the total number of available GPUs in the placement group.' ) else: num_gpus_in_cluster = ray.cluster_resources().get('GPU', 0) if parallel_config.world_size > num_gpus_in_cluster: raise ValueError( 'The number of required GPUs exceeds the total number of available GPUs in the cluster.' ) placement_group_specs = [{'GPU': 1}] * parallel_config.world_size current_placement_group = ray.util.placement_group( placement_group_specs) ray.get(current_placement_group.ready(), timeout=1800) return current_placement_group
Initialize the distributed cluster probably with Ray. Args: parallel_config: The configurations for parallel execution. engine_use_ray: Whether to use Ray for async engine. ray_address: The address of the Ray cluster. If None, uses the default Ray cluster address. Returns: A tuple of (`distributed_init_method`, `placement_group`). The `distributed_init_method` is the address for initializing the distributed backend. `placement_group` includes the specification of the resources for each distributed worker.
get_quant_config
quant_cls = get_quantization_config(quantization) hf_quant_config = getattr(hf_config, 'quantization_config', None) if hf_quant_config is not None: return quant_cls.from_config(hf_quant_config) is_local = os.path.isdir(model_name_or_path) if not is_local: with get_lock(model_name_or_path, cache_dir): hf_folder = snapshot_download(model_name_or_path, allow_patterns= '*.json', cache_dir=cache_dir, tqdm_class=Disabledtqdm) else: hf_folder = model_name_or_path config_files = glob.glob(os.path.join(hf_folder, '*.json')) quant_config_files = [f for f in config_files if any(f.endswith(x) for x in quant_cls.get_config_filenames())] if len(quant_config_files) == 0: raise ValueError(f'Cannot find the config file for {quantization}') if len(quant_config_files) > 1: raise ValueError( f'Found multiple config files for {quantization}: {quant_config_files}' ) quant_config_file = quant_config_files[0] with open(quant_config_file, 'r') as f: config = json.load(f) return quant_cls.from_config(config)
def get_quant_config(quantization: str, model_name_or_path: str, hf_config: PretrainedConfig, cache_dir: Optional[str]=None) ->QuantizationConfig: quant_cls = get_quantization_config(quantization) hf_quant_config = getattr(hf_config, 'quantization_config', None) if hf_quant_config is not None: return quant_cls.from_config(hf_quant_config) is_local = os.path.isdir(model_name_or_path) if not is_local: with get_lock(model_name_or_path, cache_dir): hf_folder = snapshot_download(model_name_or_path, allow_patterns='*.json', cache_dir=cache_dir, tqdm_class= Disabledtqdm) else: hf_folder = model_name_or_path config_files = glob.glob(os.path.join(hf_folder, '*.json')) quant_config_files = [f for f in config_files if any(f.endswith(x) for x in quant_cls.get_config_filenames())] if len(quant_config_files) == 0: raise ValueError(f'Cannot find the config file for {quantization}') if len(quant_config_files) > 1: raise ValueError( f'Found multiple config files for {quantization}: {quant_config_files}' ) quant_config_file = quant_config_files[0] with open(quant_config_file, 'r') as f: config = json.load(f) return quant_cls.from_config(config)
null
from_config
weight_bits = cls.get_from_keys(config, ['w_bit', 'bits'])
group_size = cls.get_from_keys(config, ['q_group_size', 'group_size'])
zero_point = cls.get_from_keys(config, ['zero_point'])
return cls(weight_bits, group_size, zero_point)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> 'AWQConfig':
    weight_bits = cls.get_from_keys(config, ['w_bit', 'bits'])
    group_size = cls.get_from_keys(config, ['q_group_size', 'group_size'])
    zero_point = cls.get_from_keys(config, ['zero_point'])
    return cls(weight_bits, group_size, zero_point)
null
set_weight_attrs
"""Set attributes on a weight tensor. This method is used to set attributes on a weight tensor. This method will not overwrite existing attributes. Args: weight: The weight tensor. weight_attrs: A dictionary of attributes to set on the weight tensor. """ if weight_attrs is None: return for key, value in weight_attrs.items(): assert not hasattr(weight, key ), f'Overwriting existing tensor attribute: {key}' setattr(weight, key, value)
def set_weight_attrs(weight: torch.Tensor, weight_attrs: Optional[Dict[str, Any]]): """Set attributes on a weight tensor. This method is used to set attributes on a weight tensor. This method will not overwrite existing attributes. Args: weight: The weight tensor. weight_attrs: A dictionary of attributes to set on the weight tensor. """ if weight_attrs is None: return for key, value in weight_attrs.items(): assert not hasattr(weight, key ), f'Overwriting existing tensor attribute: {key}' setattr(weight, key, value)
Set attributes on a weight tensor. This method is used to set attributes on a weight tensor. This method will not overwrite existing attributes. Args: weight: The weight tensor. weight_attrs: A dictionary of attributes to set on the weight tensor.
forward
hidden_states = self.transformer(input_ids, positions, kv_caches, input_metadata)
return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) -> torch.Tensor:
    hidden_states = self.transformer(input_ids, positions, kv_caches, input_metadata)
    return hidden_states
null
_yarn_linear_ramp_mask
if low == high:
    high += 0.001
linear_func = (torch.arange(dim, dtype=dtype, device=device) - low) / (high - low)
ramp_func = torch.clamp(linear_func, 0, 1)
return ramp_func
def _yarn_linear_ramp_mask(low: float, high: float, dim: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
    if low == high:
        high += 0.001
    linear_func = (torch.arange(dim, dtype=dtype, device=device) - low) / (high - low)
    ramp_func = torch.clamp(linear_func, 0, 1)
    return ramp_func
null
create_stream_response_json
choice_data = CompletionResponseStreamChoice(index=index, text=text, logprobs=logprobs, finish_reason=finish_reason)
response = CompletionStreamResponse(id=request_id, created=created_time, model=model_name, choices=[choice_data])
if usage is not None:
    response.usage = usage
response_json = response.json(exclude_unset=True, ensure_ascii=False)
return response_json
def create_stream_response_json(index: int, text: str, logprobs: Optional[LogProbs] = None, finish_reason: Optional[str] = None, usage: Optional[UsageInfo] = None) -> str:
    choice_data = CompletionResponseStreamChoice(index=index, text=text, logprobs=logprobs, finish_reason=finish_reason)
    response = CompletionStreamResponse(id=request_id, created=created_time, model=model_name, choices=[choice_data])
    if usage is not None:
        response.usage = usage
    response_json = response.json(exclude_unset=True, ensure_ascii=False)
    return response_json
null
finished
return self._finished
@property
def finished(self) -> bool:
    return self._finished
null
sample
next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata)
return next_tokens
def sample(self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]:
    next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata)
    return next_tokens
null
abort_request
del request_id
self.abort_request_calls += 1
def abort_request(self, request_id):
    del request_id
    self.abort_request_calls += 1
null
__init__
if 'disable_log_stats' not in kwargs: kwargs['disable_log_stats'] = True engine_args = EngineArgs(model=model, tokenizer=tokenizer, tokenizer_mode= tokenizer_mode, trust_remote_code=trust_remote_code, tensor_parallel_size=tensor_parallel_size, dtype=dtype, quantization= quantization, revision=revision, tokenizer_revision=tokenizer_revision, seed=seed, gpu_memory_utilization=gpu_memory_utilization, swap_space= swap_space, enforce_eager=enforce_eager, max_context_len_to_capture= max_context_len_to_capture, **kwargs) self.llm_engine = LLMEngine.from_engine_args(engine_args) self.request_counter = Counter()
def __init__(self, model: str, tokenizer: Optional[str]=None, tokenizer_mode: str='auto', trust_remote_code: bool=False, tensor_parallel_size: int=1, dtype: str='auto', quantization: Optional[ str]=None, revision: Optional[str]=None, tokenizer_revision: Optional[ str]=None, seed: int=0, gpu_memory_utilization: float=0.9, swap_space: int=4, enforce_eager: bool=False, max_context_len_to_capture: int=8192, **kwargs) ->None: if 'disable_log_stats' not in kwargs: kwargs['disable_log_stats'] = True engine_args = EngineArgs(model=model, tokenizer=tokenizer, tokenizer_mode=tokenizer_mode, trust_remote_code=trust_remote_code, tensor_parallel_size=tensor_parallel_size, dtype=dtype, quantization=quantization, revision=revision, tokenizer_revision= tokenizer_revision, seed=seed, gpu_memory_utilization= gpu_memory_utilization, swap_space=swap_space, enforce_eager= enforce_eager, max_context_len_to_capture= max_context_len_to_capture, **kwargs) self.llm_engine = LLMEngine.from_engine_args(engine_args) self.request_counter = Counter()
null
from_config
"""Create a config class from the model's quantization config.""" raise NotImplementedError
@classmethod @abstractmethod def from_config(cls, config: Dict[str, Any]) ->'QuantizationConfig': """Create a config class from the model's quantization config.""" raise NotImplementedError
Create a config class from the model's quantization config.
_swap_out
if not self.block_manager.can_swap_out(seq_group):
    raise RuntimeError('Aborted due to the lack of CPU swap space. Please increase the swap space to avoid this error.')
mapping = self.block_manager.swap_out(seq_group)
blocks_to_swap_out.update(mapping)
for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
    seq.status = SequenceStatus.SWAPPED
def _swap_out(self, seq_group: SequenceGroup, blocks_to_swap_out: Dict[int, int]) -> None:
    if not self.block_manager.can_swap_out(seq_group):
        raise RuntimeError('Aborted due to the lack of CPU swap space. Please increase the swap space to avoid this error.')
    mapping = self.block_manager.swap_out(seq_group)
    blocks_to_swap_out.update(mapping)
    for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
        seq.status = SequenceStatus.SWAPPED
null
load_weights
params_dict = dict(self.named_parameters(remove_duplicate=False))
for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision):
    if 'rotary_pos_emb.inv_freq' in name:
        continue
    if 'word_embeddings' in name:
        name = name.replace('.word_embeddings', '')
    if name.endswith('.bias') and name not in params_dict:
        continue
    param = params_dict[name]
    weight_loader = getattr(param, 'weight_loader', default_weight_loader)
    weight_loader(param, loaded_weight)
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str] = None, load_format: str = 'auto', revision: Optional[str] = None):
    params_dict = dict(self.named_parameters(remove_duplicate=False))
    for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision):
        if 'rotary_pos_emb.inv_freq' in name:
            continue
        if 'word_embeddings' in name:
            name = name.replace('.word_embeddings', '')
        if name.endswith('.bias') and name not in params_dict:
            continue
        param = params_dict[name]
        weight_loader = getattr(param, 'weight_loader', default_weight_loader)
        weight_loader(param, loaded_weight)
null
forward
w1_out, _ = self.w1(hidden_states)
w1_out = self.act_fn(w1_out)
w3_out, _ = self.w3(hidden_states)
current_hidden_states = w1_out * w3_out
current_hidden_states, _ = self.w2(current_hidden_states)
return current_hidden_states
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
    w1_out, _ = self.w1(hidden_states)
    w1_out = self.act_fn(w1_out)
    w3_out, _ = self.w3(hidden_states)
    current_hidden_states = w1_out * w3_out
    current_hidden_states, _ = self.w2(current_hidden_states)
    return current_hidden_states
null
execute_model
if self.is_driver_worker: assert seq_group_metadata_list is not None num_seq_groups = len(seq_group_metadata_list) assert blocks_to_swap_in is not None assert blocks_to_swap_out is not None assert blocks_to_copy is not None block_swapping_info = [blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy] broadcast_object_list([num_seq_groups] + block_swapping_info, src=0) else: recv_data = [None] * 4 broadcast_object_list(recv_data, src=0) num_seq_groups = recv_data[0] block_swapping_info = recv_data[1:] self.cache_swap(*block_swapping_info) if num_seq_groups == 0: return {} output = self.model_runner.execute_model(seq_group_metadata_list, self. gpu_cache) return output
@torch.inference_mode() def execute_model(self, seq_group_metadata_list: Optional[List[ SequenceGroupMetadata]]=None, blocks_to_swap_in: Optional[Dict[int, int ]]=None, blocks_to_swap_out: Optional[Dict[int, int]]=None, blocks_to_copy: Optional[Dict[int, List[int]]]=None) ->Optional[ SamplerOutput]: if self.is_driver_worker: assert seq_group_metadata_list is not None num_seq_groups = len(seq_group_metadata_list) assert blocks_to_swap_in is not None assert blocks_to_swap_out is not None assert blocks_to_copy is not None block_swapping_info = [blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy] broadcast_object_list([num_seq_groups] + block_swapping_info, src=0) else: recv_data = [None] * 4 broadcast_object_list(recv_data, src=0) num_seq_groups = recv_data[0] block_swapping_info = recv_data[1:] self.cache_swap(*block_swapping_info) if num_seq_groups == 0: return {} output = self.model_runner.execute_model(seq_group_metadata_list, self. gpu_cache) return output
null
_set_default_torch_dtype
"""Sets the default torch dtype to the given dtype.""" old_dtype = torch.get_default_dtype() torch.set_default_dtype(dtype) yield torch.set_default_dtype(old_dtype)
@contextlib.contextmanager def _set_default_torch_dtype(dtype: torch.dtype): """Sets the default torch dtype to the given dtype.""" old_dtype = torch.get_default_dtype() torch.set_default_dtype(dtype) yield torch.set_default_dtype(old_dtype)
Sets the default torch dtype to the given dtype.
example_long_prompts
prompts = []
for filename in _LONG_PROMPTS:
    prompts += _read_prompts(filename)
return prompts
@pytest.fixture
def example_long_prompts() -> List[str]:
    prompts = []
    for filename in _LONG_PROMPTS:
        prompts += _read_prompts(filename)
    return prompts
null
__init__
super().__init__()
self.config = config
self.linear_method = linear_method
self.model = MistralModel(config, linear_method)
self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
self.sampler = Sampler(config.vocab_size)
def __init__(self, config: MistralConfig, linear_method: Optional[LinearMethodBase] = None) -> None:
    super().__init__()
    self.config = config
    self.linear_method = linear_method
    self.model = MistralModel(config, linear_method)
    self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
    self.sampler = Sampler(config.vocab_size)
null
_append_tokens_to_blocks
cursor = 0
while cursor < len(token_ids):
    if not self.logical_token_blocks:
        self._append_logical_block()
    last_block = self.logical_token_blocks[-1]
    if last_block.is_full():
        self._append_logical_block()
        last_block = self.logical_token_blocks[-1]
    num_empty_slots = last_block.get_num_empty_slots()
    last_block.append_tokens(token_ids[cursor:cursor + num_empty_slots])
    cursor += num_empty_slots
def _append_tokens_to_blocks(self, token_ids: List[int]) -> None:
    cursor = 0
    while cursor < len(token_ids):
        if not self.logical_token_blocks:
            self._append_logical_block()
        last_block = self.logical_token_blocks[-1]
        if last_block.is_full():
            self._append_logical_block()
            last_block = self.logical_token_blocks[-1]
        num_empty_slots = last_block.get_num_empty_slots()
        last_block.append_tokens(token_ids[cursor:cursor + num_empty_slots])
        cursor += num_empty_slots
null
__init__
self.weight_bits = weight_bits
self.group_size = group_size
self.desc_act = desc_act
self.pack_factor = 32 // self.weight_bits
if self.weight_bits != 4:
    raise ValueError(f'Currently, only 4-bit weight quantization is supported for GPTQ, but got {self.weight_bits} bits.')
def __init__(self, weight_bits: int, group_size: int, desc_act: bool) -> None:
    self.weight_bits = weight_bits
    self.group_size = group_size
    self.desc_act = desc_act
    self.pack_factor = 32 // self.weight_bits
    if self.weight_bits != 4:
        raise ValueError(f'Currently, only 4-bit weight quantization is supported for GPTQ, but got {self.weight_bits} bits.')
null
allocate
if not self.free_blocks:
    raise ValueError('Out of memory! No free blocks are available.')
block = self.free_blocks.pop()
block.ref_count = 1
return block
def allocate(self) -> PhysicalTokenBlock:
    if not self.free_blocks:
        raise ValueError('Out of memory! No free blocks are available.')
    block = self.free_blocks.pop()
    block.ref_count = 1
    return block
null
free_finished_seq_groups
self.running = [seq_group for seq_group in self.running if not seq_group.is_finished()]
def free_finished_seq_groups(self) -> None:
    self.running = [seq_group for seq_group in self.running if not seq_group.is_finished()]
null
__init__
self.block_number = block_number
self.block_size = block_size
self.token_ids = [_BLANK_TOKEN_ID] * block_size
self.num_tokens = 0
def __init__(self, block_number: int, block_size: int) -> None:
    self.block_number = block_number
    self.block_size = block_size
    self.token_ids = [_BLANK_TOKEN_ID] * block_size
    self.num_tokens = 0
null
load_weights
stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v'), ('gate_up_proj', 'gate_proj', 0), ('gate_up_proj', 'up_proj', 1)] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'rotary_emb.inv_freq' in name: continue if 'rotary_emb.cos_cached' in name or 'rotary_emb.sin_cached' in name: continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]= None, load_format: str='auto', revision: Optional[str]=None): stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v'), ('gate_up_proj', 'gate_proj', 0), ('gate_up_proj', 'up_proj', 1)] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'rotary_emb.inv_freq' in name: continue if 'rotary_emb.cos_cached' in name or 'rotary_emb.sin_cached' in name: continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
null
generate_greedy
outputs = self.generate(prompts, do_sample=False, max_new_tokens=max_tokens)
for i in range(len(outputs)):
    output_ids, output_str = outputs[i]
    outputs[i] = output_ids[0], output_str[0]
return outputs
def generate_greedy(self, prompts: List[str], max_tokens: int) -> List[Tuple[List[int], str]]:
    outputs = self.generate(prompts, do_sample=False, max_new_tokens=max_tokens)
    for i in range(len(outputs)):
        output_ids, output_str = outputs[i]
        outputs[i] = output_ids[0], output_str[0]
    return outputs
null
forward
gate_up, _ = self.gate_up_proj(x)
x = self.act_fn(gate_up)
x, _ = self.down_proj(x)
return x
def forward(self, x):
    gate_up, _ = self.gate_up_proj(x)
    x = self.act_fn(gate_up)
    x, _ = self.down_proj(x)
    return x
null
get_last_token_id
if not self.output_token_ids:
    return self.prompt_token_ids[-1]
return self.output_token_ids[-1]
def get_last_token_id(self) -> int:
    if not self.output_token_ids:
        return self.prompt_token_ids[-1]
    return self.output_token_ids[-1]
null
fork_seq
self.block_manager.fork(parent_seq, child_seq)
def fork_seq(self, parent_seq: Sequence, child_seq: Sequence) ->None: self.block_manager.fork(parent_seq, child_seq)
null
__init__
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def __init__(self, hidden_size: int, eps: float = 1e-06) -> None:
    super().__init__()
    self.weight = nn.Parameter(torch.ones(hidden_size))
    self.variance_epsilon = eps
null
forward
gate_up, _ = self.gate_up_proj(x)
x = self.act_fn(gate_up)
x, _ = self.down_proj(x)
return x
def forward(self, x):
    gate_up, _ = self.gate_up_proj(x)
    x = self.act_fn(gate_up)
    x, _ = self.down_proj(x)
    return x
null
append_slot
"""Allocate a physical slot for a new token.""" logical_blocks = seq.logical_token_blocks block_table = self.block_tables[seq.seq_id] if len(block_table) < len(logical_blocks): if self.block_sliding_window and len(block_table ) >= self.block_sliding_window: block_table.append(block_table[len(block_table) % self. block_sliding_window]) else: block = self.gpu_allocator.allocate() block_table.append(block) return None last_block = block_table[-1] assert last_block.device == Device.GPU if last_block.ref_count == 1: return None else: new_block = self.gpu_allocator.allocate() block_table[-1] = new_block self.gpu_allocator.free(last_block) return last_block.block_number, new_block.block_number
def append_slot(self, seq: Sequence) ->Optional[Tuple[int, int]]: """Allocate a physical slot for a new token.""" logical_blocks = seq.logical_token_blocks block_table = self.block_tables[seq.seq_id] if len(block_table) < len(logical_blocks): if self.block_sliding_window and len(block_table ) >= self.block_sliding_window: block_table.append(block_table[len(block_table) % self. block_sliding_window]) else: block = self.gpu_allocator.allocate() block_table.append(block) return None last_block = block_table[-1] assert last_block.device == Device.GPU if last_block.ref_count == 1: return None else: new_block = self.gpu_allocator.allocate() block_table[-1] = new_block self.gpu_allocator.free(last_block) return last_block.block_number, new_block.block_number
Allocate a physical slot for a new token.
get_node_and_gpu_ids
node_id = ray.get_runtime_context().get_node_id()
gpu_ids = ray.get_gpu_ids()
return node_id, gpu_ids
def get_node_and_gpu_ids(self) -> Tuple[str, List[int]]:
    node_id = ray.get_runtime_context().get_node_id()
    gpu_ids = ray.get_gpu_ids()
    return node_id, gpu_ids
null
forward
x, _ = self.dense_h_to_4h(x)
x = self.gelu_impl(x)
x, _ = self.dense_4h_to_h(x)
return x
def forward(self, x: torch.Tensor) -> torch.Tensor:
    x, _ = self.dense_h_to_4h(x)
    x = self.gelu_impl(x)
    x, _ = self.dense_4h_to_h(x)
    return x
null
_get_alibi_slopes
closest_power_of_2 = 2 ** math.floor(math.log2(total_num_heads))
base = torch.tensor(2 ** -2 ** -(math.log2(closest_power_of_2) - 3), dtype=torch.float32)
powers = torch.arange(1, 1 + closest_power_of_2, dtype=torch.int32)
slopes = torch.pow(base, powers)
if closest_power_of_2 != total_num_heads:
    extra_base = torch.tensor(2 ** -2 ** -(math.log2(2 * closest_power_of_2) - 3), dtype=torch.float32)
    num_remaining_heads = min(closest_power_of_2, total_num_heads - closest_power_of_2)
    extra_powers = torch.arange(start=1, end=1 + 2 * num_remaining_heads, step=2, dtype=torch.int32)
    slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
return slopes
def _get_alibi_slopes(total_num_heads: int) -> torch.Tensor:
    closest_power_of_2 = 2 ** math.floor(math.log2(total_num_heads))
    base = torch.tensor(2 ** -2 ** -(math.log2(closest_power_of_2) - 3), dtype=torch.float32)
    powers = torch.arange(1, 1 + closest_power_of_2, dtype=torch.int32)
    slopes = torch.pow(base, powers)
    if closest_power_of_2 != total_num_heads:
        extra_base = torch.tensor(2 ** -2 ** -(math.log2(2 * closest_power_of_2) - 3), dtype=torch.float32)
        num_remaining_heads = min(closest_power_of_2, total_num_heads - closest_power_of_2)
        extra_powers = torch.arange(start=1, end=1 + 2 * num_remaining_heads, step=2, dtype=torch.int32)
        slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
    return slopes
null
_apply_min_p
""" Adapted from https://github.com/oobabooga/text-generation-webui/blob/3146124ec01f02c8fb1650a6517cf1b60b537aaf/modules/sampler_hijack.py#L16C17-L16C17 """ probs = torch.softmax(logits, dim=-1) top_probs, _ = probs.max(dim=-1, keepdim=True) scaled_min_p = min_p.unsqueeze_(dim=1) * top_probs tokens_to_remove = probs < scaled_min_p logits = logits.masked_fill_(tokens_to_remove, -float('inf')) return logits
def _apply_min_p(logits: torch.Tensor, min_p: torch.Tensor) ->torch.Tensor: """ Adapted from https://github.com/oobabooga/text-generation-webui/blob/3146124ec01f02c8fb1650a6517cf1b60b537aaf/modules/sampler_hijack.py#L16C17-L16C17 """ probs = torch.softmax(logits, dim=-1) top_probs, _ = probs.max(dim=-1, keepdim=True) scaled_min_p = min_p.unsqueeze_(dim=1) * top_probs tokens_to_remove = probs < scaled_min_p logits = logits.masked_fill_(tokens_to_remove, -float('inf')) return logits
Adapted from https://github.com/oobabooga/text-generation-webui/blob/3146124ec01f02c8fb1650a6517cf1b60b537aaf/modules/sampler_hijack.py#L16C17-L16C17
_init_engine
if not self.engine_use_ray:
    engine_class = self._engine_class
elif self.worker_use_ray:
    engine_class = ray.remote(num_cpus=0)(self._engine_class).remote
else:
    cache_config = args[1]
    parallel_config = args[2]
    if parallel_config.tensor_parallel_size == 1:
        num_gpus = cache_config.gpu_memory_utilization
    else:
        num_gpus = 1
    engine_class = ray.remote(num_gpus=num_gpus)(self._engine_class).remote
return engine_class(*args, **kwargs)
def _init_engine(self, *args, **kwargs) -> Union[_AsyncLLMEngine, 'ray.ObjectRef']:
    if not self.engine_use_ray:
        engine_class = self._engine_class
    elif self.worker_use_ray:
        engine_class = ray.remote(num_cpus=0)(self._engine_class).remote
    else:
        cache_config = args[1]
        parallel_config = args[2]
        if parallel_config.tensor_parallel_size == 1:
            num_gpus = cache_config.gpu_memory_utilization
        else:
            num_gpus = 1
        engine_class = ray.remote(num_gpus=num_gpus)(self._engine_class).remote
    return engine_class(*args, **kwargs)
null
forward
qkv, bias = self.query_key_value(hidden_states)
if bias is not None:
    qkv += bias
q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
if self.use_rotary:
    q, k = self.rotary_emb(positions, q, k)
k_cache, v_cache = kv_cache
attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata)
attn_output, bias = self.dense(attn_output)
return attn_output, bias
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) -> torch.Tensor:
    qkv, bias = self.query_key_value(hidden_states)
    if bias is not None:
        qkv += bias
    q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
    if self.use_rotary:
        q, k = self.rotary_emb(positions, q, k)
    k_cache, v_cache = kv_cache
    attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata)
    attn_output, bias = self.dense(attn_output)
    return attn_output, bias
null
forward
hidden_states = self.transformer(input_ids, positions, kv_caches, input_metadata)
return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) -> torch.Tensor:
    hidden_states = self.transformer(input_ids, positions, kv_caches, input_metadata)
    return hidden_states
null
get_name
return 'awq'
def get_name(self) ->str: return 'awq'
null
load_weights
stacked_params_mapping = [('gate_up_proj', 'w2', 0), ('gate_up_proj', 'w1', 1)] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'rotary_emb.inv_freq' in name: continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]= None, load_format: str='auto', revision: Optional[str]=None): stacked_params_mapping = [('gate_up_proj', 'w2', 0), ('gate_up_proj', 'w1', 1)] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'rotary_emb.inv_freq' in name: continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
null
get_nvcc_cuda_version
"""Get the CUDA version from nvcc. Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py """ nvcc_output = subprocess.check_output([cuda_dir + '/bin/nvcc', '-V'], universal_newlines=True) output = nvcc_output.split() release_idx = output.index('release') + 1 nvcc_cuda_version = parse(output[release_idx].split(',')[0]) return nvcc_cuda_version
def get_nvcc_cuda_version(cuda_dir: str) ->Version: """Get the CUDA version from nvcc. Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py """ nvcc_output = subprocess.check_output([cuda_dir + '/bin/nvcc', '-V'], universal_newlines=True) output = nvcc_output.split() release_idx = output.index('release') + 1 nvcc_cuda_version = parse(output[release_idx].split(',')[0]) return nvcc_cuda_version
Get the CUDA version from nvcc. Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py
_verify_greedy_sampling
if self.best_of > 1:
    raise ValueError(f'best_of must be 1 when using greedy sampling.Got {self.best_of}.')
def _verify_greedy_sampling(self) -> None:
    if self.best_of > 1:
        raise ValueError(f'best_of must be 1 when using greedy sampling.Got {self.best_of}.')
null
sample
next_tokens = self.sampler(self.lm_head_weight, hidden_states, sampling_metadata)
return next_tokens
def sample(self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]:
    next_tokens = self.sampler(self.lm_head_weight, hidden_states, sampling_metadata)
    return next_tokens
null
forward
hidden_states = self.transformer(input_ids, positions, kv_caches, input_metadata) hidden_states = self.lm_head.ln(hidden_states) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.transformer(input_ids, positions, kv_caches, input_metadata) hidden_states = self.lm_head.ln(hidden_states) return hidden_states
null
forward
qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) q, k = self.rotary_emb(positions, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.o_proj(attn_output) return output
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) q, k = self.rotary_emb(positions, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.o_proj(attn_output) return output
null
forward
ops.rotary_embedding(positions, query, key, self.head_size, self. cos_sin_cache, self.is_neox_style) return query, key
def forward(self, positions: torch.Tensor, query: torch.Tensor, key: torch. Tensor) ->Tuple[torch.Tensor, torch.Tensor]: ops.rotary_embedding(positions, query, key, self.head_size, self. cos_sin_cache, self.is_neox_style) return query, key
null
_verify_non_beam_search
if self.early_stopping is not False: raise ValueError( 'early_stopping is not effective and must be False when not using beam search.' ) if self.length_penalty < 1.0 - _SAMPLING_EPS or self.length_penalty > 1.0 + _SAMPLING_EPS: raise ValueError( 'length_penalty is not effective and must be the default value of 1.0 when not using beam search.' )
def _verify_non_beam_search(self) ->None: if self.early_stopping is not False: raise ValueError( 'early_stopping is not effective and must be False when not using beam search.' ) if (self.length_penalty < 1.0 - _SAMPLING_EPS or self.length_penalty > 1.0 + _SAMPLING_EPS): raise ValueError( 'length_penalty is not effective and must be the default value of 1.0 when not using beam search.' )
null
__init__
self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, ** kwargs)
def __init__(self, vocab_size=100008, hidden_size=4096, intermediate_size= 11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings= 2048, initializer_range=0.006, rms_norm_eps=1e-05, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings= False, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
null
_set_config_defaults
for k, v in config_defaults.items(): if k not in config: config[k] = v return config
def _set_config_defaults(self, config: Dict[str, Any], config_defaults: Dict[str, Any]) ->Dict[str, Any]: for k, v in config_defaults.items(): if k not in config: config[k] = v return config
null
__init__
self.quant_config = quant_config
def __init__(self, quant_config: SqueezeLLMConfig): self.quant_config = quant_config
null
get_config_filenames
return ['quantize_config.json']
@classmethod def get_config_filenames(cls) ->List[str]: return ['quantize_config.json']
null
__init__
super().__init__() hidden_size = config.n_embd self.fc_in = ColumnParallelLinear(hidden_size, intermediate_size, linear_method=linear_method) self.fc_out = RowParallelLinear(intermediate_size, hidden_size, linear_method=linear_method) quant_config = getattr(linear_method, 'quant_config', None) self.act = get_act_fn(config.activation_function, quant_config, intermediate_size)
def __init__(self, intermediate_size: int, config: GPTJConfig, linear_method: Optional[LinearMethodBase]=None): super().__init__() hidden_size = config.n_embd self.fc_in = ColumnParallelLinear(hidden_size, intermediate_size, linear_method=linear_method) self.fc_out = RowParallelLinear(intermediate_size, hidden_size, linear_method=linear_method) quant_config = getattr(linear_method, 'quant_config', None) self.act = get_act_fn(config.activation_function, quant_config, intermediate_size)
null
test_get_prompt_logprobs
max_tokens = 5 hf_model = hf_runner(model, dtype=dtype) hf_logprobs = hf_model.generate_greedy_logprobs(example_prompts, max_tokens =max_tokens) del hf_model vllm_model = vllm_runner(model, dtype=dtype) vllm_sampling_params = SamplingParams(max_tokens=max_tokens, logprobs=5, prompt_logprobs=5, temperature=0.0) vllm_results = vllm_model.model.generate(example_prompts, sampling_params= vllm_sampling_params) for result in vllm_results: assert result.prompt_logprobs is not None assert result.outputs[0].logprobs is not None for vllm_result, hf_logprob in zip(vllm_results, hf_logprobs): vllm_prompt_logprobs = vllm_result.prompt_logprobs[1:] for i, vllm_prompt_logprob_dict in enumerate(vllm_prompt_logprobs): for token_id, logprob in vllm_prompt_logprob_dict.items(): torch.testing.assert_close(logprob, hf_logprob[0][i][token_id]. item(), atol=0.01, rtol=0.01) vllm_sample_logprobs = vllm_result.outputs[0].logprobs for i, vllm_sample_logprob_dict in enumerate(vllm_sample_logprobs): for token_id, logprob in vllm_sample_logprob_dict.items(): torch.testing.assert_close(logprob, hf_logprob[i][-1][token_id] .item(), atol=0.01, rtol=0.01)
@pytest.mark.parametrize('model', MODELS) @pytest.mark.parametrize('dtype', ['half']) def test_get_prompt_logprobs(hf_runner, vllm_runner, model, dtype, example_prompts): max_tokens = 5 hf_model = hf_runner(model, dtype=dtype) hf_logprobs = hf_model.generate_greedy_logprobs(example_prompts, max_tokens=max_tokens) del hf_model vllm_model = vllm_runner(model, dtype=dtype) vllm_sampling_params = SamplingParams(max_tokens=max_tokens, logprobs=5, prompt_logprobs=5, temperature=0.0) vllm_results = vllm_model.model.generate(example_prompts, sampling_params=vllm_sampling_params) for result in vllm_results: assert result.prompt_logprobs is not None assert result.outputs[0].logprobs is not None for vllm_result, hf_logprob in zip(vllm_results, hf_logprobs): vllm_prompt_logprobs = vllm_result.prompt_logprobs[1:] for i, vllm_prompt_logprob_dict in enumerate(vllm_prompt_logprobs): for token_id, logprob in vllm_prompt_logprob_dict.items(): torch.testing.assert_close(logprob, hf_logprob[0][i][ token_id].item(), atol=0.01, rtol=0.01) vllm_sample_logprobs = vllm_result.outputs[0].logprobs for i, vllm_sample_logprob_dict in enumerate(vllm_sample_logprobs): for token_id, logprob in vllm_sample_logprob_dict.items(): torch.testing.assert_close(logprob, hf_logprob[i][-1][ token_id].item(), atol=0.01, rtol=0.01)
null
from_config
weight_bits = cls.get_from_keys(config, ['wbits']) return cls(weight_bits)
@classmethod def from_config(cls, config: Dict[str, Any]) ->'SqueezeLLMConfig': weight_bits = cls.get_from_keys(config, ['wbits']) return cls(weight_bits)
null
_random_sample
random_samples = random_samples.cpu() sample_idx = 0 results = [] for seq_group, is_prompt in zip(selected_seq_groups, is_prompts): seq_ids, sampling_params = seq_group num_parent_seqs = len(seq_ids) if is_prompt: parent_ids = [0] * sampling_params.best_of next_token_ids = random_samples[sample_idx, :sampling_params.best_of ].tolist() else: parent_ids = list(range(num_parent_seqs)) next_token_ids = random_samples[sample_idx:sample_idx + num_parent_seqs, 0].tolist() results.append((next_token_ids, parent_ids)) sample_idx += num_parent_seqs return results
def _random_sample(selected_seq_groups: List[Tuple[List[int], SamplingParams]], is_prompts: List[bool], random_samples: torch.Tensor ) ->List[Tuple[List[int], List[int]]]: random_samples = random_samples.cpu() sample_idx = 0 results = [] for seq_group, is_prompt in zip(selected_seq_groups, is_prompts): seq_ids, sampling_params = seq_group num_parent_seqs = len(seq_ids) if is_prompt: parent_ids = [0] * sampling_params.best_of next_token_ids = random_samples[sample_idx, :sampling_params. best_of].tolist() else: parent_ids = list(range(num_parent_seqs)) next_token_ids = random_samples[sample_idx:sample_idx + num_parent_seqs, 0].tolist() results.append((next_token_ids, parent_ids)) sample_idx += num_parent_seqs return results
null
__init__
super().__init__() self.config = config self.linear_method = linear_method self.model = YiModel(config, linear_method) self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size) self.sampler = Sampler(config.vocab_size)
def __init__(self, config: YiConfig, linear_method: Optional[ LinearMethodBase]=None) ->None: super().__init__() self.config = config self.linear_method = linear_method self.model = YiModel(config, linear_method) self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size) self.sampler = Sampler(config.vocab_size)
null
forward
hidden_states = self.embed_tokens(input_ids) for i in range(len(self.layers)): layer = self.layers[i] hidden_states = layer(positions, hidden_states, kv_caches[i], input_metadata) hidden_states = self.norm(hidden_states) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.embed_tokens(input_ids) for i in range(len(self.layers)): layer = self.layers[i] hidden_states = layer(positions, hidden_states, kv_caches[i], input_metadata) hidden_states = self.norm(hidden_states) return hidden_states
null
__init__
super().__init__() self.ln_1 = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) rope_theta = getattr(config, 'rope_theta', 10000) rope_scaling = getattr(config, 'rope_scaling', None) self.attn = QWenAttention(config.hidden_size, config.num_attention_heads, config.max_position_embeddings, rope_theta=rope_theta, rope_scaling= rope_scaling, linear_method=linear_method) self.ln_2 = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.mlp = QWenMLP(config.hidden_size, config.intermediate_size // 2, linear_method=linear_method)
def __init__(self, config: QWenConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.ln_1 = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) rope_theta = getattr(config, 'rope_theta', 10000) rope_scaling = getattr(config, 'rope_scaling', None) self.attn = QWenAttention(config.hidden_size, config. num_attention_heads, config.max_position_embeddings, rope_theta= rope_theta, rope_scaling=rope_scaling, linear_method=linear_method) self.ln_2 = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.mlp = QWenMLP(config.hidden_size, config.intermediate_size // 2, linear_method=linear_method)
null
swap_in
mapping: Dict[PhysicalTokenBlock, PhysicalTokenBlock] = {} for seq in seq_group.get_seqs(status=SequenceStatus.SWAPPED): new_block_table: BlockTable = [] block_table = self.block_tables[seq.seq_id] for cpu_block in block_table: if cpu_block in mapping: gpu_block = mapping[cpu_block] gpu_block.ref_count += 1 else: gpu_block = self.gpu_allocator.allocate() mapping[cpu_block] = gpu_block new_block_table.append(gpu_block) self.cpu_allocator.free(cpu_block) self.block_tables[seq.seq_id] = new_block_table block_number_mapping = {cpu_block.block_number: gpu_block.block_number for cpu_block, gpu_block in mapping.items()} return block_number_mapping
def swap_in(self, seq_group: SequenceGroup) ->Dict[int, int]: mapping: Dict[PhysicalTokenBlock, PhysicalTokenBlock] = {} for seq in seq_group.get_seqs(status=SequenceStatus.SWAPPED): new_block_table: BlockTable = [] block_table = self.block_tables[seq.seq_id] for cpu_block in block_table: if cpu_block in mapping: gpu_block = mapping[cpu_block] gpu_block.ref_count += 1 else: gpu_block = self.gpu_allocator.allocate() mapping[cpu_block] = gpu_block new_block_table.append(gpu_block) self.cpu_allocator.free(cpu_block) self.block_tables[seq.seq_id] = new_block_table block_number_mapping = {cpu_block.block_number: gpu_block.block_number for cpu_block, gpu_block in mapping.items()} return block_number_mapping
null
_build_sampler_output
sampler_output = [] for seq_group, sample_result, group_prompt_logprobs, group_sample_logprobs in zip( sampling_metadata.seq_groups, sample_results, prompt_logprobs, sample_logprobs): seq_ids, _ = seq_group next_token_ids, parent_ids = sample_result seq_outputs = [] for parent_id, next_token_id, logprobs in zip(parent_ids, next_token_ids, group_sample_logprobs): seq_outputs.append(SequenceOutput(seq_ids[parent_id], next_token_id, logprobs)) sampler_output.append(SequenceGroupOutput(seq_outputs, group_prompt_logprobs)) return sampler_output
def _build_sampler_output(sample_results: List[Tuple[List[int], List[int]]], sampling_metadata: SamplingMetadata, prompt_logprobs: List[Optional[ PromptLogprobs]], sample_logprobs: List[SampleLogprobs]) ->SamplerOutput: sampler_output = [] for seq_group, sample_result, group_prompt_logprobs, group_sample_logprobs in zip( sampling_metadata.seq_groups, sample_results, prompt_logprobs, sample_logprobs): seq_ids, _ = seq_group next_token_ids, parent_ids = sample_result seq_outputs = [] for parent_id, next_token_id, logprobs in zip(parent_ids, next_token_ids, group_sample_logprobs): seq_outputs.append(SequenceOutput(seq_ids[parent_id], next_token_id, logprobs)) sampler_output.append(SequenceGroupOutput(seq_outputs, group_prompt_logprobs)) return sampler_output
null
apply_weights
qweight = weights['qweight'] qzeros = weights['qzeros'] scales = weights['scales'] pack_factor = self.quant_config.pack_factor out_shape = x.shape[:-1] + (qweight.shape[-1] * pack_factor,) reshaped_x = x.reshape(-1, x.shape[-1]) out = ops.awq_gemm(reshaped_x, qweight, scales, qzeros, pack_factor) if bias is not None: out = out + bias return out.reshape(out_shape)
def apply_weights(self, weights: Dict[str, Any], x: torch.Tensor, bias: Optional[torch.Tensor]=None) ->torch.Tensor: qweight = weights['qweight'] qzeros = weights['qzeros'] scales = weights['scales'] pack_factor = self.quant_config.pack_factor out_shape = x.shape[:-1] + (qweight.shape[-1] * pack_factor,) reshaped_x = x.reshape(-1, x.shape[-1]) out = ops.awq_gemm(reshaped_x, qweight, scales, qzeros, pack_factor) if bias is not None: out = out + bias return out.reshape(out_shape)
null
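A small shape sketch for the out_shape computation in apply_weights above; the sizes are illustrative assumptions, with pack_factor = 8 corresponding to 4-bit weights packed eight per 32-bit word:

in_features, out_features, pack_factor = 4096, 11008, 8  # assumed sizes
qweight_columns = out_features // pack_factor  # width of the packed qweight tensor
print(qweight_columns * pack_factor)  # -> 11008, the unpacked output width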
forward
hidden_states = self.embed_tokens(input_ids) residual = None for i in range(len(self.layers)): layer = self.layers[i] hidden_states, residual = layer(positions, hidden_states, kv_caches[i], input_metadata, residual) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.embed_tokens(input_ids) residual = None for i in range(len(self.layers)): layer = self.layers[i] hidden_states, residual = layer(positions, hidden_states, kv_caches [i], input_metadata, residual) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states
null
build_demo
with gr.Blocks() as demo: gr.Markdown('# vLLM text completion demo\n') inputbox = gr.Textbox(label='Input', placeholder= 'Enter text and press ENTER') outputbox = gr.Textbox(label='Output', placeholder= 'Generated result from the model') inputbox.submit(http_bot, [inputbox], [outputbox]) return demo
def build_demo(): with gr.Blocks() as demo: gr.Markdown('# vLLM text completion demo\n') inputbox = gr.Textbox(label='Input', placeholder= 'Enter text and press ENTER') outputbox = gr.Textbox(label='Output', placeholder= 'Generated result from the model') inputbox.submit(http_bot, [inputbox], [outputbox]) return demo
null
__init__
super().__init__() self.dense_h_to_4h = ColumnParallelLinear(config.hidden_size, config. intermediate_size, linear_method=linear_method) self.dense_4h_to_h = RowParallelLinear(config.intermediate_size, config. hidden_size, linear_method=linear_method) quant_config = getattr(linear_method, 'quant_config', None) self.act = get_act_fn(config.hidden_act, quant_config, config.intermediate_size )
def __init__(self, config: GPTNeoXConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.dense_h_to_4h = ColumnParallelLinear(config.hidden_size, config. intermediate_size, linear_method=linear_method) self.dense_4h_to_h = RowParallelLinear(config.intermediate_size, config .hidden_size, linear_method=linear_method) quant_config = getattr(linear_method, 'quant_config', None) self.act = get_act_fn(config.hidden_act, quant_config, config. intermediate_size)
null
_convert_token_to_id
"""Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token)
def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token)
Converts a token (str) in an id using the vocab.
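A hypothetical usage sketch of the tokenizer method above, assuming a SentencePiece model file is available locally (the file name and piece are illustrative):

import sentencepiece as spm

sp_model = spm.SentencePieceProcessor(model_file='tokenizer.model')  # assumed path
print(sp_model.piece_to_id('▁hello'))  # piece id, or the unk id if the piece is absent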
__init__
super().__init__() self.vocab_size = vocab_size
def __init__(self, vocab_size: int) ->None: super().__init__() self.vocab_size = vocab_size
null
forward
if self.input_is_parallel: input_parallel = input_ else: tp_rank = get_tensor_model_parallel_rank() splitted_input = split_tensor_along_last_dim(input_, num_partitions= self.tp_size) input_parallel = splitted_input[tp_rank].contiguous() output_parallel = self.linear_method.apply_weights(self.linear_weights, input_parallel) if self.reduce_results and self.tp_size > 1: output_ = tensor_model_parallel_all_reduce(output_parallel) else: output_ = output_parallel if not self.skip_bias_add: output = output_ + self.bias if self.bias is not None else output_ output_bias = None else: output = output_ output_bias = self.bias return output, output_bias
def forward(self, input_): if self.input_is_parallel: input_parallel = input_ else: tp_rank = get_tensor_model_parallel_rank() splitted_input = split_tensor_along_last_dim(input_, num_partitions =self.tp_size) input_parallel = splitted_input[tp_rank].contiguous() output_parallel = self.linear_method.apply_weights(self.linear_weights, input_parallel) if self.reduce_results and self.tp_size > 1: output_ = tensor_model_parallel_all_reduce(output_parallel) else: output_ = output_parallel if not self.skip_bias_add: output = output_ + self.bias if self.bias is not None else output_ output_bias = None else: output = output_ output_bias = self.bias return output, output_bias
null
forward
if residual is None: residual = hidden_states hidden_states = self.ln1(hidden_states) else: hidden_states, residual = self.ln1(hidden_states, residual) hidden_states = self.self_attn(positions=positions, hidden_states= hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states, residual = self.ln2(hidden_states, residual) hidden_states = self.mlp(hidden_states) return hidden_states, residual
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata, residual: Optional[ torch.Tensor]) ->Tuple[torch.Tensor, torch.Tensor]: if residual is None: residual = hidden_states hidden_states = self.ln1(hidden_states) else: hidden_states, residual = self.ln1(hidden_states, residual) hidden_states = self.self_attn(positions=positions, hidden_states= hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states, residual = self.ln2(hidden_states, residual) hidden_states = self.mlp(hidden_states) return hidden_states, residual
null
test_multi_query_kv_attention
random.seed(seed) torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' max_len = min(MAX_SEQ_LEN, 4096) seq_lens = random.sample(range(1, max_len), num_seqs) num_tokens = sum(seq_lens) scale = float(1.0 / head_size ** 0.5) num_query_heads, num_kv_heads = num_heads qkv = torch.empty(num_tokens, num_query_heads + 2 * num_kv_heads, head_size, dtype=dtype, device=gpu_id) qkv.uniform_(-scale, scale) query, key, value = qkv.split([num_query_heads, num_kv_heads, num_kv_heads], dim=1) num_queries_per_kv = num_query_heads // num_kv_heads if num_queries_per_kv > 1: key = torch.repeat_interleave(key, num_queries_per_kv, dim=1) value = torch.repeat_interleave(value, num_queries_per_kv, dim=1) attn_bias = BlockDiagonalCausalMask.from_seqlens(seq_lens) output = xops.memory_efficient_attention_forward(query.unsqueeze(0), key. unsqueeze(0), value.unsqueeze(0), attn_bias=attn_bias, p=0.0, scale=scale) output = output.squeeze(0) cu_seq_lens = [0] for seq_len in seq_lens: cu_seq_lens.append(cu_seq_lens[-1] + seq_len) ref_output = ref_multi_query_kv_attention(cu_seq_lens, query, key, value, scale, dtype) assert torch.allclose(output, ref_output, atol=0.001, rtol=1e-05)
@pytest.mark.parametrize('num_seqs', NUM_PREFILL_SEQS) @pytest.mark.parametrize('num_heads', NUM_HEADS) @pytest.mark.parametrize('head_size', HEAD_SIZES) @pytest.mark.parametrize('dtype', DTYPES) @pytest.mark.parametrize('seed', SEEDS) @pytest.mark.parametrize('device', DEVICES) @torch.inference_mode() def test_multi_query_kv_attention(num_seqs: int, num_heads: Tuple[int, int], head_size: int, dtype: torch.dtype, seed: int, device: int) ->None: random.seed(seed) torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' max_len = min(MAX_SEQ_LEN, 4096) seq_lens = random.sample(range(1, max_len), num_seqs) num_tokens = sum(seq_lens) scale = float(1.0 / head_size ** 0.5) num_query_heads, num_kv_heads = num_heads qkv = torch.empty(num_tokens, num_query_heads + 2 * num_kv_heads, head_size, dtype=dtype, device=gpu_id) qkv.uniform_(-scale, scale) query, key, value = qkv.split([num_query_heads, num_kv_heads, num_kv_heads], dim=1) num_queries_per_kv = num_query_heads // num_kv_heads if num_queries_per_kv > 1: key = torch.repeat_interleave(key, num_queries_per_kv, dim=1) value = torch.repeat_interleave(value, num_queries_per_kv, dim=1) attn_bias = BlockDiagonalCausalMask.from_seqlens(seq_lens) output = xops.memory_efficient_attention_forward(query.unsqueeze(0), key.unsqueeze(0), value.unsqueeze(0), attn_bias=attn_bias, p=0.0, scale=scale) output = output.squeeze(0) cu_seq_lens = [0] for seq_len in seq_lens: cu_seq_lens.append(cu_seq_lens[-1] + seq_len) ref_output = ref_multi_query_kv_attention(cu_seq_lens, query, key, value, scale, dtype) assert torch.allclose(output, ref_output, atol=0.001, rtol=1e-05)
null
allocate_cpu_cache
cpu_cache: List[KVCache] = [] key_block_shape = self.get_key_block_shape() value_block_shape = self.get_value_block_shape() pin_memory = not in_wsl() if not pin_memory: logger.warning( "Using 'pin_memory=False' as WSL is detected. This may slow down the performance." ) for _ in range(self.num_layers): key_blocks = torch.empty(size=(self.num_cpu_blocks, *key_block_shape), dtype=self.dtype, pin_memory=pin_memory) value_blocks = torch.empty(size=(self.num_cpu_blocks, * value_block_shape), dtype=self.dtype, pin_memory=pin_memory) cpu_cache.append((key_blocks, value_blocks)) return cpu_cache
def allocate_cpu_cache(self) ->List[KVCache]: cpu_cache: List[KVCache] = [] key_block_shape = self.get_key_block_shape() value_block_shape = self.get_value_block_shape() pin_memory = not in_wsl() if not pin_memory: logger.warning( "Using 'pin_memory=False' as WSL is detected. This may slow down the performance." ) for _ in range(self.num_layers): key_blocks = torch.empty(size=(self.num_cpu_blocks, * key_block_shape), dtype=self.dtype, pin_memory=pin_memory) value_blocks = torch.empty(size=(self.num_cpu_blocks, * value_block_shape), dtype=self.dtype, pin_memory=pin_memory) cpu_cache.append((key_blocks, value_blocks)) return cpu_cache
null
weight_loader
parallel_dim = param.parallel_dim assert loaded_weight.shape[parallel_dim] == self.num_embeddings loaded_weight = loaded_weight[self.vocab_start_index:self.vocab_end_index] param[:loaded_weight.shape[0]].data.copy_(loaded_weight)
def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor): parallel_dim = param.parallel_dim assert loaded_weight.shape[parallel_dim] == self.num_embeddings loaded_weight = loaded_weight[self.vocab_start_index:self.vocab_end_index] param[:loaded_weight.shape[0]].data.copy_(loaded_weight)
null
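A small arithmetic sketch of the vocabulary sharding that the weight_loader above relies on; tensor-parallel size, rank, and vocabulary size are illustrative assumptions, and each rank keeps a contiguous slice of the embedding rows:

num_embeddings, tp_size, tp_rank = 32000, 4, 1  # assumed sizes
per_partition = num_embeddings // tp_size
vocab_start_index = tp_rank * per_partition
vocab_end_index = vocab_start_index + per_partition
print(vocab_start_index, vocab_end_index)  # -> 8000 16000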
__init__
super().__init__() self.apply_residual_connection_post_layernorm = (config. apply_residual_connection_post_layernorm) self.fp32_residual_connection = config.fp32_residual_connection layer_norm_func = RMSNorm if config.rmsnorm else LayerNorm self.input_layernorm = layer_norm_func(config.hidden_size, eps=config. layernorm_epsilon) self.self_attention = GLMAttention(config, linear_method) self.hidden_dropout = config.hidden_dropout self.post_attention_layernorm = layer_norm_func(config.hidden_size, eps= config.layernorm_epsilon) self.mlp = GLMMLP(config, linear_method)
def __init__(self, config, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.apply_residual_connection_post_layernorm = (config. apply_residual_connection_post_layernorm) self.fp32_residual_connection = config.fp32_residual_connection layer_norm_func = RMSNorm if config.rmsnorm else LayerNorm self.input_layernorm = layer_norm_func(config.hidden_size, eps=config. layernorm_epsilon) self.self_attention = GLMAttention(config, linear_method) self.hidden_dropout = config.hidden_dropout self.post_attention_layernorm = layer_norm_func(config.hidden_size, eps =config.layernorm_epsilon) self.mlp = GLMMLP(config, linear_method)
null
default_weight_loader
"""Default weight loader.""" assert param.size() == loaded_weight.size() param.data.copy_(loaded_weight)
def default_weight_loader(param: torch.Tensor, loaded_weight: torch.Tensor ) ->None: """Default weight loader.""" assert param.size() == loaded_weight.size() param.data.copy_(loaded_weight)
Default weight loader.
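A minimal usage sketch of the loader defined above, using synthetic tensors rather than real checkpoint weights:

import torch

param = torch.zeros(4, 8)
loaded_weight = torch.randn(4, 8)
default_weight_loader(param, loaded_weight)  # shapes match, so data is copied in place
assert torch.equal(param, loaded_weight)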
get_ip
return socket.gethostbyname(socket.gethostname())
def get_ip() ->str: return socket.gethostbyname(socket.gethostname())
null
_get_and_verify_max_len
"""Get and verify the model's maximum length.""" derived_max_model_len = float('inf') possible_keys = ['max_position_embeddings', 'n_positions', 'max_seq_len', 'seq_length', 'max_sequence_length', 'max_seq_length', 'seq_len'] for key in possible_keys: max_len_key = getattr(hf_config, key, None) if max_len_key is not None: derived_max_model_len = min(derived_max_model_len, max_len_key) if derived_max_model_len == float('inf'): if max_model_len is not None: return max_model_len default_max_len = 2048 logger.warning( f"The model's config.json does not contain any of the following keys to determine the original maximum length of the model: {possible_keys}. Assuming the model's maximum length is {default_max_len}." ) derived_max_model_len = default_max_len rope_scaling = getattr(hf_config, 'rope_scaling', None) if rope_scaling is not None: assert 'factor' in rope_scaling scaling_factor = rope_scaling['factor'] if rope_scaling['type'] == 'yarn': derived_max_model_len = rope_scaling['original_max_position_embeddings' ] derived_max_model_len *= scaling_factor if max_model_len is None: max_model_len = derived_max_model_len elif max_model_len > derived_max_model_len: raise ValueError( f"User-specified max_model_len ({max_model_len}) is greater than the derived max_model_len ({max_len_key}={derived_max_model_len} in model's config.json). This may lead to incorrect model outputs or CUDA errors. Make sure the value is correct and within the model context size." ) return int(max_model_len)
def _get_and_verify_max_len(hf_config: PretrainedConfig, max_model_len: Optional[int]) ->int: """Get and verify the model's maximum length.""" derived_max_model_len = float('inf') possible_keys = ['max_position_embeddings', 'n_positions', 'max_seq_len', 'seq_length', 'max_sequence_length', 'max_seq_length', 'seq_len'] for key in possible_keys: max_len_key = getattr(hf_config, key, None) if max_len_key is not None: derived_max_model_len = min(derived_max_model_len, max_len_key) if derived_max_model_len == float('inf'): if max_model_len is not None: return max_model_len default_max_len = 2048 logger.warning( f"The model's config.json does not contain any of the following keys to determine the original maximum length of the model: {possible_keys}. Assuming the model's maximum length is {default_max_len}." ) derived_max_model_len = default_max_len rope_scaling = getattr(hf_config, 'rope_scaling', None) if rope_scaling is not None: assert 'factor' in rope_scaling scaling_factor = rope_scaling['factor'] if rope_scaling['type'] == 'yarn': derived_max_model_len = rope_scaling[ 'original_max_position_embeddings'] derived_max_model_len *= scaling_factor if max_model_len is None: max_model_len = derived_max_model_len elif max_model_len > derived_max_model_len: raise ValueError( f"User-specified max_model_len ({max_model_len}) is greater than the derived max_model_len ({max_len_key}={derived_max_model_len} in model's config.json). This may lead to incorrect model outputs or CUDA errors. Make sure the value is correct and within the model context size." ) return int(max_model_len)
Get and verify the model's maximum length.
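A worked example of the rope_scaling branch above, with illustrative numbers: a config advertising max_position_embeddings=4096 and a linear scaling factor of 2.0 derives a maximum length of 8192, while the 'yarn' type restarts from original_max_position_embeddings before multiplying:

rope_scaling = {'type': 'linear', 'factor': 2.0}  # assumed config values
derived_max_model_len = 4096  # taken from max_position_embeddings
if rope_scaling['type'] == 'yarn':
    derived_max_model_len = rope_scaling['original_max_position_embeddings']
derived_max_model_len *= rope_scaling['factor']
print(derived_max_model_len)  # -> 8192.0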
get_min_capability
return 60
@classmethod def get_min_capability(cls) ->int: return 60
null
sample_requests
with open(dataset_path) as f: dataset = json.load(f) dataset = [data for data in dataset if len(data['conversations']) >= 2] dataset = [(data['conversations'][0]['value'], data['conversations'][1][ 'value']) for data in dataset] prompts = [prompt for prompt, _ in dataset] prompt_token_ids = tokenizer(prompts).input_ids completions = [completion for _, completion in dataset] completion_token_ids = tokenizer(completions).input_ids tokenized_dataset = [] for i in range(len(dataset)): output_len = len(completion_token_ids[i]) tokenized_dataset.append((prompts[i], prompt_token_ids[i], output_len)) filtered_dataset: List[Tuple[str, int, int]] = [] for prompt, prompt_token_ids, output_len in tokenized_dataset: prompt_len = len(prompt_token_ids) if prompt_len < 4 or output_len < 4: continue if prompt_len > 1024 or prompt_len + output_len > 2048: continue filtered_dataset.append((prompt, prompt_len, output_len)) sampled_requests = random.sample(filtered_dataset, num_requests) return sampled_requests
def sample_requests(dataset_path: str, num_requests: int, tokenizer: PreTrainedTokenizerBase) ->List[Tuple[str, int, int]]: with open(dataset_path) as f: dataset = json.load(f) dataset = [data for data in dataset if len(data['conversations']) >= 2] dataset = [(data['conversations'][0]['value'], data['conversations'][1] ['value']) for data in dataset] prompts = [prompt for prompt, _ in dataset] prompt_token_ids = tokenizer(prompts).input_ids completions = [completion for _, completion in dataset] completion_token_ids = tokenizer(completions).input_ids tokenized_dataset = [] for i in range(len(dataset)): output_len = len(completion_token_ids[i]) tokenized_dataset.append((prompts[i], prompt_token_ids[i], output_len)) filtered_dataset: List[Tuple[str, int, int]] = [] for prompt, prompt_token_ids, output_len in tokenized_dataset: prompt_len = len(prompt_token_ids) if prompt_len < 4 or output_len < 4: continue if prompt_len > 1024 or prompt_len + output_len > 2048: continue filtered_dataset.append((prompt, prompt_len, output_len)) sampled_requests = random.sample(filtered_dataset, num_requests) return sampled_requests
null
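A small sketch of the filtering rule in sample_requests above, applied to hypothetical (prompt_len, output_len) pairs:

pairs = [(3, 10), (200, 100), (1500, 100), (1000, 1200)]  # hypothetical lengths
kept = [(p, o) for p, o in pairs
        if p >= 4 and o >= 4 and p <= 1024 and p + o <= 2048]
print(kept)  # -> [(200, 100)]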