Dataset columns:
method_name: string, lengths 3 to 45
method_body: string, lengths 9 to 6.25k
full_code: string, lengths 35 to 7.02k
docstring: string, lengths 18 to 4.7k
__init__
super().__init__() self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [intermediate_size] * 2, bias=False, linear_method=linear_method) self.c_proj = RowParallelLinear(intermediate_size, hidden_size, bias=False, linear_method=linear_method) if hidden_act != 'silu': raise ValueError(f'Unsupported activation: {hidden_act}. Only silu is supported for now.') self.act_fn = SiluAndMul()
def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str='silu', linear_method: Optional[LinearMethodBase]=None): super().__init__() self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [intermediate_size] * 2, bias=False, linear_method=linear_method) self.c_proj = RowParallelLinear(intermediate_size, hidden_size, bias=False, linear_method=linear_method) if hidden_act != 'silu': raise ValueError(f'Unsupported activation: {hidden_act}. Only silu is supported for now.') self.act_fn = SiluAndMul()
null
__init__
super().__init__() self.hidden_size = hidden_size tp_size = get_tensor_model_parallel_world_size() self.total_num_heads = num_heads assert self.total_num_heads % tp_size == 0 self.num_heads = self.total_num_heads // tp_size self.total_num_kv_heads = num_kv_heads if self.total_num_kv_heads >= tp_size: assert self.total_num_kv_heads % tp_size == 0 else: assert tp_size % self.total_num_kv_heads == 0 self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) self.head_dim = hidden_size // self.total_num_heads self.q_size = self.num_heads * self.head_dim self.kv_size = self.num_kv_heads * self.head_dim self.scaling = self.head_dim ** -0.5 self.rope_theta = rope_theta self.max_position_embeddings = max_position_embeddings self.qkv_proj = QKVParallelLinear(hidden_size, self.head_dim, self. total_num_heads, self.total_num_kv_heads, bias=False, linear_method= linear_method) self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim, hidden_size, bias=False, linear_method=linear_method) self.rotary_emb = get_rope(self.head_dim, rotary_dim=self.head_dim, max_position=max_position_embeddings, base=rope_theta, rope_scaling= rope_scaling) self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling, num_kv_heads=self.num_kv_heads)
def __init__(self, hidden_size: int, num_heads: int, num_kv_heads: int, rope_theta: float=10000, rope_scaling: Optional[Dict[str, Any]]=None, max_position_embeddings: int=8192, linear_method: Optional[ LinearMethodBase]=None) ->None: super().__init__() self.hidden_size = hidden_size tp_size = get_tensor_model_parallel_world_size() self.total_num_heads = num_heads assert self.total_num_heads % tp_size == 0 self.num_heads = self.total_num_heads // tp_size self.total_num_kv_heads = num_kv_heads if self.total_num_kv_heads >= tp_size: assert self.total_num_kv_heads % tp_size == 0 else: assert tp_size % self.total_num_kv_heads == 0 self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) self.head_dim = hidden_size // self.total_num_heads self.q_size = self.num_heads * self.head_dim self.kv_size = self.num_kv_heads * self.head_dim self.scaling = self.head_dim ** -0.5 self.rope_theta = rope_theta self.max_position_embeddings = max_position_embeddings self.qkv_proj = QKVParallelLinear(hidden_size, self.head_dim, self. total_num_heads, self.total_num_kv_heads, bias=False, linear_method =linear_method) self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim, hidden_size, bias=False, linear_method=linear_method) self.rotary_emb = get_rope(self.head_dim, rotary_dim=self.head_dim, max_position=max_position_embeddings, base=rope_theta, rope_scaling =rope_scaling) self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling, num_kv_heads=self.num_kv_heads)
null
forward
return self.act(x) / self.scales
def forward(self, x: torch.Tensor) ->torch.Tensor: return self.act(x) / self.scales
null
get_model_config
"""Gets the model configuration.""" return self.model_config
def get_model_config(self) ->ModelConfig: """Gets the model configuration.""" return self.model_config
Gets the model configuration.
__init__
super().__init__() self.config = config self.linear_method = linear_method self.transformer = PhiModel(config, linear_method) self.lm_head = PhiCausalLMHead(config) self.sampler = Sampler(config.vocab_size)
def __init__(self, config: PretrainedConfig, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.config = config self.linear_method = linear_method self.transformer = PhiModel(config, linear_method) self.lm_head = PhiCausalLMHead(config) self.sampler = Sampler(config.vocab_size)
null
__init__
super().__init__() self.hidden_size = hidden_size tp_size = get_tensor_model_parallel_world_size() self.total_num_heads = num_heads assert self.total_num_heads % tp_size == 0 self.num_heads = self.total_num_heads // tp_size self.total_num_kv_heads = num_kv_heads if self.total_num_kv_heads >= tp_size: assert self.total_num_kv_heads % tp_size == 0 else: assert tp_size % self.total_num_kv_heads == 0 self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) self.head_dim = hidden_size // self.total_num_heads self.q_size = self.num_heads * self.head_dim self.kv_size = self.num_kv_heads * self.head_dim self.scaling = self.head_dim ** -0.5 self.rope_theta = rope_theta self.max_position_embeddings = max_position_embeddings self.qkv_proj = QKVParallelLinear(hidden_size, self.head_dim, self. total_num_heads, self.total_num_kv_heads, bias=False, linear_method= linear_method) self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim, hidden_size, bias=False, linear_method=linear_method) self.rotary_emb = get_rope(self.head_dim, rotary_dim=self.head_dim, max_position=max_position_embeddings, base=self.rope_theta, rope_scaling=rope_scaling) self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling, num_kv_heads=self.num_kv_heads)
def __init__(self, hidden_size: int, num_heads: int, num_kv_heads: int, rope_theta: float=10000, rope_scaling: Optional[Dict[str, Any]]=None, max_position_embeddings: int=8192, linear_method: Optional[ LinearMethodBase]=None) ->None: super().__init__() self.hidden_size = hidden_size tp_size = get_tensor_model_parallel_world_size() self.total_num_heads = num_heads assert self.total_num_heads % tp_size == 0 self.num_heads = self.total_num_heads // tp_size self.total_num_kv_heads = num_kv_heads if self.total_num_kv_heads >= tp_size: assert self.total_num_kv_heads % tp_size == 0 else: assert tp_size % self.total_num_kv_heads == 0 self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) self.head_dim = hidden_size // self.total_num_heads self.q_size = self.num_heads * self.head_dim self.kv_size = self.num_kv_heads * self.head_dim self.scaling = self.head_dim ** -0.5 self.rope_theta = rope_theta self.max_position_embeddings = max_position_embeddings self.qkv_proj = QKVParallelLinear(hidden_size, self.head_dim, self. total_num_heads, self.total_num_kv_heads, bias=False, linear_method =linear_method) self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim, hidden_size, bias=False, linear_method=linear_method) self.rotary_emb = get_rope(self.head_dim, rotary_dim=self.head_dim, max_position=max_position_embeddings, base=self.rope_theta, rope_scaling=rope_scaling) self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling, num_kv_heads=self.num_kv_heads)
null
get_tokenizer
return self.llm_engine.tokenizer
def get_tokenizer(self) ->Union[PreTrainedTokenizer, PreTrainedTokenizerFast]: return self.llm_engine.tokenizer
null
__repr__
return f'PhysicalTokenBlock(device={self.device}, block_number={self.block_number}, ref_count={self.ref_count})'
def __repr__(self) ->str: return ( f'PhysicalTokenBlock(device={self.device}, block_number={self.block_number}, ref_count={self.ref_count})' )
null
generate
"""Generates the completions for the input prompts. NOTE: This class automatically batches the given prompts, considering the memory constraint. For the best performance, put all of your prompts into a single list and pass it to this method. Args: prompts: A list of prompts to generate completions for. sampling_params: The sampling parameters for text generation. If None, we use the default sampling parameters. prompt_token_ids: A list of token IDs for the prompts. If None, we use the tokenizer to convert the prompts to token IDs. use_tqdm: Whether to use tqdm to display the progress bar. Returns: A list of `RequestOutput` objects containing the generated completions in the same order as the input prompts. """ if prompts is None and prompt_token_ids is None: raise ValueError('Either prompts or prompt_token_ids must be provided.') if isinstance(prompts, str): prompts = [prompts] if prompts is not None and prompt_token_ids is not None and len(prompts ) != len(prompt_token_ids): raise ValueError( 'The lengths of prompts and prompt_token_ids must be the same.') if sampling_params is None: sampling_params = SamplingParams() num_requests = len(prompts) if prompts is not None else len(prompt_token_ids) for i in range(num_requests): prompt = prompts[i] if prompts is not None else None token_ids = None if prompt_token_ids is None else prompt_token_ids[i] self._add_request(prompt, sampling_params, token_ids) return self._run_engine(use_tqdm)
def generate(self, prompts: Optional[Union[str, List[str]]]=None, sampling_params: Optional[SamplingParams]=None, prompt_token_ids: Optional[List[List[int]]]=None, use_tqdm: bool=True) ->List[RequestOutput]: """Generates the completions for the input prompts. NOTE: This class automatically batches the given prompts, considering the memory constraint. For the best performance, put all of your prompts into a single list and pass it to this method. Args: prompts: A list of prompts to generate completions for. sampling_params: The sampling parameters for text generation. If None, we use the default sampling parameters. prompt_token_ids: A list of token IDs for the prompts. If None, we use the tokenizer to convert the prompts to token IDs. use_tqdm: Whether to use tqdm to display the progress bar. Returns: A list of `RequestOutput` objects containing the generated completions in the same order as the input prompts. """ if prompts is None and prompt_token_ids is None: raise ValueError('Either prompts or prompt_token_ids must be provided.' ) if isinstance(prompts, str): prompts = [prompts] if prompts is not None and prompt_token_ids is not None and len(prompts ) != len(prompt_token_ids): raise ValueError( 'The lengths of prompts and prompt_token_ids must be the same.') if sampling_params is None: sampling_params = SamplingParams() num_requests = len(prompts) if prompts is not None else len( prompt_token_ids) for i in range(num_requests): prompt = prompts[i] if prompts is not None else None token_ids = None if prompt_token_ids is None else prompt_token_ids[i] self._add_request(prompt, sampling_params, token_ids) return self._run_engine(use_tqdm)
Generates the completions for the input prompts. NOTE: This class automatically batches the given prompts, considering the memory constraint. For the best performance, put all of your prompts into a single list and pass it to this method. Args: prompts: A list of prompts to generate completions for. sampling_params: The sampling parameters for text generation. If None, we use the default sampling parameters. prompt_token_ids: A list of token IDs for the prompts. If None, we use the tokenizer to convert the prompts to token IDs. use_tqdm: Whether to use tqdm to display the progress bar. Returns: A list of `RequestOutput` objects containing the generated completions in the same order as the input prompts.
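A minimal usage sketch for this method (not part of the dataset record): the model id, prompts, and sampling values below are placeholder assumptions.

```python
# Hedged sketch: placeholder model id and prompts; assumes vLLM's public LLM API.
from vllm import LLM, SamplingParams

llm = LLM(model="facebook/opt-125m")  # placeholder model id
params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=32)

prompts = ["Hello, my name is", "The capital of France is"]
# Passing all prompts in one call lets the engine batch them under its memory limits.
outputs = llm.generate(prompts, params)
for out in outputs:
    print(out.prompt, "->", out.outputs[0].text)
```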
apply_weights
weight = weights['weight'] if self.separate_bias_add: if bias: return F.linear(x, weight) + bias return F.linear(x, weight) return F.linear(x, weight, bias)
def apply_weights(self, weights: Dict[str, torch.Tensor], x: torch.Tensor, bias: Optional[torch.Tensor]=None) ->torch.Tensor: weight = weights['weight'] if self.separate_bias_add: if bias: return F.linear(x, weight) + bias return F.linear(x, weight) return F.linear(x, weight, bias)
null
_verify_args
if self.max_num_batched_tokens < self.max_model_len: raise ValueError( f'max_num_batched_tokens ({self.max_num_batched_tokens}) is smaller than max_model_len ({self.max_model_len}). This effectively limits the maximum sequence length to max_num_batched_tokens and makes vLLM reject longer sequences. Please increase max_num_batched_tokens or decrease max_model_len.' ) if self.max_num_batched_tokens < self.max_num_seqs: raise ValueError( f'max_num_batched_tokens ({self.max_num_batched_tokens}) must be greater than or equal to max_num_seqs ({self.max_num_seqs}).' )
def _verify_args(self) ->None: if self.max_num_batched_tokens < self.max_model_len: raise ValueError( f'max_num_batched_tokens ({self.max_num_batched_tokens}) is smaller than max_model_len ({self.max_model_len}). This effectively limits the maximum sequence length to max_num_batched_tokens and makes vLLM reject longer sequences. Please increase max_num_batched_tokens or decrease max_model_len.' ) if self.max_num_batched_tokens < self.max_num_seqs: raise ValueError( f'max_num_batched_tokens ({self.max_num_batched_tokens}) must be greater than or equal to max_num_seqs ({self.max_num_seqs}).' )
null
allocate_gpu_cache
gpu_cache: List[KVCache] = [] key_block_shape = self.get_key_block_shape() value_block_shape = self.get_value_block_shape() for _ in range(self.num_layers): key_blocks = torch.empty(size=(self.num_gpu_blocks, *key_block_shape), dtype=self.dtype, device='cuda') value_blocks = torch.empty(size=(self.num_gpu_blocks, *value_block_shape), dtype=self.dtype, device='cuda') gpu_cache.append((key_blocks, value_blocks)) return gpu_cache
def allocate_gpu_cache(self) ->List[KVCache]: gpu_cache: List[KVCache] = [] key_block_shape = self.get_key_block_shape() value_block_shape = self.get_value_block_shape() for _ in range(self.num_layers): key_blocks = torch.empty(size=(self.num_gpu_blocks, *key_block_shape), dtype=self.dtype, device='cuda') value_blocks = torch.empty(size=(self.num_gpu_blocks, *value_block_shape), dtype=self.dtype, device='cuda') gpu_cache.append((key_blocks, value_blocks)) return gpu_cache
null
forward
residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states = self.self_attn(positions=positions, hidden_states=hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states = self.self_attn(positions=positions, hidden_states=hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states
null
sample
next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
def sample(self, hidden_states: Optional[torch.Tensor], sampling_metadata: SamplingMetadata) ->Optional[SamplerOutput]: next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
null
verify_with_parallel_config
total_num_attention_heads = self.hf_config.num_attention_heads tensor_parallel_size = parallel_config.tensor_parallel_size if total_num_attention_heads % tensor_parallel_size != 0: raise ValueError( f'Total number of attention heads ({total_num_attention_heads}) must be divisible by tensor parallel size ({tensor_parallel_size}).' ) total_num_hidden_layers = self.hf_config.num_hidden_layers pipeline_parallel_size = parallel_config.pipeline_parallel_size if total_num_hidden_layers % pipeline_parallel_size != 0: raise ValueError( f'Total number of hidden layers ({total_num_hidden_layers}) must be divisible by pipeline parallel size ({pipeline_parallel_size}).' )
def verify_with_parallel_config(self, parallel_config: 'ParallelConfig' ) ->None: total_num_attention_heads = self.hf_config.num_attention_heads tensor_parallel_size = parallel_config.tensor_parallel_size if total_num_attention_heads % tensor_parallel_size != 0: raise ValueError( f'Total number of attention heads ({total_num_attention_heads}) must be divisible by tensor parallel size ({tensor_parallel_size}).' ) total_num_hidden_layers = self.hf_config.num_hidden_layers pipeline_parallel_size = parallel_config.pipeline_parallel_size if total_num_hidden_layers % pipeline_parallel_size != 0: raise ValueError( f'Total number of hidden layers ({total_num_hidden_layers}) must be divisible by pipeline parallel size ({pipeline_parallel_size}).' )
null
get_prompt_len
return len(self.prompt_token_ids)
def get_prompt_len(self) ->int: return len(self.prompt_token_ids)
null
get_priority
raise NotImplementedError
def get_priority(self, now: float, seq_group: SequenceGroup) ->float: raise NotImplementedError
null
get_value_block_shape
return self.num_heads, self.head_size, self.block_size
def get_value_block_shape(self) ->Tuple[int, int, int]: return self.num_heads, self.head_size, self.block_size
null
test_request_tracker
tracker = RequestTracker() tracker.new_requests_event = DummyEvent() stream_1 = tracker.add_request('1') assert tracker.new_requests_event.flag new, finished = tracker.get_new_and_finished_requests() assert not tracker.new_requests_event.flag assert len(new) == 1 assert new[0]['request_id'] == '1' assert not finished assert not stream_1.finished stream_2 = tracker.add_request('2') stream_3 = tracker.add_request('3') assert tracker.new_requests_event.flag new, finished = tracker.get_new_and_finished_requests() assert not tracker.new_requests_event.flag assert len(new) == 2 assert new[0]['request_id'] == '2' assert new[1]['request_id'] == '3' assert not finished assert not stream_2.finished assert not stream_3.finished with pytest.raises(KeyError): tracker.add_request('1') assert not tracker.new_requests_event.flag tracker.abort_request('1') new, finished = tracker.get_new_and_finished_requests() assert len(finished) == 1 assert '1' in finished assert not new assert stream_1.finished stream_4 = tracker.add_request('4') tracker.abort_request('4') assert tracker.new_requests_event.flag new, finished = tracker.get_new_and_finished_requests() assert len(finished) == 1 assert '4' in finished assert not new assert stream_4.finished stream_5 = tracker.add_request('5') assert tracker.new_requests_event.flag tracker.process_request_output(RequestOutput('2', 'output', [], [], [], finished=True)) new, finished = tracker.get_new_and_finished_requests() assert not tracker.new_requests_event.flag assert len(finished) == 1 assert '2' in finished assert len(new) == 1 assert new[0]['request_id'] == '5' assert stream_2.finished assert not stream_5.finished
def test_request_tracker(): tracker = RequestTracker() tracker.new_requests_event = DummyEvent() stream_1 = tracker.add_request('1') assert tracker.new_requests_event.flag new, finished = tracker.get_new_and_finished_requests() assert not tracker.new_requests_event.flag assert len(new) == 1 assert new[0]['request_id'] == '1' assert not finished assert not stream_1.finished stream_2 = tracker.add_request('2') stream_3 = tracker.add_request('3') assert tracker.new_requests_event.flag new, finished = tracker.get_new_and_finished_requests() assert not tracker.new_requests_event.flag assert len(new) == 2 assert new[0]['request_id'] == '2' assert new[1]['request_id'] == '3' assert not finished assert not stream_2.finished assert not stream_3.finished with pytest.raises(KeyError): tracker.add_request('1') assert not tracker.new_requests_event.flag tracker.abort_request('1') new, finished = tracker.get_new_and_finished_requests() assert len(finished) == 1 assert '1' in finished assert not new assert stream_1.finished stream_4 = tracker.add_request('4') tracker.abort_request('4') assert tracker.new_requests_event.flag new, finished = tracker.get_new_and_finished_requests() assert len(finished) == 1 assert '4' in finished assert not new assert stream_4.finished stream_5 = tracker.add_request('5') assert tracker.new_requests_event.flag tracker.process_request_output(RequestOutput('2', 'output', [], [], [], finished=True)) new, finished = tracker.get_new_and_finished_requests() assert not tracker.new_requests_event.flag assert len(finished) == 1 assert '2' in finished assert len(new) == 1 assert new[0]['request_id'] == '5' assert stream_2.finished assert not stream_5.finished
null
get_tensor_model_parallel_src_rank
"""Calculate the global rank corresponding to the first local rank in the tensor model parallel group.""" global_rank = torch.distributed.get_rank() local_world_size = get_tensor_model_parallel_world_size() return global_rank // local_world_size * local_world_size
def get_tensor_model_parallel_src_rank(): """Calculate the global rank corresponding to the first local rank in the tensor model parallel group.""" global_rank = torch.distributed.get_rank() local_world_size = get_tensor_model_parallel_world_size() return global_rank // local_world_size * local_world_size
Calculate the global rank corresponding to the first local rank in the tensor model parallel group.
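A standalone sketch of the arithmetic in this helper (the distributed calls are omitted); the ranks and world size below are hypothetical.

```python
# Pure-Python sketch of the source-rank arithmetic; no torch.distributed involved.
def tp_src_rank(global_rank: int, tp_world_size: int) -> int:
    # First global rank of the tensor-parallel group containing global_rank.
    return global_rank // tp_world_size * tp_world_size

assert tp_src_rank(6, 4) == 4  # ranks 4-7 share source rank 4
assert tp_src_rank(3, 4) == 0  # ranks 0-3 share source rank 0
```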
sort_by_priority
return sorted(seq_groups, key=lambda seq_group: self.get_priority(now, seq_group), reverse=True)
def sort_by_priority(self, now: float, seq_groups: List[SequenceGroup]) ->List[SequenceGroup]: return sorted(seq_groups, key=lambda seq_group: self.get_priority(now, seq_group), reverse=True)
null
sample
next_tokens = self.sampler(self.embed_out.weight, hidden_states, sampling_metadata) return next_tokens
def sample(self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) ->Optional[SamplerOutput]: next_tokens = self.sampler(self.embed_out.weight, hidden_states, sampling_metadata) return next_tokens
null
vocab_range_from_global_vocab_size
per_partition_vocab_size = divide(global_vocab_size, world_size) return vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, rank)
def vocab_range_from_global_vocab_size(global_vocab_size: int, rank: int, world_size: int) ->Sequence[int]: per_partition_vocab_size = divide(global_vocab_size, world_size) return vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, rank)
null
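A sketch of the partitioning this wrapper expresses, assuming the vocabulary divides evenly across ranks (which is what `divide` enforces); the sizes below are hypothetical.

```python
# Sketch: contiguous [start, end) vocab slice owned by each tensor-parallel rank.
def vocab_range(global_vocab_size: int, rank: int, world_size: int) -> tuple:
    assert global_vocab_size % world_size == 0  # mirrors the exact-division requirement
    per_partition = global_vocab_size // world_size
    start = per_partition * rank
    return start, start + per_partition

assert vocab_range(32000, 0, 4) == (0, 8000)
assert vocab_range(32000, 3, 4) == (24000, 32000)
```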
__init__
super().__init__() self.config = config self.linear_method = linear_method self.model = MixtralModel(config, linear_method) self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size) self.sampler = Sampler(config.vocab_size)
def __init__(self, config: MixtralConfig, linear_method: Optional[LinearMethodBase]=None) ->None: super().__init__() self.config = config self.linear_method = linear_method self.model = MixtralModel(config, linear_method) self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size) self.sampler = Sampler(config.vocab_size)
null
__init__
self.block_size = block_size self.num_total_gpu_blocks = num_gpu_blocks self.num_total_cpu_blocks = num_cpu_blocks self.block_sliding_window = None if sliding_window is not None: assert sliding_window % block_size == 0, (sliding_window, block_size) self.block_sliding_window = sliding_window // block_size self.watermark = watermark assert watermark >= 0.0 self.watermark_blocks = int(watermark * num_gpu_blocks) self.gpu_allocator = BlockAllocator(Device.GPU, block_size, num_gpu_blocks) self.cpu_allocator = BlockAllocator(Device.CPU, block_size, num_cpu_blocks) self.block_tables: Dict[int, BlockTable] = {}
def __init__(self, block_size: int, num_gpu_blocks: int, num_cpu_blocks: int, watermark: float=0.01, sliding_window: Optional[int]=None) ->None: self.block_size = block_size self.num_total_gpu_blocks = num_gpu_blocks self.num_total_cpu_blocks = num_cpu_blocks self.block_sliding_window = None if sliding_window is not None: assert sliding_window % block_size == 0, (sliding_window, block_size) self.block_sliding_window = sliding_window // block_size self.watermark = watermark assert watermark >= 0.0 self.watermark_blocks = int(watermark * num_gpu_blocks) self.gpu_allocator = BlockAllocator(Device.GPU, block_size, num_gpu_blocks) self.cpu_allocator = BlockAllocator(Device.CPU, block_size, num_cpu_blocks) self.block_tables: Dict[int, BlockTable] = {}
null
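A short numeric illustration of the derived fields in this constructor; the block count, watermark, and sliding window below are hypothetical values.

```python
# Hypothetical sizes to illustrate the derived quantities.
block_size = 16
num_gpu_blocks = 1024
watermark = 0.01
sliding_window = 4096

watermark_blocks = int(watermark * num_gpu_blocks)    # 10 blocks kept free as headroom
block_sliding_window = sliding_window // block_size   # 256 blocks cover one window
assert sliding_window % block_size == 0               # same precondition as the assert above
print(watermark_blocks, block_sliding_window)         # 10 256
```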
_get_logprobs
batched_logprobs_query_seq_indices: List[int] = [] batched_logprobs_query_token_indices: List[int] = [] largest_num_logprobs = 0 sample_idx = 0 for i, (seq_group, sample_result) in enumerate(zip(sampling_metadata. seq_groups, sample_results)): seq_ids, sampling_params = seq_group next_token_ids, parent_ids = sample_result num_parent_seqs = len(seq_ids) if (i < sampling_metadata.num_prompts and sampling_params. prompt_logprobs is not None): largest_num_logprobs = max(largest_num_logprobs, sampling_params. prompt_logprobs) prompt_len = sampling_metadata.prompt_lens[i] prompt_tokens = sampling_metadata.seq_data[seq_ids[0]].prompt_token_ids batched_logprobs_query_seq_indices.extend(sample_idx + j for j in range(prompt_len - 1)) batched_logprobs_query_token_indices.extend(token_id for token_id in prompt_tokens[1:]) sample_idx += prompt_len - 1 batched_logprobs_query_seq_indices.extend([(sample_idx + parent_id) for parent_id in parent_ids]) batched_logprobs_query_token_indices.extend(next_token_ids) if sampling_params.logprobs is not None: largest_num_logprobs = max(largest_num_logprobs, sampling_params. logprobs) sample_idx += num_parent_seqs assert sample_idx == logprobs.size(0) batched_logprobs_query_result = logprobs[[ batched_logprobs_query_seq_indices, batched_logprobs_query_token_indices]] if largest_num_logprobs > 0: top_logprobs, top_token_ids = torch.topk(logprobs, largest_num_logprobs, dim=-1) top_logprobs = top_logprobs.cpu() top_token_ids = top_token_ids.cpu() else: top_logprobs, top_token_ids = None, None batched_logprobs_query_result = batched_logprobs_query_result.cpu() result_prompt_logprobs: List[Optional[PromptLogprobs]] = [] result_sample_logprobs: List[SampleLogprobs] = [] sample_idx = 0 query_result_idx = 0 for i, (seq_group, sample_result) in enumerate(zip(sampling_metadata. seq_groups, sample_results)): seq_ids, sampling_params = seq_group next_token_ids, parent_ids = sample_result if (i < sampling_metadata.num_prompts and sampling_params. prompt_logprobs is not None): num_logprobs = sampling_params.prompt_logprobs prompt_len = sampling_metadata.prompt_lens[i] prompt_tokens = sampling_metadata.seq_data[seq_ids[0]].prompt_token_ids group_prompt_logprobs: PromptLogprobs = [None] for token_id in prompt_tokens[1:]: prompt_logprobs_dict = {token_id: batched_logprobs_query_result [query_result_idx].item()} if num_logprobs > 0: prompt_logprobs_dict.update(zip(top_token_ids[sample_idx, : num_logprobs].tolist(), top_logprobs[sample_idx, : num_logprobs].tolist())) group_prompt_logprobs.append(prompt_logprobs_dict) sample_idx += 1 query_result_idx += 1 result_prompt_logprobs.append(group_prompt_logprobs) else: result_prompt_logprobs.append(None) num_logprobs = sampling_params.logprobs if num_logprobs is None: num_logprobs = 0 group_sample_logprobs: SampleLogprobs = [] for next_token_id, parent_id in zip(next_token_ids, parent_ids): sample_logprobs_dict = {next_token_id: batched_logprobs_query_result[query_result_idx].item()} query_result_idx += 1 if num_logprobs > 0: sample_logprobs_dict.update(zip(top_token_ids[sample_idx + parent_id, :num_logprobs].tolist(), top_logprobs[sample_idx + parent_id, :num_logprobs].tolist())) group_sample_logprobs.append(sample_logprobs_dict) result_sample_logprobs.append(group_sample_logprobs) sample_idx += len(seq_ids) return result_prompt_logprobs, result_sample_logprobs
def _get_logprobs(logprobs: torch.Tensor, sampling_metadata: SamplingMetadata, sample_results: List[Tuple[List[int], List[int]]]) ->Tuple[List[Optional[List[Optional[Dict[int, float]]]]], List[List[Dict[int, float]]]]: batched_logprobs_query_seq_indices: List[int] = [] batched_logprobs_query_token_indices: List[int] = [] largest_num_logprobs = 0 sample_idx = 0 for i, (seq_group, sample_result) in enumerate(zip(sampling_metadata.seq_groups, sample_results)): seq_ids, sampling_params = seq_group next_token_ids, parent_ids = sample_result num_parent_seqs = len(seq_ids) if (i < sampling_metadata.num_prompts and sampling_params.prompt_logprobs is not None): largest_num_logprobs = max(largest_num_logprobs, sampling_params.prompt_logprobs) prompt_len = sampling_metadata.prompt_lens[i] prompt_tokens = sampling_metadata.seq_data[seq_ids[0]].prompt_token_ids batched_logprobs_query_seq_indices.extend(sample_idx + j for j in range(prompt_len - 1)) batched_logprobs_query_token_indices.extend(token_id for token_id in prompt_tokens[1:]) sample_idx += prompt_len - 1 batched_logprobs_query_seq_indices.extend([(sample_idx + parent_id) for parent_id in parent_ids]) batched_logprobs_query_token_indices.extend(next_token_ids) if sampling_params.logprobs is not None: largest_num_logprobs = max(largest_num_logprobs, sampling_params.logprobs) sample_idx += num_parent_seqs assert sample_idx == logprobs.size(0) batched_logprobs_query_result = logprobs[[batched_logprobs_query_seq_indices, batched_logprobs_query_token_indices]] if largest_num_logprobs > 0: top_logprobs, top_token_ids = torch.topk(logprobs, largest_num_logprobs, dim=-1) top_logprobs = top_logprobs.cpu() top_token_ids = top_token_ids.cpu() else: top_logprobs, top_token_ids = None, None batched_logprobs_query_result = batched_logprobs_query_result.cpu() result_prompt_logprobs: List[Optional[PromptLogprobs]] = [] result_sample_logprobs: List[SampleLogprobs] = [] sample_idx = 0 query_result_idx = 0 for i, (seq_group, sample_result) in enumerate(zip(sampling_metadata.seq_groups, sample_results)): seq_ids, sampling_params = seq_group next_token_ids, parent_ids = sample_result if (i < sampling_metadata.num_prompts and sampling_params.prompt_logprobs is not None): num_logprobs = sampling_params.prompt_logprobs prompt_len = sampling_metadata.prompt_lens[i] prompt_tokens = sampling_metadata.seq_data[seq_ids[0]].prompt_token_ids group_prompt_logprobs: PromptLogprobs = [None] for token_id in prompt_tokens[1:]: prompt_logprobs_dict = {token_id: batched_logprobs_query_result[query_result_idx].item()} if num_logprobs > 0: prompt_logprobs_dict.update(zip(top_token_ids[sample_idx, :num_logprobs].tolist(), top_logprobs[sample_idx, :num_logprobs].tolist())) group_prompt_logprobs.append(prompt_logprobs_dict) sample_idx += 1 query_result_idx += 1 result_prompt_logprobs.append(group_prompt_logprobs) else: result_prompt_logprobs.append(None) num_logprobs = sampling_params.logprobs if num_logprobs is None: num_logprobs = 0 group_sample_logprobs: SampleLogprobs = [] for next_token_id, parent_id in zip(next_token_ids, parent_ids): sample_logprobs_dict = {next_token_id: batched_logprobs_query_result[query_result_idx].item()} query_result_idx += 1 if num_logprobs > 0: sample_logprobs_dict.update(zip(top_token_ids[sample_idx + parent_id, :num_logprobs].tolist(), top_logprobs[sample_idx + parent_id, :num_logprobs].tolist())) group_sample_logprobs.append(sample_logprobs_dict) result_sample_logprobs.append(group_sample_logprobs) sample_idx += len(seq_ids) return result_prompt_logprobs, result_sample_logprobs
null
create_token_type_ids_from_sequences
""" Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` if token_ids_1 is None, only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of ids. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = [0] * len(bos_token_id + token_ids_0 + eos_token_id) if token_ids_1 is not None: output += [1] * len(bos_token_id + token_ids_1 + eos_token_id) return output
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) ->List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` if token_ids_1 is None, only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of ids. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = [0] * len(bos_token_id + token_ids_0 + eos_token_id) if token_ids_1 is not None: output += [1] * len(bos_token_id + token_ids_1 + eos_token_id) return output
Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` if token_ids_1 is None, only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of ids. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
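A standalone sketch of the mask construction described above, with made-up token IDs and both BOS and EOS enabled; it mirrors the logic rather than calling the tokenizer.

```python
# Made-up ids; assumes add_bos_token and add_eos_token are both True.
bos, eos = [1], [2]
token_ids_0 = [10, 11, 12]  # first sequence
token_ids_1 = [20, 21]      # second sequence

mask = [0] * len(bos + token_ids_0 + eos)   # 0s cover BOS + first sequence + EOS
mask += [1] * len(bos + token_ids_1 + eos)  # 1s cover BOS + second sequence + EOS
assert mask == [0, 0, 0, 0, 0, 1, 1, 1, 1]
```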
forward
qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) q, k = self.rotary_emb(positions, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.o_proj(attn_output) return output
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) q, k = self.rotary_emb(positions, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.o_proj(attn_output) return output
null
__init__
config.num_key_value_heads = max(config.num_key_value_heads_per_layer) delattr(config, 'num_key_value_heads_per_layer') super().__init__(config=config, linear_method=linear_method)
def __init__(self, config: Optional[PretrainedConfig]=None, linear_method: Optional[LinearMethodBase]=None) ->None: config.num_key_value_heads = max(config.num_key_value_heads_per_layer) delattr(config, 'num_key_value_heads_per_layer') super().__init__(config=config, linear_method=linear_method)
null
get_token_ids
return self.prompt_token_ids + self.output_token_ids
def get_token_ids(self) ->List[int]: return self.prompt_token_ids + self.output_token_ids
null
forward
inputs_embeds = self.embedding(input_ids) hidden_states = self.encoder(hidden_states=inputs_embeds, position_ids=position_ids, kv_caches=kv_caches, input_metadata=input_metadata) return hidden_states
def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: inputs_embeds = self.embedding(input_ids) hidden_states = self.encoder(hidden_states=inputs_embeds, position_ids=position_ids, kv_caches=kv_caches, input_metadata=input_metadata) return hidden_states
null
generate_greedy_logprobs
all_logprobs = [] for prompt in prompts: input_ids = self.tokenizer(prompt, return_tensors='pt').input_ids output = self.model.generate(input_ids.cuda(), use_cache=True, do_sample=False, max_new_tokens=max_tokens, output_hidden_states= True, return_dict_in_generate=True) seq_logprobs = [] for hidden_states in output.hidden_states: last_hidden_states = hidden_states[-1][0] logits = torch.matmul(last_hidden_states, self.model. get_output_embeddings().weight.t()) if self.model.get_output_embeddings().bias is not None: logits += self.model.get_output_embeddings().bias.unsqueeze(0) logprobs = torch.nn.functional.log_softmax(logits, dim=-1, dtype= torch.float32) seq_logprobs.append(logprobs) all_logprobs.append(seq_logprobs) return all_logprobs
def generate_greedy_logprobs(self, prompts: List[str], max_tokens: int) ->List[ List[torch.Tensor]]: all_logprobs = [] for prompt in prompts: input_ids = self.tokenizer(prompt, return_tensors='pt').input_ids output = self.model.generate(input_ids.cuda(), use_cache=True, do_sample=False, max_new_tokens=max_tokens, output_hidden_states=True, return_dict_in_generate=True) seq_logprobs = [] for hidden_states in output.hidden_states: last_hidden_states = hidden_states[-1][0] logits = torch.matmul(last_hidden_states, self.model. get_output_embeddings().weight.t()) if self.model.get_output_embeddings().bias is not None: logits += self.model.get_output_embeddings().bias.unsqueeze(0) logprobs = torch.nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32) seq_logprobs.append(logprobs) all_logprobs.append(seq_logprobs) return all_logprobs
null
forward
hidden_states = self.wte(input_ids) residual = None for i in range(len(self.h)): layer = self.h[i] hidden_states, residual = layer(positions, hidden_states, kv_caches[i], input_metadata, residual) hidden_states, _ = self.ln_f(hidden_states, residual) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.wte(input_ids) residual = None for i in range(len(self.h)): layer = self.h[i] hidden_states, residual = layer(positions, hidden_states, kv_caches[i], input_metadata, residual) hidden_states, _ = self.ln_f(hidden_states, residual) return hidden_states
null
_get_model_architecture
architectures = getattr(config, 'architectures', []) for arch in architectures: model_cls = ModelRegistry.load_model_cls(arch) if model_cls is not None: return model_cls raise ValueError( f'Model architectures {architectures} are not supported for now. Supported architectures: {ModelRegistry.get_supported_archs()}' )
def _get_model_architecture(config: PretrainedConfig) ->Type[nn.Module]: architectures = getattr(config, 'architectures', []) for arch in architectures: model_cls = ModelRegistry.load_model_cls(arch) if model_cls is not None: return model_cls raise ValueError( f'Model architectures {architectures} are not supported for now. Supported architectures: {ModelRegistry.get_supported_archs()}' )
null
free
if seq.seq_id not in self.block_tables: return block_table = self.block_tables[seq.seq_id] self._free_block_table(block_table) del self.block_tables[seq.seq_id]
def free(self, seq: Sequence) ->None: if seq.seq_id not in self.block_tables: return block_table = self.block_tables[seq.seq_id] self._free_block_table(block_table) del self.block_tables[seq.seq_id]
null
build_inputs_with_special_tokens
bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output
null
_run_engine
if use_tqdm: num_requests = self.llm_engine.get_num_unfinished_requests() pbar = tqdm(total=num_requests, desc='Processed prompts') outputs: List[RequestOutput] = [] while self.llm_engine.has_unfinished_requests(): step_outputs = self.llm_engine.step() for output in step_outputs: if output.finished: outputs.append(output) if use_tqdm: pbar.update(1) if use_tqdm: pbar.close() outputs = sorted(outputs, key=lambda x: int(x.request_id)) return outputs
def _run_engine(self, use_tqdm: bool) ->List[RequestOutput]: if use_tqdm: num_requests = self.llm_engine.get_num_unfinished_requests() pbar = tqdm(total=num_requests, desc='Processed prompts') outputs: List[RequestOutput] = [] while self.llm_engine.has_unfinished_requests(): step_outputs = self.llm_engine.step() for output in step_outputs: if output.finished: outputs.append(output) if use_tqdm: pbar.update(1) if use_tqdm: pbar.close() outputs = sorted(outputs, key=lambda x: int(x.request_id)) return outputs
null
__init__
super().__init__() hidden_size = config.hidden_size inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = GPT2Attention(config, linear_method) self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = GPT2MLP(inner_dim, config, linear_method)
def __init__(self, config: GPT2Config, linear_method: Optional[ LinearMethodBase]=None): super().__init__() hidden_size = config.hidden_size inner_dim = (config.n_inner if config.n_inner is not None else 4 * hidden_size) self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = GPT2Attention(config, linear_method) self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = GPT2MLP(inner_dim, config, linear_method)
null
forward
batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_dim) router_logits, _ = self.gate(hidden_states) routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) routing_weights /= routing_weights.sum(dim=-1, keepdim=True) final_hidden_states = None for expert_idx in self.expert_indicies: expert_layer = self.experts[expert_idx] expert_mask = selected_experts == expert_idx expert_weights = (routing_weights * expert_mask).sum(dim=-1, keepdim=True) current_hidden_states = expert_layer(hidden_states).mul_(expert_weights) if final_hidden_states is None: final_hidden_states = current_hidden_states else: final_hidden_states.add_(current_hidden_states) return tensor_model_parallel_all_reduce(final_hidden_states).view(batch_size, sequence_length, hidden_dim)
def forward(self, hidden_states: torch.Tensor) ->torch.Tensor: batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_dim) router_logits, _ = self.gate(hidden_states) routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) routing_weights /= routing_weights.sum(dim=-1, keepdim=True) final_hidden_states = None for expert_idx in self.expert_indicies: expert_layer = self.experts[expert_idx] expert_mask = selected_experts == expert_idx expert_weights = (routing_weights * expert_mask).sum(dim=-1, keepdim=True) current_hidden_states = expert_layer(hidden_states).mul_(expert_weights) if final_hidden_states is None: final_hidden_states = current_hidden_states else: final_hidden_states.add_(current_hidden_states) return tensor_model_parallel_all_reduce(final_hidden_states).view(batch_size, sequence_length, hidden_dim)
null
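A self-contained sketch of the top-k routing step in this forward pass, on a tiny random tensor with hypothetical sizes; the expert layers and the tensor-parallel all-reduce are left out.

```python
# Tiny routing-only sketch (hypothetical sizes); experts and all-reduce omitted.
import torch
import torch.nn.functional as F

num_tokens, num_experts, top_k = 3, 8, 2
router_logits = torch.randn(num_tokens, num_experts)

routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
routing_weights, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
routing_weights /= routing_weights.sum(dim=-1, keepdim=True)  # renormalize over the chosen experts

# Each token ends up with top_k expert indices and weights summing to 1.
assert selected_experts.shape == (num_tokens, top_k)
assert torch.allclose(routing_weights.sum(dim=-1), torch.ones(num_tokens))
```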
__init__
super().__init__() self.config = config assert not config.add_cross_attention assert not config.scale_attn_by_inverse_layer_idx assert not config.reorder_and_upcast_attn self.embed_dim = config.hidden_size self.wte = VocabParallelEmbedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.h = nn.ModuleList([GPT2Block(config, linear_method) for _ in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
def __init__(self, config: GPT2Config, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.config = config assert not config.add_cross_attention assert not config.scale_attn_by_inverse_layer_idx assert not config.reorder_and_upcast_attn self.embed_dim = config.hidden_size self.wte = VocabParallelEmbedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.h = nn.ModuleList([GPT2Block(config, linear_method) for _ in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
null
forward
if residual is None: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) else: hidden_states, residual = self.input_layernorm(hidden_states, residual) hidden_states = self.self_attn(positions=positions, hidden_states=hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states, residual = self.post_attention_layernorm(hidden_states, residual) hidden_states = self.block_sparse_moe(hidden_states) return hidden_states, residual
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata, residual: Optional[torch.Tensor]) ->torch.Tensor: if residual is None: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) else: hidden_states, residual = self.input_layernorm(hidden_states, residual) hidden_states = self.self_attn(positions=positions, hidden_states=hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states, residual = self.post_attention_layernorm(hidden_states, residual) hidden_states = self.block_sparse_moe(hidden_states) return hidden_states, residual
null
_verify_args
if self.pipeline_parallel_size > 1: raise NotImplementedError('Pipeline parallelism is not supported yet.')
def _verify_args(self) ->None: if self.pipeline_parallel_size > 1: raise NotImplementedError('Pipeline parallelism is not supported yet.')
null
load_chat_template
if args.chat_template is not None: try: with open(args.chat_template, 'r') as f: chat_template = f.read() except OSError: chat_template = codecs.decode(args.chat_template, 'unicode_escape') tokenizer.chat_template = chat_template logger.info(f'Using supplied chat template:\n{tokenizer.chat_template}') elif tokenizer.chat_template is not None: logger.info(f'Using default chat template:\n{tokenizer.chat_template}') else: logger.warning('No chat template provided. Chat API will not work.')
def load_chat_template(args, tokenizer): if args.chat_template is not None: try: with open(args.chat_template, 'r') as f: chat_template = f.read() except OSError: chat_template = codecs.decode(args.chat_template, 'unicode_escape') tokenizer.chat_template = chat_template logger.info(f'Using supplied chat template:\n{tokenizer.chat_template}') elif tokenizer.chat_template is not None: logger.info(f'Using default chat template:\n{tokenizer.chat_template}') else: logger.warning('No chat template provided. Chat API will not work.')
null
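A small sketch of the fallback branch above: when the argument is not a readable file path, it is decoded as an inline template whose escaped newlines become real ones. The template string here is made up.

```python
# Made-up inline template; shows only the unicode_escape fallback used above.
import codecs

inline_arg = "{{ messages[0]['content'] }}\\n"      # as it might arrive from a CLI flag
chat_template = codecs.decode(inline_arg, "unicode_escape")
assert chat_template.endswith("\n")                 # the literal backslash-n became a newline
```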
__init__
super().__init__() self.hidden_size = config.hidden_size tp_size = get_tensor_model_parallel_world_size() self.total_num_heads = config.num_attention_heads assert self.total_num_heads % tp_size == 0 self.num_heads = self.total_num_heads // tp_size self.multi_query_attention = config.multi_query_attention self.total_num_kv_heads = (config.multi_query_group_num if config. multi_query_attention else config.num_attention_heads) if self.total_num_kv_heads >= tp_size: assert self.total_num_kv_heads % tp_size == 0 else: assert tp_size % self.total_num_kv_heads == 0 self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) self.head_dim = config.hidden_size // self.total_num_heads self.q_size = self.num_heads * self.head_dim self.kv_size = self.num_kv_heads * self.head_dim self.scaling = self.head_dim ** -0.5 self.query_key_value = QKVParallelLinear(self.hidden_size, self.head_dim, self.total_num_heads, self.total_num_kv_heads, bias=config. add_bias_linear or config.add_qkv_bias, linear_method=linear_method) self.dense = RowParallelLinear(self.total_num_heads * self.head_dim, config .hidden_size, bias=config.add_bias_linear, linear_method=linear_method) rope_ratio = getattr(config, 'rope_ratio', 1.0) max_positions = getattr(config, 'seq_length', 8192) self.rotary_emb = get_rope(self.head_dim, rotary_dim=self.head_dim // 2, max_position=max_positions, base=10000 * rope_ratio, is_neox_style=False) self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling, num_kv_heads=self.num_kv_heads)
def __init__(self, config, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.hidden_size = config.hidden_size tp_size = get_tensor_model_parallel_world_size() self.total_num_heads = config.num_attention_heads assert self.total_num_heads % tp_size == 0 self.num_heads = self.total_num_heads // tp_size self.multi_query_attention = config.multi_query_attention self.total_num_kv_heads = (config.multi_query_group_num if config. multi_query_attention else config.num_attention_heads) if self.total_num_kv_heads >= tp_size: assert self.total_num_kv_heads % tp_size == 0 else: assert tp_size % self.total_num_kv_heads == 0 self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) self.head_dim = config.hidden_size // self.total_num_heads self.q_size = self.num_heads * self.head_dim self.kv_size = self.num_kv_heads * self.head_dim self.scaling = self.head_dim ** -0.5 self.query_key_value = QKVParallelLinear(self.hidden_size, self. head_dim, self.total_num_heads, self.total_num_kv_heads, bias= config.add_bias_linear or config.add_qkv_bias, linear_method= linear_method) self.dense = RowParallelLinear(self.total_num_heads * self.head_dim, config.hidden_size, bias=config.add_bias_linear, linear_method= linear_method) rope_ratio = getattr(config, 'rope_ratio', 1.0) max_positions = getattr(config, 'seq_length', 8192) self.rotary_emb = get_rope(self.head_dim, rotary_dim=self.head_dim // 2, max_position=max_positions, base=10000 * rope_ratio, is_neox_style= False) self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling, num_kv_heads=self.num_kv_heads)
null
can_append_slot
num_free_gpu_blocks = self.gpu_allocator.get_num_free_blocks() num_seqs = seq_group.num_seqs(status=SequenceStatus.RUNNING) return num_seqs <= num_free_gpu_blocks
def can_append_slot(self, seq_group: SequenceGroup) ->bool: num_free_gpu_blocks = self.gpu_allocator.get_num_free_blocks() num_seqs = seq_group.num_seqs(status=SequenceStatus.RUNNING) return num_seqs <= num_free_gpu_blocks
null
vocab_size
"""Returns vocab size""" return self.sp_model.get_piece_size()
@property def vocab_size(self): """Returns vocab size""" return self.sp_model.get_piece_size()
Returns vocab size
load_weights
stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v'), ('gate_up_proj', 'gate_proj', 0), ('gate_up_proj', 'up_proj', 1)] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'rotary_emb.inv_freq' in name: continue if 'k_proj' in name or 'v_proj' in name: loaded_weight = self._degroup_weight(loaded_weight) for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]= None, load_format: str='auto', revision: Optional[str]=None): stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v'), ('gate_up_proj', 'gate_proj', 0), ('gate_up_proj', 'up_proj', 1)] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'rotary_emb.inv_freq' in name: continue if 'k_proj' in name or 'v_proj' in name: loaded_weight = self._degroup_weight(loaded_weight) for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
null
_verify_load_format
load_format = self.load_format.lower() supported_load_format = ['auto', 'pt', 'safetensors', 'npcache', 'dummy'] rocm_not_supported_load_format = [] if load_format not in supported_load_format: raise ValueError( f"Unknown load format: {self.load_format}. Must be one of 'auto', 'pt', 'safetensors', 'npcache', or 'dummy'." ) if is_hip() and load_format in rocm_not_supported_load_format: rocm_supported_load_format = [f for f in supported_load_format if f not in rocm_not_supported_load_format] raise ValueError( f"load format '{load_format}' is not supported in ROCm. Supported load format are {rocm_supported_load_format}" ) architectures = getattr(self.hf_config, 'architectures', []) if 'MixtralForCausalLM' in architectures and load_format == 'pt': raise ValueError( "Currently, the 'pt' format is not supported for Mixtral. Please use the 'safetensors' format instead. " ) self.load_format = load_format
def _verify_load_format(self) ->None: load_format = self.load_format.lower() supported_load_format = ['auto', 'pt', 'safetensors', 'npcache', 'dummy'] rocm_not_supported_load_format = [] if load_format not in supported_load_format: raise ValueError( f"Unknown load format: {self.load_format}. Must be one of 'auto', 'pt', 'safetensors', 'npcache', or 'dummy'." ) if is_hip() and load_format in rocm_not_supported_load_format: rocm_supported_load_format = [f for f in supported_load_format if f not in rocm_not_supported_load_format] raise ValueError( f"load format '{load_format}' is not supported in ROCm. Supported load format are {rocm_supported_load_format}" ) architectures = getattr(self.hf_config, 'architectures', []) if 'MixtralForCausalLM' in architectures and load_format == 'pt': raise ValueError( "Currently, the 'pt' format is not supported for Mixtral. Please use the 'safetensors' format instead. " ) self.load_format = load_format
null
__init__
super().__init__() self.config = config self.vocab_size = config.vocab_size self.wte = VocabParallelEmbedding(config.vocab_size, config.hidden_size) self.h = nn.ModuleList([QWenBlock(config, linear_method) for _ in range(config.num_hidden_layers)]) self.ln_f = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
def __init__(self, config: QWenConfig, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.config = config self.vocab_size = config.vocab_size self.wte = VocabParallelEmbedding(config.vocab_size, config.hidden_size) self.h = nn.ModuleList([QWenBlock(config, linear_method) for _ in range(config.num_hidden_layers)]) self.ln_f = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
null
test_decode_streaming
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id) all_input_ids = tokenizer(truth, add_special_tokens=False)['input_ids'] if skip_special_tokens: all_input_ids = ([tokenizer.bos_token_id] if tokenizer.bos_token_id is not None else []) + all_input_ids + [tokenizer.eos_token_id] decoded_text = _run_incremental_decode(tokenizer, all_input_ids, skip_special_tokens=skip_special_tokens) assert decoded_text == truth
@pytest.mark.parametrize('truth', TRUTH) @pytest.mark.parametrize('tokenizer_id', TOKENIZERS) @pytest.mark.parametrize('skip_special_tokens', (True, False)) def test_decode_streaming(tokenizer_id, truth, skip_special_tokens): tokenizer = AutoTokenizer.from_pretrained(tokenizer_id) all_input_ids = tokenizer(truth, add_special_tokens=False)['input_ids'] if skip_special_tokens: all_input_ids = ([tokenizer.bos_token_id] if tokenizer.bos_token_id is not None else []) + all_input_ids + [tokenizer.eos_token_id] decoded_text = _run_incremental_decode(tokenizer, all_input_ids, skip_special_tokens=skip_special_tokens) assert decoded_text == truth
null
load_weights
stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v')] params_dict = dict(self.named_parameters(remove_duplicate=False)) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'lm_head.weight' in name: continue if name.startswith('decoder.'): name = 'model.' + name for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]= None, load_format: str='auto', revision: Optional[str]=None): stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v')] params_dict = dict(self.named_parameters(remove_duplicate=False)) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'lm_head.weight' in name: continue if name.startswith('decoder.'): name = 'model.' + name for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
null
__init__
super().__init__() self.hidden_size = hidden_size tp_size = get_tensor_model_parallel_world_size() self.total_num_heads = num_heads assert self.total_num_heads % tp_size == 0 self.num_heads = self.total_num_heads // tp_size self.total_num_kv_heads = num_kv_heads if self.total_num_kv_heads >= tp_size: assert self.total_num_kv_heads % tp_size == 0 else: assert tp_size % self.total_num_kv_heads == 0 self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) self.head_dim = hidden_size // self.total_num_heads self.q_size = self.num_heads * self.head_dim self.kv_size = self.num_kv_heads * self.head_dim self.scaling = self.head_dim ** -0.5 self.rope_theta = rope_theta self.sliding_window = sliding_window self.qkv_proj = QKVParallelLinear(hidden_size, self.head_dim, self. total_num_heads, self.total_num_kv_heads, bias=False, linear_method= linear_method) self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim, hidden_size, bias=False, linear_method=linear_method) self.rotary_emb = get_rope(self.head_dim, rotary_dim=self.head_dim, max_position=max_position, base=int(self.rope_theta), is_neox_style=True) self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling, num_kv_heads=self.num_kv_heads, sliding_window=self.sliding_window)
def __init__(self, hidden_size: int, num_heads: int, num_kv_heads: int, max_position: int=4096 * 32, rope_theta: float=10000, linear_method: Optional[LinearMethodBase]=None, sliding_window: Optional[int]=None ) ->None: super().__init__() self.hidden_size = hidden_size tp_size = get_tensor_model_parallel_world_size() self.total_num_heads = num_heads assert self.total_num_heads % tp_size == 0 self.num_heads = self.total_num_heads // tp_size self.total_num_kv_heads = num_kv_heads if self.total_num_kv_heads >= tp_size: assert self.total_num_kv_heads % tp_size == 0 else: assert tp_size % self.total_num_kv_heads == 0 self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) self.head_dim = hidden_size // self.total_num_heads self.q_size = self.num_heads * self.head_dim self.kv_size = self.num_kv_heads * self.head_dim self.scaling = self.head_dim ** -0.5 self.rope_theta = rope_theta self.sliding_window = sliding_window self.qkv_proj = QKVParallelLinear(hidden_size, self.head_dim, self. total_num_heads, self.total_num_kv_heads, bias=False, linear_method =linear_method) self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim, hidden_size, bias=False, linear_method=linear_method) self.rotary_emb = get_rope(self.head_dim, rotary_dim=self.head_dim, max_position=max_position, base=int(self.rope_theta), is_neox_style =True) self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling, num_kv_heads=self.num_kv_heads, sliding_window=self.sliding_window)
null
_convert_tokens_to_string_with_added_encoders
sub_texts = [] current_sub_text = [] all_special_tokens = set(tokenizer.all_special_tokens) for token in output_tokens: if skip_special_tokens and token in all_special_tokens: continue if token in tokenizer.get_added_vocab(): if current_sub_text: sub_text = tokenizer.convert_tokens_to_string(current_sub_text) sub_texts.append(sub_text) current_sub_text = [] sub_texts.append(token) else: current_sub_text.append(token) if current_sub_text: sub_text = tokenizer.convert_tokens_to_string(current_sub_text) sub_texts.append(sub_text) if spaces_between_special_tokens: return ' '.join(sub_texts) else: return ''.join(sub_texts)
def _convert_tokens_to_string_with_added_encoders(tokenizer: Union[ PreTrainedTokenizer, PreTrainedTokenizerFast], output_tokens: List[str], skip_special_tokens: bool, spaces_between_special_tokens: bool) ->str: sub_texts = [] current_sub_text = [] all_special_tokens = set(tokenizer.all_special_tokens) for token in output_tokens: if skip_special_tokens and token in all_special_tokens: continue if token in tokenizer.get_added_vocab(): if current_sub_text: sub_text = tokenizer.convert_tokens_to_string(current_sub_text) sub_texts.append(sub_text) current_sub_text = [] sub_texts.append(token) else: current_sub_text.append(token) if current_sub_text: sub_text = tokenizer.convert_tokens_to_string(current_sub_text) sub_texts.append(sub_text) if spaces_between_special_tokens: return ' '.join(sub_texts) else: return ''.join(sub_texts)
null
forward
del input_ raise RuntimeError("LMHead's weights should be used in the sampler.")
def forward(self, input_): del input_ raise RuntimeError("LMHead's weights should be used in the sampler.")
null
execute_method
executor = getattr(self, method) return executor(*args, **kwargs)
def execute_method(self, method, *args, **kwargs): executor = getattr(self, method) return executor(*args, **kwargs)
null
__init__
super().__init__() self.post_layer_norm = config.post_layer_norm self.num_layers = config.num_layers self.layers = nn.ModuleList([GLMBlock(config, linear_method) for i in range (self.num_layers)]) if self.post_layer_norm: layer_norm_func = RMSNorm if config.rmsnorm else LayerNorm self.final_layernorm = layer_norm_func(config.hidden_size, eps=config. layernorm_epsilon)
def __init__(self, config, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.post_layer_norm = config.post_layer_norm self.num_layers = config.num_layers self.layers = nn.ModuleList([GLMBlock(config, linear_method) for i in range(self.num_layers)]) if self.post_layer_norm: layer_norm_func = RMSNorm if config.rmsnorm else LayerNorm self.final_layernorm = layer_norm_func(config.hidden_size, eps= config.layernorm_epsilon)
null
load_weights
stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v')] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision, fall_back_to_pt=False): if 'rotary_emb.inv_freq' in name: continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue if 'block_sparse_moe.experts.' in name and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]= None, load_format: str='auto', revision: Optional[str]=None): stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v')] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision, fall_back_to_pt=False): if 'rotary_emb.inv_freq' in name: continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue if 'block_sparse_moe.experts.' in name and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
null
forward
layernorm_output = self.input_layernorm(hidden_states) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states attention_output = self.self_attention(position_ids=position_ids, hidden_states=layernorm_output, kv_cache=kv_cache, input_metadata= input_metadata) attention_output = attention_output + residual layernorm_output = self.post_attention_layernorm(attention_output) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = attention_output output = self.mlp(layernorm_output) + residual return output
def forward(self, position_ids: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: layernorm_output = self.input_layernorm(hidden_states) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states attention_output = self.self_attention(position_ids=position_ids, hidden_states=layernorm_output, kv_cache=kv_cache, input_metadata= input_metadata) attention_output = attention_output + residual layernorm_output = self.post_attention_layernorm(attention_output) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = attention_output output = self.mlp(layernorm_output) + residual return output
null
create_kv_caches
torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) scale = head_size ** -0.5 x = 16 // torch.tensor([], dtype=dtype).element_size() key_cache_shape = num_blocks, num_heads, head_size // x, block_size, x key_caches = [] for _ in range(num_layers): key_cache = torch.empty(size=key_cache_shape, dtype=dtype, device=device) key_cache.uniform_(-scale, scale) key_caches.append(key_cache) value_cache_shape = num_blocks, num_heads, head_size, block_size value_caches = [] for _ in range(num_layers): value_cache = torch.empty(size=value_cache_shape, dtype=dtype, device= device) value_cache.uniform_(-scale, scale) value_caches.append(value_cache) return key_caches, value_caches
def create_kv_caches(num_blocks: int, block_size: int, num_layers: int, num_heads: int, head_size: int, dtype: torch.dtype, seed: int, device: str ) ->Tuple[List[torch.Tensor], List[torch.Tensor]]: torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) scale = head_size ** -0.5 x = 16 // torch.tensor([], dtype=dtype).element_size() key_cache_shape = num_blocks, num_heads, head_size // x, block_size, x key_caches = [] for _ in range(num_layers): key_cache = torch.empty(size=key_cache_shape, dtype=dtype, device= device) key_cache.uniform_(-scale, scale) key_caches.append(key_cache) value_cache_shape = num_blocks, num_heads, head_size, block_size value_caches = [] for _ in range(num_layers): value_cache = torch.empty(size=value_cache_shape, dtype=dtype, device=device) value_cache.uniform_(-scale, scale) value_caches.append(value_cache) return key_caches, value_caches
null
get_num_layers
total_num_hidden_layers = self.hf_config.num_hidden_layers return total_num_hidden_layers // parallel_config.pipeline_parallel_size
def get_num_layers(self, parallel_config: 'ParallelConfig') ->int: total_num_hidden_layers = self.hf_config.num_hidden_layers return total_num_hidden_layers // parallel_config.pipeline_parallel_size
null
__repr__
return f'GPTQConfig(weight_bits={self.weight_bits}, group_size={self.group_size}, desc_act={self.desc_act})'
def __repr__(self) ->str: return ( f'GPTQConfig(weight_bits={self.weight_bits}, group_size={self.group_size}, desc_act={self.desc_act})' )
null
get_lock
lock_dir = cache_dir if cache_dir is not None else '/tmp' lock_file_name = model_name_or_path.replace('/', '-') + '.lock' lock = filelock.FileLock(os.path.join(lock_dir, lock_file_name)) return lock
def get_lock(model_name_or_path: str, cache_dir: Optional[str]=None): lock_dir = cache_dir if cache_dir is not None else '/tmp' lock_file_name = model_name_or_path.replace('/', '-') + '.lock' lock = filelock.FileLock(os.path.join(lock_dir, lock_file_name)) return lock
null
get_priority
return now - seq_group.arrival_time
def get_priority(self, now: float, seq_group: SequenceGroup) ->float: return now - seq_group.arrival_time
null
forward
with patch('vllm.model_executor.layers.sampler._prune_hidden_states', lambda x, y: x), patch('vllm.model_executor.layers.sampler._get_logits', lambda *args, **kwargs: self.fake_logits): return super().forward(*args, **kwargs)
def forward(self, *args, **kwargs): with patch('vllm.model_executor.layers.sampler._prune_hidden_states', lambda x, y: x), patch('vllm.model_executor.layers.sampler._get_logits' , lambda *args, **kwargs: self.fake_logits): return super().forward(*args, **kwargs)
null
get_num_unfinished_seq_groups
return len(self.waiting) + len(self.running) + len(self.swapped)
def get_num_unfinished_seq_groups(self) ->int: return len(self.waiting) + len(self.running) + len(self.swapped)
null
stats
"""Get the statistics of the engine.""" return JSONResponse(engine.testing_stats())
@app.get('/stats') def stats() ->Response: """Get the statistics of the engine.""" return JSONResponse(engine.testing_stats())
Get the statistics of the engine.
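For context on how this testing endpoint might be exercised, here is a minimal client sketch; it is not part of the dataset row, and the host, port, and response fields are assumptions for illustration.

```python
# Hypothetical client for a /stats route like the one above.
# The base URL and the shape of the returned JSON are assumptions.
import requests


def fetch_engine_stats(base_url: str = "http://localhost:8000") -> dict:
    """GET /stats and return the parsed JSON payload."""
    resp = requests.get(f"{base_url}/stats", timeout=5)
    resp.raise_for_status()
    return resp.json()


if __name__ == "__main__":
    print(fetch_engine_stats())
```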
add_request
"""Add a request to be sent to the engine on the next background loop iteration.""" if request_id in self._request_streams: raise KeyError(f'Request {request_id} already exists.') stream = AsyncStream(request_id) self._new_requests.put_nowait((stream, {'request_id': request_id, ** engine_add_request_kwargs})) self.new_requests_event.set() return stream
def add_request(self, request_id: str, **engine_add_request_kwargs ) ->AsyncStream: """Add a request to be sent to the engine on the next background loop iteration.""" if request_id in self._request_streams: raise KeyError(f'Request {request_id} already exists.') stream = AsyncStream(request_id) self._new_requests.put_nowait((stream, {'request_id': request_id, ** engine_add_request_kwargs})) self.new_requests_event.set() return stream
Add a request to be sent to the engine on the next background loop iteration.
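As a rough illustration of the stream-plus-queue pattern described in this docstring, the sketch below mirrors the put()/finished behavior visible in these rows with a stripped-down stand-in class; the names are hypothetical and this is not the vLLM implementation.

```python
# Stand-in for an AsyncStream-like object: a per-request asyncio.Queue that
# silently drops items once the request has been marked finished.
import asyncio


class TinyStream:
    def __init__(self, request_id: str) -> None:
        self.request_id = request_id
        self._queue: asyncio.Queue = asyncio.Queue()
        self._finished = False

    def put(self, item) -> None:
        if self._finished:
            return
        self._queue.put_nowait(item)

    def finish(self) -> None:
        self._finished = True


async def main() -> None:
    stream = TinyStream("req-0")
    stream.put("partial output 1")
    stream.put("partial output 2")
    stream.finish()
    stream.put("dropped")  # ignored: the stream is already finished
    while not stream._queue.empty():
        print(await stream._queue.get())


asyncio.run(main())
```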
__repr__
return f'SamplingMetadata(seq_groups={self.seq_groups}, seq_data={self.seq_data}, prompt_lens={self.prompt_lens}, selected_token_indices={self.selected_token_indices}, categorized_sample_indices={self.categorized_sample_indices}, perform_sampling={self.perform_sampling})'
def __repr__(self) ->str: return ( f'SamplingMetadata(seq_groups={self.seq_groups}, seq_data={self.seq_data}, prompt_lens={self.prompt_lens}, selected_token_indices={self.selected_token_indices}, categorized_sample_indices={self.categorized_sample_indices}, perform_sampling={self.perform_sampling})' )
null
__init__
self.output_sizes = output_sizes tp_size = get_tensor_model_parallel_world_size() assert all(output_size % tp_size == 0 for output_size in output_sizes) super().__init__(input_size, sum(output_sizes), bias, gather_output, skip_bias_add, params_dtype, linear_method)
def __init__(self, input_size: int, output_sizes: List[int], bias: bool= True, gather_output: bool=False, skip_bias_add: bool=False, params_dtype: Optional[torch.dtype]=None, linear_method: Optional[ LinearMethodBase]=None): self.output_sizes = output_sizes tp_size = get_tensor_model_parallel_world_size() assert all(output_size % tp_size == 0 for output_size in output_sizes) super().__init__(input_size, sum(output_sizes), bias, gather_output, skip_bias_add, params_dtype, linear_method)
null
__init__
super().__init__() self.config = config self.linear_method = linear_method assert not config.tie_word_embeddings self.transformer = GPTJModel(config, linear_method) self.lm_head = ParallelLMHead(config.vocab_size, config.n_embd, bias=True) self.sampler = Sampler(config.vocab_size)
def __init__(self, config: GPTJConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.config = config self.linear_method = linear_method assert not config.tie_word_embeddings self.transformer = GPTJModel(config, linear_method) self.lm_head = ParallelLMHead(config.vocab_size, config.n_embd, bias=True) self.sampler = Sampler(config.vocab_size)
null
get_head_size
return self.hf_config.hidden_size // self.hf_config.num_attention_heads
def get_head_size(self) ->int: return self.hf_config.hidden_size // self.hf_config.num_attention_heads
null
forward
hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) return hidden_states
null
verify_with_parallel_config
total_cpu_memory = get_cpu_memory() num_gpus_per_node = parallel_config.tensor_parallel_size cpu_memory_usage = self.swap_space_bytes * num_gpus_per_node msg = ( f'{cpu_memory_usage / _GB:.2f} GiB out of the {total_cpu_memory / _GB:.2f} GiB total CPU memory is allocated for the swap space.' ) if cpu_memory_usage > 0.7 * total_cpu_memory: raise ValueError('Too large swap space. ' + msg) elif cpu_memory_usage > 0.4 * total_cpu_memory: logger.warning('Possibly too large swap space. ' + msg)
def verify_with_parallel_config(self, parallel_config: 'ParallelConfig' ) ->None: total_cpu_memory = get_cpu_memory() num_gpus_per_node = parallel_config.tensor_parallel_size cpu_memory_usage = self.swap_space_bytes * num_gpus_per_node msg = ( f'{cpu_memory_usage / _GB:.2f} GiB out of the {total_cpu_memory / _GB:.2f} GiB total CPU memory is allocated for the swap space.' ) if cpu_memory_usage > 0.7 * total_cpu_memory: raise ValueError('Too large swap space. ' + msg) elif cpu_memory_usage > 0.4 * total_cpu_memory: logger.warning('Possibly too large swap space. ' + msg)
null
_get_and_verify_dtype
config_dtype = getattr(config, 'torch_dtype', None) if config_dtype is None: config_dtype = torch.float32 if isinstance(dtype, str): dtype = dtype.lower() if dtype == 'auto': if config_dtype == torch.float32: torch_dtype = torch.float16 else: torch_dtype = config_dtype else: if dtype not in _STR_DTYPE_TO_TORCH_DTYPE: raise ValueError(f'Unknown dtype: {dtype}') torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype] elif isinstance(dtype, torch.dtype): torch_dtype = dtype else: raise ValueError(f'Unknown dtype: {dtype}') if is_hip() and torch_dtype == torch.float32: rocm_supported_dtypes = [k for k, v in _STR_DTYPE_TO_TORCH_DTYPE.items( ) if k not in _ROCM_NOT_SUPPORTED_DTYPE] raise ValueError( f"dtype '{dtype}' is not supported in ROCm. Supported dtypes are {rocm_supported_dtypes}" ) if torch_dtype != config_dtype: if torch_dtype == torch.float32: pass elif config_dtype == torch.float32: pass else: logger.warning(f'Casting {config_dtype} to {torch_dtype}.') return torch_dtype
def _get_and_verify_dtype(config: PretrainedConfig, dtype: Union[str, torch .dtype]) ->torch.dtype: config_dtype = getattr(config, 'torch_dtype', None) if config_dtype is None: config_dtype = torch.float32 if isinstance(dtype, str): dtype = dtype.lower() if dtype == 'auto': if config_dtype == torch.float32: torch_dtype = torch.float16 else: torch_dtype = config_dtype else: if dtype not in _STR_DTYPE_TO_TORCH_DTYPE: raise ValueError(f'Unknown dtype: {dtype}') torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype] elif isinstance(dtype, torch.dtype): torch_dtype = dtype else: raise ValueError(f'Unknown dtype: {dtype}') if is_hip() and torch_dtype == torch.float32: rocm_supported_dtypes = [k for k, v in _STR_DTYPE_TO_TORCH_DTYPE. items() if k not in _ROCM_NOT_SUPPORTED_DTYPE] raise ValueError( f"dtype '{dtype}' is not supported in ROCm. Supported dtypes are {rocm_supported_dtypes}" ) if torch_dtype != config_dtype: if torch_dtype == torch.float32: pass elif config_dtype == torch.float32: pass else: logger.warning(f'Casting {config_dtype} to {torch_dtype}.') return torch_dtype
null
test_copy_blocks
random.seed(seed) torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' assert 2 * num_mappings <= num_blocks src_blocks = random.sample(range(num_blocks), num_mappings) remainig_blocks = list(set(range(num_blocks)) - set(src_blocks)) dst_blocks = random.sample(remainig_blocks, 2 * num_mappings) block_mapping = {} for i in range(num_mappings): src = src_blocks[i] dst1 = dst_blocks[2 * i] dst2 = dst_blocks[2 * i + 1] block_mapping[src] = [dst1, dst2] key_caches, value_caches = kv_cache_factory(num_blocks, block_size, num_layers, num_heads, head_size, dtype, seed, gpu_id) cloned_key_caches = [key_cache.clone() for key_cache in key_caches] cloned_value_caches = [value_cache.clone() for value_cache in value_caches] cache_ops.copy_blocks(key_caches, value_caches, block_mapping) for src, dsts in block_mapping.items(): for dst in dsts: for cloned_key_cache in cloned_key_caches: cloned_key_cache[dst].copy_(cloned_key_cache[src]) for cloned_value_cache in cloned_value_caches: cloned_value_cache[dst].copy_(cloned_value_cache[src]) for key_cache, cloned_key_cache in zip(key_caches, cloned_key_caches): assert torch.allclose(key_cache, cloned_key_cache) for value_cache, cloned_value_cache in zip(value_caches, cloned_value_caches): assert torch.allclose(value_cache, cloned_value_cache)
@pytest.mark.parametrize('num_mappings', NUM_MAPPINGS) @pytest.mark.parametrize('num_layers', NUM_LAYERS) @pytest.mark.parametrize('num_heads', NUM_HEADS) @pytest.mark.parametrize('head_size', HEAD_SIZES) @pytest.mark.parametrize('block_size', BLOCK_SIZES) @pytest.mark.parametrize('num_blocks', NUM_BLOCKS) @pytest.mark.parametrize('dtype', DTYPES) @pytest.mark.parametrize('seed', SEEDS) @pytest.mark.parametrize('device', DEVICES) @torch.inference_mode() def test_copy_blocks(kv_cache_factory, num_mappings: int, num_layers: int, num_heads: int, head_size: int, block_size: int, num_blocks: int, dtype: torch.dtype, seed: int, device: int) ->None: random.seed(seed) torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' assert 2 * num_mappings <= num_blocks src_blocks = random.sample(range(num_blocks), num_mappings) remainig_blocks = list(set(range(num_blocks)) - set(src_blocks)) dst_blocks = random.sample(remainig_blocks, 2 * num_mappings) block_mapping = {} for i in range(num_mappings): src = src_blocks[i] dst1 = dst_blocks[2 * i] dst2 = dst_blocks[2 * i + 1] block_mapping[src] = [dst1, dst2] key_caches, value_caches = kv_cache_factory(num_blocks, block_size, num_layers, num_heads, head_size, dtype, seed, gpu_id) cloned_key_caches = [key_cache.clone() for key_cache in key_caches] cloned_value_caches = [value_cache.clone() for value_cache in value_caches] cache_ops.copy_blocks(key_caches, value_caches, block_mapping) for src, dsts in block_mapping.items(): for dst in dsts: for cloned_key_cache in cloned_key_caches: cloned_key_cache[dst].copy_(cloned_key_cache[src]) for cloned_value_cache in cloned_value_caches: cloned_value_cache[dst].copy_(cloned_value_cache[src]) for key_cache, cloned_key_cache in zip(key_caches, cloned_key_caches): assert torch.allclose(key_cache, cloned_key_cache) for value_cache, cloned_value_cache in zip(value_caches, cloned_value_caches): assert torch.allclose(value_cache, cloned_value_cache)
null
forward
input_dtype = hidden_states.dtype variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return (self.weight * hidden_states).to(input_dtype)
def forward(self, hidden_states): input_dtype = hidden_states.dtype variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self. variance_epsilon) return (self.weight * hidden_states).to(input_dtype)
null
find
if seq_id not in self.seqs_dict: raise ValueError(f'Sequence {seq_id} not found.') return self.seqs_dict[seq_id]
def find(self, seq_id: int) ->Sequence: if seq_id not in self.seqs_dict: raise ValueError(f'Sequence {seq_id} not found.') return self.seqs_dict[seq_id]
null
test_silu_and_mul
torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' x = torch.randn(num_tokens, 2 * d, dtype=dtype, device=gpu_id) layer = SiluAndMul() out = layer(x) ref_out = layer._forward(x) assert torch.allclose(out, ref_out, atol=1e-05, rtol=1e-05)
@pytest.mark.parametrize('num_tokens', NUM_TOKENS) @pytest.mark.parametrize('d', D) @pytest.mark.parametrize('dtype', DTYPES) @pytest.mark.parametrize('seed', SEEDS) @pytest.mark.parametrize('device', DEVICES) @torch.inference_mode() def test_silu_and_mul(num_tokens: int, d: int, dtype: torch.dtype, seed: int, device: int) ->None: torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' x = torch.randn(num_tokens, 2 * d, dtype=dtype, device=gpu_id) layer = SiluAndMul() out = layer(x) ref_out = layer._forward(x) assert torch.allclose(out, ref_out, atol=1e-05, rtol=1e-05)
null
__init__
super().__init__() self.hidden_size = config.hidden_size total_num_heads = config.num_attention_heads self.tensor_model_parallel_world_size = get_tensor_model_parallel_world_size() assert total_num_heads % self.tensor_model_parallel_world_size == 0 self.num_heads = total_num_heads // self.tensor_model_parallel_world_size self.head_dim = self.hidden_size // total_num_heads self.scale = self.head_dim ** -0.5 self.multi_query = config.multi_query if self.multi_query: total_num_kv_heads = 1 self.num_kv_heads = 1 else: total_num_kv_heads = total_num_heads self.num_kv_heads = self.num_heads self.kv_dim = self.head_dim * self.num_kv_heads self.c_attn = QKVParallelLinear(self.hidden_size, self.head_dim, total_num_heads, total_num_kv_heads, bias=True, linear_method=linear_method ) self.c_proj = RowParallelLinear(self.hidden_size, self.hidden_size, bias= True, linear_method=linear_method) self.attn = PagedAttention(self.num_heads, self.head_dim, scale=self.scale, num_kv_heads=self.num_kv_heads)
def __init__(self, config: GPTBigCodeConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.hidden_size = config.hidden_size total_num_heads = config.num_attention_heads self.tensor_model_parallel_world_size = ( get_tensor_model_parallel_world_size()) assert total_num_heads % self.tensor_model_parallel_world_size == 0 self.num_heads = total_num_heads // self.tensor_model_parallel_world_size self.head_dim = self.hidden_size // total_num_heads self.scale = self.head_dim ** -0.5 self.multi_query = config.multi_query if self.multi_query: total_num_kv_heads = 1 self.num_kv_heads = 1 else: total_num_kv_heads = total_num_heads self.num_kv_heads = self.num_heads self.kv_dim = self.head_dim * self.num_kv_heads self.c_attn = QKVParallelLinear(self.hidden_size, self.head_dim, total_num_heads, total_num_kv_heads, bias=True, linear_method= linear_method) self.c_proj = RowParallelLinear(self.hidden_size, self.hidden_size, bias=True, linear_method=linear_method) self.attn = PagedAttention(self.num_heads, self.head_dim, scale=self. scale, num_kv_heads=self.num_kv_heads)
null
forward
residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_output = self.attn(position_ids=position_ids, hidden_states= hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) mlp_output = self.mlp(hidden_states) hidden_states = attn_output + mlp_output + residual return hidden_states
def forward(self, position_ids: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_output = self.attn(position_ids=position_ids, hidden_states= hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) mlp_output = self.mlp(hidden_states) hidden_states = attn_output + mlp_output + residual return hidden_states
null
_sample
categorized_seq_group_ids = {t: [] for t in SamplingType} categorized_sample_indices = sampling_metadata.categorized_sample_indices for i, seq_group in enumerate(sampling_metadata.seq_groups): _, sampling_params = seq_group sampling_type = sampling_params.sampling_type categorized_seq_group_ids[sampling_type].append(i) sample_results_dict: Dict[int, Tuple[List[int], List[int]]] = {} sample_metadata = {} for sampling_type in SamplingType: sample_indices = categorized_sample_indices[sampling_type] num_tokens = len(sample_indices) if num_tokens == 0: continue seq_group_ids = categorized_seq_group_ids[sampling_type] seq_groups = [sampling_metadata.seq_groups[i] for i in seq_group_ids] is_prompts = [(i < sampling_metadata.num_prompts) for i in seq_group_ids] sample_metadata[sampling_type ] = seq_group_ids, seq_groups, is_prompts, sample_indices if sampling_type == SamplingType.GREEDY: greedy_samples = torch.argmax(logprobs[sample_indices], dim=-1) elif sampling_type == SamplingType.RANDOM: max_best_of = 1 for seq_group, is_prompt in zip(seq_groups, is_prompts): if is_prompt: _, sampling_params = seq_group max_best_of = max(max_best_of, sampling_params.best_of) multinomial_samples = _multinomial(probs[sample_indices], max_best_of) elif sampling_type == SamplingType.BEAM: beam_search_logprobs = logprobs[sample_indices] else: raise ValueError(f'Unsupported sampling type: {sampling_type}') for sampling_type in SamplingType: if sampling_type not in sample_metadata: continue seq_group_ids, seq_groups, is_prompts, sample_indices = sample_metadata[ sampling_type] if sampling_type == SamplingType.GREEDY: sample_results = _greedy_sample(seq_groups, greedy_samples) elif sampling_type == SamplingType.RANDOM: sample_results = _random_sample(seq_groups, is_prompts, multinomial_samples) elif sampling_type == SamplingType.BEAM: sample_results = _beam_search_sample(seq_groups, is_prompts, sampling_metadata.seq_data, beam_search_logprobs) sample_results_dict.update(zip(seq_group_ids, sample_results)) sample_results = [sample_results_dict[i] for i in range(len( sampling_metadata.seq_groups))] return sample_results
def _sample(probs: torch.Tensor, logprobs: torch.Tensor, sampling_metadata: SamplingMetadata) ->List[Tuple[List[int], List[int]]]: categorized_seq_group_ids = {t: [] for t in SamplingType} categorized_sample_indices = sampling_metadata.categorized_sample_indices for i, seq_group in enumerate(sampling_metadata.seq_groups): _, sampling_params = seq_group sampling_type = sampling_params.sampling_type categorized_seq_group_ids[sampling_type].append(i) sample_results_dict: Dict[int, Tuple[List[int], List[int]]] = {} sample_metadata = {} for sampling_type in SamplingType: sample_indices = categorized_sample_indices[sampling_type] num_tokens = len(sample_indices) if num_tokens == 0: continue seq_group_ids = categorized_seq_group_ids[sampling_type] seq_groups = [sampling_metadata.seq_groups[i] for i in seq_group_ids] is_prompts = [(i < sampling_metadata.num_prompts) for i in seq_group_ids] sample_metadata[sampling_type ] = seq_group_ids, seq_groups, is_prompts, sample_indices if sampling_type == SamplingType.GREEDY: greedy_samples = torch.argmax(logprobs[sample_indices], dim=-1) elif sampling_type == SamplingType.RANDOM: max_best_of = 1 for seq_group, is_prompt in zip(seq_groups, is_prompts): if is_prompt: _, sampling_params = seq_group max_best_of = max(max_best_of, sampling_params.best_of) multinomial_samples = _multinomial(probs[sample_indices], max_best_of) elif sampling_type == SamplingType.BEAM: beam_search_logprobs = logprobs[sample_indices] else: raise ValueError(f'Unsupported sampling type: {sampling_type}') for sampling_type in SamplingType: if sampling_type not in sample_metadata: continue seq_group_ids, seq_groups, is_prompts, sample_indices = ( sample_metadata[sampling_type]) if sampling_type == SamplingType.GREEDY: sample_results = _greedy_sample(seq_groups, greedy_samples) elif sampling_type == SamplingType.RANDOM: sample_results = _random_sample(seq_groups, is_prompts, multinomial_samples) elif sampling_type == SamplingType.BEAM: sample_results = _beam_search_sample(seq_groups, is_prompts, sampling_metadata.seq_data, beam_search_logprobs) sample_results_dict.update(zip(seq_group_ids, sample_results)) sample_results = [sample_results_dict[i] for i in range(len( sampling_metadata.seq_groups))] return sample_results
null
tensor_model_parallel_gather
"""Gather the input tensor across model parallel group. NOTE: We assume that the input tensor is on the same device across all the ranks. """ world_size = get_tensor_model_parallel_world_size() if world_size == 1: return input_ assert -input_.dim() <= dim < input_.dim( ), f'Invalid dim ({dim}) for input tensor with shape {input_.size()}' if dim < 0: dim += input_.dim() if get_tensor_model_parallel_rank() == dst: gather_list = [torch.empty_like(input_) for _ in range(world_size)] else: gather_list = None torch.distributed.gather(input_, gather_list, dst=dst, group= get_tensor_model_parallel_group()) if get_tensor_model_parallel_rank() == dst: output_tensor = torch.cat(gather_list, dim=dim) else: output_tensor = None return output_tensor
def tensor_model_parallel_gather(input_, dst=0, dim=-1): """Gather the input tensor across model parallel group. NOTE: We assume that the input tensor is on the same device across all the ranks. """ world_size = get_tensor_model_parallel_world_size() if world_size == 1: return input_ assert -input_.dim() <= dim < input_.dim( ), f'Invalid dim ({dim}) for input tensor with shape {input_.size()}' if dim < 0: dim += input_.dim() if get_tensor_model_parallel_rank() == dst: gather_list = [torch.empty_like(input_) for _ in range(world_size)] else: gather_list = None torch.distributed.gather(input_, gather_list, dst=dst, group= get_tensor_model_parallel_group()) if get_tensor_model_parallel_rank() == dst: output_tensor = torch.cat(gather_list, dim=dim) else: output_tensor = None return output_tensor
Gather the input tensor across model parallel group. NOTE: We assume that the input tensor is on the same device across all the ranks.
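To make the gather-then-concatenate semantics concrete without a multi-GPU setup, here is a single-process simulation; it only mimics the math of the function above and does not touch torch.distributed, so the helper name and shapes are illustrative.

```python
# Simulate gather(dst, dim): only the destination rank receives the
# concatenation of every rank's shard along `dim`; other ranks get None.
import torch


def simulated_gather(shards, my_rank: int, dst: int = 0, dim: int = -1):
    if my_rank != dst:
        return None
    return torch.cat(shards, dim=dim)


shards = [torch.full((2, 4), float(rank)) for rank in range(4)]  # one per "rank"
gathered = simulated_gather(shards, my_rank=0, dst=0, dim=-1)
print(gathered.shape)  # torch.Size([2, 16])
```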
put
if self._finished: return self._queue.put_nowait(item)
def put(self, item: RequestOutput) ->None: if self._finished: return self._queue.put_nowait(item)
null
__init__
self.seq_id = seq_id self.prompt = prompt self.block_size = block_size self.data = SequenceData(prompt_token_ids) self.output_logprobs: SampleLogprobs = [] self.output_text = '' self.logical_token_blocks: List[LogicalTokenBlock] = [] self._append_tokens_to_blocks(prompt_token_ids) self.status = SequenceStatus.WAITING self.prefix_offset = 0 self.read_offset = 0 self.tokens: Optional[List[str]] = None
def __init__(self, seq_id: int, prompt: str, prompt_token_ids: List[int], block_size: int) ->None: self.seq_id = seq_id self.prompt = prompt self.block_size = block_size self.data = SequenceData(prompt_token_ids) self.output_logprobs: SampleLogprobs = [] self.output_text = '' self.logical_token_blocks: List[LogicalTokenBlock] = [] self._append_tokens_to_blocks(prompt_token_ids) self.status = SequenceStatus.WAITING self.prefix_offset = 0 self.read_offset = 0 self.tokens: Optional[List[str]] = None
null
__init__
self.request_id = request_id self._queue = asyncio.Queue() self._finished = False
def __init__(self, request_id: str) ->None: self.request_id = request_id self._queue = asyncio.Queue() self._finished = False
null
_init_distributed_environment
"""Initialize the distributed environment.""" if torch.distributed.is_initialized(): torch_world_size = torch.distributed.get_world_size() if torch_world_size != parallel_config.world_size: raise RuntimeError( f'torch.distributed is already initialized but the torch world size does not match parallel_config.world_size ({torch_world_size} vs. {parallel_config.world_size}).' ) elif not distributed_init_method: raise ValueError( 'distributed_init_method must be set if torch.distributed is not already initialized' ) else: torch.distributed.init_process_group(backend='nccl', world_size= parallel_config.world_size, rank=rank, init_method= distributed_init_method) torch.distributed.all_reduce(torch.zeros(1).cuda()) initialize_model_parallel(parallel_config.tensor_parallel_size, parallel_config.pipeline_parallel_size)
def _init_distributed_environment(parallel_config: ParallelConfig, rank: int, distributed_init_method: Optional[str]=None) ->None: """Initialize the distributed environment.""" if torch.distributed.is_initialized(): torch_world_size = torch.distributed.get_world_size() if torch_world_size != parallel_config.world_size: raise RuntimeError( f'torch.distributed is already initialized but the torch world size does not match parallel_config.world_size ({torch_world_size} vs. {parallel_config.world_size}).' ) elif not distributed_init_method: raise ValueError( 'distributed_init_method must be set if torch.distributed is not already initialized' ) else: torch.distributed.init_process_group(backend='nccl', world_size= parallel_config.world_size, rank=rank, init_method= distributed_init_method) torch.distributed.all_reduce(torch.zeros(1).cuda()) initialize_model_parallel(parallel_config.tensor_parallel_size, parallel_config.pipeline_parallel_size)
Initialize the distributed environment.
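For experimentation on a single CPU process, a minimal torch.distributed bring-up looks roughly like the sketch below; the gloo backend and the tcp:// address are illustrative stand-ins, whereas the function above uses nccl with a caller-supplied init method.

```python
# Single-process process-group initialization for local testing.
# Backend and init_method here are placeholders, not vLLM's defaults.
import torch.distributed as dist

if not dist.is_initialized():
    dist.init_process_group(
        backend="gloo",
        init_method="tcp://127.0.0.1:29500",
        world_size=1,
        rank=0,
    )
print("world size:", dist.get_world_size())
dist.destroy_process_group()
```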
get_linear_method
return SqueezeLLMLinearMethod(self)
def get_linear_method(self) ->'SqueezeLLMLinearMethod': return SqueezeLLMLinearMethod(self)
null
init_event
self.new_requests_event = asyncio.Event()
def init_event(self): self.new_requests_event = asyncio.Event()
null
sample
next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata, self.lm_head.bias) return next_tokens
def sample(self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) ->Optional[SamplerOutput]: next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata, self.lm_head.bias) return next_tokens
null
add_request
"""Add a request to the engine's request pool. The request is added to the request pool and will be processed by the scheduler as `engine.step()` is called. The exact scheduling policy is determined by the scheduler. Args: request_id: The unique ID of the request. prompt: The prompt string. Can be None if prompt_token_ids is provided. sampling_params: The sampling parameters for text generation. prompt_token_ids: The token IDs of the prompt. If None, we use the tokenizer to convert the prompts to token IDs. arrival_time: The arrival time of the request. If None, we use the current monotonic time. """ if arrival_time is None: arrival_time = time.monotonic() if prompt_token_ids is None: assert prompt is not None prompt_token_ids = self.tokenizer.encode(prompt) block_size = self.cache_config.block_size seq_id = next(self.seq_counter) seq = Sequence(seq_id, prompt, prompt_token_ids, block_size) seq_group = SequenceGroup(request_id, [seq], sampling_params, arrival_time) self.scheduler.add_seq_group(seq_group)
def add_request(self, request_id: str, prompt: Optional[str], sampling_params: SamplingParams, prompt_token_ids: Optional[List[int]]= None, arrival_time: Optional[float]=None) ->None: """Add a request to the engine's request pool. The request is added to the request pool and will be processed by the scheduler as `engine.step()` is called. The exact scheduling policy is determined by the scheduler. Args: request_id: The unique ID of the request. prompt: The prompt string. Can be None if prompt_token_ids is provided. sampling_params: The sampling parameters for text generation. prompt_token_ids: The token IDs of the prompt. If None, we use the tokenizer to convert the prompts to token IDs. arrival_time: The arrival time of the request. If None, we use the current monotonic time. """ if arrival_time is None: arrival_time = time.monotonic() if prompt_token_ids is None: assert prompt is not None prompt_token_ids = self.tokenizer.encode(prompt) block_size = self.cache_config.block_size seq_id = next(self.seq_counter) seq = Sequence(seq_id, prompt, prompt_token_ids, block_size) seq_group = SequenceGroup(request_id, [seq], sampling_params, arrival_time) self.scheduler.add_seq_group(seq_group)
Add a request to the engine's request pool. The request is added to the request pool and will be processed by the scheduler as `engine.step()` is called. The exact scheduling policy is determined by the scheduler. Args: request_id: The unique ID of the request. prompt: The prompt string. Can be None if prompt_token_ids is provided. sampling_params: The sampling parameters for text generation. prompt_token_ids: The token IDs of the prompt. If None, we use the tokenizer to convert the prompts to token IDs. arrival_time: The arrival time of the request. If None, we use the current monotonic time.
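The docstring above describes the add_request / step() contract; the loop below is a hedged sketch of driving it, where the engine construction via LLMEngine.from_engine_args and the model name are assumptions drawn from common vLLM usage rather than from these rows.

```python
# Sketch: enqueue prompts with add_request(), then repeatedly call step()
# until the scheduler reports no unfinished requests. Engine construction
# and the model name below are illustrative assumptions.
from vllm import EngineArgs, LLMEngine, SamplingParams

engine = LLMEngine.from_engine_args(EngineArgs(model="facebook/opt-125m"))
params = SamplingParams(temperature=0.8, max_tokens=16)

prompts = ["Hello, my name is", "The capital of France is"]
for i, prompt in enumerate(prompts):
    engine.add_request(request_id=str(i), prompt=prompt, sampling_params=params)

while engine.has_unfinished_requests():
    for request_output in engine.step():
        if request_output.finished:
            print(request_output.request_id, request_output.outputs[0].text)
```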
get_output_token_ids
return self.data.output_token_ids
def get_output_token_ids(self) ->List[int]: return self.data.output_token_ids
null
__init__
super().__init__() self.head_size = head_size self.rotary_dim = rotary_dim self.max_position_embeddings = max_position_embeddings self.base = base self.is_neox_style = is_neox_style cache = self._compute_cos_sin_cache() cache = cache.to(torch.get_default_dtype()) self.register_buffer('cos_sin_cache', cache, persistent=False)
def __init__(self, head_size: int, rotary_dim: int, max_position_embeddings: int, base: int, is_neox_style: bool) ->None: super().__init__() self.head_size = head_size self.rotary_dim = rotary_dim self.max_position_embeddings = max_position_embeddings self.base = base self.is_neox_style = is_neox_style cache = self._compute_cos_sin_cache() cache = cache.to(torch.get_default_dtype()) self.register_buffer('cos_sin_cache', cache, persistent=False)
null
__init__
self.index = index self.text = text self.token_ids = token_ids self.cumulative_logprob = cumulative_logprob self.logprobs = logprobs self.finish_reason = finish_reason
def __init__(self, index: int, text: str, token_ids: List[int], cumulative_logprob: float, logprobs: Optional[SampleLogprobs], finish_reason: Optional[str]=None) ->None: self.index = index self.text = text self.token_ids = token_ids self.cumulative_logprob = cumulative_logprob self.logprobs = logprobs self.finish_reason = finish_reason
null
__init__
super().__init__() self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [ intermediate_size] * 2, bias=False, linear_method=linear_method) self.down_proj = RowParallelLinear(intermediate_size, hidden_size, bias= False, linear_method=linear_method) if hidden_act != 'silu': raise ValueError( f'Unsupported activation: {hidden_act}. Only silu is supported for now.' ) self.act_fn = SiluAndMul()
def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [ intermediate_size] * 2, bias=False, linear_method=linear_method) self.down_proj = RowParallelLinear(intermediate_size, hidden_size, bias =False, linear_method=linear_method) if hidden_act != 'silu': raise ValueError( f'Unsupported activation: {hidden_act}. Only silu is supported for now.' ) self.act_fn = SiluAndMul()
null
fork
new_seq = copy.deepcopy(self) new_seq.seq_id = new_seq_id return new_seq
def fork(self, new_seq_id: int) ->'Sequence': new_seq = copy.deepcopy(self) new_seq.seq_id = new_seq_id return new_seq
null
from_config
weight_bits = cls.get_from_keys(config, ['bits']) group_size = cls.get_from_keys(config, ['group_size']) desc_act = cls.get_from_keys(config, ['desc_act']) return cls(weight_bits, group_size, desc_act)
@classmethod def from_config(cls, config: Dict[str, Any]) ->'GPTQConfig': weight_bits = cls.get_from_keys(config, ['bits']) group_size = cls.get_from_keys(config, ['group_size']) desc_act = cls.get_from_keys(config, ['desc_act']) return cls(weight_bits, group_size, desc_act)
null
__init__
self.device = device self.block_number = block_number self.block_size = block_size self.ref_count = 0
def __init__(self, device: Device, block_number: int, block_size: int) ->None: self.device = device self.block_number = block_number self.block_size = block_size self.ref_count = 0
null
get_amdgpu_offload_arch
command = '/opt/rocm/llvm/bin/amdgpu-offload-arch' try: output = subprocess.check_output([command]) return output.decode('utf-8').strip() except subprocess.CalledProcessError as e: error_message = f'Error: {e}' raise RuntimeError(error_message) from e except FileNotFoundError as e: error_message = f'The command {command} was not found.' raise RuntimeError(error_message) from e return None
def get_amdgpu_offload_arch(): command = '/opt/rocm/llvm/bin/amdgpu-offload-arch' try: output = subprocess.check_output([command]) return output.decode('utf-8').strip() except subprocess.CalledProcessError as e: error_message = f'Error: {e}' raise RuntimeError(error_message) from e except FileNotFoundError as e: error_message = f'The command {command} was not found.' raise RuntimeError(error_message) from e return None
null
forward
qkv, _ = self.c_attn(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) q, k = self.rotary_emb(positions, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.c_proj(attn_output) return output
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: qkv, _ = self.c_attn(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) q, k = self.rotary_emb(positions, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.c_proj(attn_output) return output
null
finished
return self.finish_reason is not None
def finished(self) ->bool: return self.finish_reason is not None
null

Dataset Card for "code-vllm-func-names"

Each row pairs a function extracted from the vLLM codebase with its name, method body, full source code, and docstring (null when the function has none).
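A loading sketch for the dataset itself; the repository namespace below is a placeholder, since the full Hub path is not stated here.

```python
# Replace "YOUR_NAMESPACE" with the dataset's actual owner on the Hub.
from datasets import load_dataset

ds = load_dataset("YOUR_NAMESPACE/code-vllm-func-names", split="train")
print(ds.column_names)       # list the dataset's columns
print(ds[0]["method_name"])  # inspect the first row
```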
