import math
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint

from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from .configuration_hawk import HawkConfig

logger = logging.get_logger(__name__)

def _make_causal_mask(
    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
    """
    Make the causal (lower-triangular) mask used for auto-regressive self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
    mask_cond = torch.arange(mask.size(-1), device=device)
    # Zero out the positions each query is allowed to attend to (the lower triangle).
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        # Cached positions are always visible, so prepend zeros for them.
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
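# Illustrative example (not exercised by the model code): for a batch of one
# 3-token sequence and no cache, the additive bias produced above is
#
#     mask = _make_causal_mask(torch.Size([1, 3]), torch.float32, torch.device("cpu"))
#     # mask[0, 0] ==
#     # [[0,   min, min],
#     #  [0,   0,   min],
#     #  [0,   0,   0  ]]   where min = torch.finfo(torch.float32).min
#
# Adding this to the attention scores blocks each query from future positions.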

def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

    # Flip the convention: 1 (attend) -> 0, 0 (ignore) -> large negative additive bias.
    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
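# Illustrative example (not exercised by the model code): a padding mask whose
# last key position is padding becomes a per-key additive bias,
#
#     bias = _expand_mask(torch.tensor([[1, 1, 0]]), torch.float32)
#     # bias.shape == (1, 1, 3, 3); every row of bias[0, 0] is [0, 0, min],
#     # so all queries are blocked from attending to the padded position.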

class RotaryEmbedding(torch.nn.Module):
    def __init__(self, dim, max_position_embeddings, base=10000, device=None):
        super().__init__()
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
        self.register_buffer("inv_freq", inv_freq)

        # Pre-compute the cos/sin tables up to the configured maximum sequence length.
        self.max_seq_len_cached = max_position_embeddings
        t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        self.cos_cached = emb.cos()[None, None, :, :]
        self.sin_cached = emb.sin()[None, None, :, :]

    def forward(self, x, seq_len=None):
        # Rebuild the cache on the fly if the requested length exceeds what was pre-computed.
        if seq_len > self.max_seq_len_cached:
            self.max_seq_len_cached = seq_len
            t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
            freqs = torch.einsum("i,j->ij", t, self.inv_freq)
            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
            self.cos_cached = emb.cos()[None, None, :, :]
            self.sin_cached = emb.sin()[None, None, :, :]
        # Cached tensors have shape [1, 1, seq, dim]; slice along the sequence dimension.
        return (
            self.cos_cached[:, :, :seq_len, ...].to(x.device),
            self.sin_cached[:, :, :seq_len, ...].to(x.device),
        )
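# Illustrative example (not exercised by the model code): the cache is built once
# in __init__ and only sliced at call time,
#
#     rope = RotaryEmbedding(dim=64, max_position_embeddings=2048)
#     cos, sin = rope(torch.randn(1, 8, 16, 64), seq_len=16)
#     # cos.shape == sin.shape == (1, 1, 16, 64); `x` is only consulted for its
#     # device/dtype if the cache has to be rebuilt for a longer sequence.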

def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    # Gather the cos/sin values that correspond to each token's position.
    # cos/sin: [1, 1, seq_len, rotary_dim]; gather_indices: [bsz, 1, q_len, rotary_dim].
    gather_indices = position_ids[:, None, :, None]
    gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
    cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
    sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
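# Illustrative example (not exercised by the model code): position 0 is left
# unchanged (cos(0) = 1, sin(0) = 0) and later positions are rotated pairwise,
#
#     rope = RotaryEmbedding(dim=8, max_position_embeddings=32)
#     q = k = torch.ones(1, 2, 4, 8)                    # [bsz, n_heads, seq_len, rotary_dim]
#     position_ids = torch.arange(4).unsqueeze(0)       # [[0, 1, 2, 3]]
#     cos, sin = rope(q, seq_len=4)
#     q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
#     # torch.equal(q_rot[:, :, 0], q[:, :, 0]) is True; the q.k dot products
#     # now depend only on the relative offset between positions.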

try:
    from flash_attn.flash_attn_interface import flash_attn_unpadded_func
except ImportError:
    flash_attn_unpadded_func = None

try:
    from einops import rearrange
except ImportError:
    rearrange = None

class FlashSelfAttention(torch.nn.Module):
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
                       (default: 1/sqrt(d_keys) where d_keys is computed at
                       runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
    """

    def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0,
                 device=None, dtype=None):
        super().__init__()
        assert flash_attn_unpadded_func is not None, ('Please install FlashAttention first, '
                                                      'e.g., with pip install flash-attn')
        assert rearrange is not None, 'Please install einops first, e.g., with pip install einops'
        self.causal = causal
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(self, q, k, v):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            q, k, v: The tensor containing the query, key, and value. (B, S, H, D)
        """
        # FlashAttention only supports fp16/bf16 inputs on CUDA devices.
        assert all(i.dtype in [torch.float16, torch.bfloat16] for i in (q, k, v))
        assert all(i.is_cuda for i in (q, k, v))

        batch_size, seqlen_q = q.shape[0], q.shape[1]
        seqlen_k = k.shape[1]

        # Flatten the batch into the "unpadded" layout expected by flash_attn_unpadded_func.
        q, k, v = [rearrange(x, 'b s ... -> (b s) ...') for x in [q, k, v]]
        cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32,
                                    device=q.device)

        if self.training:
            # During training q, k, v always have the same sequence length.
            assert seqlen_k == seqlen_q

            is_causal = self.causal
            cu_seqlens_k = cu_seqlens_q
            dropout_p = self.dropout_p
        else:
            # During inference, decoding steps attend to a longer cached key/value
            # sequence, so the causal mask is only needed for the prefill step.
            is_causal = seqlen_q == seqlen_k
            cu_seqlens_k = torch.arange(0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32,
                                        device=q.device)
            dropout_p = 0

        output = flash_attn_unpadded_func(
            q, k, v, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen_k,
            dropout_p,
            softmax_scale=self.softmax_scale, causal=is_causal
        )

        output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
        return output
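# Hypothetical usage sketch: FlashSelfAttention is an optional fast path and is
# not wired into the Attention module below; it requires CUDA and fp16/bf16 inputs.
#
#     flash_attn = FlashSelfAttention(causal=True, attention_dropout=0.1)
#     q = k = v = torch.randn(2, 128, 16, 64, dtype=torch.float16, device="cuda")
#     out = flash_attn(q, k, v)                         # [2, 128, 16, 64]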

class Attention(torch.nn.Module):
    """Multi-headed attention from the 'Attention Is All You Need' paper."""

    def __init__(self, config: HawkConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.n_embd
        self.num_heads = config.n_head
        self.head_dim = self.hidden_size // self.num_heads
        self.max_position_embeddings = config.n_positions
        self.rotary_ndims = config.rotary_dim

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        # Fused projection producing query, key and value in a single matmul.
        self.qkv_proj = torch.nn.Linear(self.hidden_size, 3 * self.num_heads * self.head_dim, bias=False)
        self.c_proj = torch.nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def _split_heads(self, tensor, num_heads, attn_head_size):
        """
        Splits hidden_size dim into attn_head_size and num_heads
        """
        new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
        tensor = tensor.view(new_shape)
        return tensor.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        rotary_pos_emb=None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()
        qkv_states = self.qkv_proj(hidden_states)

        query_states, key_states, value_states = qkv_states.chunk(3, dim=-1)
        # [bsz, q_len, n_heads, head_dim] -> [bsz, n_heads, q_len, head_dim]
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
        key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
        value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim).permute(0, 2, 1, 3)

        # Rotary embeddings are applied only to the first `rotary_ndims` dims of each head.
        query_rot = query_states[..., : self.rotary_ndims]
        query_pass = query_states[..., self.rotary_ndims :]
        key_rot = key_states[..., : self.rotary_ndims]
        key_pass = key_states[..., self.rotary_ndims :]

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        cos, sin = rotary_pos_emb(value_states, seq_len=kv_seq_len)
        query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
        query_states = torch.cat((query, query_pass), dim=-1)
        key_states = torch.cat((key, key_pass), dim=-1)

        if past_key_value is not None:
            # Reuse the cached keys and values from previous decoding steps.
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask
            attn_weights = torch.max(
                attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device)
            )

        # Upcast to fp32 for a numerically stable softmax, then cast back.
        attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(value_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.c_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
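# Illustrative decoding sketch (hypothetical names): the first call caches the
# prompt's keys/values, and each later call appends one position to the cache,
#
#     attn = Attention(config)
#     out, _, past = attn(h, attention_mask=mask, position_ids=pos,
#                         use_cache=True, rotary_pos_emb=rope)
#     # past[0].shape == past[1].shape == [bsz, n_heads, kv_seq_len, head_dim];
#     # passing `past_key_value=past` on the next step grows dim 2 by one.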

class MLP(torch.nn.Module):
    def __init__(self, hidden_size, intermediate_size, hidden_act):
        super().__init__()
        # c_fc produces two halves: one for the SiLU gate and one for the value.
        self.c_fc = torch.nn.Linear(hidden_size, intermediate_size * 2, bias=False)
        self.c_proj = torch.nn.Linear(intermediate_size, hidden_size, bias=False)

        # Note: `hidden_act` is accepted for config compatibility, but the
        # activation is hardcoded to SwiGLU.
        def swiglu(x):
            x = torch.chunk(x, 2, dim=-1)
            return torch.nn.functional.silu(x[0]) * x[1]

        self.activation_func = swiglu

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        intermediate_parallel = self.c_fc(hidden_states)
        intermediate_parallel = self.activation_func(intermediate_parallel)
        output = self.c_proj(intermediate_parallel)
        return output
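# SwiGLU in isolation (illustrative, not exercised by the model code): split the
# doubled projection into a gate half and a value half, then gate with SiLU,
#
#     x = torch.randn(1, 8)                             # 2 * intermediate_size columns
#     gate, value = torch.chunk(x, 2, dim=-1)
#     out = torch.nn.functional.silu(gate) * value      # shape [1, 4]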

class HawkBlock(torch.nn.Module):
    def __init__(self, config: HawkConfig):
        super().__init__()
        self.hidden_size = config.n_embd
        self.input_layernorm = torch.nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = Attention(config=config)
        self.post_attention_layernorm = torch.nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.mlp = MLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.n_inner,
            hidden_act=config.activation_function,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        rotary_pos_emb=None,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self-attention (pre-norm)
        hidden_states, self_attn_weights, present_key_value = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            rotary_pos_emb=rotary_pos_emb,
        )

        hidden_states = residual + hidden_states

        # Fully-connected MLP (pre-norm)
        residual = hidden_states

        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
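# The tuple returned by HawkBlock.forward is positional:
#     (hidden_states,)                                  # base case
#     + (self_attn_weights,)                            # appended if output_attentions
#     + (present_key_value,)                            # appended if use_cache
# so the cache sits at index 2 if output_attentions else 1, matching the
# indexing HawkModel uses below.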

class HawkPreTrainedModel(PreTrainedModel):
    config_class = HawkConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["HawkBlock"]
    _skip_keys_device_placement = "past_key_values"
    _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, torch.nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, torch.nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, HawkModel):
            module.gradient_checkpointing = value

class HawkModel(HawkPreTrainedModel):
    """
    Transformer decoder consisting of *config.n_layer* layers. Each layer is a [`HawkBlock`].

    Args:
        config: HawkConfig
    """

    def __init__(self, config: HawkConfig):
        super().__init__(config)
        self.vocab_size = config.vocab_size

        self.word_embeddings = torch.nn.Embedding(config.vocab_size, config.n_embd)
        self.rotary_pos_emb = RotaryEmbedding(config.rotary_dim, config.n_positions)
        self.layers = torch.nn.ModuleList([HawkBlock(config) for _ in range(config.n_layer)])
        self.final_layernorm = torch.nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing.
        self.post_init()

    def get_input_embeddings(self):
        return self.word_embeddings

    def set_input_embeddings(self, value):
        self.word_embeddings = value

    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # Create the causal mask with shape [bsz, 1, tgt_seq_len, src_seq_len].
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape,
                inputs_embeds.dtype,
                device=inputs_embeds.device,
                past_key_values_length=past_key_values_length,
            )

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
                inputs_embeds.device
            )
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        seq_length_with_past = seq_length
        past_key_values_length = 0

        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        if attention_mask is None:
            attention_mask = torch.ones(
                (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
            )
        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
        )

        rotary_pos_emb = self.rotary_pos_emb

        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # Decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # `rotary_pos_emb` is a module, not a tensor, so it must be
                        # forwarded through the closure rather than checkpoint() inputs.
                        return module(*inputs, output_attentions, None, rotary_pos_emb)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    position_ids,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    rotary_pos_emb=rotary_pos_emb,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.final_layernorm(hidden_states)

        # Add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

class HawkForCausalLM(HawkPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"model.rotary_pos_emb.inv_freq"]

    def __init__(self, config):
        super().__init__(config)
        self.model = HawkModel(config)

        self.lm_head = torch.nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # Initialize weights and apply final processing.
        self.post_init()

    def get_input_embeddings(self):
        return self.model.word_embeddings

    def set_input_embeddings(self, value):
        self.model.word_embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer
        >>> from .modeling_hawk import HawkForCausalLM

        >>> model = HawkForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]

        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict token n.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens.
            loss_fct = torch.nn.CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism.
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
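    # Illustrative example (not exercised by the model code): the shift above pairs
    # the logits at step t with the label at step t + 1,
    #
    #     logits = torch.randn(1, 3, 10)                # [bsz, seq_len, vocab]
    #     labels = torch.tensor([[5, 7, 9]])
    #     loss = torch.nn.CrossEntropyLoss()(
    #         logits[:, :-1, :].reshape(-1, 10),        # predictions for steps 0 and 1
    #         labels[:, 1:].reshape(-1),                # targets 7 and 9
    #     )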
    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        if past_key_values:
            # With a cache, only the last token needs to be fed through the model.
            input_ids = input_ids[:, -1:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # Create position_ids on the fly for batch generation.
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)

        # If `inputs_embeds` are passed, only use them in the first generation step.
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs
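    # Illustrative example (not exercised by the model code): for a left-padded
    # attention mask the cumsum trick above yields
    #
    #     attention_mask = torch.tensor([[0, 0, 1, 1, 1]])
    #     position_ids = attention_mask.long().cumsum(-1) - 1   # [[-1, -1, 0, 1, 2]]
    #     position_ids.masked_fill_(attention_mask == 0, 1)     # [[ 1,  1, 0, 1, 2]]
    #
    # so real tokens receive positions 0..2 and padded slots get a harmless dummy.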
    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        # Re-order each layer's cached key/value tensors along the batch/beam
        # dimension so they follow the surviving beams during beam search.
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
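    # Illustrative example (not exercised by the model code): with beam_idx =
    # tensor([2, 0, 0]), every cached tensor is re-indexed so beam 0 continues
    # old beam 2 and beams 1-2 continue old beam 0,
    #
    #     past = ((torch.randn(3, 4, 7, 8), torch.randn(3, 4, 7, 8)),)   # one layer
    #     reordered = HawkForCausalLM._reorder_cache(past, torch.tensor([2, 0, 0]))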