# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch LLaMA model.""" import time import math import warnings from typing import List, Optional, Tuple, Union, Mapping from contextlib import nullcontext from dataclasses import dataclass from collections import defaultdict from tqdm import tqdm from accelerate import Accelerator import os import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from transformers.activations import ACT2FN from transformers.cache_utils import Cache from transformers.modeling_attn_mask_utils import ( AttentionMaskConverter, _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa, ) from transformers.modeling_outputs import BaseModelOutputWithPast from transformers.modeling_utils import PreTrainedModel from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13 from transformers.utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings, ) from transformers.integrations import is_deepspeed_zero3_enabled from transformers.utils.import_utils import is_torch_fx_available # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph. # It means that the function will not be traced through and simply appear as a node in the graph. 
if is_torch_fx_available(): if not is_torch_greater_or_equal_than_1_13: import torch.fx _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask) from .configuration_llama import LlamaConfig from .modeling_ultragist import Memory from .modeling_utils import optional_grad_ctx, compute_loss, ModelOutput logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LlamaConfig" class LlamaRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ LlamaRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) ALL_LAYERNORM_LAYERS.append(LlamaRMSNorm) class LlamaRotaryEmbedding(nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) # Build here to make `torch.jit.trace` work. self._set_cos_sin_cache( seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() ) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) freqs = torch.outer(t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( self.cos_cached[:seq_len].to(dtype=x.dtype), self.sin_cached[:seq_len].to(dtype=x.dtype), ) class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding): """LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) t = t / self.scaling_factor freqs = torch.outer(t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding): """LlamaRotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla""" def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len if seq_len > self.max_position_embeddings: base = self.base * ( (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) ) ** (self.dim / (self.dim - 2)) inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) freqs = torch.outer(t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`): The position indices of the tokens corresponding to the query and key tensors. For example, this can be used to pass offsetted position ids when working with a KV-cache. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos[position_ids].unsqueeze(unsqueeze_dim) sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed # Copied from streaming-llm def apply_rotary_pos_emb_single(x, cos, sin, position_ids): # The first two dimensions of cos and sin are always 1, so we can `squeeze` them. 
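    # Shape sketch (descriptive, assuming the usual LLaMA layout):
    #   x:                [bs, num_heads, seq_len, head_dim]
    #   cos / sin caches: [max_seq_len, head_dim]; cos[position_ids] -> [bs, seq_len, head_dim]
    #   unsqueeze(1)   -> [bs, 1, seq_len, head_dim], which broadcasts over the head dimension.
    # rotate_half pairs dimension i with dimension i + head_dim // 2, so each pair is rotated by the
    # angle associated with its position id.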
cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] x_embed = (x * cos) + (rotate_half(x) * sin) return x_embed class LlamaMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) self.act_fn = ACT2FN[config.hidden_act] if "mlp" in config.ultragist_param: self.ultragist_up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.ultragist_up_proj.weight.data.zero_() self.ultragist_up_proj._is_hf_initialized = True self.ultragist_down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) self.ultragist_down_proj.weight.data.zero_() self.ultragist_down_proj._is_hf_initialized = True def _init_ultragist_proj(self, missing_keys): """Initialize the ultragist projection weight with that of the ordinal projection.""" if "mlp" in self.config.ultragist_param: if is_deepspeed_zero3_enabled(): import deepspeed params = [self.up_proj.weight, self.down_proj.weight, self.ultragist_up_proj.weight, self.ultragist_down_proj.weight] with deepspeed.zero.GatheredParameters(params, modifier_rank=0): if (self.ultragist_up_proj.weight.sum(-1) == 0).any(): self.ultragist_up_proj.weight.data[:] = self.up_proj.weight.data self.ultragist_down_proj.weight.data[:] = self.down_proj.weight.data else: if any("ultragist_up_proj" in missing_key for missing_key in missing_keys): # only copy the value in-place, without tieing the weight self.ultragist_up_proj.weight.data[:] = self.up_proj.weight.data self.ultragist_down_proj.weight.data[:] = self.down_proj.weight.data def forward(self, x, ultragist_size): if self.config.pretraining_tp > 1: # TODO: support pretraining_tp raise NotImplementedError slice = self.intermediate_size // self.config.pretraining_tp gate_proj_slices = self.gate_proj.weight.split(slice, dim=0) up_proj_slices = self.up_proj.weight.split(slice, dim=0) down_proj_slices = self.down_proj.weight.split(slice, dim=1) gate_proj = torch.cat( [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1 ) up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1) intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2) down_proj = [ F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp) ] down_proj = sum(down_proj) else: if "mlp" in self.config.ultragist_param: if ultragist_size > 0: ordinal_hidden_states = x[:, :-ultragist_size] ultragist_hidden_states = x[:, -ultragist_size:] ordinal_down_proj = self.down_proj(self.act_fn(self.gate_proj(ordinal_hidden_states)) * self.up_proj(ordinal_hidden_states)) ultragist_down_proj = self.ultragist_down_proj(self.act_fn(self.gate_proj(ultragist_hidden_states)) * self.ultragist_up_proj(ultragist_hidden_states)) down_proj = torch.cat([ordinal_down_proj, ultragist_down_proj], dim=1) else: down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) else: down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, 
    repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to
    (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


class LlamaAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: LlamaConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
                "lead to errors during the forward call, if caching is used. Please make sure to provide a "
                "`layer_idx` when creating this class."
            )

        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )

        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
        self._init_rope()

        # NOTE: add extra parameters for ultragist tokens
        # skip post initialization to speed up loading
        if "q" in config.ultragist_param:
            self.ultragist_q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
            # NOTE: initialize the ultragist parameters as zero
            self.ultragist_q_proj.weight.data.zero_()
            self.ultragist_q_proj._is_hf_initialized = True
        if "k" in config.ultragist_param:
            self.ultragist_k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
            self.ultragist_k_proj.weight.data.zero_()
            self.ultragist_k_proj._is_hf_initialized = True
        if "v" in config.ultragist_param:
            self.ultragist_v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
            self.ultragist_v_proj.weight.data.zero_()
            self.ultragist_v_proj._is_hf_initialized = True
        if "o" in config.ultragist_param:
            self.ultragist_o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
            self.ultragist_o_proj.weight.data.zero_()
            self.ultragist_o_proj._is_hf_initialized = True
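    # NOTE (descriptive sketch, not executed): when total_ultragist_size = g > 0, `qkv_proj_with_ultragist`
    # below routes the trailing g positions through the zero-initialized ultragist_* projections and the
    # remaining positions through the ordinary projections, roughly:
    #   ordinal_hidden_states   = hidden_states[:, :-g]  -> self.q_proj / self.k_proj / self.v_proj
    #   ultragist_hidden_states = hidden_states[:, -g:]  -> self.ultragist_q_proj / ...
    #   query_states = torch.cat([ordinal_query_states, ultragist_query_states], dim=1)
    # The ultragist_* weights are copied from the corresponding ordinary weights in `_init_ultragist_proj`
    # whenever they are not found in the checkpoint.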
    def _init_rope(self):
        if self.config.rope_scaling is None:
            self.rotary_emb = LlamaRotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                base=self.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling["type"]
            scaling_factor = self.config.rope_scaling["factor"]
            if scaling_type == "linear":
                self.rotary_emb = LlamaLinearScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.rope_theta,
                )
            elif scaling_type == "dynamic":
                self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.rope_theta,
                )
            else:
                raise ValueError(f"Unknown RoPE scaling type {scaling_type}")

    def _init_ultragist_proj(self, missing_keys):
        """Initialize the ultragist projection weight with that of the ordinal projection."""
        ultragist_param = self.config.ultragist_param
        if is_deepspeed_zero3_enabled():
            import deepspeed
            if "q" in ultragist_param:
                with deepspeed.zero.GatheredParameters([self.ultragist_q_proj.weight, self.q_proj.weight], modifier_rank=0):
                    # FIXME: after deepspeed initialization, some weights become non-zero, but there are rows that are full of zeros
                    if (self.ultragist_q_proj.weight.sum(-1) == 0).any():
                        self.ultragist_q_proj.weight.data[:] = self.q_proj.weight.data
            if "k" in ultragist_param:
                with deepspeed.zero.GatheredParameters([self.ultragist_k_proj.weight, self.k_proj.weight], modifier_rank=0):
                    if (self.ultragist_k_proj.weight.sum(-1) == 0).any():
                        self.ultragist_k_proj.weight.data[:] = self.k_proj.weight.data
            if "v" in ultragist_param:
                with deepspeed.zero.GatheredParameters([self.ultragist_v_proj.weight, self.v_proj.weight], modifier_rank=0):
                    if (self.ultragist_v_proj.weight.sum(-1) == 0).any():
                        self.ultragist_v_proj.weight.data[:] = self.v_proj.weight.data
            if "o" in ultragist_param:
                with deepspeed.zero.GatheredParameters([self.ultragist_o_proj.weight, self.o_proj.weight], modifier_rank=0):
                    if (self.ultragist_o_proj.weight.sum(-1) == 0).any():
                        self.ultragist_o_proj.weight.data[:] = self.o_proj.weight.data
        else:
            # only copy the value in-place, without tying the weight
            if "q" in ultragist_param and any("ultragist_q_proj" in missing_key for missing_key in missing_keys):
                if (self.ultragist_q_proj.weight == 0).all():
                    self.ultragist_q_proj.weight.data[:] = self.q_proj.weight.data
            if "k" in ultragist_param and any("ultragist_k_proj" in missing_key for missing_key in missing_keys):
                if (self.ultragist_k_proj.weight == 0).all():
                    self.ultragist_k_proj.weight.data[:] = self.k_proj.weight.data
            if "v" in ultragist_param and any("ultragist_v_proj" in missing_key for missing_key in missing_keys):
                if (self.ultragist_v_proj.weight == 0).all():
                    self.ultragist_v_proj.weight.data[:] = self.v_proj.weight.data
            if "o" in ultragist_param and any("ultragist_o_proj" in missing_key for missing_key in missing_keys):
                if (self.ultragist_o_proj.weight == 0).all():
                    self.ultragist_o_proj.weight.data[:] = self.o_proj.weight.data

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def qkv_proj_with_ultragist(self, hidden_states, ultragist_size=0):
        if ultragist_size > 0:
            ordinal_hidden_states = hidden_states[:, :-ultragist_size]
            ultragist_hidden_states = hidden_states[:, -ultragist_size:]

            if "q" in self.config.ultragist_param:
                ordinal_query_states = self.q_proj(ordinal_hidden_states)
                ultragist_query_states = self.ultragist_q_proj(ultragist_hidden_states)
                query_states = torch.cat([ordinal_query_states, ultragist_query_states], dim=1)
            else:
                query_states = self.q_proj(hidden_states)

            if "k" in self.config.ultragist_param:
                ordinal_key_states = self.k_proj(ordinal_hidden_states)
                ultragist_key_states = self.ultragist_k_proj(ultragist_hidden_states)
                key_states = torch.cat([ordinal_key_states, ultragist_key_states],
dim=1) else: key_states = self.k_proj(hidden_states) if "v" in self.config.ultragist_param: ordinal_value_states = self.v_proj(ordinal_hidden_states) ultragist_value_states = self.ultragist_v_proj(ultragist_hidden_states) value_states = torch.cat([ordinal_value_states, ultragist_value_states], dim=1) else: value_states = self.v_proj(hidden_states) else: query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) return query_states, key_states, value_states def o_proj_with_ultragist(self, attn_output, ultragist_size=0): if ultragist_size > 0: if "o" in self.config.ultragist_param: ordinal_attn_output = self.o_proj(attn_output[:, :-ultragist_size]) ultragist_attn_output = self.ultragist_o_proj(attn_output[:, -ultragist_size:]) attn_output = torch.cat([ordinal_attn_output, ultragist_attn_output], dim=1) else: attn_output = self.o_proj(attn_output) else: attn_output = self.o_proj(attn_output) return attn_output def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if "padding_mask" in kwargs: warnings.warn( "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" ) bsz, q_len, _ = hidden_states.size() kv_seq_len = hidden_states.shape[-2] past_key, past_value, ultragist_sizes, total_ultragist_size, raw_size_to_cache, window_size = past_key_value if past_key is not None: past_seq_len = past_key.shape[2] kv_seq_len += past_seq_len else: past_seq_len = 0 query_states, key_states, value_states = self.qkv_proj_with_ultragist(hidden_states, total_ultragist_size) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) # return keys and values before rope # NOTE: incrementally return keys and values for efficiency if window_size > 0: past_key_value = (key_states, value_states, ultragist_sizes, total_ultragist_size, raw_size_to_cache, window_size) if past_key is not None: # reuse k, v, self_attention key_states = torch.cat([past_key, key_states], dim=2) value_states = torch.cat([past_value, value_states], dim=2) # NOTE: window_size == 0 indicates the ultragist is disabled, the model works as is, so the new past_key_values should concatenate old ones if window_size == 0: past_key_value = (key_states, value_states, ultragist_sizes, total_ultragist_size, raw_size_to_cache, window_size) key_position_ids = position_ids # align query position_ids with key query_position_ids = key_position_ids[:, -q_len:] key_states = apply_rotary_pos_emb_single(key_states, cos, sin, key_position_ids) query_states = apply_rotary_pos_emb_single(query_states, cos, sin, query_position_ids) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) # debug attention weights # if q_len == 1: # with open(f"data/debug/{self.layer_idx}.txt", "w") as f: # 
torch.set_printoptions(profile="full",linewidth=10000000,precision=1,sci_mode=False) # a = attn_weights.mean(1) # f.write(f"past_length: {past_key.shape[2]}\nattn_weight: {a.shape}\n") # f.write(str(a)) # torch.set_printoptions(profile="default") # if self.layer_idx == self.config.num_hidden_layers - 1: # print("this is time!!!") # input() if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) # for debug # if past_key.shape[2] == 128 and self.layer_idx == 0: # torch.save({ # "hidden": hidden_states, # "query": query_states, # "key": key_states, # "value": value_states, # "output": attn_output, # "query_position_ids": query_position_ids, # "key_position_ids": key_position_ids, # }, "attn-output.pt") attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj_with_ultragist(attn_output, total_ultragist_size) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value class LlamaSdpaAttention(LlamaAttention): """ Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from `LlamaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to SDPA API. """ # Adapted from LlamaAttention.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. logger.warning_once( "LlamaModel is using LlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
) return super().forward( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) bsz, q_len, _ = hidden_states.size() kv_seq_len = hidden_states.shape[-2] past_key, past_value, ultragist_sizes, total_ultragist_size, raw_size_to_cache, window_size = past_key_value if past_key is not None: past_seq_len = past_key.shape[2] kv_seq_len += past_seq_len else: past_seq_len = 0 query_states, key_states, value_states = self.qkv_proj_with_ultragist(hidden_states, total_ultragist_size) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) # return keys and values before rope # NOTE: incrementally return keys and values for efficiency if window_size > 0: past_key_value = (key_states, value_states, ultragist_sizes, total_ultragist_size, raw_size_to_cache, window_size) if past_key is not None: # reuse k, v, self_attention key_states = torch.cat([past_key, key_states], dim=2) value_states = torch.cat([past_value, value_states], dim=2) # NOTE: window_size == 0 indicates the ultragist is disabled, the model works as is, so the new past_key_values should concatenate old ones if window_size == 0: past_key_value = (key_states, value_states, ultragist_sizes, total_ultragist_size, raw_size_to_cache, window_size) key_position_ids = position_ids # align query position_ids with key query_position_ids = key_position_ids[:, -q_len:] key_states = apply_rotary_pos_emb_single(key_states, cos, sin, key_position_ids) query_states = apply_rotary_pos_emb_single(query_states, cos, sin, query_position_ids) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) # if self.layer_idx == 0 and past_key is None: # with open(f"attention_mask.txt", "w") as f: # torch.set_printoptions(profile="full",linewidth=10000000,precision=1,sci_mode=False) # a = attention_mask # f.write(str(a)) # torch.set_printoptions(profile="default") # with open(f"position_ids.txt", "w") as f: # torch.set_printoptions(profile="full",linewidth=10000000,precision=1,sci_mode=False) # a = position_ids # f.write(str(a)) # torch.set_printoptions(profile="default") # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. if query_states.device.type == "cuda" and attention_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=self.attention_dropout if self.training else 0.0, # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. 
is_causal=self.is_causal and attention_mask is None and q_len > 1, ) # for debug # if past_key is not None and past_key.shape[2] == 128 and self.layer_idx == 0: # torch.save({ # "hidden": hidden_states, # "query": query_states, # "key": key_states, # "value": value_states, # "output": attn_output, # "query_position_ids": query_position_ids, # "key_position_ids": key_position_ids, # }, "attn-output.pt") attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj_with_ultragist(attn_output, total_ultragist_size) return attn_output, None, past_key_value LLAMA_ATTENTION_CLASSES = { "eager": LlamaAttention, "sdpa": LlamaSdpaAttention, } class LlamaDecoderLayer(nn.Module): def __init__(self, config: LlamaConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx) self.mlp = LlamaMLP(config) self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, **kwargs, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ if "padding_mask" in kwargs: warnings.warn( "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" ) # NOTE: get ultragist_size in case the mlp is included in ultragist_param past_key, past_value, ultragist_sizes, total_ultragist_size, raw_size_to_cache, window_size = past_key_value residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states, total_ultragist_size) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs LLAMA_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`LlamaConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", LLAMA_START_DOCSTRING, ) class LlamaPreTrainedModel(PreTrainedModel): config_class = LlamaConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["LlamaDecoderLayer"] _skip_keys_device_placement = "past_key_values" _supports_sdpa = True _supports_cache_class = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() LLAMA_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. 
Two formats are allowed: - a [`~cache_utils.Cache`] instance; - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy cache format. The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the legacy cache format will be returned. If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", LLAMA_START_DOCSTRING, ) class LlamaModel(LlamaPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`LlamaDecoderLayer`] Args: config: LlamaConfig """ def __init__(self, config: LlamaConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) # ultragist: add ultragist embedding self.ultragist_embed_tokens = nn.Embedding(1, config.hidden_size, self.padding_idx) self.ultragist_embed_tokens._is_hf_initialized = True self.layers = nn.ModuleList( [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self._use_sdpa = config._attn_implementation == "sdpa" self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def _init_ultragist_embed(self, missing_keys): """Initialize the ultragist token embedding with that of the eos token.""" if is_deepspeed_zero3_enabled(): import deepspeed params = [self.ultragist_embed_tokens.weight, self.embed_tokens.weight] with deepspeed.zero.GatheredParameters(params, modifier_rank=0): # deepspeed will initialize the parameters to zero if (self.ultragist_embed_tokens.weight == 0).all(): if self.config.ultragist_embed_init == "bos": self.ultragist_embed_tokens.weight.data[:] = self.embed_tokens.weight.data[self.config.bos_token_id] elif self.config.ultragist_embed_init == "eos": self.ultragist_embed_tokens.weight.data[:] = self.embed_tokens.weight.data[self.config.eos_token_id] else: raise NotImplementedError(f"Make sure ultragist_embed_init is either eos or bos, found {self.config.ultragist_embed_init}") else: if any("ultragist_embed_tokens" in missing_key for missing_key in missing_keys): if self.config.ultragist_embed_init == "bos": self.ultragist_embed_tokens.weight.data[:] = self.embed_tokens.weight.data[self.config.bos_token_id] elif self.config.ultragist_embed_init == "eos": self.ultragist_embed_tokens.weight.data[:] = self.embed_tokens.weight.data[self.config.eos_token_id] else: raise NotImplementedError(f"Make sure ultragist_embed_init is either eos or bos, found {self.config.ultragist_embed_init}") def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) # ultragist: always use cache use_cache = True return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape[:2] elif inputs_embeds is not None: batch_size, seq_length = inputs_embeds.shape[:2] else: raise ValueError("You have to 
specify either input_ids or inputs_embeds") # ultragist: create position_ids for all keys including past_keys device = input_ids.device if input_ids is not None else inputs_embeds.device seq_length_with_past = seq_length past_key_values_length = 0 past_key, past_value, ultragist_sizes, total_ultragist_size, raw_size_to_cache, window_size = past_key_values[0] if past_key is not None: past_key_values_length = past_key.shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length # ultragist: separately embed ordinal tokens and ultragist tokens because ordinal tokens do not receive gradients if total_ultragist_size > 0: ordinal_input_ids = input_ids[:, :-total_ultragist_size] ultragist_input_ids = input_ids[:, -total_ultragist_size:] ordinal_inputs_embeds = self.embed_tokens(ordinal_input_ids) # bias ultragist_token_ids because they are newly initialized ultragist_input_embeds = self.ultragist_embed_tokens(ultragist_input_ids - self.config.vocab_size) inputs_embeds = torch.cat([ordinal_inputs_embeds, ultragist_input_embeds], dim=1) else: inputs_embeds = self.embed_tokens(input_ids) # when total_ultragist_size > 0, we need to modify attention mask if self._use_sdpa and not output_attentions and total_ultragist_size == 0: # output_attentions=True can not be supported when using SDPA, and we fall back on # the manual implementation that requires a 4D causal mask in all cases. attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, ) else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length ) position_ids = torch.arange(seq_length_with_past, dtype=torch.long, device=device).repeat(batch_size, 1) # prepare attention mask and position ids for ultragists # NOTE: we must modify the position_ids here instead of inside the self_attn forward, otherwise the version of position_ids is incompatible when enabling gradient checkpointing if total_ultragist_size > 0: # number of tokens to condense by the ultragists condensing_size = window_size - raw_size_to_cache # number of tokens in current window (containing cached raw activations) window_size_with_ultragist = window_size + total_ultragist_size # number of ultragists in cache memory_size = seq_length_with_past - window_size_with_ultragist min_value = torch.finfo(inputs_embeds.dtype).min ultragist_start_idx = -total_ultragist_size # batch_size, head_num, window_size reference_attention_mask = attention_mask[..., -total_ultragist_size - 1, -window_size_with_ultragist: -total_ultragist_size] for ultragist_size in ultragist_sizes: # in this case, the activations of ordinal tokens are used as ultragist activations if ultragist_size < 0: continue token_per_ultragist = condensing_size // ultragist_size # the end_idx may be -0, in that case, use max instead ultragist_end_idx = ultragist_start_idx + ultragist_size if ultragist_end_idx == 0: ultragist_end_idx = torch.iinfo(torch.long).max if self.config.ultragist_attn == "step-expansion": # each ultragist can attend to one more sub-interval than its predecessor # token_per_ultragist, 2 * token_per_ultragist, ..., ultragist_size * token_per_ultragist ultragist_arange = torch.arange(1, ultragist_size + 1, device=device) * token_per_ultragist # 0, 1, 2, ..., window_size - 1 ordinal_arange = torch.arange(window_size, device=device) # ultragist_size, window_size valid_pos = 
                    ordinal_arange.expand(ultragist_size, window_size) < ultragist_arange.unsqueeze(-1)
                    # ultragist_size, window_size
                    ordinal_attention_mask = torch.where(valid_pos, 0, min_value)
                    # NOTE: add reference attention_mask so that padding tokens are considered
                    ordinal_attention_mask = ordinal_attention_mask[None, None, ...] + reference_attention_mask.unsqueeze(-2)

                    if self.config.ultragist_attend_prev:
                        ultragist_attention_mask = attention_mask.new_full((ultragist_size, ultragist_size), min_value).triu(1)
                        # the ultragist token is next to the last ordinal token it attends to
                        ultragist_position_ids = torch.arange(token_per_ultragist, token_per_ultragist * ultragist_size + 1, token_per_ultragist) + memory_size
                        ultragist_position_ids = ultragist_position_ids + torch.arange(ultragist_size)
                        position_ids[:, ultragist_start_idx: ultragist_end_idx] = ultragist_position_ids
                    else:
                        ultragist_attention_mask = attention_mask.new_full((ultragist_size, ultragist_size), min_value).fill_diagonal_(0)
                        # the ultragist token is next to the last ordinal token it attends to
                        ultragist_position_ids = torch.arange(token_per_ultragist, token_per_ultragist * ultragist_size + 1, token_per_ultragist) + memory_size
                        position_ids[:, ultragist_start_idx: ultragist_end_idx] = ultragist_position_ids

                    attention_mask[..., ultragist_start_idx: ultragist_end_idx, -window_size_with_ultragist: -total_ultragist_size] = ordinal_attention_mask
                    attention_mask[..., ultragist_start_idx: ultragist_end_idx, ultragist_start_idx: ultragist_end_idx] = ultragist_attention_mask
                    # ultragists of different ratios are blind to others
                    attention_mask[..., ultragist_start_idx: ultragist_end_idx, -total_ultragist_size: ultragist_start_idx] = min_value
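                # Worked example of "step-expansion" (hypothetical numbers, for illustration only):
                #   window_size = 8, raw_size_to_cache = 0  -> condensing_size = 8
                #   ultragist_size = 4                      -> token_per_ultragist = 2
                #   ultragist_arange = [2, 4, 6, 8], ordinal_arange = [0, 1, ..., 7]
                #   valid_pos (ultragist_size x window_size) then lets
                #     ultragist 0 attend to tokens 0-1, ultragist 1 to tokens 0-3,
                #     ultragist 2 to tokens 0-5, and ultragist 3 to tokens 0-7,
                #   i.e. each ultragist attends to one more sub-interval than its predecessor,
                #   and (with ultragist_attend_prev) also to the ultragists before it.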
                elif self.config.ultragist_attn == "segmentation":
                    # each ultragist can attend to its corresponding sub-interval
                    # ultragist_size, token_per_ultragist
                    indices = torch.arange(token_per_ultragist * ultragist_size, device=device).view(ultragist_size, -1)
                    # ultragist_size, window_size
                    ordinal_attention_mask = attention_mask.new_full((ultragist_size, window_size), min_value)
                    ordinal_attention_mask.scatter_(dim=-1, index=indices, value=0)
                    # NOTE: add reference attention_mask so that padding tokens are considered
                    ordinal_attention_mask = ordinal_attention_mask[None, None, ...] + reference_attention_mask.unsqueeze(-2)

                    if self.config.ultragist_attend_prev:
                        ultragist_attention_mask = attention_mask.new_full((ultragist_size, ultragist_size), min_value).triu(1)
                        # the ultragist token is next to the last ordinal token it attends to
                        # NOTE: `new_full` expects a size tuple, hence `(ultragist_size,)`
                        ultragist_position_ids = position_ids.new_full((ultragist_size,), fill_value=token_per_ultragist + memory_size)
                        ultragist_position_ids = ultragist_position_ids + torch.arange(ultragist_size)
                        position_ids[:, ultragist_start_idx: ultragist_end_idx] = ultragist_position_ids
                    else:
                        ultragist_attention_mask = attention_mask.new_full((ultragist_size, ultragist_size), min_value).fill_diagonal_(0)
                        # the ultragist token is next to the last ordinal token it attends to
                        ultragist_position_ids = position_ids.new_full((ultragist_size,), fill_value=token_per_ultragist + memory_size)
                        position_ids[:, ultragist_start_idx: ultragist_end_idx] = ultragist_position_ids

                    attention_mask[..., ultragist_start_idx: ultragist_end_idx, -window_size_with_ultragist: -total_ultragist_size] = ordinal_attention_mask
                    attention_mask[..., ultragist_start_idx: ultragist_end_idx, ultragist_start_idx: ultragist_end_idx] = ultragist_attention_mask
                    # ultragists of different ratios are blind to others
                    attention_mask[..., ultragist_start_idx: ultragist_end_idx, -total_ultragist_size: ultragist_start_idx] = min_value

                elif self.config.ultragist_attn == "full-coverage":
                    pass

                else:
                    raise NotImplementedError

                ultragist_start_idx = ultragist_end_idx

            # print(f"total_ultragist_size: {total_ultragist_size}")
            # print(f"raw_size_to_cache: {raw_size_to_cache}")
            # print(f"position_ids: {position_ids}")
            # print(f"attention_mask:\n{attention_mask}")
            # x = input()
            # if x == "s":
            #     return

        # embed positions
        hidden_states = inputs_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        # ultragist: still use tuple to organize cache
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            # ultragist: slice out the past_key_value of the corresponding layer
            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_value,
                    output_attentions,
                    use_cache,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
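# NOTE (descriptive, based on how the tuple is unpacked throughout this file): each per-layer entry of
# `past_key_values` is a plain tuple rather than a `Cache` object:
#   (past_key, past_value, ultragist_sizes, total_ultragist_size, raw_size_to_cache, window_size)
# where past_key / past_value are the cached key/value activations (or None on the first window),
# ultragist_sizes lists the number of ultragist tokens per condensing ratio (a negative size means the
# raw activations of ordinal tokens are reused as ultragist activations), total_ultragist_size is the
# number of ultragist tokens appended to the current input, raw_size_to_cache is the number of raw
# activations to keep, and window_size == 0 disables the ultragist mechanism so the model behaves like
# vanilla LLaMA.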
class LlamaForCausalLM(LlamaPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = LlamaModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """Override the default from_pretrained to extend vocab size according to ultragist_size."""
        kwargs.update(output_loading_info=True)
        model, loading_info = super().from_pretrained(*args, **kwargs)

        # NOTE: set memory after from_pretrained because there may be another transformer model inside
        # the Memory object, which may cause weird errors during loading
        config = model.config
        model.memory = Memory(
            model_config=config,
            k_seq_dim=2,
            v_seq_dim=2,
        )

        missing_keys = loading_info["missing_keys"]
        # NOTE: the ultragist parameters may or may not be loaded from the checkpoint;
        # if they are loaded from the checkpoint, we should not re-initialize them
        model.model._init_ultragist_embed(missing_keys)
        # initialize weights of possible q,k,v,o,mlp
        for layer in model.model.layers:
            layer.self_attn._init_ultragist_proj(missing_keys)
            layer.mlp._init_ultragist_proj(missing_keys)

        return model

    def _native_forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        shift_labels: Optional[bool] = True,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # when we directly call _native_forward, the past_key_values would be None
        if past_key_values is None:
            # NOTE: set window size to 0, so that new past_key_values are returned properly, see LlamaAttention.forward
            past_key_values = [(None, None, [0], 0, 0, 0) for _ in range(self.config.num_hidden_layers)]

        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        if self.config.pretraining_tp > 1:
            lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
            logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
            logits = torch.cat(logits, dim=-1)
        else:
            logits = self.lm_head(hidden_states)
        logits = logits.float()

        loss = None
        batch_loss = None
        valid_token_num = None

        if labels is not None:
            loss, batch_loss, valid_token_num = compute_loss(logits, labels, shift=shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not
None else output return ModelOutput( loss=loss, batch_loss=batch_loss, valid_token_num=valid_token_num, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def _ultragist_forward(self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): # t1 = time.time() # initialize cache self.memory.prepare( input_ids=input_ids, attention_mask=attention_mask, labels=labels ) # t2 = time.time() # print(f"{torch.distributed.get_rank()}: {input_ids.shape}") # after the first window, one token at a time while not self.memory.finish: # for _ in range(2): # t3 = time.time() input_ids, attention_mask, past_key_values, labels = self.memory.step() # NOTE: the first window is encoded without ultragist parameters, we should skip it when computing loss if self.training and self.memory._step_idx == 1: labels[:] = -100 # t4 = time.time() outputs = self._native_forward( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, # NOTE: the labels have been shifted so that all tokens in the window have the proper loss shift_labels=False, ) # t5 = time.time() # update past_key_values self.memory.update_memory(outputs.past_key_values) # t6 = time.time() if labels is not None: # update loss self.memory.update_loss(outputs.batch_loss, outputs.valid_token_num) # t7 = time.time() # print(f"Loop step time: {t4-t3}") # print(f"Loop forward time: {t5-t4}") # print(f"Loop update time: {t6-t5}") # print(f"Loop loss time: {t7-t6}") # input() # t8 = time.time() # output loss, past_key_values, and perplexity outputs = self.memory.output(outputs) # t9 = time.time() # print(f"Prepare time: {t2-t1}") # print(f"Output time: {t9-t8}") return outputs def forward(self, **kwargs): """Forward computation over a batch of sequences. 
""" # only allow gradient when training with optional_grad_ctx(with_grad=self.training): # we can disable ultragist to use the original llama if hasattr(self, "_enable_ultragist") and self._enable_ultragist == False: return self._native_forward(**kwargs) else: return self._ultragist_forward(**kwargs) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): if past_key_values: input_ids = input_ids[:, -1:] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past