# coding=utf-8 import math from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint from configuration_llava import LlavaConfig, PhiConfig from einops import rearrange, repeat from open_clip import create_model from transformers import PretrainedConfig, PreTrainedModel from transformers.activations import ACT2FN from transformers.modeling_outputs import CausalLMOutputWithPast, ModelOutput try: from flash_attn.bert_padding import pad_input, unpad_input from flash_attn.layers.rotary import RotaryEmbedding as FlashRotaryEmbedding from flash_attn.modules.mha import FlashCrossAttention, FlashSelfAttention from flash_attn.ops.fused_dense import FusedDense except: pad_input, unpad_input = None, None FlashRotaryEmbedding = None FlashSelfAttention, FlashCrossAttention = None, None FusedDense = None @dataclass class InferenceParams: """Inference parameters passed to model to efficiently calculate and store context during inference. Reference: https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/utils/generation.py. Args: max_seqlen: Maximum sequence length. max_batch_size: Maximum batch size. seqlen_offset: Sequence length offset. batch_size_offset: Batch size offset. key_value_memory_dict: Key value memory dictionary. lengths_per_sample: Lengths per sample. """ max_seqlen: int = field(metadata={"help": "Maximum sequence length."}) max_batch_size: int = field(metadata={"help": "Maximum batch size."}) seqlen_offset: int = field(default=0, metadata={"help": "Sequence length offset."}) batch_size_offset: int = field(default=0, metadata={"help": "Batch size offset."}) key_value_memory_dict: Dict[str, Any] = field( default_factory=dict, metadata={"help": "Key value memory dictionary."} ) lengths_per_sample: torch.Tensor = field( default=None, metadata={"help": "Lengths per sample."} ) class Embedding(nn.Module): """Token embedding with dropout.""" def __init__(self, config: PretrainedConfig) -> None: super().__init__() self.wte = nn.Embedding(config.vocab_size, config.n_embd) self.drop = nn.Dropout(config.embd_pdrop) def forward(self, input_ids: torch.LongTensor) -> torch.FloatTensor: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.wte(input_ids) hidden_states = self.drop(hidden_states) return hidden_states def _apply_rotary_emb( x: torch.FloatTensor, cos: torch.FloatTensor, sin: torch.FloatTensor, ) -> torch.FloatTensor: _, seqlen, _, _ = x.shape _, rotary_dim = cos.shape rotary_dim *= 2 x_rot = x[:, :, :, :rotary_dim] x_pass = x[:, :, :, rotary_dim:] x1, x2 = x_rot.chunk(2, dim=-1) c, s = ( rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d"), ) x1, x2, c, s = [t.to(dtype=torch.float32) for t in [x1, x2, c, s]] x_rot = torch.cat([x1 * c - x2 * s, x1 * s + x2 * c], axis=-1).to(x.dtype) return torch.cat([x_rot, x_pass], axis=-1) def _apply_rotary_emb_kv( kv: torch.FloatTensor, cos: torch.FloatTensor, sin: torch.FloatTensor, cos_k: Optional[torch.FloatTensor] = None, sin_k: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: _, seqlen, _, _, _ = kv.shape _, rotary_dim = cos.shape rotary_dim *= 2 k_rot = kv[:, :, 0, :, :rotary_dim] k_pass = kv[:, :, 0, :, rotary_dim:] k1, k2 = k_rot.chunk(2, dim=-1) c, s = ( rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d"), ) k1, k2, c, s = [t.to(dtype=torch.float32) for t in [k1, k2, c, s]] k_rot = torch.cat([k1 * c - k2 * s, k1 * s + k2 * c], axis=-1).to(kv.dtype) return torch.cat( [ torch.cat([k_rot, k_pass], axis=-1).unsqueeze(2), kv[:, :, 1:2, :, :], ], axis=2, ) def _apply_rotary_emb_qkv( qkv: torch.FloatTensor, cos: torch.FloatTensor, sin: torch.FloatTensor, cos_k: Optional[torch.FloatTensor] = None, sin_k: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: _, seqlen, _, _, _ = qkv.shape _, rotary_dim = cos.shape rotary_dim *= 2 q_rot = qkv[:, :, 0, :, :rotary_dim] q_pass = qkv[:, :, 0, :, rotary_dim:] k_rot = qkv[:, :, 1, :, :rotary_dim] k_pass = qkv[:, :, 1, :, rotary_dim:] q1, q2 = q_rot.chunk(2, dim=-1) k1, k2 = k_rot.chunk(2, dim=-1) c, s = ( rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d"), ) q1, q2, k1, k2, c, s = [t.to(dtype=torch.float32) for t in [q1, q2, k1, k2, c, s]] q_rot = torch.cat([q1 * c - q2 * s, q1 * s + q2 * c], axis=-1).to(qkv.dtype) k_rot = torch.cat([k1 * c - k2 * s, k1 * s + k2 * c], axis=-1).to(qkv.dtype) return torch.cat( [ torch.cat([q_rot, q_pass], axis=-1).unsqueeze(2), torch.cat([k_rot, k_pass], axis=-1).unsqueeze(2), qkv[:, :, 2:3, :, :], ], axis=2, ) class RotaryEmbedding(nn.Module): """Rotary positional embedding (RoPE). Reference: RoFormer: Enhanced Transformer with Rotary Position Embedding. https://arxiv.org/pdf/2104.09864.pdf. """ def __init__( self, dim: int, base: int = 10000, scale_base: Optional[float] = None, pos_idx_in_fp32: bool = True, max_position_embeddings: int = 2048, device: Optional[str] = None, **kwargs, ) -> None: super().__init__() if scale_base is not None: raise NotImplementedError self.dim = dim self.base = float(base) self.scale_base = scale_base self.pos_idx_in_fp32 = pos_idx_in_fp32 self.max_position_embeddings = max_position_embeddings self.device = device # Generate and save the inverse frequency buffer (non-trainable) inv_freq = self._compute_inv_freq(device) self.register_buffer("inv_freq", inv_freq, persistent=False) # Generate and save the scale buffer (non-trainable) scale = ( (torch.arange(0, dim, 2, device=device, dtype=torch.float32) + 0.4 * dim) / (1.4 * dim) if scale_base is not None else None ) self.register_buffer("scale", scale, persistent=False) # Initialize cached attributes since ONNX can't rely on dynamic initialization self._update_cos_sin_cache( max_position_embeddings, device=device, dtype=torch.float32 ) def _compute_inv_freq(self, device: Optional[str] = None) -> torch.FloatTensor: return 1.0 / ( self.base ** ( torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim ) ) def _update_cos_sin_cache( self, seqlen: int, device: Optional[str] = None, dtype: Optional[torch.dtype] = None, ) -> None: self._seq_len_cached = seqlen # fp32 is preferred since the output of `torch.arange` can be quite large # and bf16 would lose a lot of precision if self.pos_idx_in_fp32: t = torch.arange(seqlen, device=device, dtype=torch.float32) if self.inv_freq.dtype != torch.float32: inv_freq = self._compute_inv_freq(device=device) else: inv_freq = self.inv_freq else: t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype) inv_freq = self.inv_freq # `torch.outer` is preferred since `torch.einsum` converts from fp32 to fp16 if used with AMP freqs = torch.outer(t, inv_freq) if self.scale is None: self._cos_cached = torch.cos(freqs).to(dtype) self._sin_cached = torch.sin(freqs).to(dtype) else: power = ( torch.arange(seqlen, dtype=self.scale.dtype, device=self.scale.device) - seqlen // 2 ) / self.scale_base scale = self.scale.to(device=power.device) ** rearrange(power, "s -> s 1") # Force the scale multiplication to happen in fp32 self._cos_cached = (torch.cos(freqs) * scale).to(dtype) self._sin_cached = (torch.sin(freqs) * scale).to(dtype) self._cos_k_cached = (torch.cos(freqs) / scale).to(dtype) self._sin_k_cached = (torch.sin(freqs) / scale).to(dtype) def forward( self, qkv: torch.Tensor, kv: Optional[torch.Tensor] = None, seqlen_offset: int = 0, **kwargs, ) -> Tuple[torch.Tensor, torch.Tensor]: if ( self._seq_len_cached < qkv.shape[1] + seqlen_offset or self._cos_cached.device != qkv.device or self._cos_cached.dtype != qkv.dtype or (self.training and self._cos_cached.is_inference()) ): self._update_cos_sin_cache( qkv.shape[1] + seqlen_offset, device=qkv.device, dtype=qkv.dtype ) if kv is None: return _apply_rotary_emb_qkv( qkv, self._cos_cached[seqlen_offset:], self._sin_cached[seqlen_offset:], ) else: q = _apply_rotary_emb( qkv, self._cos_cached[seqlen_offset:], self._sin_cached[seqlen_offset:], ) kv = _apply_rotary_emb_kv( kv, self._cos_cached[seqlen_offset:], self._sin_cached[seqlen_offset:], ) return q, kv class MLP(nn.Module): """Multi-Layer Perceptron. Reference: Attention Is All You Need. https://arxiv.org/pdf/1706.03762.pdf. """ def __init__( self, config: PretrainedConfig, n_inner: Optional[int] = None, act_fn: Optional[str] = None, ) -> None: super().__init__() act_fn = config.activation_function if act_fn is None else act_fn n_inner = getattr(config, "n_inner", None) if n_inner is None else n_inner n_inner = n_inner if n_inner is not None else 4 * config.n_embd self.fc1 = nn.Linear(config.n_embd, n_inner) self.fc2 = nn.Linear(n_inner, config.n_embd) self.act = ACT2FN[act_fn] def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: hidden_states = self.fc1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class SelfAttention(nn.Module): """Self-attention layer (compatible with PyTorch). Reference: https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/modules/mha.py. """ def __init__( self, causal: bool = True, softmax_scale: Optional[float] = None, attention_dropout: float = 0.0, ) -> None: super().__init__() self.causal = causal self.softmax_scale = softmax_scale self.drop = nn.Dropout(attention_dropout) @torch.autocast("cpu", enabled=False) @torch.autocast("cuda", enabled=False) def forward( self, qkv: torch.FloatTensor, causal: bool = None, key_padding_mask: Optional[torch.BoolTensor] = None, **kwargs, ) -> torch.FloatTensor: batch_size, seqlen = qkv.shape[0], qkv.shape[1] q, k, v = qkv.unbind(dim=2) q = q.to(torch.float32) k = k.to(torch.float32) causal = self.causal if causal is None else causal softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1]) # Autocast is manually disabled to avoid `torch.einsum` performing the operation # using float16, which might lead to overflow scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale) if key_padding_mask is not None: padding_mask = torch.full( (batch_size, seqlen), -10000.0, dtype=scores.dtype, device=scores.device ) padding_mask.masked_fill_(key_padding_mask, 0.0) scores = scores + rearrange(padding_mask, "b s -> b 1 1 s") if causal: causal_mask = torch.triu( torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1 ) scores = scores + causal_mask.to(dtype=scores.dtype) attention = torch.softmax(scores, dim=-1).to(v.dtype) attention = self.drop(attention) output = torch.einsum("bhts,bshd->bthd", attention, v) return output class CrossAttention(nn.Module): """Cross-attention layer (compatible with PyTorch). Reference: https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/modules/mha.py. """ def __init__( self, causal: bool = True, softmax_scale: Optional[float] = None, attention_dropout: float = 0.0, ) -> None: super().__init__() self.causal = causal self.softmax_scale = softmax_scale self.drop = nn.Dropout(attention_dropout) @torch.autocast("cpu", enabled=False) @torch.autocast("cuda", enabled=False) def forward( self, q: torch.FloatTensor, kv: torch.FloatTensor, causal: bool = None, key_padding_mask: Optional[torch.BoolTensor] = None, **kwargs, ) -> torch.FloatTensor: batch_size, seqlen_q = q.shape[0], q.shape[1] seqlen_k = kv.shape[1] if kv.shape[3] != q.shape[2]: kv = repeat(kv, "... hkv d -> ... (hkv g) d", g=q.shape[2] // kv.shape[3]) k, v = kv.unbind(dim=2) q = q.to(torch.float32) k = k.to(torch.float32) causal = self.causal if causal is None else causal softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1]) # Autocast is manually disabled to avoid `torch.einsum` performing the operation # using float16, which might lead to overflow scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale) if key_padding_mask is not None: padding_mask = torch.full( (batch_size, seqlen_k), -10000.0, dtype=scores.dtype, device=scores.device, ) padding_mask.masked_fill_(key_padding_mask, 0.0) scores = scores + rearrange(padding_mask, "b s -> b 1 1 s") if causal: rows = rearrange( torch.arange(seqlen_q, device=q.device, dtype=torch.long), "s -> s 1" ) cols = torch.arange(seqlen_k, device=k.device, dtype=torch.long) causal_mask = cols > rows + seqlen_k - seqlen_q scores = scores.masked_fill(causal_mask, -10000.0) attention = torch.softmax(scores, dim=-1).to(v.dtype) attention = self.drop(attention) output = torch.einsum("bhts,bshd->bthd", attention, v) return output def _find_mha_dims( config: PretrainedConfig, n_head: Optional[int] = None, n_head_kv: Optional[int] = None, head_dim: Optional[int] = None, ) -> Tuple[int, int]: if n_head is None and head_dim is None: head_dim = config.n_embd // config.n_head n_head = config.n_head elif n_head is None or head_dim is None: raise ValueError("`n_head` and `head_dim` must be both specified or `None`.") if n_head_kv is None: n_head_kv = getattr(config, "n_head_kv", None) or n_head return n_head, n_head_kv, head_dim def _update_kv_cache( kv: torch.FloatTensor, inference_params: InferenceParams, layer_idx: int ) -> torch.FloatTensor: num_heads, head_dim = kv.shape[-2:] if layer_idx not in inference_params.key_value_memory_dict: inference_params.key_value_memory_dict[layer_idx] = torch.empty( inference_params.max_batch_size, inference_params.max_seqlen, 2, num_heads, head_dim, dtype=kv.dtype, device=kv.device, ) batch_start = inference_params.batch_size_offset batch_end = batch_start + kv.shape[0] sequence_start = inference_params.seqlen_offset sequence_end = sequence_start + kv.shape[1] # When the current sequence length is equal to or larger than the maximum sequence length, # we need to concatenate the current `kv` with the cached `kv` to expand its length if sequence_end >= inference_params.max_seqlen: inference_params.key_value_memory_dict[layer_idx] = torch.concatenate( (inference_params.key_value_memory_dict[layer_idx], kv), dim=1 ) inference_params.key_value_memory_dict[layer_idx][ batch_start:batch_end, sequence_start:sequence_end, ... ] = kv kv = inference_params.key_value_memory_dict[layer_idx][ batch_start:batch_end, :sequence_end, ... ] return kv class MHA(nn.Module): """Multi-head attention layer.""" def __init__( self, config: PretrainedConfig, dtype: Optional[torch.dtype] = None, device: Optional[str] = None, rotary_dim: Optional[int] = None, rotary_base: float = 10000.0, rotary_scale_base: Optional[float] = None, n_head: Optional[int] = None, n_head_kv: Optional[int] = None, head_dim: Optional[int] = None, bias: bool = True, causal: bool = True, softmax_scale: Optional[float] = None, layer_idx: Optional[int] = None, return_residual: bool = False, checkpointing: bool = False, ) -> None: super().__init__() # Rotary embedding self.rotary_dim = ( rotary_dim if rotary_dim is not None else getattr(config, "rotary_dim", 0) ) if self.rotary_dim > 0: rotary_cls = ( FlashRotaryEmbedding if config.flash_rotary else RotaryEmbedding ) if rotary_cls is None: rotary_cls = RotaryEmbedding rotary_kwargs = {} if rotary_cls is RotaryEmbedding: rotary_kwargs["max_position_embeddings"] = config.n_positions self.rotary_emb = rotary_cls( self.rotary_dim, base=rotary_base, scale_base=rotary_scale_base, device=device, **rotary_kwargs, ) # MLP self.n_head, self.n_head_kv, self.head_dim = _find_mha_dims( config, n_head=n_head, n_head_kv=n_head_kv, head_dim=head_dim ) op_size = self.head_dim * (self.n_head + 2 * self.n_head_kv) hidden_size = config.n_embd linear_cls = FusedDense if config.fused_dense else nn.Linear if linear_cls is None: linear_cls = nn.Linear self.Wqkv = linear_cls( hidden_size, op_size, bias=bias, device=device, dtype=dtype ) self.out_proj = linear_cls( hidden_size, hidden_size, bias=bias, device=device, dtype=dtype ) # Attention attn_cls = FlashSelfAttention if config.flash_attn else SelfAttention if attn_cls is None: attn_cls = SelfAttention cross_attn_cls = FlashCrossAttention if config.flash_attn else CrossAttention if cross_attn_cls is None: cross_attn_cls = CrossAttention self.inner_attn = attn_cls( causal=causal, softmax_scale=softmax_scale, attention_dropout=config.attn_pdrop, ) self.inner_cross_attn = cross_attn_cls( causal=causal, softmax_scale=softmax_scale, attention_dropout=config.attn_pdrop, ) self.flash_attn = config.flash_attn and attn_cls is FlashSelfAttention self.layer_idx = layer_idx self.return_residual = return_residual self.checkpointing = checkpointing def _forward_self_attn( self, x: torch.FloatTensor, key_padding_mask: Optional[torch.BoolTensor] ) -> torch.FloatTensor: qkv = self.Wqkv(x) qkv = rearrange( qkv, "... (three h d) -> ... three h d", three=3, d=self.head_dim ) if self.rotary_dim > 0: qkv = self.rotary_emb(qkv) if self.flash_attn: batch_size, seqlen = qkv.shape[0], qkv.shape[1] cu_seqlens, max_seqlen = None, None if key_padding_mask is not None: # If `key_padding_mask` is supplied, we need to unpad the input and retrieve # the `cu_seqlens` and `max_seqlen` to be used by `flash-attn` qkv, indices, cu_seqlens, max_seqlen = unpad_input( qkv, key_padding_mask ) if self.checkpointing: attn_output = torch.utils.checkpoint.checkpoint( self.inner_attn, qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen ) else: attn_output = self.inner_attn( qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen ).to(qkv.device) # If `key_padding_mask` is supplied, we need to pad the output back to the original shape return ( pad_input(attn_output, indices, batch_size, seqlen) if key_padding_mask is not None else attn_output ) if self.checkpointing: return torch.utils.checkpoint.checkpoint( self.inner_attn, qkv, key_padding_mask=key_padding_mask ) return self.inner_attn(qkv, key_padding_mask=key_padding_mask) def _forward_cross_attn( self, x: torch.FloatTensor, past_key_values: Optional[InferenceParams], key_padding_mask: Optional[torch.BoolTensor], ) -> torch.FloatTensor: batch_size = x.shape[0] qkv = self.Wqkv(x) q = qkv[..., : self.n_head * self.head_dim] q = rearrange(q, "... (h d) -> ... h d", d=self.head_dim) kv = qkv[..., self.n_head * self.head_dim :] kv = rearrange(kv, "... (two hkv d) -> ... two hkv d", two=2, d=self.head_dim) seqlen_offset = ( past_key_values.seqlen_offset if past_key_values is not None else 0 ) causal = None if seqlen_offset == 0 else False if self.rotary_dim > 0: q, kv = self.rotary_emb(q, kv=kv, seqlen_offset=seqlen_offset) if past_key_values is not None: kv = _update_kv_cache(kv, past_key_values, self.layer_idx) if self.flash_attn: batch_size, seqlen_q = q.shape[0], q.shape[1] seqlen_k = kv.shape[1] cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k = ( None, None, None, None, ) if key_padding_mask is not None: kv, _, cu_seqlens_k, max_seqlen_k = unpad_input(kv, key_padding_mask) if seqlen_q == 1: key_padding_mask = torch.ones(batch_size, 1, device=q.device) elif seqlen_q != seqlen_k: key_padding_mask = key_padding_mask[:, -seqlen_q:] q, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input( q, key_padding_mask ) if self.checkpointing: attn_output = torch.utils.checkpoint.checkpoint( self.inner_cross_attn, q, kv, causal=causal, cu_seqlens=cu_seqlens_q, max_seqlen=max_seqlen_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_k=max_seqlen_k, ) else: attn_output = self.inner_cross_attn( q, kv, causal=causal, cu_seqlens=cu_seqlens_q, max_seqlen=max_seqlen_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_k=max_seqlen_k, ) return ( pad_input(attn_output, indices_q, batch_size, max_seqlen_q) if key_padding_mask is not None else attn_output ) if self.checkpointing: return torch.utils.checkpoint.checkpoint( self.inner_cross_attn, q, kv, key_padding_mask=key_padding_mask, causal=causal, ) return self.inner_cross_attn( q, kv, key_padding_mask=key_padding_mask, causal=causal ) def forward( self, x: torch.FloatTensor, past_key_values: Optional[InferenceParams] = None, attention_mask: Optional[Union[torch.LongTensor, torch.BoolTensor]] = None, **kwargs, ) -> Tuple[torch.FloatTensor, torch.FloatTensor]: if attention_mask is not None: attention_mask = attention_mask.bool() else: attention_mask = None # MHA if self.n_head == self.n_head_kv: if past_key_values is None: # If `past_key_values` are not supplied, we run self-attention attn_output = self._forward_self_attn(x, attention_mask) else: # If `past_key_values` are supplied, it means that we might have cached values and # could take advantage of cross-attention attn_output = self._forward_cross_attn( x, past_key_values, attention_mask ) # MQA / GQA else: # Regardless of `past_key_values` being supplied or not, it always use cross-attention # because `q` and `kv` lengths might be different attn_output = self._forward_cross_attn(x, past_key_values, attention_mask) output = rearrange(attn_output, "... h d -> ... (h d)") output = self.out_proj(output) return output if not self.return_residual else (output, x) class ParallelBlock(nn.Module): """Parallel block. This block applies parallel mixer and MLP layers to the input (used in GPT-J and CodeGen). """ def __init__( self, config: PretrainedConfig, block_idx: Optional[int] = None, ) -> None: super().__init__() self.ln = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) self.resid_dropout = nn.Dropout(config.resid_pdrop) self.block_idx = block_idx self.mixer = MHA(config, layer_idx=block_idx) self.mlp = MLP(config) def forward( self, hidden_states: torch.FloatTensor, past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None, attention_mask: Optional[torch.BoolTensor] = None, **kwargs, ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.ln(hidden_states) attn_outputs = self.mixer( hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, ) if isinstance(attn_outputs, tuple): attn_outputs = attn_outputs[0] attn_outputs = self.resid_dropout(attn_outputs) feed_forward_hidden_states = self.resid_dropout(self.mlp(hidden_states)) hidden_states = attn_outputs + feed_forward_hidden_states + residual return hidden_states class CausalLMHead(nn.Module): """Causal Language Modeling head. Reference: Improving Language Understanding by Generative Pre-Training. https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf. """ def __init__(self, config: PretrainedConfig) -> None: super().__init__() self.ln = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) self.linear = nn.Linear(config.n_embd, config.vocab_size) def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: hidden_states = self.ln(hidden_states) logits = self.linear(hidden_states).to(torch.float32) return logits class CausalLMLoss(nn.Module): """Causal Language Modeling loss. Reference: Improving Language Understanding by Generative Pre-Training. https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf. """ def __init__(self, shift_labels: bool = True) -> None: super().__init__() self.shift_labels = shift_labels self.loss_fct = nn.CrossEntropyLoss() def forward( self, logits: torch.FloatTensor, labels: torch.LongTensor ) -> torch.FloatTensor: if self.shift_labels: logits = logits[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() loss = self.loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1)) return loss class PhiPreTrainedModel(PreTrainedModel): """Phi pre-trained model.""" config_class = PhiConfig base_model_prefix = "transformer" supports_gradient_checkpointing = False _no_split_modules = ["ParallelBlock"] def __init__(self, *inputs, **kwargs) -> None: super().__init__(*inputs, **kwargs) def _init_weights(self, module: nn.Module) -> None: if isinstance(module, (nn.Linear,)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): if module.bias is not None: module.bias.data.zero_() module.weight.data.fill_(1.0) def prepare_inputs_for_generation( self, input_ids: torch.LongTensor, past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None, attention_mask: Optional[Union[torch.LongTensor, torch.BoolTensor]] = None, **kwargs, ) -> Dict[str, Any]: if past_key_values is None or not ( isinstance(past_key_values, InferenceParams) ): past_key_values = InferenceParams( max_seqlen=self.config.n_positions, max_batch_size=input_ids.shape[0], seqlen_offset=0, batch_size_offset=0, key_value_memory_dict={}, lengths_per_sample=None, ) else: # Assume that `past_key_values` has cached all tokens up to the last token in `input_ids` past_key_values.seqlen_offset = input_ids.shape[1] - 1 input_ids = input_ids[:, -1].unsqueeze(-1) attention_mask = attention_mask[:, -1].unsqueeze(-1) return { "input_ids": input_ids, "past_key_values": past_key_values, "attention_mask": attention_mask, } class PhiModel(PhiPreTrainedModel): """Phi model.""" _keys_to_ignore_on_load_missing = [""] _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.mlp.(fc_in|fc_out)\.(weight|bias)"] def __init__(self, config: PhiConfig) -> None: super().__init__(config) self.embd = Embedding(config) self.h = nn.ModuleList( [ParallelBlock(config, block_idx=i) for i in range(config.n_layer)] ) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embd def set_input_embeddings(self, new_embeddings) -> None: self.embd.wte = new_embeddings def forward( self, input_ids: torch.LongTensor, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None, attention_mask: Optional[torch.BoolTensor] = None, ) -> torch.FloatTensor: if input_ids is not None: hidden_states = self.embd(input_ids) elif inputs_embeds is not None: hidden_states = inputs_embeds else: raise ValueError("You have to specify either input_ids or inputs_embeds") for layer in self.h: hidden_states = layer( hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, ) return hidden_states class PhiForCausalLM(PhiPreTrainedModel): """Phi for Causal Language Modeling.""" _keys_to_ignore_on_load_missing = [""] _keys_to_ignore_on_load_unexpected = [ r"transformer\.h\.\d+\.mlp.(fc_in|fc_out)\.(weight|bias)" ] supports_gradient_checkpointing = True _no_split_modules = ["ParallelBlock"] _skip_keys_device_placement = "past_key_values" def __init__(self, config: PhiConfig) -> None: super().__init__(config) self.transformer = PhiModel(config) self.lm_head = CausalLMHead(config) self.loss = CausalLMLoss() self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings) -> None: self.lm_head.linear = new_embeddings def forward( self, input_ids: torch.LongTensor, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None, attention_mask: Optional[torch.BoolTensor] = None, labels: Optional[torch.LongTensor] = None, **kwargs, ) -> CausalLMOutputWithPast: hidden_states = self.transformer( input_ids, inputs_embeds=inputs_embeds, past_key_values=past_key_values, attention_mask=attention_mask, ) lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: loss = self.loss(lm_logits, labels) return CausalLMOutputWithPast( loss=loss, logits=lm_logits, past_key_values=past_key_values ) @dataclass class LlavaCausalLMOutputWithPast(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None class LlavaMultiModalProjector(nn.Module): def __init__(self, config: LlavaConfig): super().__init__() self.linear_1 = nn.Linear( config.vision_embed_dim, config.text_config.n_embd * config.projector_tokens_num, bias=True, ) self.act = nn.GELU() self.linear_2 = nn.Linear( config.text_config.n_embd * config.projector_tokens_num, config.text_config.n_embd * config.projector_tokens_num, bias=True, ) self.projector_tokens_num = config.projector_tokens_num def forward(self, image_features): hidden_states = self.linear_1(image_features) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) hidden_states = hidden_states.reshape( hidden_states.shape[0], self.projector_tokens_num, int(hidden_states.shape[1] / self.projector_tokens_num), ) return hidden_states class LlavaPreTrainedModel(PreTrainedModel): config_class = LlavaConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["LlavaVisionAttention"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn_2 = True def __init__(self, config): super().__init__(config) def _init_weights(self, module): return @property def _supports_sdpa(self): """ Retrieve language_model's attribute to check whether the model supports SDPA or not. """ return self.language_model._supports_sdpa class LlavaForConditionalGeneration(LlavaPreTrainedModel): def __init__(self, config: LlavaConfig): super().__init__(config) clip_model = create_model(config.vision_tower_name) self.vision_model = clip_model.visual self.multi_modal_projector = LlavaMultiModalProjector(config) self.vocab_size = config.vocab_size self.language_model = PhiForCausalLM(config.text_config) self.pad_token_id = ( self.config.pad_token_id if self.config.pad_token_id is not None else -1 ) self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def get_output_embeddings(self): return self.language_model.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def set_decoder(self, decoder): self.language_model.transformer = decoder def get_decoder(self): return self.language_model.transformer def tie_weights(self): return self.language_model.tie_weights() def resize_token_embeddings( self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None ) -> nn.Embedding: model_embeds = self.language_model.resize_token_embeddings( new_num_tokens, pad_to_multiple_of ) # update vocab size self.config.text_config.vocab_size = model_embeds.num_embeddings self.config.vocab_size = model_embeds.num_embeddings self.vocab_size = model_embeds.num_embeddings return model_embeds def _merge_input_ids_with_image_features( self, image_features, inputs_embeds, input_ids, attention_mask, position_ids ): num_images, num_image_patches, embed_dim = image_features.shape batch_size, sequence_length = input_ids.shape left_padding = not torch.sum( input_ids[:, -1] == torch.tensor(self.pad_token_id) ) # 1. Create a mask to know where special image tokens are special_image_token_mask = input_ids == self.config.image_token_index num_special_image_tokens = torch.sum(special_image_token_mask, dim=-1) # Compute the maximum embed dimension max_embed_dim = ( num_special_image_tokens.max() * (num_image_patches - 1) ) + sequence_length batch_indices, non_image_indices = torch.where( input_ids != self.config.image_token_index ) # 2. Compute the positions where text should be written # Calculate new positions for text tokens in merged image-text sequence. # `special_image_token_mask` identifies image tokens. Each image token will be replaced by `nb_text_tokens_per_images - 1` text tokens. # `torch.cumsum` computes how each image token shifts subsequent text token positions. # - 1 to adjust for zero-based indexing, as `cumsum` inherently increases indices by one. new_token_positions = ( torch.cumsum((special_image_token_mask * (num_image_patches - 1) + 1), -1) - 1 ) nb_image_pad = max_embed_dim - 1 - new_token_positions[:, -1] if left_padding: new_token_positions += nb_image_pad[:, None] # offset for left padding text_to_overwrite = new_token_positions[batch_indices, non_image_indices] # 3. Create the full embedding, already padded to the maximum position final_embedding = torch.zeros( batch_size, max_embed_dim, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device, ) final_attention_mask = torch.zeros( batch_size, max_embed_dim, dtype=attention_mask.dtype, device=inputs_embeds.device, ) # In case the Vision model or the Language model has been offloaded to CPU, we need to manually # set the corresponding tensors into their correct target device. target_device = inputs_embeds.device batch_indices, non_image_indices, text_to_overwrite = ( batch_indices.to(target_device), non_image_indices.to(target_device), text_to_overwrite.to(target_device), ) attention_mask = attention_mask.to(target_device) # 4. Fill the embeddings based on the mask. If we have ["hey" "", "how", "are"] # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the image features final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[ batch_indices, non_image_indices ] final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[ batch_indices, non_image_indices ] # 5. Fill the embeddings corresponding to the images. Anything that is still zeros needs filling image_to_overwrite = torch.all(final_embedding == 0, dim=-1) image_to_overwrite &= image_to_overwrite.cumsum(-1) - 1 >= nb_image_pad[ :, None ].to(target_device) if image_to_overwrite.sum() != image_features.shape[:-1].numel(): raise ValueError( f"The input provided to the model are wrong. The number of image tokens is {torch.sum(special_image_token_mask)} while" f" the number of image given to the model is {num_images}. This prevents correct indexing and breaks batch generation." ) final_embedding[image_to_overwrite] = ( image_features.contiguous().reshape(-1, embed_dim).to(target_device) ) final_attention_mask |= image_to_overwrite position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_( (final_attention_mask == 0), 1 ) return final_embedding, final_attention_mask, position_ids def forward( self, input_ids: torch.LongTensor = None, pixel_values: torch.FloatTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, vision_feature_layer: Optional[int] = None, vision_feature_select_strategy: Optional[str] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LlavaCausalLMOutputWithPast]: output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if inputs_embeds is None: # 1. Extra the input embeddings inputs_embeds = self.get_input_embeddings()(input_ids) # 2. Merge text and images if pixel_values is not None and input_ids.shape[1] != 1: image_outputs = self.vision_model(pixel_values) image_features = self.multi_modal_projector(image_outputs) ( inputs_embeds, attention_mask, position_ids, ) = self._merge_input_ids_with_image_features( image_features, inputs_embeds, input_ids, attention_mask, position_ids, ) # if labels is None: # labels = torch.full_like( # attention_mask, self.config.ignore_index # ).to(torch.long) else: # In case input_ids.shape[1] == 1 & pixel_values==None & past_key_values != None, we are in the case of # generation with cache if ( past_key_values is not None and pixel_values is not None and input_ids.shape[1] == 1 ): # Retrieve the first layer to inspect the logits and mask out the hidden states # that are set to 0 first_layer_past_key_value = past_key_values[0][0][:, :, :, 0] # Sum all dimensions of head_dim (-2) to avoid random errors such as: https://github.com/huggingface/transformers/pull/28032#issuecomment-1863691941 batch_index, non_attended_tokens = torch.where( first_layer_past_key_value.float().sum(-2) == 0 ) # Get the target length target_seqlen = first_layer_past_key_value.shape[-1] + 1 extended_attention_mask = torch.ones( ( attention_mask.shape[0], target_seqlen - attention_mask.shape[1], ), dtype=attention_mask.dtype, device=attention_mask.device, ) # Zero-out the places where we don't need to attend extended_attention_mask[batch_index, non_attended_tokens] = 0 attention_mask = torch.cat( (attention_mask, extended_attention_mask), dim=1 ) position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1 outputs = self.language_model( input_ids=None, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = outputs[0] loss = None if labels is not None: # Shift so that tokens < n predict n if attention_mask is not None: shift_attention_mask = attention_mask[..., 1:] shift_logits = logits[..., :-1, :][ shift_attention_mask.to(logits.device) != 0 ].contiguous() shift_labels = labels[..., 1:][ shift_attention_mask.to(labels.device) != 0 ].contiguous() else: shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = nn.CrossEntropyLoss() loss = loss_fct( shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device), ) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return LlavaCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, attention_mask=None, **kwargs, ): if past_key_values is not None: if isinstance(past_key_values, InferenceParams): cache_length = past_key_values.max_seqlen past_length = past_key_values.seqlen_offset else: cache_length = past_length = past_key_values[0][0].shape[2] # Keep only the unprocessed tokens: # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where # some of the inputs are exclusivelly passed as part of the cache (e.g. when passing input_embeds as # input) if ( attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1] ): input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard # input_ids based on the past_length. elif past_length < input_ids.shape[1]: input_ids = input_ids[:, past_length:] # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. elif self.config.image_token_index in input_ids: input_ids = input_ids[:, input_ids.shape[1] - 1 :] # If the cache has seen more tokens than it can hold, then the cache has a size limit. Let's discard the # older attention values, as their corresponding values are not part of the input. if cache_length < past_length and attention_mask is not None: attention_mask = attention_mask[ :, -(cache_length + input_ids.shape[1]) : ] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1] :] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, "pixel_values": pixel_values, } ) return model_inputs def _reorder_cache(self, *args, **kwargs): return self.language_model._reorder_cache(*args, **kwargs)