| | """ |
| | NeoLLM model with FANformer, SeeDNorm, ResFormer, Learnable Multipliers, |
| | and full attention augmented with optional Momentum, MEA, and LUCID operators. |
| | """ |
| |
|
| | import math |
| | from typing import Any, Callable, Optional, Union, Tuple, List |
| |
|
| | import torch |
| | import torch.nn.functional as F |
| | from torch import nn |
| | from cut_cross_entropy import linear_cross_entropy |
| | from torch.utils.checkpoint import checkpoint |
| |
|
| | from transformers.activations import ACT2FN |
| | from transformers.generation import GenerationMixin |
| | from transformers.masking_utils import create_causal_mask |
| | from transformers.modeling_flash_attention_utils import FlashAttentionKwargs |
| | from transformers.modeling_layers import GradientCheckpointingLayer |
| | from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast |
| | from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update |
| | from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel |
| | from transformers.processing_utils import Unpack |
| | from transformers.utils import TransformersKwargs, logging |
| | from configuration_neollm import NeoLLMConfig |
| |
|
| | from transformers import AutoConfig, AutoModel, AutoModelForCausalLM |
| |
|
| | logger = logging.get_logger(__name__) |
| |
|
| |
|
| |
|
| | class ScalarMultiplier(nn.Module): |
| | """ |
| | Scalar Learnable Multiplier: W̃ = s·W |
| | |
| | From "Learnable Multipliers: Freeing the Scale of Language Model Matrix Layers": |
| | Allows the effective matrix norm ||W̃|| = s·||W|| to adapt to data, escaping the |
| | WD-noise equilibrium that constrains ||W|| ∝ √(η/λ). |
| | |
| | Args: |
| | initial_value: Initial multiplier value (default: 1.0 for identity) |
| | """ |
| | def __init__(self, initial_value: float = 1.0): |
| | super().__init__() |
| | self.multiplier = nn.Parameter(torch.tensor(initial_value)) |
| | |
| | def forward(self, x: torch.Tensor) -> torch.Tensor: |
| | return self.multiplier * x |
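| |
| | # Usage sketch (illustrative; shapes are assumptions, not from the paper): |
| | # wrapping a projection with a ScalarMultiplier lets the effective norm |
| | # ||s*W|| drift from ||W|| during training. |
| | # |
| | # proj = nn.Linear(512, 512, bias=False) |
| | # scale = ScalarMultiplier(initial_value=1.0)  # identity at init |
| | # y = scale(proj(torch.randn(2, 16, 512)))     # effective weight is s * W |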
| |
|
| |
|
| | class VectorMultiplier(nn.Module): |
| | """ |
| | Vector Learnable Multipliers: W̃ = diag(r)·W·diag(c) |
| | |
| | From "Learnable Multipliers: Freeing the Scale of Language Model Matrix Layers": |
| | Frees not only the overall matrix norm but also individual row/column norms from |
| | the WD-noise equilibrium, enabling richer feature scale diversity. |
| | |
| | Args: |
| | dim: Dimension size for the multiplier vector |
| | multiplier_type: Either "row" or "column" |
| | initial_value: Initial multiplier value (default: 1.0) |
| | """ |
| | def __init__(self, dim: int, multiplier_type: str = "row", initial_value: float = 1.0): |
| | super().__init__() |
| | self.multiplier_type = multiplier_type |
| | self.multiplier = nn.Parameter(torch.ones(dim) * initial_value) |
| | |
| | def forward(self, x: torch.Tensor) -> torch.Tensor: |
| | """ |
| | Apply the multiplier. |
| |
| | Both variants broadcast over the last dimension. Row multipliers scale |
| | output features and are applied after a matrix multiplication; column |
| | multipliers scale input features and are applied before it. The |
| | elementwise product is identical, so only the call site differs. |
| | """ |
| | return x * self.multiplier |
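| |
| | # Call-site sketch (dims assumed for illustration): |
| | # |
| | # row = VectorMultiplier(768, multiplier_type="row")     # scales output features |
| | # col = VectorMultiplier(512, multiplier_type="column")  # scales input features |
| | # x = col(torch.randn(2, 16, 512))    # applied before a 512 -> 768 matmul |
| | # y = row(x @ torch.randn(512, 768))  # applied after it |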
| |
|
| |
|
| | class LinearWithMultipliers(nn.Module): |
| | """ |
| | Linear layer with optional row and/or column learnable multipliers. |
| | |
| | Implements: y = (r ⊙ (W @ (c ⊙ x))) + b |
| | where r and c are learnable multipliers, W is the base weight matrix. |
| | |
| | From "Learnable Multipliers: Freeing the Scale of Language Model Matrix Layers": |
| | The base matrix W remains subject to WD-noise equilibrium with ||W|| ∝ √(η/λ), |
| | while multipliers r,c learn freely to adapt the effective scale to data. |
| | |
| | Args: |
| | in_features: Input feature dimension |
| | out_features: Output feature dimension |
| | bias: Whether to include bias term |
| | use_row_multiplier: Enable row (output) multipliers |
| | use_column_multiplier: Enable column (input) multipliers |
| | """ |
| | def __init__( |
| | self, |
| | in_features: int, |
| | out_features: int, |
| | bias: bool = True, |
| | use_row_multiplier: bool = False, |
| | use_column_multiplier: bool = False |
| | ): |
| | super().__init__() |
| | |
| | |
| | self.linear = nn.Linear(in_features, out_features, bias=bias) |
| | |
| | |
| | self.use_row_multiplier = use_row_multiplier |
| | self.use_column_multiplier = use_column_multiplier |
| | |
| | if use_row_multiplier: |
| | self.row_multiplier = VectorMultiplier(out_features, multiplier_type="row") |
| | |
| | if use_column_multiplier: |
| | self.column_multiplier = VectorMultiplier(in_features, multiplier_type="column") |
| | |
| | def forward(self, x: torch.Tensor) -> torch.Tensor: |
| | |
| | if self.use_column_multiplier: |
| | x = self.column_multiplier(x) |
| | |
| | |
| | x = self.linear(x) |
| | |
| | |
| | if self.use_row_multiplier: |
| | x = self.row_multiplier(x) |
| | |
| | return x |
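| |
| | # Minimal equivalence sketch (bias disabled for clarity; dims assumed): |
| | # the layer computes r * (W @ (c * x)), i.e. diag(r) @ W @ diag(c). |
| | # |
| | # layer = LinearWithMultipliers(4, 3, bias=False, |
| | #                               use_row_multiplier=True, use_column_multiplier=True) |
| | # x = torch.randn(2, 4) |
| | # w_eff = (layer.row_multiplier.multiplier[:, None] |
| | #          * layer.linear.weight |
| | #          * layer.column_multiplier.multiplier[None, :]) |
| | # torch.testing.assert_close(layer(x), x @ w_eff.T) |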
| |
|
| |
|
| | |
| |
|
| | class FANLayer(nn.Module): |
| | """ |
| | Fourier Analysis Network (FAN) layer for effective periodicity modeling. |
| | |
| | From "FANformer: Improving Large Language Models Through Effective Periodicity Modeling": |
| | FANLayer'(X) = [cos(W_p X) || sin(W_p X) || (W_p̄ X + B_p̄)] |
| | |
| | This is the modified version (FANLayer') without activation function that gave |
| | the best results in the paper. |
| | """ |
| | |
| | def __init__(self, hidden_size: int, fan_ratio: float = 0.25): |
| | super().__init__() |
| | self.hidden_size = hidden_size |
| | self.fan_ratio = fan_ratio |
| | |
| | |
| | |
| | output_dim = hidden_size + int(hidden_size * fan_ratio) |
| | self.p_output_dim = int(output_dim * fan_ratio) |
| | self.g_output_dim = output_dim - self.p_output_dim * 2 |
| | |
| | |
| | self.input_linear = nn.Linear( |
| | hidden_size, |
| | self.p_output_dim + self.g_output_dim, |
| | bias=True |
| | ) |
| | |
| | |
| | self._init_weights() |
| | |
| | def _init_weights(self): |
| | """Initialize weights following the paper's recommendations.""" |
| | nn.init.normal_(self.input_linear.weight, mean=0.0, std=0.02) |
| | if self.input_linear.bias is not None: |
| | nn.init.zeros_(self.input_linear.bias) |
| | |
| | def forward(self, x: torch.Tensor) -> torch.Tensor: |
| | """ |
| | Apply Fourier transformation to input. |
| | |
| | Args: |
| | x: Input tensor of shape (batch, seq_len, hidden_size) |
| | |
| | Returns: |
| | Transformed tensor with Fourier components concatenated |
| | Shape: (batch, seq_len, hidden_size + periodic_dim) |
| | """ |
| | |
| | pg = self.input_linear(x) |
| | p, g = torch.split(pg, [self.p_output_dim, self.g_output_dim], dim=-1) |
| | |
| | |
| | x_fan = torch.cat([torch.cos(p), torch.sin(p), g], dim=-1) |
| | |
| | return x_fan |
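| |
| | # Worked shape example (numbers assumed for illustration): |
| | # hidden_size = 512, fan_ratio = 0.25 |
| | #   output_dim   = 512 + int(512 * 0.25) = 640 |
| | #   p_output_dim = int(640 * 0.25)       = 160 |
| | #   g_output_dim = 640 - 2 * 160         = 320 |
| | # forward output: cos(p) || sin(p) || g -> 160 + 160 + 320 = 640 features. |
| | # |
| | # fan = FANLayer(512, fan_ratio=0.25) |
| | # assert fan(torch.randn(2, 8, 512)).shape == (2, 8, 640) |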
| |
|
| |
|
| | class LNS(nn.Module): |
| | """ |
| | LayerNorm Scaling (LNS) - applies scaling factor 1/√ℓ as described in the paper. |
| | |
| | From "The Curse of Depth in Large Language Models": |
| | h^(ℓ) = LayerNorm(h^(ℓ)) × (1/√ℓ) |
| | |
| | This prevents exponential variance growth in deeper layers. |
| | """ |
| | def __init__(self, layer_idx: int): |
| | super().__init__() |
| | |
| | |
| | self.layer_idx = max(layer_idx + 1, 1) |
| | self.scale = 1.0 / math.sqrt(self.layer_idx) |
| | |
| | def forward(self, x: torch.Tensor) -> torch.Tensor: |
| | return x * self.scale |
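| |
| | # Worked values: with 1-based depth l = layer_idx + 1, scale = 1 / sqrt(l): |
| | #   layer_idx = 0  -> l = 1  -> scale = 1.000 |
| | #   layer_idx = 3  -> l = 4  -> scale = 0.500 |
| | #   layer_idx = 15 -> l = 16 -> scale = 0.250 |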
| |
|
| |
|
| | class GPAS(nn.Module): |
| | """ |
| | Gradient-Preserving Activation Scaling (GPAS) |
| | Scales activations without penalizing gradients using stop-gradient. |
| | Applied in Pre-Norm style: after sub-layer output but before residual sum. |
| | """ |
| | def __init__(self, d_model: int): |
| | super().__init__() |
| | |
| | self.d_model = d_model |
| | self.alpha = nn.Parameter(torch.zeros(1)) |
| | |
| | def forward(self, x: torch.Tensor) -> torch.Tensor: |
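| | # Forward value is x * (1 - silu(alpha)), but the scaled term is built from |
| | # a detached copy of x, so d(output)/dx is the identity: activations shrink |
| | # while gradients pass through unchanged. With alpha = 0 at init, silu(0) = 0 |
| | # and the module starts as the identity. |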
| | x_detached = x.detach() |
| | scaled_component = F.silu(self.alpha) * x_detached |
| | x_scaled = x - scaled_component |
| | |
| | return x_scaled |
| |
|
| | class SeeDNorm(nn.Module): |
| | """ |
| | Self-Rescaled Dynamic Normalization (SeeDNorm) with dual dropout regularization. |
| |
| | SeeDNorm(x) = [tanh(x·β^T)·α + γ] ⊙ x/RMS(x) |
| |
| | Args: |
| | dim: Hidden dimension size |
| | eps: Small constant for numerical stability |
| | dropout_input: Dropout on input features for the dynamic mechanism (default: 0.01) |
| | dropout_hidden: Dropout on normalized hidden states (default: 0.01) |
| | """ |
| | |
| | def __init__( |
| | self, |
| | dim: int, |
| | eps: float = 1e-6, |
| | dropout_input: float = 0.01, |
| | dropout_hidden: float = 0.01, |
| | ): |
| | super().__init__() |
| | self.dim = dim |
| | self.eps = eps |
| | self.dropout_input = dropout_input |
| | self.dropout_hidden = dropout_hidden |
| | |
| | |
| | self.gamma = nn.Parameter(torch.ones(dim)) |
| | self.beta = nn.Parameter(torch.zeros(dim)) |
| | self.alpha = nn.Parameter(torch.ones(dim)) |
| | |
| | def _rms_norm(self, x: torch.Tensor) -> torch.Tensor: |
| | """Compute RMS normalization: x / RMS(x)""" |
| | return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) |
| | |
| | def forward(self, x: torch.Tensor) -> torch.Tensor: |
| | """ |
| | Apply Self-Rescaled Dynamic Normalization with dual dropout. |
| | |
| | Args: |
| | x: Input tensor of shape (..., dim) |
| | |
| | Returns: |
| | Normalized and dynamically scaled tensor of same shape |
| | """ |
| |
|
| | x_for_dynamic = F.dropout(x, p=self.dropout_input, training=self.training) |
| | rescale_factor = torch.tanh(torch.sum(x_for_dynamic * self.beta, |
| | dim=-1, keepdim=True)) |
| | |
| | |
| | dynamic_scale = rescale_factor * self.alpha + self.gamma |
| | |
| | |
| | x_normalized = self._rms_norm(x.float()) |
| | |
| | x_normalized = F.dropout(x_normalized, p=self.dropout_hidden, training=self.training) |
| | |
| | |
| | output = x_normalized * dynamic_scale.float() |
| | |
| | return output.type_as(x) |
| | |
| | def extra_repr(self) -> str: |
| | return (f"dim={self.dim}, eps={self.eps}, " |
| | f"dropout_input={self.dropout_input}, dropout_hidden={self.dropout_hidden}") |
| | |
| | class NeoLLMRotaryEmbedding(nn.Module): |
| | inv_freq: torch.Tensor |
| |
|
| | def __init__(self, config: NeoLLMConfig, device=None): |
| | super().__init__() |
| | self.max_seq_len_cached = config.max_position_embeddings |
| | self.original_max_seq_len = config.max_position_embeddings |
| | self.config = config |
| |
|
| | |
| | self.rope_type = "default" |
| | if hasattr(config, "rope_scaling") and config.rope_scaling is not None and isinstance(config.rope_scaling, dict): |
| | rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) |
| | if rope_type and rope_type in ROPE_INIT_FUNCTIONS: |
| | self.rope_type = rope_type |
| |
|
| | |
| | rope_init_fn = self.compute_default_rope_parameters |
| | if self.rope_type != "default": |
| | rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] |
| | |
| | inv_freq, self.attention_scaling = rope_init_fn(self.config, device) |
| |
|
| | self.register_buffer("inv_freq", inv_freq, persistent=False) |
| | self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False) |
| |
|
| | @staticmethod |
| | def compute_default_rope_parameters( |
| | config: Optional[NeoLLMConfig] = None, |
| | device: Optional["torch.device"] = None, |
| | seq_len: Optional[int] = None, |
| | ) -> tuple["torch.Tensor", float]: |
| | """ |
| | Computes the inverse frequencies according to the original RoPE implementation |
| | |
| | Args: |
| | config: The model configuration. |
| | device: The device to use for initialization of the inverse frequencies. |
| | seq_len: The current sequence length. Unused for this type of RoPE. |
| | |
| | Returns: |
| | Tuple of (torch.Tensor, float), containing the inverse frequencies for the RoPE |
| | embeddings and the post-processing scaling factor applied to the computed cos/sin. |
| | """ |
| | base = config.rope_theta |
| | dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads |
| | partial_rotary_factor = getattr(config, "partial_rotary_factor", 1.0) |
| | dim = int(dim * partial_rotary_factor) |
| | |
| | attention_scaling = 1.0 |
| | |
| | |
| | inv_freq = 1.0 / ( |
| | base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim) |
| | ) |
| | return inv_freq, attention_scaling |
| |
|
| | @torch.no_grad() |
| | @dynamic_rope_update |
| | def forward(self, x, position_ids): |
| | |
| | if position_ids.dim() == 1: |
| | position_ids = position_ids.unsqueeze(0) |
| | |
| | B = x.shape[0] |
| | if position_ids.shape[0] != B: |
| | |
| | position_ids = position_ids.expand(B, -1) |
| | |
| | device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" |
| | |
| | |
| | inv_freq = self.inv_freq.to(device=x.device, dtype=torch.float32) |
| | |
| | with torch.autocast(device_type=device_type, enabled=False): |
| | |
| | freqs = position_ids.to(dtype=torch.float32).unsqueeze(-1) * inv_freq.unsqueeze(0).unsqueeze(0) |
| | |
| | |
| | emb = torch.cat((freqs, freqs), dim=-1) |
| | cos = emb.cos() * self.attention_scaling |
| | sin = emb.sin() * self.attention_scaling |
| | |
| | return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) |
| |
|
| | def rotate_half(x): |
| | """Rotates half the hidden dims of the input.""" |
| | x1 = x[..., : x.shape[-1] // 2] |
| | x2 = x[..., x.shape[-1] // 2 :] |
| | return torch.cat((-x2, x1), dim=-1) |
| |
|
| |
|
| | def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): |
| | """Applies Rotary Position Embedding to the query and key tensors.""" |
| | cos = cos.unsqueeze(unsqueeze_dim) |
| | sin = sin.unsqueeze(unsqueeze_dim) |
| |
|
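| | # Partial-rotary support: only the first rotary_dim channels are rotated; any |
| | # remainder (partial_rotary_factor < 1.0) passes through and is re-concatenated. |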
| | rotary_dim = cos.shape[-1] |
| | q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:] |
| | k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:] |
| |
|
| | q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin) |
| | k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin) |
| |
|
| | q_embed = torch.cat([q_embed, q_pass], dim=-1) |
| | k_embed = torch.cat([k_embed, k_pass], dim=-1) |
| | return q_embed, k_embed |
| |
|
| |
|
| | def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: |
| | """ |
| | This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, |
| | num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) |
| | """ |
| | batch, num_key_value_heads, slen, head_dim = hidden_states.shape |
| | if n_rep == 1: |
| | return hidden_states |
| | hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) |
| | return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) |
| |
|
| |
|
| | def causal_first_difference(x: torch.Tensor) -> torch.Tensor: |
| | """Causal first difference along sequence length without Python loops.""" |
| | previous = F.pad(x[..., :-1, :], (0, 0, 1, 0)) |
| | return x - previous |
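| |
| | # Worked example along the sequence axis (first step differenced against an |
| | # implicit zero pad): |
| | #   [x1, x2, x3] -> [x1, x2 - x1, x3 - x2] |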
| |
|
| |
|
| | def rms_key_unit_norm(x: torch.Tensor, eps: float) -> torch.Tensor: |
| | """RMS-style key normalization used by the LUCID preconditioner.""" |
| | scale = math.sqrt(x.shape[-1]) |
| | return F.normalize(x.float(), p=2, dim=-1, eps=eps) * scale |
| |
|
| |
|
| | def infer_key_validity(attention_mask: Optional[torch.Tensor], seq_len: int, num_heads: int) -> Optional[torch.Tensor]: |
| | """Infer valid key positions from a square additive attention mask when available.""" |
| | if attention_mask is None or attention_mask.ndim != 4: |
| | return None |
| | if attention_mask.shape[-2] != seq_len or attention_mask.shape[-1] != seq_len: |
| | return None |
| |
|
| | diag = attention_mask.diagonal(dim1=-2, dim2=-1) |
| | valid = torch.isfinite(diag) & (diag == 0) |
| |
|
| | if valid.shape[1] == 1 and num_heads != 1: |
| | valid = valid.expand(-1, num_heads, -1) |
| | elif valid.shape[1] != num_heads: |
| | valid = valid[:, :1, :].expand(-1, num_heads, -1) |
| |
|
| | return valid |
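| |
| | # Sketch: for an additive causal mask (0 on attendable entries, -inf elsewhere), |
| | # a key position is valid exactly when its diagonal entry is a finite 0, which |
| | # is the test performed above. |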
| |
|
| |
|
| | def head_linear_compose(hidden_states: torch.Tensor, mixing_matrix: torch.Tensor) -> torch.Tensor: |
| | """Head-level linear composition over head axis without Python loops.""" |
| | return torch.einsum("bhtd,hk->bktd", hidden_states, mixing_matrix.to(device=hidden_states.device, dtype=hidden_states.dtype)) |
| |
|
| |
|
| | def build_mea_reconstruction_matrix(num_component_heads: int, num_output_heads: int) -> torch.Tensor: |
| | """Build an identity-preserving MEA reconstruction initializer from component heads to output heads.""" |
| | if num_component_heads <= 0 or num_output_heads <= 0: |
| | raise ValueError("MEA head counts must be positive") |
| | matrix = torch.zeros(num_component_heads, num_output_heads, dtype=torch.float32) |
| |
|
| | output_indices = torch.arange(num_output_heads, dtype=torch.long) |
| | component_indices = torch.div(output_indices * num_component_heads, num_output_heads, rounding_mode="floor") |
| | matrix[component_indices, output_indices] = 1.0 |
| | return matrix |
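| |
| | # Worked example: reconstructing 4 output heads from 2 component heads. |
| | #   output_indices    = [0, 1, 2, 3] |
| | #   component_indices = [0*2//4, 1*2//4, 2*2//4, 3*2//4] = [0, 0, 1, 1] |
| | # |
| | # build_mea_reconstruction_matrix(2, 4) |
| | # tensor([[1., 1., 0., 0.], |
| | #         [0., 0., 1., 1.]]) |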
| |
|
| |
|
| | class MEAHeadSeeDNorm(nn.Module): |
| | """ |
| | MEA head-level normalization using SeeDNorm grouped by KV structure (GQA-aware). |
| | |
| | In GQA, query heads that share the same K and V are structurally correlated — |
| | they received identical values and only differ in their Q projection. Normalizing |
| | them independently (as the original MEA paper assumes for MHA) ignores this |
| | correlation. Instead, we normalize per KV group: all query heads sharing the |
| | same KV head are flattened together and normalized as a single unit. |
| | |
| | With num_attention_heads=8 and num_key_value_heads=2 (num_kv_groups=4): |
| | - 2 independent SeeDNorm groups |
| | - each group covers 4 query heads × head_dim = 256 dims |
| | - SeeDNorm's dynamic scale operates over the group's full 256-dim space |
| | |
| | This allows SeeDNorm's dynamic scale to detect and compensate for |
| | LUCID decorrelation magnitude within each KV-coherent group of heads, |
| | while respecting the GQA structural dependency between heads. |
| | """ |
| |
|
| | def __init__(self, num_heads: int, head_dim: int, num_kv_groups: int, eps: float = 1e-6): |
| | super().__init__() |
| | self.num_heads = num_heads |
| | self.head_dim = head_dim |
| | self.num_kv_groups = num_kv_groups |
| | self.num_kv_heads = num_heads // num_kv_groups |
| | self.group_dim = num_kv_groups * head_dim |
| | |
| | self.norm = SeeDNorm(self.group_dim, eps=eps) |
| |
|
| | def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
| | batch, seq_len, num_heads, head_dim = hidden_states.shape |
| | if num_heads != self.num_heads or head_dim != self.head_dim: |
| | raise ValueError( |
| | f"MEAHeadSeeDNorm expected ({self.num_heads}, {self.head_dim}) heads, " |
| | f"received ({num_heads}, {head_dim})" |
| | ) |
| | |
| | |
| | grouped = hidden_states.reshape(batch, seq_len, self.num_kv_heads, self.group_dim) |
| | |
| | normed = self.norm(grouped) |
| | return normed.reshape(batch, seq_len, num_heads, head_dim) |
| |
|
| |
|
| | def eager_attention_forward( |
| | module: nn.Module, |
| | query: torch.Tensor, |
| | key: torch.Tensor, |
| | value: torch.Tensor, |
| | attention_mask: Optional[torch.Tensor], |
| | scaling: float, |
| | dropout: float = 0.0, |
| | **kwargs: Unpack[TransformersKwargs], |
| | ): |
| | key_states = repeat_kv(key, module.num_key_value_groups) |
| | value_states = repeat_kv(value, module.num_key_value_groups) |
| |
|
| | attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling |
| | if attention_mask is not None: |
| | causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] |
| | attn_weights = attn_weights + causal_mask |
| |
|
| | attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) |
| | attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) |
| | attn_output = torch.matmul(attn_weights, value_states) |
| | attn_output = attn_output.transpose(1, 2).contiguous() |
| |
|
| | return attn_output, attn_weights |
| |
|
| |
|
| | class NeoLLMAttention(nn.Module): |
| | """ |
| | Full attention with FANformer, SeeDNorm, ResFormer, Learnable Multipliers, |
| | optional post-RoPE Momentum attention, full MEA head-level composition over |
| | K/V, and optional LUCID value preconditioning. |
| | """ |
| |
|
| | def __init__(self, config: NeoLLMConfig, layer_idx: int): |
| | super().__init__() |
| | self.config = config |
| | self.layer_idx = layer_idx |
| | self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) |
| | self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads |
| | self.scaling = self.head_dim**-0.5 |
| | self.sqrt_head_dim = math.sqrt(self.head_dim) |
| | self.attention_dropout = config.attention_dropout |
| | self.is_causal = True |
| |
|
| | self.use_momentum_attention = getattr(config, "use_momentum_attention", False) |
| | self.momentum_gamma = float(getattr(config, "momentum_gamma", 0.0)) |
| | self.use_mea_attention = getattr(config, "use_mea_attention", False) |
| | self.mea_component_key_value_heads = int( |
| | getattr(config, "mea_component_key_value_heads", config.num_key_value_heads) |
| | ) |
| | self.mea_groupnorm_eps = float(getattr(config, "mea_groupnorm_eps", config.rms_norm_eps)) |
| | self.use_lucid_attention = getattr(config, "use_lucid_attention", False) |
| | self.lucid_attention_eps = float(getattr(config, "lucid_attention_eps", config.rms_norm_eps)) |
| |
|
| | self.fan_layer = FANLayer( |
| | hidden_size=config.hidden_size, |
| | fan_ratio=getattr(config, "fan_ratio", 0.125), |
| | ) |
| |
|
| | fan_output_dim = config.hidden_size + int(config.hidden_size * getattr(config, "fan_ratio", 0.125)) |
| |
|
| | self.q_proj = LinearWithMultipliers( |
| | fan_output_dim, |
| | config.num_attention_heads * self.head_dim * 2, |
| | bias=config.attention_bias, |
| | use_row_multiplier=True, |
| | use_column_multiplier=False, |
| | ) |
| | self.num_mea_component_heads = ( |
| | self.mea_component_key_value_heads if self.use_mea_attention else config.num_key_value_heads |
| | ) |
| | self.k_proj = nn.Linear( |
| | fan_output_dim, self.num_mea_component_heads * self.head_dim, bias=config.attention_bias |
| | ) |
| | self.v_proj = nn.Linear( |
| | fan_output_dim, self.num_mea_component_heads * self.head_dim, bias=config.attention_bias |
| | ) |
| | self.o_proj = LinearWithMultipliers( |
| | config.num_attention_heads * self.head_dim, |
| | config.hidden_size, |
| | bias=config.attention_bias, |
| | use_row_multiplier=True, |
| | use_column_multiplier=True, |
| | ) |
| |
|
| | self.q_norm = SeeDNorm(self.head_dim, eps=config.rms_norm_eps) |
| | self.k_norm = SeeDNorm(self.head_dim, eps=config.rms_norm_eps) |
| |
|
| | if self.use_mea_attention: |
| | self.mea_key_mix = nn.Parameter( |
| | build_mea_reconstruction_matrix(self.num_mea_component_heads, config.num_key_value_heads) |
| | ) |
| | self.mea_value_mix = nn.Parameter( |
| | build_mea_reconstruction_matrix(self.num_mea_component_heads, config.num_key_value_heads) |
| | ) |
| | self.mea_output_norm = MEAHeadSeeDNorm( |
| | num_heads=config.num_attention_heads, |
| | head_dim=self.head_dim, |
| | num_kv_groups=self.num_key_value_groups, |
| | eps=self.mea_groupnorm_eps, |
| | ) |
| | else: |
| | self.mea_key_mix = None |
| | self.mea_value_mix = None |
| | self.mea_output_norm = None |
| |
|
| | self.dropout = nn.Dropout(config.dropout_rate) |
| | self.lambda_1 = nn.Parameter(torch.tensor(0.5)) |
| | self.lambda_2 = nn.Parameter(torch.tensor(0.5)) |
| |
|
| | def _apply_momentum_attention( |
| | self, |
| | query_states: torch.Tensor, |
| | key_states: torch.Tensor, |
| | ) -> tuple[torch.Tensor, torch.Tensor]: |
| | """Apply post-RoPE momentum shear to Q and K only.""" |
| | if not self.use_momentum_attention or self.momentum_gamma == 0.0: |
| | return query_states, key_states |
| |
|
| | query_states = query_states + self.momentum_gamma * causal_first_difference(query_states) |
| | key_states = key_states + self.momentum_gamma * causal_first_difference(key_states) |
| | return query_states, key_states |
| |
|
| | def _apply_mea_head_mixing( |
| | self, |
| | key_states: torch.Tensor, |
| | value_states: torch.Tensor, |
| | ) -> tuple[torch.Tensor, torch.Tensor]: |
| | """Apply explicit KV head interaction before repeat_kv and attention.""" |
| | if not self.use_mea_attention: |
| | return key_states, value_states |
| |
|
| | mixed_keys = head_linear_compose(key_states, self.mea_key_mix).contiguous() |
| | mixed_values = head_linear_compose(value_states, self.mea_value_mix).contiguous() |
| | return mixed_keys, mixed_values |
| |
|
| | def _apply_lucid_preconditioner( |
| | self, |
| | key_states: torch.Tensor, |
| | value_states: torch.Tensor, |
| | attention_mask: Optional[torch.Tensor], |
| | ) -> torch.Tensor: |
| | """Compute LUCID preconditioned values via a batched lower-triangular solve.""" |
| | if not self.use_lucid_attention: |
| | return value_states |
| |
|
| | key_rn = rms_key_unit_norm(key_states, eps=self.lucid_attention_eps) |
| | precondition_logits = torch.matmul(key_rn, key_rn.transpose(-1, -2)) * self.scaling - self.sqrt_head_dim |
| | preconditioner = torch.tril(torch.exp(precondition_logits)) |
| |
|
| | key_validity = infer_key_validity(attention_mask, key_states.shape[-2], key_states.shape[1]) |
| | if key_validity is not None: |
| | pair_valid = key_validity.unsqueeze(-1) & key_validity.unsqueeze(-2) |
| | preconditioner = preconditioner * pair_valid.to(preconditioner.dtype) |
| |
|
| | eye = torch.eye( |
| | preconditioner.shape[-1], |
| | device=preconditioner.device, |
| | dtype=preconditioner.dtype, |
| | ).view(1, 1, preconditioner.shape[-1], preconditioner.shape[-1]) |
| | preconditioner = preconditioner * (1.0 - eye) + eye |
| |
|
| | lucid_values = torch.linalg.solve_triangular( |
| | preconditioner, |
| | value_states.float(), |
| | upper=False, |
| | unitriangular=True, |
| | ) |
| | return lucid_values.to(value_states.dtype).contiguous() |
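| |
| | # Math sketch: with unit-RMS keys K_hat, the preconditioner is the unit-diagonal |
| | # lower-triangular P = tril(exp(K_hat @ K_hat.T / sqrt(d) - sqrt(d))), and the |
| | # method returns V' = P^-1 @ V via a triangular solve, decorrelating each value |
| | # against the values at earlier (valid) key positions. |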
| |
|
| | def _apply_mea_output_norm(self, attn_output: torch.Tensor) -> torch.Tensor: |
| | """Apply MEA GQA-grouped SeeDNorm on the per-head attention output.""" |
| | if not self.use_mea_attention: |
| | return attn_output |
| | return self.mea_output_norm(attn_output) |
| |
|
| | def forward( |
| | self, |
| | hidden_states: torch.Tensor, |
| | position_embeddings: tuple[torch.Tensor, torch.Tensor], |
| | attention_mask: Optional[torch.Tensor] = None, |
| | first_layer_fan: Optional[torch.Tensor] = None, |
| | **kwargs: Unpack[FlashAttentionKwargs], |
| | ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]: |
| | """Forward pass for the full attention block.""" |
| | input_shape = hidden_states.shape[:-1] |
| |
|
| | hidden_states_fan = self.fan_layer(hidden_states) |
| | if first_layer_fan is not None: |
| | hidden_states_fan = self.lambda_1 * first_layer_fan + self.lambda_2 * hidden_states_fan |
| |
|
| | current_layer_fan = hidden_states_fan.clone() |
| | query_shape = (*input_shape, self.config.num_attention_heads, self.head_dim) |
| | key_value_shape = (*input_shape, self.num_mea_component_heads, self.head_dim) |
| |
|
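| | # q_proj emits 2 * head_dim channels per head: one half becomes the query, the |
| | # other half a per-channel sigmoid gate applied to the attention output just |
| | # before o_proj (assumption: this mirrors gated-attention designs). |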
| | query_states, gate = torch.chunk( |
| | self.q_proj(hidden_states_fan).view(*input_shape, self.config.num_attention_heads, self.head_dim * 2), 2, dim=-1 |
| | ) |
| | gate = gate.reshape(*input_shape, -1) |
| |
|
| | query_states = self.q_norm(query_states.view(query_shape)).transpose(1, 2) |
| | key_states = self.k_norm(self.k_proj(hidden_states_fan).view(key_value_shape)).transpose(1, 2) |
| | value_states = self.v_proj(hidden_states_fan).view(key_value_shape).transpose(1, 2) |
| |
|
| | cos, sin = position_embeddings |
| | query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) |
| | query_states, key_states = self._apply_momentum_attention(query_states, key_states) |
| | key_states, value_states = self._apply_mea_head_mixing(key_states, value_states) |
| | value_states = self._apply_lucid_preconditioner(key_states, value_states, attention_mask) |
| |
|
| | attention_interface: Callable = eager_attention_forward |
| | if self.config._attn_implementation != "eager": |
| | attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] |
| |
|
| | attn_output, attn_weights = attention_interface( |
| | self, |
| | query_states, |
| | key_states, |
| | value_states, |
| | attention_mask, |
| | dropout=0.0 if not self.training else self.attention_dropout, |
| | scaling=self.scaling, |
| | **kwargs, |
| | ) |
| |
|
| | attn_output = attn_output.reshape(*input_shape, -1, self.head_dim) |
| | attn_output = self._apply_mea_output_norm(attn_output) |
| | attn_output = attn_output.reshape(*input_shape, -1).contiguous() |
| | attn_output = attn_output * torch.sigmoid(gate) |
| | attn_output = self.o_proj(attn_output) |
| | attn_output = self.dropout(attn_output) |
| |
|
| | return attn_output, attn_weights, current_layer_fan |
| |
|
| | class PolyNorm(nn.Module): |
| | """Polynomial activation: a learnable mix of RMS-normalized x, x**2, and x**3.""" |
| | def __init__(self, eps=1e-6): |
| | super().__init__() |
| | self.weight = nn.Parameter(torch.ones(3) / 3) |
| | self.bias = nn.Parameter(torch.zeros(1)) |
| | self.eps = eps |
| |
|
| | def _norm(self, x): |
| | return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) |
| |
|
| | def forward(self, x): |
| | return self.weight[0] * self._norm(x**3) + self.weight[1] * self._norm(x**2) + self.weight[2] * self._norm(x) + self.bias |
| |
|
| |
|
| | class NeoLLMMLP(nn.Module): |
| | """ |
| | MLP with FANformer integration for featural periodicity modeling and |
| | Learnable Multipliers for adaptive scale control. |
| | |
| | This captures periodicities in the feature space (semantic/embedding dimensions) |
| | complementary to the relational periodicities captured by attention mechanisms. |
| | Works in conjunction with ResFormer for comprehensive information flow. |
| | |
| | Learnable Multipliers placement (from "Learnable Multipliers" paper Appendix C): |
| | - gate_proj: row multipliers only (controls gating mechanism scale) |
| | - up_proj: no multipliers (avoids redundancy with down_proj) |
| | - down_proj: row + column multipliers (maximally expressive output scaling) |
| | """ |
| | def __init__(self, config): |
| | super().__init__() |
| | self.config = config |
| | self.hidden_size = config.hidden_size |
| | self.intermediate_size = config.intermediate_size |
| | |
| | |
| | self.fan_layer = FANLayer( |
| | hidden_size=config.hidden_size, |
| | fan_ratio=getattr(config, 'fan_ratio_ffn', 0.0625) |
| | ) |
| | |
| | |
| | fan_output_dim = config.hidden_size + int(config.hidden_size * getattr(config, 'fan_ratio_ffn', 0.0625)) |
| | |
| | |
| | |
| | self.gate_proj = LinearWithMultipliers( |
| | fan_output_dim, |
| | self.intermediate_size, |
| | bias=False, |
| | use_row_multiplier=True, |
| | use_column_multiplier=False |
| | ) |
| | |
| | |
| | self.up_proj = nn.Linear(fan_output_dim, self.intermediate_size, bias=False) |
| | |
| | |
| | self.down_proj = LinearWithMultipliers( |
| | self.intermediate_size, |
| | self.hidden_size, |
| | bias=False, |
| | use_row_multiplier=True, |
| | use_column_multiplier=True |
| | ) |
| | |
| | self.act_fn = PolyNorm() |
| | |
| | |
| | self.dropout = nn.Dropout(config.dropout_rate) |
| |
|
| | def forward(self, x): |
| | |
| | x_fan = self.fan_layer(x) |
| | |
| | |
| | gate_output = self.act_fn(self.gate_proj(x_fan)) |
| | up_output = self.up_proj(x_fan) |
| | hidden = gate_output * up_output |
| | hidden = self.dropout(hidden) |
| | return self.down_proj(hidden) |
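| |
| | # Data-flow sketch: x -> FANLayer features -> PolyNorm(gate_proj(x_fan)) * |
| | # up_proj(x_fan) -> dropout -> down_proj; a GLU-style MLP where PolyNorm |
| | # replaces the usual SiLU gate activation. |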
| |
|
| | class NeoLLMDecoderLayer(GradientCheckpointingLayer): |
| | """ |
| | Decoder layer with standard residual connections. |
| | |
| | Architecture: |
| | 1. Pre-norm (SeeDNorm) → LNS scaling → self-attention with ResFormer and Learnable Multipliers |
| | 2. Standard residual connection (plain sum) |
| | 3. GPAS activation scaling |
| | 4. Pre-norm (SeeDNorm) → LNS scaling → MLP with FANformer and Learnable Multipliers |
| | 5. Standard residual connection (plain sum) |
| | 6. GPAS activation scaling |
| | """ |
| | |
| | def __init__(self, config: NeoLLMConfig, layer_idx: int): |
| | super().__init__() |
| | self.hidden_size = config.hidden_size |
| | self.layer_idx = layer_idx |
| |
|
| | |
| | self.self_attn = NeoLLMAttention(config, layer_idx) |
| |
|
| | |
| | self.mlp = NeoLLMMLP(config) |
| |
|
| | |
| | self.input_layernorm = SeeDNorm(config.hidden_size, eps=config.rms_norm_eps) |
| | self.post_attention_layernorm = SeeDNorm(config.hidden_size, eps=config.rms_norm_eps) |
| | |
| | |
| | self.lns_attn = LNS(layer_idx) |
| | self.lns_mlp = LNS(layer_idx) |
| | |
| | |
| | self.gpas_attn = GPAS(config.hidden_size) |
| | self.gpas_mlp = GPAS(config.hidden_size) |
| | |
| | |
| | self.current_layer_fan = None |
| |
|
| | def forward( |
| | self, |
| | hidden_states: torch.Tensor, |
| | position_embeddings: tuple[torch.Tensor, torch.Tensor], |
| | attention_mask: Optional[torch.Tensor] = None, |
| | first_layer_fan: Optional[torch.Tensor] = None, |
| | output_attentions: Optional[bool] = False, |
| | **kwargs: Unpack[FlashAttentionKwargs], |
| | ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: |
| | |
| | |
| | |
| | residual = hidden_states |
| |
|
| | |
| | hidden_states = self.input_layernorm(hidden_states) |
| | |
| | |
| | hidden_states = self.lns_attn(hidden_states) |
| |
|
| | |
| | |
| | hidden_states, attn_weights, self.current_layer_fan = self.self_attn( |
| | hidden_states=hidden_states, |
| | attention_mask=attention_mask, |
| | position_embeddings=position_embeddings, |
| | first_layer_fan=first_layer_fan, |
| | **kwargs, |
| | ) |
| |
|
| | |
| | hidden_states = residual + hidden_states |
| | |
| | |
| | hidden_states = self.gpas_attn(hidden_states) |
| |
|
| | |
| | |
| | |
| | residual = hidden_states |
| | hidden_states = self.post_attention_layernorm(hidden_states) |
| | |
| | |
| | hidden_states = self.lns_mlp(hidden_states) |
| | |
| | |
| | hidden_states = self.mlp(hidden_states) |
| | |
| | |
| | hidden_states = residual + hidden_states |
| | |
| | |
| | hidden_states = self.gpas_mlp(hidden_states) |
| |
|
| | outputs = (hidden_states,) |
| | if output_attentions: |
| | outputs += (attn_weights,) |
| |
|
| | return outputs |
| |
|
| |
|
| | class NeoLLMPreTrainedModel(PreTrainedModel): |
| | """ |
| | Base class for NeoLLM models with custom weight initialization. |
| | |
| | Handles initialization for: |
| | - NeoLLMAttention (ResFormer lambda parameters) |
| | - GPAS (Gradient-Preserving Activation Scaling) |
| | - FANLayer (Fourier Analysis Network) |
| | - SeeDNorm (Self-Rescaled Dynamic Normalization) |
| | - Learnable Multipliers (ScalarMultiplier, VectorMultiplier) |
| | """ |
| | config: NeoLLMConfig |
| | base_model_prefix = "model" |
| | supports_gradient_checkpointing = True |
| | _no_split_modules = ["NeoLLMDecoderLayer"] |
| | _supports_flash_attn_2 = True |
| | _supports_sdpa = True |
| | _is_stateful = True |
| |
|
| | def _init_weights(self, module): |
| | """ |
| | Initialize weights for all custom modules in NeoLLM. |
| | |
| | Strategy: |
| | - Standard layers (Linear, Embedding): handled by parent class |
| | - Custom modules: specialized initialization per component |
| | - Learnable Multipliers: initialized to 1.0 for identity transformation |
| | """ |
| | super()._init_weights(module) |
| | |
| | if isinstance(module, NeoLLMAttention): |
| | |
| | |
| | |
| | if hasattr(module, 'lambda_1'): |
| | module.lambda_1.data.fill_(0.5) |
| | if hasattr(module, 'lambda_2'): |
| | module.lambda_2.data.fill_(0.5) |
| | if hasattr(module, 'mea_key_mix') and module.mea_key_mix is not None: |
| | module.mea_key_mix.data.copy_( |
| | build_mea_reconstruction_matrix( |
| | module.mea_key_mix.shape[0], |
| | module.mea_key_mix.shape[1], |
| | ).to(device=module.mea_key_mix.device, dtype=module.mea_key_mix.dtype) |
| | ) |
| | if hasattr(module, 'mea_value_mix') and module.mea_value_mix is not None: |
| | module.mea_value_mix.data.copy_( |
| | build_mea_reconstruction_matrix( |
| | module.mea_value_mix.shape[0], |
| | module.mea_value_mix.shape[1], |
| | ).to(device=module.mea_value_mix.device, dtype=module.mea_value_mix.dtype) |
| | ) |
| | |
| | elif isinstance(module, GPAS): |
| | |
| | |
| | module.alpha.data.fill_(0.0) |
| | |
| | elif isinstance(module, FANLayer): |
| | |
| | |
| | pass |
| | |
| | elif isinstance(module, SeeDNorm): |
| | |
| | |
| | |
| | |
| | pass |
| | |
| | elif isinstance(module, (ScalarMultiplier, VectorMultiplier)): |
| | |
| | |
| | |
| | if hasattr(module, 'multiplier'): |
| | module.multiplier.data.fill_(1.0) |
| |
|
| | class NeoLLMModel(NeoLLMPreTrainedModel): |
| | """ |
| | NeoLLM base model with transformer decoder architecture. |
| | |
| | Note on embeddings and weight tying: This model uses weight tying between |
| | embed_tokens and lm_head (shared weights). Following "Learnable Multipliers" |
| | paper analysis, we do NOT add multipliers to embeddings because: |
| | |
| | 1. Weight tying creates conflicting gradient paths: multipliers would scale |
| | gradients from embedding lookup but not from lm_head projection, causing |
| | the multiplier to receive incomplete optimization signals. |
| | |
| | 2. The paper explicitly warns against multipliers in lm_head (creates shortcuts |
| | for learning marginal token distribution), and with weight tying this |
| | restriction propagates to embeddings. |
| | |
| | 3. Compensating mechanisms provide scale adaptation immediately after embedding: |
| | - First layer attention has multipliers in Q/O projections |
| | - FANformer transforms the representation space |
| | - SeeDNorm provides input-dependent dynamic scaling |
| | - ResFormer propagates first-layer features with learnable scaling |
| | """ |
| | |
| | def __init__(self, config: NeoLLMConfig): |
| | super().__init__(config) |
| | |
| | |
| | |
| | |
| | self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) |
| | |
| | |
| | self.layers = nn.ModuleList( |
| | [NeoLLMDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] |
| | ) |
| | |
| | |
| | self.norm = SeeDNorm(config.hidden_size, eps=config.rms_norm_eps) |
| | self.rotary_emb = NeoLLMRotaryEmbedding(config=config) |
| | self.gradient_checkpointing = False |
| | |
| | |
| | self.first_layer_fan = None |
| | |
| | |
| | self.post_init() |
| |
|
| | def forward( |
| | self, |
| | input_ids: Optional[torch.LongTensor] = None, |
| | attention_mask: Optional[torch.Tensor] = None, |
| | position_ids: Optional[torch.LongTensor] = None, |
| | inputs_embeds: Optional[torch.FloatTensor] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | output_attentions: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | **kwargs: Unpack[TransformersKwargs], |
| | ) -> BaseModelOutputWithPast: |
| | output_hidden_states = ( |
| | output_hidden_states if output_hidden_states is not None |
| | else self.config.output_hidden_states |
| | ) |
| | output_attentions = ( |
| | output_attentions if output_attentions is not None |
| | else self.config.output_attentions |
| | ) |
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| |
|
| | if (input_ids is None) ^ (inputs_embeds is not None): |
| | raise ValueError("You must specify exactly one of input_ids or inputs_embeds") |
| |
|
| | if inputs_embeds is None: |
| | |
| | |
| | |
| | |
| | inputs_embeds = self.embed_tokens(input_ids) |
| |
|
| | if position_ids is None: |
| | position_ids = torch.arange(0, inputs_embeds.shape[1], device=inputs_embeds.device).unsqueeze(0) |
| |
|
| | cache_position = torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) |
| | causal_mask = create_causal_mask( |
| | config=self.config, |
| | input_embeds=inputs_embeds, |
| | attention_mask=attention_mask, |
| | cache_position=cache_position, |
| | past_key_values=None, |
| | position_ids=position_ids, |
| | ) |
| |
|
| | hidden_states = inputs_embeds |
| | all_hidden_states = () if output_hidden_states else None |
| | all_attentions = () if output_attentions else None |
| |
|
| | |
| | position_embeddings = self.rotary_emb(hidden_states, position_ids) |
| |
|
| | |
| | self.first_layer_fan = None |
| |
|
| | for decoder_layer in self.layers[: self.config.num_hidden_layers]: |
| | if output_hidden_states: |
| | all_hidden_states = all_hidden_states + (hidden_states,) |
| |
|
| | layer_outputs = decoder_layer( |
| | hidden_states, |
| | position_embeddings=position_embeddings, |
| | attention_mask=causal_mask, |
| | first_layer_fan=self.first_layer_fan, |
| | output_attentions=output_attentions, |
| | **kwargs, |
| | ) |
| | |
| | hidden_states = layer_outputs[0] |
| | |
| | if output_attentions: |
| | all_attentions = all_attentions + (layer_outputs[1],) |
| | |
| | |
| | if self.first_layer_fan is None and hasattr(decoder_layer, 'current_layer_fan'): |
| | self.first_layer_fan = decoder_layer.current_layer_fan |
| |
|
| | |
| | hidden_states = self.norm(hidden_states) |
| |
|
| | if output_hidden_states: |
| | all_hidden_states = all_hidden_states + (hidden_states,) |
| |
|
| | if not return_dict: |
| | return tuple(v for v in [hidden_states, None, all_hidden_states, all_attentions] if v is not None) |
| |
|
| | return BaseModelOutputWithPast( |
| | last_hidden_state=hidden_states, |
| | past_key_values=None, |
| | hidden_states=all_hidden_states, |
| | attentions=all_attentions, |
| | ) |
| |
|
| |
|
| | @torch.compiler.disable |
| | def compute_cce_loss(hidden_states, labels, lm_head_weight, lm_head_bias=None, pad_token_id=None): |
| | """ |
| | CCE loss computation excluded from compilation. |
| | Preprocesses labels to eliminate torch.compile warnings. |
| | """ |
| | |
| | processed_labels = labels.to(hidden_states.device) |
| | |
| | |
| | if pad_token_id is not None: |
| | processed_labels = torch.where( |
| | processed_labels == pad_token_id, |
| | torch.tensor(-100, dtype=processed_labels.dtype, device=processed_labels.device), |
| | processed_labels |
| | ) |
| | |
| | return linear_cross_entropy( |
| | hidden_states, |
| | lm_head_weight, |
| | processed_labels, |
| | bias=lm_head_bias, |
| | shift=1, |
| | impl="cce_kahan_full_c", |
| | reduction="mean" |
| | ) |
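| |
| | # Note: shift=1 asks linear_cross_entropy to align hidden_states[..., t, :] with |
| | # labels[..., t + 1] internally, so callers pass unshifted labels alongside the |
| | # lm_head weight (fused logits + cross-entropy without materializing logits). |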
| |
|
| |
|
| | class NeoLLMForCausalLM(NeoLLMPreTrainedModel, GenerationMixin): |
| | """ |
| | Causal Language Model with NeoLLM architecture. |
| | |
| | Supports ResFormer with standard residuals and optional StackMemory. |
| | |
| | Note on LM head: Following "Learnable Multipliers" paper recommendations, |
| | the output projection (lm_head) does NOT include learnable multipliers. |
| | """ |
| | _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"} |
| | |
| | def __init__(self, config): |
| | super().__init__(config) |
| | self.model = NeoLLMModel(config) |
| | self.vocab_size = config.vocab_size |
| | |
| | |
| | self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) |
| | |
| | self.post_init() |
| |
|
| |
|
| | def forward( |
| | self, |
| | input_ids: Optional[torch.LongTensor] = None, |
| | attention_mask: Optional[torch.Tensor] = None, |
| | position_ids: Optional[torch.LongTensor] = None, |
| | inputs_embeds: Optional[torch.FloatTensor] = None, |
| | labels: Optional[torch.LongTensor] = None, |
| | logits_to_keep: Union[int, torch.Tensor] = 0, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | **kwargs: Unpack[TransformersKwargs], |
| | ) -> CausalLMOutputWithPast: |
| | outputs: BaseModelOutputWithPast = self.model( |
| | input_ids=input_ids, |
| | attention_mask=attention_mask, |
| | position_ids=position_ids, |
| | inputs_embeds=inputs_embeds, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | **kwargs, |
| | ) |
| | |
| | hidden_states = outputs.last_hidden_state |
| | |
| | |
| | if labels is not None: |
| | loss = compute_cce_loss( |
| | hidden_states, |
| | labels, |
| | self.lm_head.weight, |
| | getattr(self.lm_head, 'bias', None), |
| | self.config.pad_token_id |
| | ) |
| | logits = None |
| | else: |
| | |
| | slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep |
| | logits = self.lm_head(hidden_states[:, slice_indices, :]) |
| | loss = None |
| | |
| | return CausalLMOutputWithPast( |
| | loss=loss, |
| | logits=logits, |
| | past_key_values=None, |
| | hidden_states=outputs.hidden_states, |
| | attentions=outputs.attentions, |
| | ) |
| |
|
| | |
| |
|
| | __all__ = [ |
| | "NeoLLMForCausalLM", |
| | "NeoLLMModel", |
| | "NeoLLMPreTrainedModel", |
| | "NeoLLMConfig", |
| | "FANLayer", |
| | "SeeDNorm", |
| | "ScalarMultiplier", |
| | "VectorMultiplier", |
| | "LinearWithMultipliers", |
| | "MEAHeadRMSNorm", |
| | ] |
| |
|
| | |
| | AutoConfig.register("neollm", NeoLLMConfig) |
| | AutoModel.register(NeoLLMConfig, NeoLLMModel) |
| | AutoModelForCausalLM.register(NeoLLMConfig, NeoLLMForCausalLM) |