from typing import Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint
from torch.nn import CrossEntropyLoss

from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from .configuration_nano import NanoConfig
from transformers.models.llama.modeling_llama import (
    LlamaDynamicNTKScalingRotaryEmbedding,
    LlamaLinearScalingRotaryEmbedding,
    LlamaRMSNorm,
    LlamaRotaryEmbedding,
)

logger = logging.get_logger(__name__)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
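    """Apply rotary position embeddings to the query and key tensors.

    Mirrors the helper of the same name in `transformers`' Llama modeling code:
    `cos`/`sin` are gathered at `position_ids` and broadcast over the head
    dimension via `unsqueeze_dim`.
    """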
cos = cos[position_ids].unsqueeze(unsqueeze_dim)
sin = sin[position_ids].unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
class NanoAttention(nn.Module):
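    """Multi-head attention with rotary position embeddings.

    Depending on the config this uses either a fused `qkv` projection or
    separate `query`/`key`/`value` projections (the latter also supports a
    key/value input of a different width via `kv_hidden_size`). Attention is
    computed with `F.scaled_dot_product_attention`.
    """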
def __init__(self, config):
super().__init__()
self.config = config
self.head_dim = config.hidden_size // config.num_attention_heads
        assert (
            self.head_dim * config.num_attention_heads == config.hidden_size
        ), "hidden_size must be divisible by num_attention_heads"
self.use_bias = config.use_bias
if not config.combined_qkv or config.kv_hidden_size is not None:
self.query = nn.Linear(
config.hidden_size, config.hidden_size, bias=self.use_bias
)
self.key = nn.Linear(
config.hidden_size
if not config.kv_hidden_size
else config.kv_hidden_size,
config.hidden_size,
bias=self.use_bias,
)
self.value = nn.Linear(
config.hidden_size
if not config.kv_hidden_size
else config.kv_hidden_size,
config.hidden_size,
bias=self.use_bias,
)
else:
self.qkv = nn.Linear(
config.hidden_size, config.hidden_size * 3, bias=self.use_bias
)
self.out = nn.Linear(config.hidden_size, config.hidden_size, bias=self.use_bias)
self._init_rope()
def _init_rope(self):
if self.config.rope_scaling is None:
self.rotary_emb = LlamaRotaryEmbedding(
self.head_dim,
max_position_embeddings=self.config.max_position_embeddings,
base=self.config.rope_theta,
)
else:
scaling_type = self.config.rope_scaling["type"]
scaling_factor = self.config.rope_scaling["factor"]
if scaling_type == "linear":
self.rotary_emb = LlamaLinearScalingRotaryEmbedding(
self.head_dim,
max_position_embeddings=self.config.max_position_embeddings,
scaling_factor=scaling_factor,
base=self.config.rope_theta,
)
elif scaling_type == "dynamic":
self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(
self.head_dim,
max_position_embeddings=self.config.max_position_embeddings,
scaling_factor=scaling_factor,
base=self.config.rope_theta,
)
else:
raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
def forward(self, x0, x1=None, causal=False, mask=None, position_ids=None, use_cache=True, layer_past=None):
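        """Attend over `x0`, or over `x1` for keys/values when it is given.

        Returns `(attn_output, present)`, where `present` is the `(key, value)`
        cache when `use_cache` is set and `None` otherwise.
        """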
batch_size = x0.size(0)
def split_heads(x):
return x.view(
batch_size, -1, self.config.num_attention_heads, self.head_dim
).transpose(1, 2)
        if not self.config.combined_qkv or self.config.kv_hidden_size is not None:
q = split_heads(self.query(x0))
k = split_heads(self.key(x1) if x1 is not None else self.key(x0))
v = split_heads(
self.value(x1 if x1 is not None else x0)
)
else:
q, k, v = self.qkv(x0).chunk(3,-1)
q = split_heads(q)
k = split_heads(k)
v = split_heads(v)
        # Rotary embeddings are applied to the new queries/keys first; cached keys
        # from `layer_past` were already rotated when they were produced.
        kv_seq_len = k.shape[-2]
        if layer_past is not None:
            kv_seq_len += layer_past[0].shape[-2]
        cos, sin = self.rotary_emb(v, seq_len=kv_seq_len)
        if self.config.full_adaptation_type != "no":
            position_ids = position_ids.repeat_interleave(x0.shape[1] // position_ids.shape[-1], dim=1)
        q, k = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
        if layer_past is not None:
            past_key, past_value = layer_past
            k = torch.cat((past_key, k), dim=-2)
            v = torch.cat((past_value, v), dim=-2)
        present = (k, v) if use_cache else None
        # `is_causal` assumes queries and keys cover the same positions, which only
        # holds without a cache; with a cache (token-by-token decoding) the new
        # query may attend to every cached position. The additive padding `mask`
        # is not applied here; only causal masking is used.
        is_causal = causal and q.shape[-2] == k.shape[-2]
        attn_output = F.scaled_dot_product_attention(
            q, k, v, attn_mask=None, dropout_p=0.0, is_causal=is_causal
        )
attn_output = (
attn_output.transpose(1, 2)
.contiguous()
.view(batch_size, -1, self.config.hidden_size)
)
return self.out(attn_output), present
class NanoGLU(nn.Module):
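    """Gated feed-forward network (Llama-style MLP):
    `down_proj(act_fn(gate_proj(x)) * up_proj(x))`.
    """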
def __init__(self, config):
super().__init__()
self.config = config
self.gate_proj = nn.Linear(
config.hidden_size, config.intermediate_size, bias=False
)
self.up_proj = nn.Linear(
config.hidden_size, config.intermediate_size, bias=False
)
self.down_proj = nn.Linear(
config.intermediate_size, config.hidden_size, bias=False
)
self.act_fn = ACT2FN[config.activation_function]
def forward(self, x):
return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
class NanoBlock(nn.Module):
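    """Pre-norm transformer block.

    With `config.ffn == "llamalike"` attention and the GLU feed-forward network
    are applied sequentially; otherwise both are applied in parallel from the
    same input. With `config.residual_alpha` each residual branch is scaled by a
    learned scalar initialised at zero.
    """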
def __init__(self, config):
super().__init__()
self.config = config
self.attn = NanoAttention(config)
self.ffn = NanoGLU(config)
ln_class = LlamaRMSNorm if config.layernorm=="llamarmsnorm" else nn.LayerNorm
self.ln1 = ln_class(config.hidden_size, eps=config.layer_norm_epsilon)
self.ln2 = ln_class(config.hidden_size, eps=config.layer_norm_epsilon)
if config.residual_alpha:
self.ffn_a = nn.Parameter(torch.tensor(0.))
self.attn_a = nn.Parameter(torch.tensor(0.))
else:
self.ffn_a = 1
self.attn_a = 1
def forward(self, x, mask=None, position_ids=None, use_cache=True, layer_past=None):
if self.config.ffn == "llamalike":
residual = x
x = self.ln1(x)
attn_out, attn_outs = self.attn(x, causal=True, mask=mask, position_ids=position_ids, use_cache=use_cache, layer_past=layer_past)
x = residual + attn_out * self.attn_a
residual = x
x = self.ln2(x)
x = self.ffn(x)
x = residual + x * self.ffn_a
else: # ffn == "parallel"
attn_in = self.ln1(x)
ffn_in = self.ln2(x)
attn_out, attn_outs = self.attn(attn_in, causal=True, mask=mask, position_ids=position_ids, use_cache=use_cache, layer_past=layer_past)
ffn_out = self.ffn(ffn_in)
x = x + attn_out * self.attn_a + ffn_out * self.ffn_a
if not use_cache: attn_outs = None
return (x, attn_outs)
class NanoPreTrainedModel(PreTrainedModel):
config_class = NanoConfig
base_model_prefix = "transformer"
is_parallelizable = False
supports_gradient_checkpointing = True
_no_split_modules = ["NanoBlock"]
_skip_keys_device_placement = "past_key_values"
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, NanoModel):
module.gradient_checkpointing = value
class Split(nn.Module):
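    """Split each token's feature vector into `splits` consecutive pseudo-tokens:
    (batch, tokens, splits * d) -> (batch, tokens * splits, d), so a wide
    embedding can feed a narrower backbone.
    """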
def __init__(self, splits):
super().__init__()
self.splits=splits
    def forward(self, x):
        bs, tokens, _ = x.shape
        # (bs, tokens, splits * d) -> (bs, tokens * splits, d)
        return x.view(bs, tokens, self.splits, -1).reshape(bs, tokens * self.splits, -1)
class Recombine(nn.Module):
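    """Inverse of `Split`: merge groups of `splits` consecutive pseudo-tokens back
    into single tokens, (batch, tokens * splits, d) -> (batch, tokens, splits * d).
    """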
def __init__(self, splits):
super().__init__()
self.splits = splits
    def forward(self, x):
        bs = x.shape[0]
        tokens = x.shape[1] // self.splits
        # (bs, tokens * splits, d) -> (bs, tokens, splits * d)
        return x.view(bs, tokens, -1)
class Residual(nn.Module):
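    """Wrap a module with a residual connection, optionally scaled by a learned
    scalar `a`: `module(x) * a + x` (with `a = 1` when no scale is given).
    """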
def __init__(self, module, a=None):
super().__init__()
self.module = module
self.a = nn.Parameter(torch.tensor(a, dtype=torch.bfloat16)) if a is not None else None
def forward(self, x):
return self.module(x) * (self.a if self.a is not None else 1) + x
class LoRA(nn.Module):
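    """Low-rank adapter with a residual connection: `fn_o(fn_i(x)) * a + x`.

    `get_delta_w` returns the equivalent dense weight update `a * (W_o @ W_i)`
    (the adapter biases are ignored).
    """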
def __init__(self, d, r, a=1):
super().__init__()
self.fn_i = nn.Linear(d, r)
self.fn_o = nn.Linear(r, d)
self.a = nn.Parameter(torch.tensor(a, dtype=self.fn_i.weight.dtype))
def forward(self, x):
return self.fn_o(self.fn_i(x)) * self.a + x
def get_delta_w(self):
return torch.mm(self.fn_o.weight, self.fn_i.weight) * self.a
class NanoModel(NanoPreTrainedModel):
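    """Decoder-only transformer backbone.

    Token embeddings are either a plain `nn.Embedding`, an expanded embedding
    projected down to `hidden_size`, or (when `full_adaptation_type != "no"`) an
    expanded embedding passed through an optional pre-projection and an adapter
    (LoRA / linear / residual-linear) and then `Split` into several pseudo-tokens
    per input token.
    """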
def __init__(self, config):
super().__init__(config)
ln_class = LlamaRMSNorm if config.layernorm=="llamarmsnorm" else nn.LayerNorm
if config.full_adaptation_type == "no":
if config.expanded_wte_size is not None:
self.wte = nn.Sequential(
nn.Embedding(config.vocab_size, config.expanded_wte_size),
nn.Linear(config.expanded_wte_size, config.hidden_size),
)
else:
self.wte = nn.Embedding(config.vocab_size, config.hidden_size)
else:
assert config.expanded_wte_size is not None, "experimental full adaptation of token embeddings requires expanded_wte_size to be set"
            self.d_0 = config.expanded_wte_size if not config.full_adaptation_has_pre_proj else config.pre_proj_dim
self.wte = nn.Sequential(
nn.Embedding(config.vocab_size, config.expanded_wte_size),
(
nn.Linear(config.expanded_wte_size, config.pre_proj_dim) if config.full_adaptation_has_pre_proj else nn.Identity()
),
(
LoRA(self.d_0, config.experimental_full_adaption_rank)
if config.full_adaptation_type == "lora" else
nn.Linear(self.d_0, self.d_0)
if config.full_adaptation_type == "linear" else
Residual(
nn.Linear(self.d_0, self.d_0)
)
if config.full_adaptation_type == "linear-r" else
Residual(
nn.Linear(self.d_0, self.d_0), 1
)
if config.full_adaptation_type == "linear-ra" else
nn.Identity()
),
Split(self.d_0//config.hidden_size)
)
self.h = nn.ModuleList(
[NanoBlock(config) for i in range(config.num_hidden_layers)]
)
self.ln_f = ln_class(config.hidden_size, eps=config.layer_norm_epsilon)
self.model_parallel = False
self.device_map = None
self.gradient_checkpointing = False
self.post_init()
def get_input_embeddings(self):
return self.wte[0] if self.config.expanded_wte_size is not None else self.wte
def set_input_embeddings(self, new_embeddings):
if self.config.expanded_wte_size is not None:
self.wte[0] = new_embeddings
else:
self.wte = new_embeddings
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        # Note: not all of these arguments are used; the signature follows the GPT-2 modeling code this was adapted from.
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past_key_values is None:
past_length = 0
past_key_values = tuple([None] * len(self.h))
else:
past_length = past_key_values[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(
past_length,
input_shape[-1] + past_length,
dtype=torch.long,
device=device,
)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
if attention_mask is not None:
if batch_size <= 0:
raise ValueError("batch_size has to be defined and > 0")
attention_mask = attention_mask.view(batch_size, -1)
attention_mask = attention_mask[:, None, None, :]
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
if self.config.add_cross_attention and encoder_hidden_states is not None:
(
encoder_batch_size,
encoder_sequence_length,
_,
) = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_attention_mask = None
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
# print("inputs embeds shape", inputs_embeds.shape)
hidden_states = inputs_embeds
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
        output_shape = (-1, hidden_states.shape[1], hidden_states.size(-1))
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = (
() if output_attentions and self.config.add_cross_attention else None
)
all_hidden_states = () if output_hidden_states else None
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
if layer_past is not None:
layer_past = tuple(
past_state.to(hidden_states.device)
for past_state in layer_past
)
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if isinstance(head_mask, torch.Tensor):
head_mask = head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                # Recompute this block during backward to save activation memory.
                outputs = torch.utils.checkpoint.checkpoint(
                    block, hidden_states, attention_mask, position_ids, use_cache, layer_past
                )
            else:
                outputs = block(hidden_states, mask=attention_mask, position_ids=position_ids, use_cache=use_cache, layer_past=layer_past)
            hidden_states = outputs[0]
            if use_cache:
                presents = presents + (outputs[1],)
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(output_shape)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, presents, all_hidden_states]
                if v is not None
            )
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=None,
cross_attentions=None,
)
class NanoModelForCausalLM(NanoPreTrainedModel):
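    """`NanoModel` with a language-modeling head.

    The head mirrors the embedding-side configuration: a plain linear head, an
    expanded two-layer head, or (for full adaptation) `Recombine` followed by an
    adapter and a projection to the vocabulary.
    """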
_tied_weights_keys = ["lm_head.weight"]
def __init__(self, config):
super().__init__(config)
self.transformer = NanoModel(config)
if config.full_adaptation_type == "no":
if (config.expanded_lm_head_size is not None):
self.lm_head = nn.Sequential(
nn.Linear(
config.hidden_size, config.expanded_lm_head_size, bias=config.lm_head_projection_bias
),
nn.Linear(
config.expanded_lm_head_size, config.vocab_size, bias=config.lm_head_bias
),
)
else:
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
else:
d_0 = config.expanded_lm_head_size if (not config.full_adaptation_has_pre_proj) else config.pre_proj_dim
self.lm_head = nn.Sequential(
Recombine(d_0//config.hidden_size),
nn.Identity() if not config.full_adaptation_has_pre_proj else nn.Linear(d_0, config.expanded_lm_head_size),
(
LoRA(config.expanded_lm_head_size, config.experimental_full_adaption_rank)
if config.full_adaptation_type == "lora" else
nn.Linear(config.expanded_lm_head_size, config.expanded_lm_head_size)
if config.full_adaptation_type == "linear" else
Residual(
nn.Linear(config.expanded_lm_head_size, config.expanded_lm_head_size)
)
if config.full_adaptation_type == "linear-r" else
Residual(
nn.Linear(config.expanded_lm_head_size, config.expanded_lm_head_size), 1
)
if config.full_adaptation_type == "linear-ra" else
nn.Identity()
),
nn.Linear(config.expanded_lm_head_size, config.vocab_size)
)
self.model_parallel = False
self.device_map = None
self.post_init()
def get_output_embeddings(self):
return self.lm_head if (self.config.experimental_full_adaption_rank is None and self.config.expanded_lm_head_size is None) else self.lm_head[-1]
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def prepare_inputs_for_generation(
self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
):
token_type_ids = kwargs.get("token_type_ids", None)
        # only keep the last token of input_ids if past_key_values is provided
if past_key_values:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past_key_values:
position_ids = position_ids[:, -1].unsqueeze(-1)
else:
position_ids = None
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and past_key_values is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids}
model_inputs.update(
{
"past_key_values": past_key_values,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
)
return model_inputs
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
# print("Hidden states shape", hidden_states.shape)
if self.model_parallel:
torch.cuda.set_device(self.transformer.first_device)
hidden_states = hidden_states.to(self.lm_head.weight.device)
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# move labels to correct device to enable model parallelism
labels = labels.to(lm_logits.device)
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(
shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
)
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
cross_attentions=transformer_outputs.cross_attentions,
)
@staticmethod
def _reorder_cache(
past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
) -> Tuple[Tuple[torch.Tensor]]:
"""
This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
[`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
beam_idx at every generation step.
"""
return tuple(
tuple(
past_state.index_select(0, beam_idx.to(past_state.device))
for past_state in layer_past
)
for layer_past in past_key_values
)
class VTMModelForCausalLM(NanoModelForCausalLM):
_tied_weights_keys = ["lm_head.3.weight"]
def __init__(self, config):
super().__init__(config)
class VTMPreProjModelForCausalLM(NanoModelForCausalLM):
_tied_weights_keys = ["lm_head.3.weight"]
def __init__(self, config):
super().__init__(config)
class PlusModelForCausalLM(NanoModelForCausalLM):
_tied_weights_keys = ["lm_head.1.weight"]
def __init__(self, config):
super().__init__(config)
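# Usage sketch (an assumption, not part of the modeling code): if this file and
# `configuration_nano.py` are shipped in a Hub repo (e.g. "crumb/Llama-p-small")
# with an `auto_map` entry pointing at `NanoModelForCausalLM`, the model can be
# loaded with remote code enabled:
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#
#   repo = "crumb/Llama-p-small"
#   tokenizer = AutoTokenizer.from_pretrained(repo)
#   model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)
#   inputs = tokenizer("Hello, my name is", return_tensors="pt")
#   output_ids = model.generate(**inputs, max_new_tokens=20)
#   print(tokenizer.decode(output_ids[0], skip_special_tokens=True))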