# coding=utf-8 # Copyright 2022 EleutherAI and the Huggingface Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Pytorch DenseGAU RetNet model.""" from typing import List, Optional, Tuple, Union import math import torch import torch.utils.checkpoint import torch.nn.functional as F from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, \ SequenceClassifierOutputWithPast from transformers.modeling_utils import PreTrainedModel from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, \ replace_return_docstrings from transformers import top_k_top_p_filtering from transformers.generation.configuration_utils import GenerationConfig from .configuration_dense_gau_retnet import DenseGauRetNetConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "DenseGauRetNetConfig" # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm class DenseGauRetNetRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return (self.weight * hidden_states).to(input_dtype) # added for retention # Copied from https://github.com/microsoft/torchscale/blob/main/torchscale/component/multiscale_retention.py def rotate_every_two(x): x1 = x[:, :, :, ::2] x2 = x[:, :, :, 1::2] x = torch.stack((-x2, x1), dim=-1) return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')\ def theta_shift(x, sin, cos): return (x * cos) + (rotate_every_two(x) * sin) #Parameter efficient HiddenProjection class HiddenProjection(nn.Module): def __init__(self, input_dim, mid_reduction_ratio=16, final_reduction_ratio=4): super(HiddenProjection, self).__init__() self.fc1 = nn.Linear(input_dim, input_dim // mid_reduction_ratio, bias=False) self.fc2 = nn.Linear(input_dim // mid_reduction_ratio, int(input_dim // final_reduction_ratio), bias=False) def forward(self, x): fc1_output = F.silu(self.fc1(x)) fc2_output = self.fc2(fc1_output) return fc2_output # Copied and modified from transformers.models.bart.modeling_bart._expand_mask class MultiScaleGauRetention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: DenseGauRetNetConfig): super().__init__() self.config = config self.hidden_size = config.hidden_size self.query_key_dim = config.query_key_dim self.num_heads = config.num_attention_heads self.factor = config.v_factor self.head_dim = config.hidden_size * self.factor // self.num_heads self.max_position_embeddings = config.max_position_embeddings self.q_proj = nn.Linear(self.hidden_size, self.query_key_dim, bias=False) self.k_proj = nn.Linear(self.hidden_size, self.query_key_dim, bias=False) self.key_dim = self.query_key_dim // self.num_heads self.scaling = self.key_dim ** -0.5 self.expansion_dim = int(config.hidden_size * self.factor) self.group_norm = DenseGauRetNetRMSNorm(self.expansion_dim // config.num_attention_heads, eps=config.rms_norm_eps) self.to_hidden = nn.Sequential( nn.Linear(config.hidden_size, self.expansion_dim * 2, bias=False), nn.SiLU() ) self.to_out = nn.Sequential( nn.Linear(self.expansion_dim, config.hidden_size, bias=False), nn.Dropout(0) ) self.config = config self.k_select = HiddenProjection(self.hidden_size, config.intermediate_k_select_scale, 2) self.v_select = HiddenProjection(self.hidden_size, config.intermediate_v_select_scale, 0.5) self.k_norm = DenseGauRetNetRMSNorm(self.query_key_dim, eps=config.rms_norm_eps) self.v_norm = DenseGauRetNetRMSNorm(self.expansion_dim, eps=config.rms_norm_eps) if config.deepnorm: self.alpha = math.pow(2.0 * config.num_hidden_layers, 0.25) else: self.alpha = 1.0 self.dropout_module = torch.nn.Dropout(config.dropout) self.reset_parameters() # def reset_parameters(self): nn.init.xavier_uniform_(self.q_proj.weight, gain=2 ** -2.5) nn.init.xavier_uniform_(self.k_proj.weight, gain=2 ** -2.5) nn.init.xavier_uniform_(self.k_select.fc1.weight, gain=2 ** -2.5) nn.init.xavier_uniform_(self.k_select.fc2.weight, gain=2 ** -2.5) nn.init.xavier_uniform_(self.v_select.fc1.weight, gain=2 ** -2.5) nn.init.xavier_uniform_(self.v_select.fc2.weight, gain=2 ** -2.5) for module in self.to_out.modules(): if isinstance(module, nn.Linear): nn.init.xavier_uniform_(module.weight, gain=2 ** -1) for module in self.to_hidden.modules(): if isinstance(module, nn.Linear): nn.init.xavier_uniform_(module.weight, gain=2 ** -2.5) def forward( self, forward_impl: 'parallel', hidden_states: torch.Tensor, rel_pos, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, k_features=None, # dense v_features=None, # dense dense=False, dense_layers=0, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, tgt_len, _ = hidden_states.size() (sin, cos), inner_mask = rel_pos x = hidden_states q = F.silu(self.q_proj(x)) k = F.silu(self.k_proj(x)) v, gate = self.to_hidden(x).chunk(2, dim=-1) k *= self.scaling k_curr = k v_curr = v if dense: k_gate = self.k_select(hidden_states.clone()) for i, k_past in enumerate(k_features): k = k.clone() + F.silu(k_gate) * k_past k = self.k_norm(k) v_gate = self.v_select(hidden_states.clone()) for i, v_past in enumerate(v_features): v = v.clone() + F.silu(v_gate) * v_past v = self.v_norm(v) q = q.view(bsz, tgt_len, self.num_heads, self.key_dim).transpose(1, 2) k = k.view(bsz, tgt_len, self.num_heads, self.key_dim).transpose(1, 2) qr = theta_shift(q, sin, cos) kr = theta_shift(k, sin, cos) if forward_impl == 'parallel': output = self.parallel_forward(qr, kr, v, inner_mask) elif forward_impl == 'recurrent': output, past_key_value = self.recurrent_forward(qr, kr, v, inner_mask, past_key_value=past_key_value) output = self.group_norm(output) output = output.reshape(bsz, tgt_len, self.expansion_dim) * gate # gate output = self.to_out(output) output = self.dropout_module(output) return output, past_key_value, k_curr, v_curr def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() # retntion parallel forward def recurrent_forward( self, qr, kr, v, decay, past_key_value, ): bsz = v.size(0) v = v.view(bsz, self.num_heads, self.head_dim, 1) kv = kr * v if "prev_key_value" in past_key_value: prev_kv = past_key_value["prev_key_value"] prev_scale = past_key_value["scale"] scale = prev_scale * decay + 1 kv = prev_kv * (prev_scale.sqrt() * decay / scale.sqrt()).view(self.num_heads, 1, 1) + kv / scale.sqrt().view(self.num_heads, 1, 1) else: scale = torch.ones_like(decay) past_key_value["prev_key_value"] = kv past_key_value["scale"] = scale output = torch.sum(qr * kv, dim=3) return output, past_key_value def parallel_forward(self, qr, kr, v, mask): bsz, tgt_len, embed_dim = v.size() vr = v.view(bsz, tgt_len, self.num_heads, self.expansion_dim // self.num_heads).transpose(1, 2) qk_mat = qr @ kr.transpose(-1, -2) # bsz * m * tgt_len * tgt_len qk_mat = qk_mat * mask # invariant after normalization qk_mat = qk_mat / qk_mat.detach().abs().sum(dim=-1, keepdim=True).clamp(min=1, max=5e4) output = torch.matmul(qk_mat, vr) output = output.transpose(1, 2) return output class DenseGauRetNetDecoderLayer(nn.Module): def __init__(self, config: DenseGauRetNetConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = MultiScaleGauRetention(config=config) self.input_layernorm = DenseGauRetNetRMSNorm(config.hidden_size, eps=config.rms_norm_eps) if config.deepnorm: self.alpha = math.pow(2.0 * config.num_hidden_layers, 0.25) else: self.alpha = 1.0 def forward( self, hidden_states: torch.Tensor, rel_pos=None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, k_features: Optional[List] = [], v_features: Optional[List] = [], dense=False, dense_layers=0, forward_impl: str = 'parallel', ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states, past_key_value, k_curr, v_curr = self.self_attn( hidden_states=hidden_states, rel_pos=rel_pos, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, k_features=k_features, v_features=v_features, dense=dense, dense_layers=dense_layers, forward_impl=forward_impl, ) hidden_states = residual + hidden_states return hidden_states, past_key_value, k_curr, v_curr DenseGauRetNet_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Pytorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular Pytorch Module and refer to the Pytorch documentation for all matter related to general usage and behavior. Parameters: config ([`DenseGauRetNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare DenseGauRetNet Model outputting raw hidden-states without any specific head on top.", DenseGauRetNet_START_DOCSTRING, ) class DenseGauRetNetPreTrainedModel(PreTrainedModel): config_class = DenseGauRetNetConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["DenseGauRetNetDecoderLayer"] _skip_keys_device_placement = "past_key_values" _keys_to_ignore_on_load_unexpected = [r"decoder\.version"] def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, DenseGauRetNetModel): module.gradient_checkpointing = value DenseGauRetNet_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class RetNetRelPos(nn.Module): def __init__(self, decoder_embed_dim, decoder_retention_heads, query_key_dim): super().__init__() angle = 1.0 / (10000 ** torch.linspace(0, 1, (query_key_dim // decoder_retention_heads) // 2)) angle = angle.unsqueeze(-1).repeat(1, 2).flatten() decay = torch.log(1 - 2 ** (-5 - torch.arange(decoder_retention_heads, dtype=torch.float))) self.register_buffer("angle", angle) self.register_buffer("decay", decay) def forward(self, slen, activate_recurrent=False): if activate_recurrent: sin = torch.sin(self.angle * (slen - 1)) cos = torch.cos(self.angle * (slen - 1)) retention_rel_pos = ((sin, cos), self.decay.exp()) else: index = torch.arange(slen).to(self.decay) sin = torch.sin(index[:, None] * self.angle[None, :]) cos = torch.cos(index[:, None] * self.angle[None, :]) mask = torch.tril(torch.ones(slen, slen).to(self.decay)) mask = torch.masked_fill(index[:, None] - index[None, :], ~mask.bool(), float("inf")) mask = torch.exp(mask * self.decay[:, None, None]) mask = torch.nan_to_num(mask) mask = mask / mask.sum(dim=-1, keepdim=True).sqrt() retention_rel_pos = ((sin, cos), mask) return retention_rel_pos @add_start_docstrings( "The bare DenseGauRetNet Model outputting raw hidden-states without any specific head on top.", DenseGauRetNet_START_DOCSTRING, ) class DenseGauRetNetModel(DenseGauRetNetPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`DenseGauRetNetDecoderLayer`] Args: config: DenseGauRetNetConfig """ def __init__(self, config: DenseGauRetNetConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList([DenseGauRetNetDecoderLayer(config) for _ in range(config.num_hidden_layers)]) self.norm = DenseGauRetNetRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() self.retnet_rel_pos = RetNetRelPos(config.hidden_size, config.num_attention_heads, config.query_key_dim) if config.deepnorm: init_scale = math.pow(8.0 * config.num_hidden_layers, 0.25) for name, p in self.named_parameters(): if ( "fc1" in name or "fc2" in name or "gate_proj" in name or "down_proj" in name or "up_proj" in name or "out_proj" in name or "v_proj" in name or "to_hidden" in name or "to_output" in name ): p.data.div_(init_scale) def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def is_first_step(self, incremental_state): if incremental_state is None: return False return incremental_state.get("is_first_step", False) @add_start_docstrings_to_model_forward(DenseGauRetNet_INPUTS_DOCSTRING) def forward( self, forward_impl: Optional[str] = 'parallel', input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, sequence_offset=0, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") seq_length_with_past = seq_length past_key_values_length = 0 if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) # embed positions if attention_mask is None: attention_mask = torch.ones( (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device ) attention_mask = self._prepare_decoder_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length ) hidden_states = inputs_embeds if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # decoder layer all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = [] if use_cache else None # k_features = [] v_features = [] dense_layers = 0 for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None and len( past_key_values) != 0 else {} slen = input_ids.size(1) if forward_impl == 'recurrent': slen = sequence_offset rel_pos = self.retnet_rel_pos(slen, forward_impl == 'recurrent',) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions, None) return custom_forward hidden_states = layer_outputslayer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, attention_mask, position_ids, None, ) else: dense = False if idx >= 1: dense = True layer_outputs, past_key_value, k_curr, v_curr = decoder_layer( hidden_states, rel_pos, forward_impl=forward_impl, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, k_features=k_features, v_features=v_features, dense=dense, dense_layers=dense_layers, ) dense_layers += 1 k_features.append(k_curr) v_features.append(v_curr) if len(k_features) > self.config.dense_block_layers: k_features.pop(0) if len(v_features) > self.config.dense_block_layers: v_features.pop(0) hidden_states = layer_outputs # used to be 3 ele,tmp 1 if use_cache: next_decoder_cache.append(past_key_value) if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) class DenseGauRetNetForCausalLM(DenseGauRetNetPreTrainedModel): _auto_class = "AutoModelForCausalLM" _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.model = DenseGauRetNetModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @add_start_docstrings_to_model_forward(DenseGauRetNet_INPUTS_DOCSTRING) # @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, forward_impl: str = 'parallel', sequence_offset=0, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( forward_impl=forward_impl, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, sequence_offset=sequence_offset, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): if past_key_values: input_ids = input_ids[:, -1:] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past # infer mode def sample_token(self, logit, do_sample=False, top_k=1, top_p=1.0, temperature=1.0): if not do_sample: return torch.argmax(logit, dim=-1, keepdim=True) filtered = top_k_top_p_filtering(logit / temperature, top_k=top_k, top_p=top_p) return torch.multinomial(torch.softmax(filtered, dim=-1), num_samples=1) @torch.inference_mode() def generate( self, input_ids: Optional[torch.Tensor] = None, parallel_compute_prompt=False, generation_config: Optional[GenerationConfig] = None, **kwargs, ): # breakpoint() past_key_values = {} for p_i in range(input_ids.shape[1] - 1): outputs = self(input_ids[:, p_i:p_i + 1], forward_impl='recurrent', past_key_values=past_key_values, sequence_offset=p_i, return_dict=True, use_cache=True) past_key_values = outputs.past_key_values generated = input_ids[:, -1].unsqueeze(-1) # [B, 1] for i in range(generation_config.max_new_tokens): outputs = self(generated[:,-1:], forward_impl='recurrent', past_key_values=past_key_values, use_cache=True, return_dict=True, sequence_offset=input_ids.shape[-1]+generated.shape[-1]-2 #1 ) logit = outputs.logits[:, -1, :] # [batch_size, vocab_size] past_key_values = outputs.past_key_values token = self.sample_token(logit, do_sample=generation_config.do_sample, temperature=generation_config.temperature) generated = torch.cat([generated, token], dim=-1) return generated[:,1:] @add_start_docstrings( """ The DenseGauRetNet Model transformer with a sequence classification head on top (linear layer). [`DenseGauRetNetForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """, DenseGauRetNet_START_DOCSTRING, ) class DenseGauRetNetForSequenceClassification(DenseGauRetNetPreTrainedModel): _keys_to_ignore_on_load_missing = [r"lm_head.weight"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = DenseGauRetNetModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value @add_start_docstrings_to_model_forward(DenseGauRetNet_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device) else: sequence_lengths = -1 pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )