vaibhavad committed
Commit 1bb5aa8 · verified · 1 Parent(s): a4cbc92

Create modeling_llama_encoder.py

Files changed (1)
  1. modeling_llama_encoder.py +150 -0
modeling_llama_encoder.py ADDED
@@ -0,0 +1,150 @@
+ from typing import List, Optional, Tuple, Union
+ import torch
+ from transformers import LlamaModel, LlamaPreTrainedModel, LlamaForCausalLM, AutoModel
+ from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaRMSNorm, LlamaRotaryEmbedding, LlamaConfig, LlamaMLP, LlamaAttention, LlamaFlashAttention2, LlamaSdpaAttention
+ from transformers.utils import logging
+ from torch import nn
+ from torch.nn import CrossEntropyLoss
+ import torch.nn.functional as F
+ from transformers.modeling_outputs import BaseModelOutputWithPast, MaskedLMOutput, CausalLMOutputWithPast, TokenClassifierOutput
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
+ from transformers.cache_utils import Cache, DynamicCache, StaticCache
+
+ logger = logging.get_logger(__name__)
+
+
+ class ModifiedLlamaAttention(LlamaAttention):
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         # Disable the causal flag so attention is bidirectional.
+         self.is_causal = False
+
+
+ class ModifiedLlamaFlashAttention2(LlamaFlashAttention2):
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.is_causal = False
+
+
+ class ModifiedLlamaSdpaAttention(LlamaSdpaAttention):
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.is_causal = False
+
+
+ LLAMA_ATTENTION_CLASSES = {
+     "eager": ModifiedLlamaAttention,
+     "flash_attention_2": ModifiedLlamaFlashAttention2,
+     "sdpa": ModifiedLlamaSdpaAttention,
+ }
+
+
+ class ModifiedLlamaDecoderLayer(LlamaDecoderLayer):
+     def __init__(self, config: LlamaConfig, layer_idx: int):
+         nn.Module.__init__(self)
+         self.hidden_size = config.hidden_size
+
+         # Same layout as the stock decoder layer, but built with the non-causal attention classes above.
+         self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
+
+         self.mlp = LlamaMLP(config)
+         self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+
+ class BidirectionalLlama(LlamaModel):
+     def __init__(self, config):
+         LlamaPreTrainedModel.__init__(self, config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+         self.layers = nn.ModuleList(
+             [ModifiedLlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+         )
+         self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.rotary_emb = LlamaRotaryEmbedding(config=config)
+
+         self.gradient_checkpointing = False
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def _update_causal_mask(
+         self,
+         attention_mask: torch.Tensor,
+         input_tensor: torch.Tensor,
+         cache_position: torch.Tensor,
+         past_key_values: Cache,
+         output_attentions: bool,
+     ):
+         # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
+         # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode step due to the dynamic shapes.
+         # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
+         # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
+
+         if self.config._attn_implementation == "flash_attention_2":
+             if attention_mask is not None and 0.0 in attention_mask:
+                 return attention_mask
+             return None
+
+         # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
+         # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
+         # to infer the attention mask.
+         past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+         using_static_cache = isinstance(past_key_values, StaticCache)
+
+         # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
+         # if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
+         #     if AttentionMaskConverter._ignore_causal_mask_sdpa(
+         #         attention_mask,
+         #         inputs_embeds=input_tensor,
+         #         past_key_values_length=past_seen_tokens,
+         #         is_training=self.training,
+         #     ):
+         #         return None
+
+         dtype, device = input_tensor.dtype, input_tensor.device
+         min_dtype = torch.finfo(dtype).min
+         sequence_length = input_tensor.shape[1]
+         if using_static_cache:
+             target_length = past_key_values.get_max_length()
+         else:
+             target_length = (
+                 attention_mask.shape[-1]
+                 if isinstance(attention_mask, torch.Tensor)
+                 else past_seen_tokens + sequence_length + 1
+             )
+
+         if attention_mask is not None and attention_mask.dim() == 4:
+             # in this case we assume that the mask comes already in inverted form and requires no inversion or slicing
+             if attention_mask.max() != 0:
+                 raise ValueError("Custom 4D attention mask should be passed in inverted form with max==0")
+             causal_mask = attention_mask
+         else:
+             # Unlike the stock Llama mask, start from all zeros so no position is masked by default
+             # (bidirectional attention); multiplying the zero mask below leaves it unchanged.
+             causal_mask = torch.zeros(
+                 (sequence_length, target_length), dtype=dtype, device=device
+             )
+             causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
+             causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
+             if attention_mask is not None:
+                 causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
+                 mask_length = attention_mask.shape[-1]
+                 padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
+                 padding_mask = padding_mask == 0
+                 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+                     padding_mask, min_dtype
+                 )
+         if (
+             self.config._attn_implementation == "sdpa"
+             and attention_mask is not None
+             and attention_mask.device.type == "cuda"
+             and not output_attentions
+         ):
+             # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+             # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
+             # Details: https://github.com/pytorch/pytorch/issues/110213
+             causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
+
+         return causal_mask
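A minimal sketch of how the added encoder could be exercised, not part of the commit itself: it builds `BidirectionalLlama` from a tiny random `LlamaConfig` and checks that attention is no longer lower-triangular. The config values and the import path `modeling_llama_encoder` are assumptions for illustration; in practice the class would be loaded from a real checkpoint (e.g. via `from_pretrained` with this file in the repo).

```python
import torch
from transformers import LlamaConfig

# Assumes this committed file is importable from the working directory.
from modeling_llama_encoder import BidirectionalLlama

# Tiny toy config; "eager" attention avoids flash-attn / SDPA requirements
# and lets us inspect the attention weights directly.
config = LlamaConfig(
    vocab_size=128,
    hidden_size=64,
    intermediate_size=128,
    num_hidden_layers=2,
    num_attention_heads=4,
    num_key_value_heads=4,
    pad_token_id=0,
    attn_implementation="eager",
)

model = BidirectionalLlama(config).eval()
input_ids = torch.randint(1, config.vocab_size, (1, 8))

with torch.no_grad():
    out = model(input_ids=input_ids, output_attentions=True)

# With `is_causal = False` and the all-zero mask from `_update_causal_mask`,
# earlier tokens also attend to later ones, so the attention matrix is not
# lower-triangular: the first token has non-zero weight on the last token.
attn = out.attentions[0][0, 0]          # (seq, seq) for layer 0, head 0
print(attn[0, -1].item() > 0.0)
```

In a stock `LlamaModel` the upper triangle of that attention matrix would be masked out; here every position participates, which is the whole point of the encoder-style modification.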