yuewang-sf commited on
Commit
e9c9819
1 Parent(s): c683a90

Upload instructcodet5p-16b

Browse files
added_tokens.json ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "\t\t": 50294,
3
+ "\t\t\t": 50293,
4
+ "\t\t\t\t": 50292,
5
+ "\t\t\t\t\t": 50291,
6
+ "\t\t\t\t\t\t": 50290,
7
+ "\t\t\t\t\t\t\t": 50289,
8
+ "\t\t\t\t\t\t\t\t": 50288,
9
+ "\t\t\t\t\t\t\t\t\t": 50287,
10
+ " ": 50286,
11
+ " ": 50285,
12
+ " ": 50284,
13
+ " ": 50283,
14
+ " ": 50282,
15
+ " ": 50281,
16
+ " ": 50280,
17
+ " ": 50279,
18
+ " ": 50278,
19
+ " ": 50277,
20
+ " ": 50276,
21
+ " ": 50275,
22
+ " ": 50274,
23
+ " ": 50273,
24
+ " ": 50272,
25
+ " ": 50271,
26
+ " ": 50270,
27
+ " ": 50269,
28
+ " ": 50268,
29
+ " ": 50267,
30
+ " ": 50266,
31
+ " ": 50265,
32
+ " ": 50264,
33
+ " ": 50263,
34
+ " ": 50262,
35
+ " ": 50261,
36
+ " ": 50260,
37
+ " ": 50259,
38
+ " ": 50258,
39
+ " ": 50257
40
+ }
config.json ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "instructcodet5p-16b",
3
+ "architectures": [
4
+ "CodeT5pEncoderDecoderModel"
5
+ ],
6
+ "decoder": {
7
+ "_name_or_path": "codet5p-16b-decoder",
8
+ "activation_function": "gelu_new",
9
+ "add_cross_attention": true,
10
+ "architectures": [
11
+ "CodeT5pForCausalLM"
12
+ ],
13
+ "attn_pdrop": 0.0,
14
+ "bad_words_ids": null,
15
+ "bos_token_id": 1,
16
+ "chunk_size_feed_forward": 0,
17
+ "cross_attention_hidden_size": null,
18
+ "decoder_start_token_id": null,
19
+ "diversity_penalty": 0.0,
20
+ "do_sample": false,
21
+ "early_stopping": false,
22
+ "embd_pdrop": 0.0,
23
+ "encoder_no_repeat_ngram_size": 0,
24
+ "eos_token_id": 50256,
25
+ "exponential_decay_length_penalty": null,
26
+ "finetuning_task": null,
27
+ "forced_bos_token_id": null,
28
+ "forced_eos_token_id": null,
29
+ "gradient_checkpointing": false,
30
+ "id2label": {
31
+ "0": "LABEL_0",
32
+ "1": "LABEL_1"
33
+ },
34
+ "initializer_range": 0.02,
35
+ "is_decoder": true,
36
+ "is_encoder_decoder": false,
37
+ "label2id": {
38
+ "LABEL_0": 0,
39
+ "LABEL_1": 1
40
+ },
41
+ "layer_norm_epsilon": 1e-05,
42
+ "length_penalty": 1.0,
43
+ "max_length": 20,
44
+ "min_length": 0,
45
+ "model_type": "codet5p_module",
46
+ "n_ctx": 2048,
47
+ "n_embd": 6144,
48
+ "n_head": 24,
49
+ "n_inner": null,
50
+ "n_layer": 34,
51
+ "n_positions": 2048,
52
+ "no_repeat_ngram_size": 0,
53
+ "num_beam_groups": 1,
54
+ "num_beams": 1,
55
+ "num_return_sequences": 1,
56
+ "output_attentions": false,
57
+ "output_hidden_states": false,
58
+ "output_scores": false,
59
+ "pad_token_id": null,
60
+ "prefix": null,
61
+ "problem_type": null,
62
+ "pruned_heads": {},
63
+ "remove_invalid_values": false,
64
+ "repetition_penalty": 1.0,
65
+ "resid_pdrop": 0.0,
66
+ "return_dict": true,
67
+ "return_dict_in_generate": false,
68
+ "rotary_dim": 64,
69
+ "scale_attn_weights": true,
70
+ "sep_token_id": null,
71
+ "summary_activation": null,
72
+ "summary_first_dropout": 0.1,
73
+ "summary_proj_to_labels": true,
74
+ "summary_type": "cls_index",
75
+ "summary_use_proj": true,
76
+ "task_specific_params": null,
77
+ "temperature": 1.0,
78
+ "tf_legacy_loss": false,
79
+ "tie_encoder_decoder": false,
80
+ "tie_word_embeddings": false,
81
+ "tokenizer_class": "GPT2Tokenizer",
82
+ "top_k": 50,
83
+ "top_p": 1.0,
84
+ "torch_dtype": "float16",
85
+ "torchscript": false,
86
+ "transformers_version": "4.21.3",
87
+ "typical_p": 1.0,
88
+ "use_bfloat16": false,
89
+ "use_cache": true,
90
+ "vocab_size": 51200
91
+ },
92
+ "encoder": {
93
+ "_name_or_path": "codet5p-350m-encoder",
94
+ "activation_function": "gelu_new",
95
+ "add_cross_attention": false,
96
+ "architectures": [
97
+ "CodeT5pModel"
98
+ ],
99
+ "attn_pdrop": 0.0,
100
+ "bad_words_ids": null,
101
+ "bos_token_id": 1,
102
+ "chunk_size_feed_forward": 0,
103
+ "cross_attention_hidden_size": null,
104
+ "decoder_start_token_id": null,
105
+ "diversity_penalty": 0.0,
106
+ "do_sample": false,
107
+ "early_stopping": false,
108
+ "embd_pdrop": 0.0,
109
+ "encoder_no_repeat_ngram_size": 0,
110
+ "eos_token_id": 50256,
111
+ "exponential_decay_length_penalty": null,
112
+ "finetuning_task": null,
113
+ "forced_bos_token_id": null,
114
+ "forced_eos_token_id": null,
115
+ "gradient_checkpointing": false,
116
+ "id2label": {
117
+ "0": "LABEL_0",
118
+ "1": "LABEL_1"
119
+ },
120
+ "initializer_range": 0.02,
121
+ "is_decoder": false,
122
+ "is_encoder_decoder": false,
123
+ "label2id": {
124
+ "LABEL_0": 0,
125
+ "LABEL_1": 1
126
+ },
127
+ "layer_norm_epsilon": 1e-05,
128
+ "length_penalty": 1.0,
129
+ "max_length": 20,
130
+ "min_length": 0,
131
+ "model_type": "codet5p_module",
132
+ "n_ctx": 2048,
133
+ "n_embd": 1024,
134
+ "n_head": 16,
135
+ "n_inner": null,
136
+ "n_layer": 20,
137
+ "n_positions": 2048,
138
+ "no_repeat_ngram_size": 0,
139
+ "num_beam_groups": 1,
140
+ "num_beams": 1,
141
+ "num_return_sequences": 1,
142
+ "output_attentions": false,
143
+ "output_hidden_states": false,
144
+ "output_scores": false,
145
+ "pad_token_id": null,
146
+ "prefix": null,
147
+ "problem_type": null,
148
+ "pruned_heads": {},
149
+ "remove_invalid_values": false,
150
+ "repetition_penalty": 1.0,
151
+ "resid_pdrop": 0.0,
152
+ "return_dict": true,
153
+ "return_dict_in_generate": false,
154
+ "rotary_dim": 32,
155
+ "scale_attn_weights": true,
156
+ "sep_token_id": null,
157
+ "summary_activation": null,
158
+ "summary_first_dropout": 0.1,
159
+ "summary_proj_to_labels": true,
160
+ "summary_type": "cls_index",
161
+ "summary_use_proj": true,
162
+ "task_specific_params": null,
163
+ "temperature": 1.0,
164
+ "tf_legacy_loss": false,
165
+ "tie_encoder_decoder": false,
166
+ "tie_word_embeddings": false,
167
+ "tokenizer_class": "GPT2Tokenizer",
168
+ "top_k": 50,
169
+ "top_p": 1.0,
170
+ "torch_dtype": "float16",
171
+ "torchscript": false,
172
+ "transformers_version": "4.21.3",
173
+ "typical_p": 1.0,
174
+ "use_bfloat16": false,
175
+ "use_cache": true,
176
+ "vocab_size": 51200
177
+ },
178
+ "is_encoder_decoder": true,
179
+ "model_type": "codet5p",
180
+ "torch_dtype": "float16",
181
+ "transformers_version": null
182
+ }
configuration_codet5p.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
3
+
4
+ """ CodeT5+ model configuration"""
5
+ from transformers.configuration_utils import PretrainedConfig
6
+ from transformers.utils import logging
7
+ import copy
8
+
9
+ logger = logging.get_logger(__name__)
10
+
11
+
12
+ # Adapted from transformers.models.codegen.configuration_codegen.CodeGenConfig
13
+ class CodeT5pModuleConfig(PretrainedConfig):
14
+ model_type = "codet5p_module"
15
+ attribute_map = {
16
+ "max_position_embeddings": "n_positions",
17
+ "hidden_size": "n_embd",
18
+ "num_attention_heads": "n_head",
19
+ "num_hidden_layers": "n_layer",
20
+ }
21
+
22
+ def __init__(
23
+ self,
24
+ vocab_size=50400,
25
+ n_positions=2048,
26
+ n_ctx=2048,
27
+ n_embd=4096,
28
+ n_layer=28,
29
+ n_head=16,
30
+ rotary_dim=64,
31
+ n_inner=None,
32
+ activation_function="gelu_new",
33
+ resid_pdrop=0.0,
34
+ embd_pdrop=0.0,
35
+ attn_pdrop=0.0,
36
+ layer_norm_epsilon=1e-5,
37
+ initializer_range=0.02,
38
+ scale_attn_weights=True,
39
+ use_cache=True,
40
+ bos_token_id=50256,
41
+ eos_token_id=50256,
42
+ tie_word_embeddings=False,
43
+ **kwargs
44
+ ):
45
+ self.vocab_size = vocab_size
46
+ self.n_ctx = n_ctx
47
+ self.n_positions = n_positions
48
+ self.n_embd = n_embd
49
+ self.n_layer = n_layer
50
+ self.n_head = n_head
51
+ self.n_inner = n_inner
52
+ self.rotary_dim = rotary_dim
53
+ self.activation_function = activation_function
54
+ self.resid_pdrop = resid_pdrop
55
+ self.embd_pdrop = embd_pdrop
56
+ self.attn_pdrop = attn_pdrop
57
+ self.layer_norm_epsilon = layer_norm_epsilon
58
+ self.initializer_range = initializer_range
59
+ self.scale_attn_weights = scale_attn_weights
60
+ self.use_cache = use_cache
61
+
62
+ self.bos_token_id = bos_token_id
63
+ self.eos_token_id = eos_token_id
64
+
65
+ super().__init__(
66
+ bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
67
+ )
68
+
69
+
70
+ # Adapted from transformers.models.encoder_decoder.configuration_encoder_decoder.EncoderDecoderConfig
71
+ class CodeT5pConfig(PretrainedConfig):
72
+ model_type = "codet5p"
73
+ is_composition = True
74
+
75
+ def __init__(self, **kwargs):
76
+ super().__init__(**kwargs)
77
+ assert (
78
+ "encoder" in kwargs and "decoder" in kwargs
79
+ ), "Config has to be initialized with encoder and decoder config"
80
+ encoder_config = kwargs.pop("encoder")
81
+ decoder_config = kwargs.pop("decoder")
82
+ encoder_model_type = encoder_config.pop("model_type")
83
+ decoder_model_type = decoder_config.pop("model_type")
84
+
85
+ if encoder_model_type != decoder_model_type:
86
+ logger.warning("Encoder and decoder model types are different")
87
+
88
+ self.encoder = CodeT5pModuleConfig(**encoder_config)
89
+ self.decoder = CodeT5pModuleConfig(**decoder_config)
90
+ self.is_encoder_decoder = True
91
+
92
+ @classmethod
93
+ def from_encoder_decoder_configs(
94
+ cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
95
+ ) -> PretrainedConfig:
96
+ logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
97
+ decoder_config.is_decoder = True
98
+ decoder_config.add_cross_attention = True
99
+
100
+ return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
101
+
102
+ def to_dict(self):
103
+ """
104
+ Serializes this instance to a Python dictionary. Override the default *to_dict()* from *PretrainedConfig*.
105
+
106
+ Returns:
107
+ `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
108
+ """
109
+ output = copy.deepcopy(self.__dict__)
110
+ output["encoder"] = self.encoder.to_dict()
111
+ output["decoder"] = self.decoder.to_dict()
112
+ output["model_type"] = self.__class__.model_type
113
+ return output
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
modeling_codet5p.py ADDED
@@ -0,0 +1,979 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
3
+ """ PyTorch CodeT5+ 2B 6B 16B models.
4
+ The implementation is mainly based on transformers.models.codegen.modeling_codegen by adding cross-attention
5
+ and transformers.models.encoder_decoder.modeling_encoder_decoder.EncoderDecoderModel.
6
+ """
7
+ from typing import Optional, Tuple, Union
8
+ import torch
9
+ import torch.utils.checkpoint
10
+ from torch import nn
11
+ from torch.nn import CrossEntropyLoss
12
+
13
+ from transformers.activations import ACT2FN
14
+ from transformers.modeling_outputs import BaseModelOutput, Seq2SeqLMOutput, \
15
+ BaseModelOutputWithPast, CausalLMOutputWithPast, \
16
+ BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
17
+ from transformers.modeling_utils import PreTrainedModel
18
+ from transformers.configuration_utils import PretrainedConfig
19
+ from transformers.utils import add_code_sample_docstrings, add_start_docstrings, logging
20
+ from .configuration_codet5p import CodeT5pConfig, CodeT5pModuleConfig
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+ CODET5P_PRETRAINED_MODEL_ARCHIVE_LIST = [
25
+ "Salesforce/codet5p-220m",
26
+ "Salesforce/codet5p-770m",
27
+ "Salesforce/codet5p-2b",
28
+ "Salesforce/codet5p-6b",
29
+ "Salesforce/codet5p-16b",
30
+ # See all CodeT5+ models at https://huggingface.co/models?filter=codet5p
31
+ ]
32
+
33
+
34
+ # Copied from transformers.models.gptj.modeling_gptj.fixed_pos_embedding
35
+ def fixed_pos_embedding(x, seq_dim=1, seq_len=None):
36
+ dim = x.shape[-1]
37
+ if seq_len is None:
38
+ seq_len = x.shape[seq_dim]
39
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim))
40
+ sinusoid_inp = (
41
+ torch.einsum("i , j -> i j", torch.arange(seq_len, dtype=torch.float), inv_freq).to(x.device).float()
42
+ )
43
+ return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)
44
+
45
+
46
+ # Copied from transformers.models.gptj.modeling_gptj.rotate_every_two
47
+ def rotate_every_two(x):
48
+ x1 = x[:, :, :, ::2]
49
+ x2 = x[:, :, :, 1::2]
50
+ x = torch.stack((-x2, x1), dim=-1)
51
+ return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')
52
+
53
+
54
+ # Copied from transformers.models.gptj.modeling_gptj.duplicate_interleave
55
+ def duplicate_interleave(m):
56
+ """
57
+ A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy.
58
+ """
59
+ dim0 = m.shape[0]
60
+ m = m.view(-1, 1) # flatten the matrix
61
+ m = m.repeat(1, 2) # repeat all elements into the 2nd dimension
62
+ m = m.view(dim0, -1) # reshape into a matrix, interleaving the copy
63
+ return m
64
+
65
+
66
+ # Copied from transformers.models.gptj.modeling_gptj.apply_rotary_pos_emb
67
+ def apply_rotary_pos_emb(x, sincos, offset=0):
68
+ sin, cos = (duplicate_interleave(t)[None, offset: x.shape[1] + offset, None, :] for t in sincos)
69
+ # einsum notation for lambda t: repeat(t[offset:x.shape[1]+offset,:], "n d -> () n () (d j)", j=2)
70
+ return (x * cos) + (rotate_every_two(x) * sin)
71
+
72
+
73
+ # Adapted from transformers.models.codegen.modeling_codegen.CodeGenAttention
74
+ class CodeT5pAttention(nn.Module):
75
+ def __init__(self, config, is_cross_attention=False, is_decoder=True):
76
+ super().__init__()
77
+
78
+ max_positions = config.max_position_embeddings
79
+ self.register_buffer(
80
+ "causal_mask",
81
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
82
+ 1, 1, max_positions, max_positions
83
+ ),
84
+ )
85
+
86
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
87
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
88
+
89
+ self.embed_dim = config.hidden_size
90
+ self.num_attention_heads = config.num_attention_heads
91
+ self.head_dim = self.embed_dim // self.num_attention_heads
92
+ if self.head_dim * self.num_attention_heads != self.embed_dim:
93
+ raise ValueError(
94
+ f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
95
+ f" `num_attention_heads`: {self.num_attention_heads})."
96
+ )
97
+
98
+ self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
99
+ self.is_decoder = is_decoder
100
+ self.is_cross_attention = is_cross_attention
101
+ if self.is_cross_attention:
102
+ self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 2, bias=False)
103
+ self.q_attn = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
104
+ else:
105
+ self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3, bias=False)
106
+
107
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
108
+ self.rotary_dim = None
109
+ if config.rotary_dim is not None:
110
+ self.rotary_dim = config.rotary_dim
111
+
112
+ def _split_heads(self, x, n_head, dim_head, mp_num):
113
+ reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head))
114
+ reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:])
115
+ return reshaped
116
+
117
+ def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
118
+ """
119
+ Merges attn_head_size dim and num_attn_heads dim into n_ctx
120
+ """
121
+ if len(tensor.shape) == 5:
122
+ tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
123
+ elif len(tensor.shape) == 4:
124
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
125
+ else:
126
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
127
+ new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
128
+ return tensor.view(new_shape)
129
+
130
+ def _attn(
131
+ self,
132
+ query,
133
+ key,
134
+ value,
135
+ attention_mask=None,
136
+ head_mask=None,
137
+ ):
138
+ # Keep the attention weights computation in fp32 to avoid overflow issues
139
+ query = query.to(torch.float32)
140
+ key = key.to(torch.float32)
141
+
142
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
143
+ attn_weights = attn_weights / self.scale_attn
144
+
145
+ if not self.is_cross_attention and self.is_decoder:
146
+ # compute causal mask from causal mask buffer
147
+ query_length, key_length = query.size(-2), key.size(-2)
148
+ causal_mask = self.causal_mask[:, :, key_length - query_length: key_length, :key_length]
149
+ mask_value = torch.finfo(attn_weights.dtype).min
150
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
151
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
152
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
153
+ attn_weights = torch.where(causal_mask.bool(), attn_weights, mask_value)
154
+
155
+ if attention_mask is not None:
156
+ # Apply the attention mask
157
+ attn_weights = attn_weights + attention_mask
158
+
159
+ attn_weights = nn.Softmax(dim=-1)(attn_weights)
160
+ attn_weights = attn_weights.to(value.dtype)
161
+ attn_weights = self.attn_dropout(attn_weights)
162
+
163
+ # Mask heads if we want to
164
+ if head_mask is not None:
165
+ attn_weights = attn_weights * head_mask
166
+
167
+ attn_output = torch.matmul(attn_weights, value)
168
+
169
+ return attn_output, attn_weights
170
+
171
+ def forward(
172
+ self,
173
+ hidden_states: Optional[torch.FloatTensor],
174
+ attention_mask: Optional[torch.FloatTensor] = None,
175
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
176
+ head_mask: Optional[torch.FloatTensor] = None,
177
+ encoder_hidden_states: Optional[torch.Tensor] = None,
178
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
179
+ use_cache: Optional[bool] = False,
180
+ output_attentions: Optional[bool] = False,
181
+ ) -> Union[
182
+ Tuple[torch.Tensor, Tuple[torch.Tensor]],
183
+ Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
184
+ ]:
185
+
186
+ if encoder_hidden_states is not None:
187
+ if not hasattr(self, "q_attn"):
188
+ raise ValueError(
189
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
190
+ "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`."
191
+ )
192
+
193
+ mp_num = 4
194
+ local_dim = self.head_dim * self.num_attention_heads // mp_num
195
+ q = self.q_attn(hidden_states)
196
+ q_split = q.reshape(q.shape[:-1] + (mp_num, -1))
197
+ query = torch.split(q_split, local_dim, dim=-1)[0]
198
+
199
+ qkv = self.qkv_proj(encoder_hidden_states)
200
+ qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))
201
+ value, key = torch.split(qkv_split, local_dim, dim=-1)
202
+
203
+ attention_mask = encoder_attention_mask
204
+ else:
205
+ qkv = self.qkv_proj(hidden_states)
206
+ mp_num = 4
207
+ qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))
208
+
209
+ local_dim = self.head_dim * self.num_attention_heads // mp_num
210
+ query, value, key = torch.split(qkv_split, local_dim, dim=-1)
211
+
212
+ query = self._split_heads(query, self.num_attention_heads, self.head_dim, mp_num=mp_num)
213
+ key = self._split_heads(key, self.num_attention_heads, self.head_dim, mp_num=mp_num)
214
+
215
+ value = self._split_heads(value, self.num_attention_heads, self.head_dim, mp_num=mp_num)
216
+ value = value.permute(0, 2, 1, 3)
217
+
218
+ seq_len = key.shape[1]
219
+ offset = 0
220
+
221
+ if layer_past is not None:
222
+ offset = layer_past[0].shape[-2]
223
+ seq_len += offset
224
+
225
+ if self.rotary_dim is not None:
226
+ k_rot = key[:, :, :, : self.rotary_dim]
227
+ k_pass = key[:, :, :, self.rotary_dim:]
228
+
229
+ q_rot = query[:, :, :, : self.rotary_dim]
230
+ q_pass = query[:, :, :, self.rotary_dim:]
231
+
232
+ sincos = fixed_pos_embedding(k_rot, 1, seq_len=seq_len)
233
+ k_rot = apply_rotary_pos_emb(k_rot, sincos, offset=offset)
234
+ seq_len_q = query.shape[1]
235
+ sincos_q = fixed_pos_embedding(q_rot, 1, seq_len=seq_len_q)
236
+ q_rot = apply_rotary_pos_emb(q_rot, sincos_q, offset=offset)
237
+
238
+ key = torch.cat([k_rot, k_pass], dim=-1)
239
+ query = torch.cat([q_rot, q_pass], dim=-1)
240
+ else:
241
+ sincos = fixed_pos_embedding(key, 1, seq_len=seq_len)
242
+ key = apply_rotary_pos_emb(key, sincos, offset=offset)
243
+ query = apply_rotary_pos_emb(query, sincos, offset=offset)
244
+
245
+ key = key.permute(0, 2, 1, 3)
246
+ query = query.permute(0, 2, 1, 3)
247
+
248
+ if layer_past is not None:
249
+ past_key = layer_past[0]
250
+ past_value = layer_past[1]
251
+ key = torch.cat((past_key, key), dim=-2)
252
+ value = torch.cat((past_value, value), dim=-2)
253
+
254
+ if use_cache is True:
255
+ present = (key, value)
256
+ else:
257
+ present = None
258
+
259
+ # compute self-attention: V x Softmax(QK^T)
260
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
261
+
262
+ attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
263
+ attn_output = self.out_proj(attn_output)
264
+ attn_output = self.resid_dropout(attn_output)
265
+
266
+ outputs = (attn_output, present)
267
+ if output_attentions:
268
+ outputs += (attn_weights,)
269
+
270
+ return outputs # a, present, (attentions)
271
+
272
+
273
+ # Adapted from transformers.models.codegen.modeling_codegen.CodeGenMLP
274
+ class CodeT5pMLP(nn.Module):
275
+ def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim
276
+ super().__init__()
277
+ embed_dim = config.n_embd
278
+
279
+ self.fc_in = nn.Linear(embed_dim, intermediate_size)
280
+ self.fc_out = nn.Linear(intermediate_size, embed_dim)
281
+
282
+ self.act = ACT2FN[config.activation_function]
283
+ self.dropout = nn.Dropout(config.resid_pdrop)
284
+
285
+ def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
286
+ hidden_states = self.fc_in(hidden_states)
287
+ hidden_states = self.act(hidden_states)
288
+ hidden_states = self.fc_out(hidden_states)
289
+ hidden_states = self.dropout(hidden_states)
290
+ return hidden_states
291
+
292
+
293
+ # Adapted from transformers.models.codegen.modeling_codegen.CodeGenBlock
294
+ class CodeT5pBlock(nn.Module):
295
+ def __init__(self, config, layer_idx=None):
296
+ super().__init__()
297
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
298
+ self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
299
+
300
+ if config.is_decoder is False:
301
+ self.attn = CodeT5pAttention(config, is_cross_attention=False, is_decoder=False)
302
+ else:
303
+ self.attn = CodeT5pAttention(config)
304
+ self.mlp = CodeT5pMLP(inner_dim, config)
305
+
306
+ # Adding 1 cross-attention layer at the final decoder layer
307
+ self.add_cross_attention_by_layer = True \
308
+ if config.add_cross_attention and layer_idx == config.n_layer - 1 else False
309
+
310
+ if config.add_cross_attention and self.add_cross_attention_by_layer:
311
+ self.crossattention = CodeT5pAttention(config, is_cross_attention=True)
312
+
313
+ def forward(
314
+ self,
315
+ hidden_states: Optional[torch.FloatTensor],
316
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
317
+ attention_mask: Optional[torch.FloatTensor] = None,
318
+ head_mask: Optional[torch.FloatTensor] = None,
319
+ encoder_hidden_states: Optional[torch.Tensor] = None,
320
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
321
+ use_cache: Optional[bool] = False,
322
+ output_attentions: Optional[bool] = False,
323
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
324
+ residual = hidden_states
325
+ hidden_states = self.ln_1(hidden_states)
326
+ attn_outputs = self.attn(
327
+ hidden_states,
328
+ layer_past=layer_past,
329
+ attention_mask=attention_mask,
330
+ head_mask=head_mask,
331
+ use_cache=use_cache,
332
+ output_attentions=output_attentions,
333
+ )
334
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
335
+ outputs = attn_outputs[1:]
336
+ feed_forward_hidden_states = self.mlp(hidden_states)
337
+
338
+ if encoder_hidden_states is not None and self.add_cross_attention_by_layer:
339
+ # add one self-attention block for cross-attention
340
+ if not hasattr(self, "crossattention"):
341
+ raise ValueError(
342
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
343
+ "cross-attention layers by setting `config.add_cross_attention=True`"
344
+ )
345
+ # residual = hidden_states
346
+ # hidden_states = self.ln_cross_attn(residual)
347
+ cross_attn_outputs = self.crossattention(
348
+ hidden_states,
349
+ attention_mask=attention_mask,
350
+ head_mask=head_mask,
351
+ encoder_hidden_states=encoder_hidden_states,
352
+ encoder_attention_mask=encoder_attention_mask,
353
+ output_attentions=output_attentions,
354
+ )
355
+ xattn_output = cross_attn_outputs[0]
356
+ attn_output = attn_output + xattn_output
357
+ outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
358
+
359
+ hidden_states = attn_output + feed_forward_hidden_states + residual
360
+
361
+ if use_cache:
362
+ outputs = (hidden_states,) + outputs
363
+ else:
364
+ outputs = (hidden_states,) + outputs[1:]
365
+
366
+ return outputs # hidden_states, present, (attentions)
367
+
368
+
369
+ # Adapted from transformers.models.codegen.modeling_codegen.CodeGenPreTrainedModel
370
+ class CodeT5pPreTrainedModel(PreTrainedModel):
371
+ """
372
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
373
+ models.
374
+ """
375
+ config_class = CodeT5pModuleConfig
376
+ base_model_prefix = "transformer"
377
+ supports_gradient_checkpointing = True
378
+ _no_split_modules = ["CodeT5pBlock"]
379
+
380
+ def __init__(self, *inputs, **kwargs):
381
+ super().__init__(*inputs, **kwargs)
382
+
383
+ def _init_weights(self, module):
384
+ """Initialize the weights."""
385
+ if isinstance(module, (nn.Linear,)):
386
+ # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
387
+ # cf https://github.com/pytorch/pytorch/pull/5617
388
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
389
+ if module.bias is not None:
390
+ module.bias.data.zero_()
391
+ elif isinstance(module, nn.Embedding):
392
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
393
+ if module.padding_idx is not None:
394
+ module.weight.data[module.padding_idx].zero_()
395
+ elif isinstance(module, nn.LayerNorm):
396
+ module.bias.data.zero_()
397
+ module.weight.data.fill_(1.0)
398
+
399
+ def _set_gradient_checkpointing(self, module, value=False):
400
+ if isinstance(module, CodeT5pModel):
401
+ module.gradient_checkpointing = value
402
+
403
+
404
+ # Adapted from transformers.models.codegen.modeling_codegen.CodeGenModel
405
+ class CodeT5pModel(CodeT5pPreTrainedModel):
406
+ def __init__(self, config):
407
+ super().__init__(config)
408
+
409
+ self.embed_dim = config.n_embd
410
+ self.vocab_size = config.vocab_size
411
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
412
+ self.drop = nn.Dropout(config.embd_pdrop)
413
+ self.h = nn.ModuleList([CodeT5pBlock(config, idx) for idx in range(config.n_layer)])
414
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
415
+ self.rotary_dim = min(config.rotary_dim, config.n_ctx // config.num_attention_heads)
416
+
417
+ self.gradient_checkpointing = False
418
+
419
+ # Initialize weights and apply final processing
420
+ self.post_init()
421
+
422
+ def get_input_embeddings(self):
423
+ return self.wte
424
+
425
+ def set_input_embeddings(self, new_embeddings):
426
+ self.wte = new_embeddings
427
+
428
+ def forward(
429
+ self,
430
+ input_ids: Optional[torch.LongTensor] = None,
431
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
432
+ attention_mask: Optional[torch.FloatTensor] = None,
433
+ token_type_ids: Optional[torch.LongTensor] = None,
434
+ position_ids: Optional[torch.LongTensor] = None,
435
+ head_mask: Optional[torch.FloatTensor] = None,
436
+ inputs_embeds: Optional[torch.FloatTensor] = None,
437
+ encoder_hidden_states: Optional[torch.Tensor] = None,
438
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
439
+ use_cache: Optional[bool] = None,
440
+ output_attentions: Optional[bool] = None,
441
+ output_hidden_states: Optional[bool] = None,
442
+ return_dict: Optional[bool] = None,
443
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
444
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
445
+ output_hidden_states = (
446
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
447
+ )
448
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
449
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
450
+
451
+ if input_ids is not None and inputs_embeds is not None:
452
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
453
+ elif input_ids is not None:
454
+ input_shape = input_ids.size()
455
+ input_ids = input_ids.view(-1, input_shape[-1])
456
+ batch_size = input_ids.shape[0]
457
+ elif inputs_embeds is not None:
458
+ input_shape = inputs_embeds.size()[:-1]
459
+ batch_size = inputs_embeds.shape[0]
460
+ else:
461
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
462
+
463
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
464
+
465
+ if token_type_ids is not None:
466
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
467
+
468
+ if position_ids is not None:
469
+ position_ids = position_ids.view(-1, input_shape[-1])
470
+
471
+ if past_key_values is None:
472
+ past_length = 0
473
+ past_key_values = tuple([None] * len(self.h))
474
+ else:
475
+ past_length = past_key_values[0][0].size(-2)
476
+
477
+ if position_ids is None:
478
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
479
+ position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
480
+
481
+ # Attention mask.
482
+ if attention_mask is not None:
483
+ if batch_size <= 0:
484
+ raise ValueError("batch_size has to be defined and > 0")
485
+ attention_mask = attention_mask.view(batch_size, -1)
486
+ # We create a 3D attention mask from a 2D tensor mask.
487
+ # Sizes are [batch_size, 1, 1, to_seq_length]
488
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
489
+ # this attention mask is more simple than the triangular masking of causal attention
490
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
491
+ attention_mask = attention_mask[:, None, None, :]
492
+
493
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
494
+ # masked positions, this operation will create a tensor which is 0.0 for
495
+ # positions we want to attend and the dtype's smallest value for masked positions.
496
+ # Since we are adding it to the raw scores before the softmax, this is
497
+ # effectively the same as removing these entirely.
498
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
499
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
500
+
501
+ # If a 2D or 3D attention mask is provided for the cross-attention
502
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
503
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
504
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
505
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
506
+ if encoder_attention_mask is None:
507
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
508
+ encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
509
+ else:
510
+ encoder_attention_mask = None
511
+
512
+ # Prepare head mask if needed
513
+ # 1.0 in head_mask indicate we keep the head
514
+ # attention_probs has shape bsz x num_attention_heads x N x N
515
+ # head_mask has shape n_layer x batch x num_attention_heads x N x N
516
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
517
+
518
+ if inputs_embeds is None:
519
+ inputs_embeds = self.wte(input_ids)
520
+
521
+ hidden_states = inputs_embeds
522
+
523
+ if token_type_ids is not None:
524
+ token_type_embeds = self.wte(token_type_ids)
525
+ hidden_states = hidden_states + token_type_embeds
526
+
527
+ hidden_states = self.drop(hidden_states)
528
+
529
+ output_shape = input_shape + (hidden_states.size(-1),)
530
+
531
+ presents = () if use_cache else None
532
+ all_self_attentions = () if output_attentions else None
533
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
534
+ all_hidden_states = () if output_hidden_states else None
535
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
536
+ if output_hidden_states:
537
+ all_hidden_states = all_hidden_states + (hidden_states,)
538
+
539
+ if self.gradient_checkpointing and self.training:
540
+ if use_cache:
541
+ logger.warning(
542
+ "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
543
+ "`use_cache=False`..."
544
+ )
545
+ use_cache = False
546
+
547
+ def create_custom_forward(module):
548
+ def custom_forward(*inputs):
549
+ # None for past_key_value
550
+ return module(*inputs, use_cache, output_attentions)
551
+
552
+ return custom_forward
553
+
554
+ outputs = torch.utils.checkpoint.checkpoint(
555
+ create_custom_forward(block),
556
+ hidden_states,
557
+ None,
558
+ attention_mask,
559
+ head_mask[i],
560
+ encoder_hidden_states,
561
+ encoder_attention_mask,
562
+ )
563
+ else:
564
+ outputs = block(
565
+ hidden_states,
566
+ layer_past=layer_past,
567
+ attention_mask=attention_mask,
568
+ head_mask=head_mask[i],
569
+ encoder_hidden_states=encoder_hidden_states,
570
+ encoder_attention_mask=encoder_attention_mask,
571
+ use_cache=use_cache,
572
+ output_attentions=output_attentions,
573
+ )
574
+
575
+ hidden_states = outputs[0]
576
+ if use_cache is True:
577
+ presents = presents + (outputs[1],)
578
+
579
+ if output_attentions:
580
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
581
+ if self.config.add_cross_attention and self.add_cross_attention_by_layer:
582
+ all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
583
+
584
+ hidden_states = self.ln_f(hidden_states)
585
+
586
+ hidden_states = hidden_states.view(output_shape)
587
+ # Add last hidden state
588
+ if output_hidden_states:
589
+ all_hidden_states = all_hidden_states + (hidden_states,)
590
+
591
+ if not return_dict:
592
+ return tuple(
593
+ v for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions] if
594
+ v is not None)
595
+
596
+ return BaseModelOutputWithPastAndCrossAttentions(
597
+ last_hidden_state=hidden_states,
598
+ past_key_values=presents,
599
+ hidden_states=all_hidden_states,
600
+ attentions=all_self_attentions,
601
+ cross_attentions=all_cross_attentions,
602
+ )
603
+
604
+
605
+ # Adapted from transformers.models.codegen.modeling_codegen.CodeGenForCausalLM
606
+ class CodeT5pForCausalLM(CodeT5pPreTrainedModel):
607
+ _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.causal_mask"]
608
+
609
+ def __init__(self, config):
610
+ super().__init__(config)
611
+ self.transformer = CodeT5pModel(config)
612
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
613
+
614
+ # Initialize weights and apply final processing
615
+ self.post_init()
616
+
617
+ def get_output_embeddings(self):
618
+ return self.lm_head
619
+
620
+ def set_output_embeddings(self, new_embeddings):
621
+ self.lm_head = new_embeddings
622
+
623
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
624
+ token_type_ids = kwargs.get("token_type_ids", None)
625
+ # only last token for inputs_ids if past is defined in kwargs
626
+ if past_key_values:
627
+ input_ids = input_ids[:, -1].unsqueeze(-1)
628
+ if token_type_ids is not None:
629
+ token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
630
+
631
+ attention_mask = kwargs.get("attention_mask", None)
632
+ position_ids = kwargs.get("position_ids", None)
633
+
634
+ if attention_mask is not None and position_ids is None:
635
+ # create position_ids on the fly for batch generation
636
+ position_ids = attention_mask.long().cumsum(-1) - 1
637
+ position_ids.masked_fill_(attention_mask == 0, 1)
638
+ if past_key_values:
639
+ position_ids = position_ids[:, -1].unsqueeze(-1)
640
+ else:
641
+ position_ids = None
642
+ return {
643
+ "input_ids": input_ids,
644
+ "past_key_values": past_key_values,
645
+ "use_cache": kwargs.get("use_cache"),
646
+ "position_ids": position_ids,
647
+ "attention_mask": attention_mask,
648
+ "token_type_ids": token_type_ids,
649
+ }
650
+
651
+ def forward(
652
+ self,
653
+ input_ids: Optional[torch.LongTensor] = None,
654
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
655
+ attention_mask: Optional[torch.FloatTensor] = None,
656
+ token_type_ids: Optional[torch.LongTensor] = None,
657
+ position_ids: Optional[torch.LongTensor] = None,
658
+ head_mask: Optional[torch.FloatTensor] = None,
659
+ inputs_embeds: Optional[torch.FloatTensor] = None,
660
+ encoder_hidden_states: Optional[torch.Tensor] = None,
661
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
662
+ labels: Optional[torch.LongTensor] = None,
663
+ use_cache: Optional[bool] = None,
664
+ output_attentions: Optional[bool] = None,
665
+ output_hidden_states: Optional[bool] = None,
666
+ return_dict: Optional[bool] = None,
667
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
668
+ r"""
669
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
670
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
671
+ `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
672
+ are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
673
+ """
674
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
675
+
676
+ transformer_outputs = self.transformer(
677
+ input_ids,
678
+ past_key_values=past_key_values,
679
+ attention_mask=attention_mask,
680
+ token_type_ids=token_type_ids,
681
+ position_ids=position_ids,
682
+ head_mask=head_mask,
683
+ inputs_embeds=inputs_embeds,
684
+ encoder_hidden_states=encoder_hidden_states,
685
+ encoder_attention_mask=encoder_attention_mask,
686
+ use_cache=use_cache,
687
+ output_attentions=output_attentions,
688
+ output_hidden_states=output_hidden_states,
689
+ return_dict=return_dict,
690
+ )
691
+ hidden_states = transformer_outputs[0]
692
+
693
+ # make sure sampling in fp16 works correctly and
694
+ # compute loss in fp32 to match with mesh-tf version
695
+ # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
696
+ lm_logits = self.lm_head(hidden_states).to(torch.float32)
697
+
698
+ loss = None
699
+ if labels is not None:
700
+ # Shift so that tokens < n predict n
701
+ shift_logits = lm_logits[..., :-1, :].contiguous()
702
+ shift_labels = labels[..., 1:].contiguous()
703
+ # Flatten the tokens
704
+ loss_fct = CrossEntropyLoss()
705
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
706
+
707
+ loss = loss.to(hidden_states.dtype)
708
+
709
+ if not return_dict:
710
+ output = (lm_logits,) + transformer_outputs[1:]
711
+ return ((loss,) + output) if loss is not None else output
712
+
713
+ return CausalLMOutputWithCrossAttentions(
714
+ loss=loss,
715
+ logits=lm_logits,
716
+ past_key_values=transformer_outputs.past_key_values,
717
+ hidden_states=transformer_outputs.hidden_states,
718
+ attentions=transformer_outputs.attentions,
719
+ cross_attentions=transformer_outputs.cross_attentions,
720
+ )
721
+
722
+ @staticmethod
723
+ def _reorder_cache(
724
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
725
+ ) -> Tuple[Tuple[torch.Tensor]]:
726
+ """
727
+ This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
728
+ [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
729
+ beam_idx at every generation step.
730
+ """
731
+ return tuple(
732
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
733
+ for layer_past in past_key_values
734
+ )
735
+
736
+
737
+ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
738
+ """
739
+ Shift input ids one token to the right.
740
+ """
741
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
742
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
743
+ if decoder_start_token_id is None:
744
+ raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
745
+ shifted_input_ids[:, 0] = decoder_start_token_id
746
+
747
+ if pad_token_id is None:
748
+ raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
749
+ # replace possible -100 values in labels by `pad_token_id`
750
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
751
+
752
+ return shifted_input_ids
753
+
754
+
755
+ # Adapted from transformers.models.encoder_decoder.modeling_encoder_decoder.EncoderDecoderModel
756
+ class CodeT5pEncoderDecoderModel(PreTrainedModel):
757
+ config_class = CodeT5pConfig
758
+
759
+ def __init__(
760
+ self,
761
+ config: Optional[PretrainedConfig] = None,
762
+ encoder: Optional[PreTrainedModel] = None,
763
+ decoder: Optional[PreTrainedModel] = None,
764
+ ):
765
+ if config is None and (encoder is None or decoder is None):
766
+ raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
767
+ if config is None:
768
+ config = CodeT5pConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
769
+ else:
770
+ if not isinstance(config, self.config_class):
771
+ raise ValueError(f"Config: {config} has to be of type {self.config_class}")
772
+
773
+ if config.decoder.cross_attention_hidden_size is not None:
774
+ if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
775
+ raise ValueError(
776
+ "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
777
+ f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
778
+ f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
779
+ " `config.encoder.hidden_size`."
780
+ )
781
+
782
+ # initialize with config
783
+ super().__init__(config)
784
+
785
+ if encoder is None:
786
+ encoder = CodeT5pModel(config.encoder)
787
+
788
+ if decoder is None:
789
+ decoder = CodeT5pForCausalLM(config.decoder)
790
+
791
+ self.encoder = encoder
792
+ self.decoder = decoder
793
+
794
+ if self.encoder.config.to_dict() != self.config.encoder.to_dict():
795
+ logger.warning(
796
+ f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
797
+ f" {self.config.encoder}"
798
+ )
799
+ if self.decoder.config.to_dict() != self.config.decoder.to_dict():
800
+ logger.warning(
801
+ f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
802
+ f" {self.config.decoder}"
803
+ )
804
+
805
+ # make sure that the individual model's config refers to the shared config
806
+ # so that the updates to the config will be synced
807
+ self.encoder.config = self.config.encoder
808
+ self.decoder.config = self.config.decoder
809
+
810
+ # encoder outputs might need to be projected to different dimension for decoder
811
+ if (
812
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
813
+ and self.decoder.config.cross_attention_hidden_size is None
814
+ ):
815
+ self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size)
816
+
817
+ if self.encoder.get_output_embeddings() is not None:
818
+ raise ValueError(
819
+ f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
820
+ )
821
+ # tie encoder, decoder weights if config set accordingly
822
+ self.tie_weights()
823
+
824
+ def tie_weights(self):
825
+ # tie encoder & decoder if needed
826
+ if self.config.tie_encoder_decoder:
827
+ # tie encoder and decoder base model
828
+ decoder_base_model_prefix = self.decoder.base_model_prefix
829
+ self._tie_encoder_decoder_weights(
830
+ self.encoder, self.decoder._modules[decoder_base_model_prefix], self.decoder.base_model_prefix
831
+ )
832
+
833
+ def get_encoder(self):
834
+ return self.encoder
835
+
836
+ def get_decoder(self):
837
+ return self.decoder
838
+
839
+ def get_input_embeddings(self):
840
+ return self.encoder.get_input_embeddings()
841
+
842
+ def get_output_embeddings(self):
843
+ return self.decoder.get_output_embeddings()
844
+
845
+ def set_output_embeddings(self, new_embeddings):
846
+ return self.decoder.set_output_embeddings(new_embeddings)
847
+
848
+ @classmethod
849
+ def from_pretrained(cls, *args, **kwargs):
850
+ # At the moment fast initialization is not supported for composite models
851
+ if kwargs.get("_fast_init", False):
852
+ logger.warning(
853
+ "Fast initialization is currently not supported for EncoderDecoderModel. "
854
+ "Falling back to slow initialization..."
855
+ )
856
+ kwargs["_fast_init"] = False
857
+ return super().from_pretrained(*args, **kwargs)
858
+
859
+ def forward(
860
+ self,
861
+ input_ids: Optional[torch.LongTensor] = None,
862
+ attention_mask: Optional[torch.FloatTensor] = None,
863
+ decoder_input_ids: Optional[torch.LongTensor] = None,
864
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
865
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
866
+ past_key_values: Tuple[Tuple[torch.FloatTensor]] = None,
867
+ inputs_embeds: Optional[torch.FloatTensor] = None,
868
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
869
+ labels: Optional[torch.LongTensor] = None,
870
+ use_cache: Optional[bool] = None,
871
+ output_attentions: Optional[bool] = None,
872
+ output_hidden_states: Optional[bool] = None,
873
+ return_dict: Optional[bool] = None,
874
+ **kwargs,
875
+ ) -> Union[Tuple, Seq2SeqLMOutput]:
876
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
877
+
878
+ kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
879
+
880
+ kwargs_decoder = {
881
+ argument[len("decoder_"):]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
882
+ }
883
+
884
+ if encoder_outputs is None:
885
+ encoder_outputs = self.encoder(
886
+ input_ids=input_ids,
887
+ attention_mask=attention_mask,
888
+ inputs_embeds=inputs_embeds,
889
+ output_attentions=output_attentions,
890
+ output_hidden_states=output_hidden_states,
891
+ return_dict=return_dict,
892
+ **kwargs_encoder,
893
+ )
894
+ elif isinstance(encoder_outputs, tuple):
895
+ encoder_outputs = BaseModelOutput(*encoder_outputs)
896
+
897
+ encoder_hidden_states = encoder_outputs[0]
898
+
899
+ # optionally project encoder_hidden_states
900
+ if (
901
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
902
+ and self.decoder.config.cross_attention_hidden_size is None
903
+ ):
904
+ encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
905
+
906
+ if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
907
+ decoder_input_ids = shift_tokens_right(
908
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
909
+ )
910
+
911
+ # Decode
912
+ decoder_outputs = self.decoder(
913
+ input_ids=decoder_input_ids,
914
+ attention_mask=decoder_attention_mask,
915
+ encoder_hidden_states=encoder_hidden_states,
916
+ encoder_attention_mask=attention_mask,
917
+ inputs_embeds=decoder_inputs_embeds,
918
+ output_attentions=output_attentions,
919
+ output_hidden_states=output_hidden_states,
920
+ use_cache=use_cache,
921
+ past_key_values=past_key_values,
922
+ return_dict=return_dict,
923
+ **kwargs_decoder,
924
+ )
925
+
926
+ # Compute loss independent from decoder (as some shift the logits inside them)
927
+ loss = None
928
+ if labels is not None:
929
+ # warnings.warn(DEPRECATION_WARNING, FutureWarning)
930
+ logits = decoder_outputs.logits if return_dict else decoder_outputs[0]
931
+ loss_fct = CrossEntropyLoss()
932
+ loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.view(-1))
933
+
934
+ if not return_dict:
935
+ if loss is not None:
936
+ return (loss,) + decoder_outputs + encoder_outputs
937
+ else:
938
+ return decoder_outputs + encoder_outputs
939
+
940
+ return Seq2SeqLMOutput(
941
+ loss=loss,
942
+ logits=decoder_outputs.logits,
943
+ past_key_values=decoder_outputs.past_key_values,
944
+ decoder_hidden_states=decoder_outputs.hidden_states,
945
+ decoder_attentions=decoder_outputs.attentions,
946
+ cross_attentions=decoder_outputs.cross_attentions,
947
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
948
+ encoder_hidden_states=encoder_outputs.hidden_states,
949
+ encoder_attentions=encoder_outputs.attentions,
950
+ )
951
+
952
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
953
+ return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
954
+
955
+ def prepare_inputs_for_generation(
956
+ self, input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
957
+ ):
958
+ decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past=past)
959
+ decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
960
+ input_dict = {
961
+ "attention_mask": attention_mask,
962
+ "decoder_attention_mask": decoder_attention_mask,
963
+ "decoder_input_ids": decoder_inputs["input_ids"],
964
+ "encoder_outputs": encoder_outputs,
965
+ "past_key_values": decoder_inputs["past_key_values"],
966
+ "use_cache": use_cache,
967
+ }
968
+ return input_dict
969
+
970
+ def resize_token_embeddings(self, *args, **kwargs):
971
+ raise NotImplementedError(
972
+ "Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the"
973
+ " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or"
974
+ " model.decoder.resize_token_embeddings(...))"
975
+ )
976
+
977
+ def _reorder_cache(self, past, beam_idx):
978
+ # apply decoder cache reordering here
979
+ return self.decoder._reorder_cache(past, beam_idx)
pytorch_model-00001-of-00005.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9967b7e82e270cb35283f2c144e43f78d0035089e547f92fc25a6e4661e1976b
3
+ size 7999540051
pytorch_model-00002-of-00005.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:284ebb62d85f6cf8daed2b19555d4c770ddb7910528998b16777a5cd298aebdd
3
+ size 7890287073
pytorch_model-00003-of-00005.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7653b082f14d3100eef585651cd334b2ffa8647d00cdf593bff8a207ec8787a
3
+ size 7886055003
pytorch_model-00004-of-00005.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd28a8395e410610b1ad22b3f65c68da77e2f2aded6f3e8167e67aab8ebeb518
3
+ size 7890237937
pytorch_model-00005-of-00005.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0dc206ba4cb142254bfb570964a925bd296e1a72d7ca7311fdea745f537d032
3
+ size 1552098187
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,507 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "metadata": {
3
+ "total_size": 33218048000
4
+ },
5
+ "weight_map": {
6
+ "decoder.lm_head.bias": "pytorch_model-00005-of-00005.bin",
7
+ "decoder.lm_head.weight": "pytorch_model-00005-of-00005.bin",
8
+ "decoder.transformer.h.0.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
9
+ "decoder.transformer.h.0.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
10
+ "decoder.transformer.h.0.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
11
+ "decoder.transformer.h.0.ln_1.bias": "pytorch_model-00001-of-00005.bin",
12
+ "decoder.transformer.h.0.ln_1.weight": "pytorch_model-00001-of-00005.bin",
13
+ "decoder.transformer.h.0.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
14
+ "decoder.transformer.h.0.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
15
+ "decoder.transformer.h.0.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
16
+ "decoder.transformer.h.0.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
17
+ "decoder.transformer.h.1.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
18
+ "decoder.transformer.h.1.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
19
+ "decoder.transformer.h.1.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
20
+ "decoder.transformer.h.1.ln_1.bias": "pytorch_model-00001-of-00005.bin",
21
+ "decoder.transformer.h.1.ln_1.weight": "pytorch_model-00001-of-00005.bin",
22
+ "decoder.transformer.h.1.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
23
+ "decoder.transformer.h.1.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
24
+ "decoder.transformer.h.1.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
25
+ "decoder.transformer.h.1.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
26
+ "decoder.transformer.h.10.attn.causal_mask": "pytorch_model-00002-of-00005.bin",
27
+ "decoder.transformer.h.10.attn.out_proj.weight": "pytorch_model-00002-of-00005.bin",
28
+ "decoder.transformer.h.10.attn.qkv_proj.weight": "pytorch_model-00002-of-00005.bin",
29
+ "decoder.transformer.h.10.ln_1.bias": "pytorch_model-00002-of-00005.bin",
30
+ "decoder.transformer.h.10.ln_1.weight": "pytorch_model-00002-of-00005.bin",
31
+ "decoder.transformer.h.10.mlp.fc_in.bias": "pytorch_model-00002-of-00005.bin",
32
+ "decoder.transformer.h.10.mlp.fc_in.weight": "pytorch_model-00002-of-00005.bin",
33
+ "decoder.transformer.h.10.mlp.fc_out.bias": "pytorch_model-00002-of-00005.bin",
34
+ "decoder.transformer.h.10.mlp.fc_out.weight": "pytorch_model-00002-of-00005.bin",
35
+ "decoder.transformer.h.11.attn.causal_mask": "pytorch_model-00002-of-00005.bin",
36
+ "decoder.transformer.h.11.attn.out_proj.weight": "pytorch_model-00002-of-00005.bin",
37
+ "decoder.transformer.h.11.attn.qkv_proj.weight": "pytorch_model-00002-of-00005.bin",
38
+ "decoder.transformer.h.11.ln_1.bias": "pytorch_model-00002-of-00005.bin",
39
+ "decoder.transformer.h.11.ln_1.weight": "pytorch_model-00002-of-00005.bin",
40
+ "decoder.transformer.h.11.mlp.fc_in.bias": "pytorch_model-00002-of-00005.bin",
41
+ "decoder.transformer.h.11.mlp.fc_in.weight": "pytorch_model-00002-of-00005.bin",
42
+ "decoder.transformer.h.11.mlp.fc_out.bias": "pytorch_model-00002-of-00005.bin",
43
+ "decoder.transformer.h.11.mlp.fc_out.weight": "pytorch_model-00002-of-00005.bin",
44
+ "decoder.transformer.h.12.attn.causal_mask": "pytorch_model-00002-of-00005.bin",
45
+ "decoder.transformer.h.12.attn.out_proj.weight": "pytorch_model-00002-of-00005.bin",
46
+ "decoder.transformer.h.12.attn.qkv_proj.weight": "pytorch_model-00002-of-00005.bin",
47
+ "decoder.transformer.h.12.ln_1.bias": "pytorch_model-00002-of-00005.bin",
48
+ "decoder.transformer.h.12.ln_1.weight": "pytorch_model-00002-of-00005.bin",
49
+ "decoder.transformer.h.12.mlp.fc_in.bias": "pytorch_model-00002-of-00005.bin",
50
+ "decoder.transformer.h.12.mlp.fc_in.weight": "pytorch_model-00002-of-00005.bin",
51
+ "decoder.transformer.h.12.mlp.fc_out.bias": "pytorch_model-00002-of-00005.bin",
52
+ "decoder.transformer.h.12.mlp.fc_out.weight": "pytorch_model-00002-of-00005.bin",
53
+ "decoder.transformer.h.13.attn.causal_mask": "pytorch_model-00002-of-00005.bin",
54
+ "decoder.transformer.h.13.attn.out_proj.weight": "pytorch_model-00002-of-00005.bin",
55
+ "decoder.transformer.h.13.attn.qkv_proj.weight": "pytorch_model-00002-of-00005.bin",
56
+ "decoder.transformer.h.13.ln_1.bias": "pytorch_model-00002-of-00005.bin",
57
+ "decoder.transformer.h.13.ln_1.weight": "pytorch_model-00002-of-00005.bin",
58
+ "decoder.transformer.h.13.mlp.fc_in.bias": "pytorch_model-00002-of-00005.bin",
59
+ "decoder.transformer.h.13.mlp.fc_in.weight": "pytorch_model-00002-of-00005.bin",
60
+ "decoder.transformer.h.13.mlp.fc_out.bias": "pytorch_model-00002-of-00005.bin",
61
+ "decoder.transformer.h.13.mlp.fc_out.weight": "pytorch_model-00002-of-00005.bin",
62
+ "decoder.transformer.h.14.attn.causal_mask": "pytorch_model-00002-of-00005.bin",
63
+ "decoder.transformer.h.14.attn.out_proj.weight": "pytorch_model-00002-of-00005.bin",
64
+ "decoder.transformer.h.14.attn.qkv_proj.weight": "pytorch_model-00002-of-00005.bin",
65
+ "decoder.transformer.h.14.ln_1.bias": "pytorch_model-00002-of-00005.bin",
66
+ "decoder.transformer.h.14.ln_1.weight": "pytorch_model-00002-of-00005.bin",
67
+ "decoder.transformer.h.14.mlp.fc_in.bias": "pytorch_model-00002-of-00005.bin",
68
+ "decoder.transformer.h.14.mlp.fc_in.weight": "pytorch_model-00002-of-00005.bin",
69
+ "decoder.transformer.h.14.mlp.fc_out.bias": "pytorch_model-00002-of-00005.bin",
70
+ "decoder.transformer.h.14.mlp.fc_out.weight": "pytorch_model-00002-of-00005.bin",
71
+ "decoder.transformer.h.15.attn.causal_mask": "pytorch_model-00002-of-00005.bin",
72
+ "decoder.transformer.h.15.attn.out_proj.weight": "pytorch_model-00002-of-00005.bin",
73
+ "decoder.transformer.h.15.attn.qkv_proj.weight": "pytorch_model-00002-of-00005.bin",
74
+ "decoder.transformer.h.15.ln_1.bias": "pytorch_model-00002-of-00005.bin",
75
+ "decoder.transformer.h.15.ln_1.weight": "pytorch_model-00002-of-00005.bin",
76
+ "decoder.transformer.h.15.mlp.fc_in.bias": "pytorch_model-00002-of-00005.bin",
77
+ "decoder.transformer.h.15.mlp.fc_in.weight": "pytorch_model-00002-of-00005.bin",
78
+ "decoder.transformer.h.15.mlp.fc_out.bias": "pytorch_model-00002-of-00005.bin",
79
+ "decoder.transformer.h.15.mlp.fc_out.weight": "pytorch_model-00002-of-00005.bin",
80
+ "decoder.transformer.h.16.attn.causal_mask": "pytorch_model-00002-of-00005.bin",
81
+ "decoder.transformer.h.16.attn.out_proj.weight": "pytorch_model-00003-of-00005.bin",
82
+ "decoder.transformer.h.16.attn.qkv_proj.weight": "pytorch_model-00003-of-00005.bin",
83
+ "decoder.transformer.h.16.ln_1.bias": "pytorch_model-00002-of-00005.bin",
84
+ "decoder.transformer.h.16.ln_1.weight": "pytorch_model-00002-of-00005.bin",
85
+ "decoder.transformer.h.16.mlp.fc_in.bias": "pytorch_model-00003-of-00005.bin",
86
+ "decoder.transformer.h.16.mlp.fc_in.weight": "pytorch_model-00003-of-00005.bin",
87
+ "decoder.transformer.h.16.mlp.fc_out.bias": "pytorch_model-00003-of-00005.bin",
88
+ "decoder.transformer.h.16.mlp.fc_out.weight": "pytorch_model-00003-of-00005.bin",
89
+ "decoder.transformer.h.17.attn.causal_mask": "pytorch_model-00003-of-00005.bin",
90
+ "decoder.transformer.h.17.attn.out_proj.weight": "pytorch_model-00003-of-00005.bin",
91
+ "decoder.transformer.h.17.attn.qkv_proj.weight": "pytorch_model-00003-of-00005.bin",
92
+ "decoder.transformer.h.17.ln_1.bias": "pytorch_model-00003-of-00005.bin",
93
+ "decoder.transformer.h.17.ln_1.weight": "pytorch_model-00003-of-00005.bin",
94
+ "decoder.transformer.h.17.mlp.fc_in.bias": "pytorch_model-00003-of-00005.bin",
95
+ "decoder.transformer.h.17.mlp.fc_in.weight": "pytorch_model-00003-of-00005.bin",
96
+ "decoder.transformer.h.17.mlp.fc_out.bias": "pytorch_model-00003-of-00005.bin",
97
+ "decoder.transformer.h.17.mlp.fc_out.weight": "pytorch_model-00003-of-00005.bin",
98
+ "decoder.transformer.h.18.attn.causal_mask": "pytorch_model-00003-of-00005.bin",
99
+ "decoder.transformer.h.18.attn.out_proj.weight": "pytorch_model-00003-of-00005.bin",
100
+ "decoder.transformer.h.18.attn.qkv_proj.weight": "pytorch_model-00003-of-00005.bin",
101
+ "decoder.transformer.h.18.ln_1.bias": "pytorch_model-00003-of-00005.bin",
102
+ "decoder.transformer.h.18.ln_1.weight": "pytorch_model-00003-of-00005.bin",
103
+ "decoder.transformer.h.18.mlp.fc_in.bias": "pytorch_model-00003-of-00005.bin",
104
+ "decoder.transformer.h.18.mlp.fc_in.weight": "pytorch_model-00003-of-00005.bin",
105
+ "decoder.transformer.h.18.mlp.fc_out.bias": "pytorch_model-00003-of-00005.bin",
106
+ "decoder.transformer.h.18.mlp.fc_out.weight": "pytorch_model-00003-of-00005.bin",
107
+ "decoder.transformer.h.19.attn.causal_mask": "pytorch_model-00003-of-00005.bin",
108
+ "decoder.transformer.h.19.attn.out_proj.weight": "pytorch_model-00003-of-00005.bin",
109
+ "decoder.transformer.h.19.attn.qkv_proj.weight": "pytorch_model-00003-of-00005.bin",
110
+ "decoder.transformer.h.19.ln_1.bias": "pytorch_model-00003-of-00005.bin",
111
+ "decoder.transformer.h.19.ln_1.weight": "pytorch_model-00003-of-00005.bin",
112
+ "decoder.transformer.h.19.mlp.fc_in.bias": "pytorch_model-00003-of-00005.bin",
113
+ "decoder.transformer.h.19.mlp.fc_in.weight": "pytorch_model-00003-of-00005.bin",
114
+ "decoder.transformer.h.19.mlp.fc_out.bias": "pytorch_model-00003-of-00005.bin",
115
+ "decoder.transformer.h.19.mlp.fc_out.weight": "pytorch_model-00003-of-00005.bin",
116
+ "decoder.transformer.h.2.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
117
+ "decoder.transformer.h.2.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
118
+ "decoder.transformer.h.2.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
119
+ "decoder.transformer.h.2.ln_1.bias": "pytorch_model-00001-of-00005.bin",
120
+ "decoder.transformer.h.2.ln_1.weight": "pytorch_model-00001-of-00005.bin",
121
+ "decoder.transformer.h.2.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
122
+ "decoder.transformer.h.2.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
123
+ "decoder.transformer.h.2.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
124
+ "decoder.transformer.h.2.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
125
+ "decoder.transformer.h.20.attn.causal_mask": "pytorch_model-00003-of-00005.bin",
126
+ "decoder.transformer.h.20.attn.out_proj.weight": "pytorch_model-00003-of-00005.bin",
127
+ "decoder.transformer.h.20.attn.qkv_proj.weight": "pytorch_model-00003-of-00005.bin",
128
+ "decoder.transformer.h.20.ln_1.bias": "pytorch_model-00003-of-00005.bin",
129
+ "decoder.transformer.h.20.ln_1.weight": "pytorch_model-00003-of-00005.bin",
130
+ "decoder.transformer.h.20.mlp.fc_in.bias": "pytorch_model-00003-of-00005.bin",
131
+ "decoder.transformer.h.20.mlp.fc_in.weight": "pytorch_model-00003-of-00005.bin",
132
+ "decoder.transformer.h.20.mlp.fc_out.bias": "pytorch_model-00003-of-00005.bin",
133
+ "decoder.transformer.h.20.mlp.fc_out.weight": "pytorch_model-00003-of-00005.bin",
134
+ "decoder.transformer.h.21.attn.causal_mask": "pytorch_model-00003-of-00005.bin",
135
+ "decoder.transformer.h.21.attn.out_proj.weight": "pytorch_model-00003-of-00005.bin",
136
+ "decoder.transformer.h.21.attn.qkv_proj.weight": "pytorch_model-00003-of-00005.bin",
137
+ "decoder.transformer.h.21.ln_1.bias": "pytorch_model-00003-of-00005.bin",
138
+ "decoder.transformer.h.21.ln_1.weight": "pytorch_model-00003-of-00005.bin",
139
+ "decoder.transformer.h.21.mlp.fc_in.bias": "pytorch_model-00003-of-00005.bin",
140
+ "decoder.transformer.h.21.mlp.fc_in.weight": "pytorch_model-00003-of-00005.bin",
141
+ "decoder.transformer.h.21.mlp.fc_out.bias": "pytorch_model-00003-of-00005.bin",
142
+ "decoder.transformer.h.21.mlp.fc_out.weight": "pytorch_model-00003-of-00005.bin",
143
+ "decoder.transformer.h.22.attn.causal_mask": "pytorch_model-00003-of-00005.bin",
144
+ "decoder.transformer.h.22.attn.out_proj.weight": "pytorch_model-00003-of-00005.bin",
145
+ "decoder.transformer.h.22.attn.qkv_proj.weight": "pytorch_model-00003-of-00005.bin",
146
+ "decoder.transformer.h.22.ln_1.bias": "pytorch_model-00003-of-00005.bin",
147
+ "decoder.transformer.h.22.ln_1.weight": "pytorch_model-00003-of-00005.bin",
148
+ "decoder.transformer.h.22.mlp.fc_in.bias": "pytorch_model-00003-of-00005.bin",
149
+ "decoder.transformer.h.22.mlp.fc_in.weight": "pytorch_model-00003-of-00005.bin",
150
+ "decoder.transformer.h.22.mlp.fc_out.bias": "pytorch_model-00003-of-00005.bin",
151
+ "decoder.transformer.h.22.mlp.fc_out.weight": "pytorch_model-00003-of-00005.bin",
152
+ "decoder.transformer.h.23.attn.causal_mask": "pytorch_model-00003-of-00005.bin",
153
+ "decoder.transformer.h.23.attn.out_proj.weight": "pytorch_model-00003-of-00005.bin",
154
+ "decoder.transformer.h.23.attn.qkv_proj.weight": "pytorch_model-00003-of-00005.bin",
155
+ "decoder.transformer.h.23.ln_1.bias": "pytorch_model-00003-of-00005.bin",
156
+ "decoder.transformer.h.23.ln_1.weight": "pytorch_model-00003-of-00005.bin",
157
+ "decoder.transformer.h.23.mlp.fc_in.bias": "pytorch_model-00003-of-00005.bin",
158
+ "decoder.transformer.h.23.mlp.fc_in.weight": "pytorch_model-00003-of-00005.bin",
159
+ "decoder.transformer.h.23.mlp.fc_out.bias": "pytorch_model-00003-of-00005.bin",
160
+ "decoder.transformer.h.23.mlp.fc_out.weight": "pytorch_model-00003-of-00005.bin",
161
+ "decoder.transformer.h.24.attn.causal_mask": "pytorch_model-00003-of-00005.bin",
162
+ "decoder.transformer.h.24.attn.out_proj.weight": "pytorch_model-00003-of-00005.bin",
163
+ "decoder.transformer.h.24.attn.qkv_proj.weight": "pytorch_model-00003-of-00005.bin",
164
+ "decoder.transformer.h.24.ln_1.bias": "pytorch_model-00003-of-00005.bin",
165
+ "decoder.transformer.h.24.ln_1.weight": "pytorch_model-00003-of-00005.bin",
166
+ "decoder.transformer.h.24.mlp.fc_in.bias": "pytorch_model-00003-of-00005.bin",
167
+ "decoder.transformer.h.24.mlp.fc_in.weight": "pytorch_model-00003-of-00005.bin",
168
+ "decoder.transformer.h.24.mlp.fc_out.bias": "pytorch_model-00004-of-00005.bin",
169
+ "decoder.transformer.h.24.mlp.fc_out.weight": "pytorch_model-00004-of-00005.bin",
170
+ "decoder.transformer.h.25.attn.causal_mask": "pytorch_model-00004-of-00005.bin",
171
+ "decoder.transformer.h.25.attn.out_proj.weight": "pytorch_model-00004-of-00005.bin",
172
+ "decoder.transformer.h.25.attn.qkv_proj.weight": "pytorch_model-00004-of-00005.bin",
173
+ "decoder.transformer.h.25.ln_1.bias": "pytorch_model-00004-of-00005.bin",
174
+ "decoder.transformer.h.25.ln_1.weight": "pytorch_model-00004-of-00005.bin",
175
+ "decoder.transformer.h.25.mlp.fc_in.bias": "pytorch_model-00004-of-00005.bin",
176
+ "decoder.transformer.h.25.mlp.fc_in.weight": "pytorch_model-00004-of-00005.bin",
177
+ "decoder.transformer.h.25.mlp.fc_out.bias": "pytorch_model-00004-of-00005.bin",
178
+ "decoder.transformer.h.25.mlp.fc_out.weight": "pytorch_model-00004-of-00005.bin",
179
+ "decoder.transformer.h.26.attn.causal_mask": "pytorch_model-00004-of-00005.bin",
180
+ "decoder.transformer.h.26.attn.out_proj.weight": "pytorch_model-00004-of-00005.bin",
181
+ "decoder.transformer.h.26.attn.qkv_proj.weight": "pytorch_model-00004-of-00005.bin",
182
+ "decoder.transformer.h.26.ln_1.bias": "pytorch_model-00004-of-00005.bin",
183
+ "decoder.transformer.h.26.ln_1.weight": "pytorch_model-00004-of-00005.bin",
184
+ "decoder.transformer.h.26.mlp.fc_in.bias": "pytorch_model-00004-of-00005.bin",
185
+ "decoder.transformer.h.26.mlp.fc_in.weight": "pytorch_model-00004-of-00005.bin",
186
+ "decoder.transformer.h.26.mlp.fc_out.bias": "pytorch_model-00004-of-00005.bin",
187
+ "decoder.transformer.h.26.mlp.fc_out.weight": "pytorch_model-00004-of-00005.bin",
188
+ "decoder.transformer.h.27.attn.causal_mask": "pytorch_model-00004-of-00005.bin",
189
+ "decoder.transformer.h.27.attn.out_proj.weight": "pytorch_model-00004-of-00005.bin",
190
+ "decoder.transformer.h.27.attn.qkv_proj.weight": "pytorch_model-00004-of-00005.bin",
191
+ "decoder.transformer.h.27.ln_1.bias": "pytorch_model-00004-of-00005.bin",
192
+ "decoder.transformer.h.27.ln_1.weight": "pytorch_model-00004-of-00005.bin",
193
+ "decoder.transformer.h.27.mlp.fc_in.bias": "pytorch_model-00004-of-00005.bin",
194
+ "decoder.transformer.h.27.mlp.fc_in.weight": "pytorch_model-00004-of-00005.bin",
195
+ "decoder.transformer.h.27.mlp.fc_out.bias": "pytorch_model-00004-of-00005.bin",
196
+ "decoder.transformer.h.27.mlp.fc_out.weight": "pytorch_model-00004-of-00005.bin",
197
+ "decoder.transformer.h.28.attn.causal_mask": "pytorch_model-00004-of-00005.bin",
198
+ "decoder.transformer.h.28.attn.out_proj.weight": "pytorch_model-00004-of-00005.bin",
199
+ "decoder.transformer.h.28.attn.qkv_proj.weight": "pytorch_model-00004-of-00005.bin",
200
+ "decoder.transformer.h.28.ln_1.bias": "pytorch_model-00004-of-00005.bin",
201
+ "decoder.transformer.h.28.ln_1.weight": "pytorch_model-00004-of-00005.bin",
202
+ "decoder.transformer.h.28.mlp.fc_in.bias": "pytorch_model-00004-of-00005.bin",
203
+ "decoder.transformer.h.28.mlp.fc_in.weight": "pytorch_model-00004-of-00005.bin",
204
+ "decoder.transformer.h.28.mlp.fc_out.bias": "pytorch_model-00004-of-00005.bin",
205
+ "decoder.transformer.h.28.mlp.fc_out.weight": "pytorch_model-00004-of-00005.bin",
206
+ "decoder.transformer.h.29.attn.causal_mask": "pytorch_model-00004-of-00005.bin",
207
+ "decoder.transformer.h.29.attn.out_proj.weight": "pytorch_model-00004-of-00005.bin",
208
+ "decoder.transformer.h.29.attn.qkv_proj.weight": "pytorch_model-00004-of-00005.bin",
209
+ "decoder.transformer.h.29.ln_1.bias": "pytorch_model-00004-of-00005.bin",
210
+ "decoder.transformer.h.29.ln_1.weight": "pytorch_model-00004-of-00005.bin",
211
+ "decoder.transformer.h.29.mlp.fc_in.bias": "pytorch_model-00004-of-00005.bin",
212
+ "decoder.transformer.h.29.mlp.fc_in.weight": "pytorch_model-00004-of-00005.bin",
213
+ "decoder.transformer.h.29.mlp.fc_out.bias": "pytorch_model-00004-of-00005.bin",
214
+ "decoder.transformer.h.29.mlp.fc_out.weight": "pytorch_model-00004-of-00005.bin",
215
+ "decoder.transformer.h.3.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
216
+ "decoder.transformer.h.3.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
217
+ "decoder.transformer.h.3.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
218
+ "decoder.transformer.h.3.ln_1.bias": "pytorch_model-00001-of-00005.bin",
219
+ "decoder.transformer.h.3.ln_1.weight": "pytorch_model-00001-of-00005.bin",
220
+ "decoder.transformer.h.3.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
221
+ "decoder.transformer.h.3.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
222
+ "decoder.transformer.h.3.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
223
+ "decoder.transformer.h.3.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
224
+ "decoder.transformer.h.30.attn.causal_mask": "pytorch_model-00004-of-00005.bin",
225
+ "decoder.transformer.h.30.attn.out_proj.weight": "pytorch_model-00004-of-00005.bin",
226
+ "decoder.transformer.h.30.attn.qkv_proj.weight": "pytorch_model-00004-of-00005.bin",
227
+ "decoder.transformer.h.30.ln_1.bias": "pytorch_model-00004-of-00005.bin",
228
+ "decoder.transformer.h.30.ln_1.weight": "pytorch_model-00004-of-00005.bin",
229
+ "decoder.transformer.h.30.mlp.fc_in.bias": "pytorch_model-00004-of-00005.bin",
230
+ "decoder.transformer.h.30.mlp.fc_in.weight": "pytorch_model-00004-of-00005.bin",
231
+ "decoder.transformer.h.30.mlp.fc_out.bias": "pytorch_model-00004-of-00005.bin",
232
+ "decoder.transformer.h.30.mlp.fc_out.weight": "pytorch_model-00004-of-00005.bin",
233
+ "decoder.transformer.h.31.attn.causal_mask": "pytorch_model-00004-of-00005.bin",
234
+ "decoder.transformer.h.31.attn.out_proj.weight": "pytorch_model-00004-of-00005.bin",
235
+ "decoder.transformer.h.31.attn.qkv_proj.weight": "pytorch_model-00004-of-00005.bin",
236
+ "decoder.transformer.h.31.ln_1.bias": "pytorch_model-00004-of-00005.bin",
237
+ "decoder.transformer.h.31.ln_1.weight": "pytorch_model-00004-of-00005.bin",
238
+ "decoder.transformer.h.31.mlp.fc_in.bias": "pytorch_model-00004-of-00005.bin",
239
+ "decoder.transformer.h.31.mlp.fc_in.weight": "pytorch_model-00004-of-00005.bin",
240
+ "decoder.transformer.h.31.mlp.fc_out.bias": "pytorch_model-00004-of-00005.bin",
241
+ "decoder.transformer.h.31.mlp.fc_out.weight": "pytorch_model-00004-of-00005.bin",
242
+ "decoder.transformer.h.32.attn.causal_mask": "pytorch_model-00004-of-00005.bin",
243
+ "decoder.transformer.h.32.attn.out_proj.weight": "pytorch_model-00004-of-00005.bin",
244
+ "decoder.transformer.h.32.attn.qkv_proj.weight": "pytorch_model-00004-of-00005.bin",
245
+ "decoder.transformer.h.32.ln_1.bias": "pytorch_model-00004-of-00005.bin",
246
+ "decoder.transformer.h.32.ln_1.weight": "pytorch_model-00004-of-00005.bin",
247
+ "decoder.transformer.h.32.mlp.fc_in.bias": "pytorch_model-00004-of-00005.bin",
248
+ "decoder.transformer.h.32.mlp.fc_in.weight": "pytorch_model-00004-of-00005.bin",
249
+ "decoder.transformer.h.32.mlp.fc_out.bias": "pytorch_model-00004-of-00005.bin",
250
+ "decoder.transformer.h.32.mlp.fc_out.weight": "pytorch_model-00004-of-00005.bin",
251
+ "decoder.transformer.h.33.attn.causal_mask": "pytorch_model-00004-of-00005.bin",
252
+ "decoder.transformer.h.33.attn.out_proj.weight": "pytorch_model-00004-of-00005.bin",
253
+ "decoder.transformer.h.33.attn.qkv_proj.weight": "pytorch_model-00004-of-00005.bin",
254
+ "decoder.transformer.h.33.crossattention.causal_mask": "pytorch_model-00005-of-00005.bin",
255
+ "decoder.transformer.h.33.crossattention.out_proj.weight": "pytorch_model-00005-of-00005.bin",
256
+ "decoder.transformer.h.33.crossattention.q_attn.weight": "pytorch_model-00005-of-00005.bin",
257
+ "decoder.transformer.h.33.crossattention.qkv_proj.weight": "pytorch_model-00005-of-00005.bin",
258
+ "decoder.transformer.h.33.ln_1.bias": "pytorch_model-00004-of-00005.bin",
259
+ "decoder.transformer.h.33.ln_1.weight": "pytorch_model-00004-of-00005.bin",
260
+ "decoder.transformer.h.33.mlp.fc_in.bias": "pytorch_model-00005-of-00005.bin",
261
+ "decoder.transformer.h.33.mlp.fc_in.weight": "pytorch_model-00005-of-00005.bin",
262
+ "decoder.transformer.h.33.mlp.fc_out.bias": "pytorch_model-00005-of-00005.bin",
263
+ "decoder.transformer.h.33.mlp.fc_out.weight": "pytorch_model-00005-of-00005.bin",
264
+ "decoder.transformer.h.4.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
265
+ "decoder.transformer.h.4.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
266
+ "decoder.transformer.h.4.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
267
+ "decoder.transformer.h.4.ln_1.bias": "pytorch_model-00001-of-00005.bin",
268
+ "decoder.transformer.h.4.ln_1.weight": "pytorch_model-00001-of-00005.bin",
269
+ "decoder.transformer.h.4.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
270
+ "decoder.transformer.h.4.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
271
+ "decoder.transformer.h.4.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
272
+ "decoder.transformer.h.4.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
273
+ "decoder.transformer.h.5.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
274
+ "decoder.transformer.h.5.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
275
+ "decoder.transformer.h.5.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
276
+ "decoder.transformer.h.5.ln_1.bias": "pytorch_model-00001-of-00005.bin",
277
+ "decoder.transformer.h.5.ln_1.weight": "pytorch_model-00001-of-00005.bin",
278
+ "decoder.transformer.h.5.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
279
+ "decoder.transformer.h.5.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
280
+ "decoder.transformer.h.5.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
281
+ "decoder.transformer.h.5.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
282
+ "decoder.transformer.h.6.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
283
+ "decoder.transformer.h.6.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
284
+ "decoder.transformer.h.6.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
285
+ "decoder.transformer.h.6.ln_1.bias": "pytorch_model-00001-of-00005.bin",
286
+ "decoder.transformer.h.6.ln_1.weight": "pytorch_model-00001-of-00005.bin",
287
+ "decoder.transformer.h.6.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
288
+ "decoder.transformer.h.6.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
289
+ "decoder.transformer.h.6.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
290
+ "decoder.transformer.h.6.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
291
+ "decoder.transformer.h.7.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
292
+ "decoder.transformer.h.7.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
293
+ "decoder.transformer.h.7.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
294
+ "decoder.transformer.h.7.ln_1.bias": "pytorch_model-00001-of-00005.bin",
295
+ "decoder.transformer.h.7.ln_1.weight": "pytorch_model-00001-of-00005.bin",
296
+ "decoder.transformer.h.7.mlp.fc_in.bias": "pytorch_model-00002-of-00005.bin",
297
+ "decoder.transformer.h.7.mlp.fc_in.weight": "pytorch_model-00002-of-00005.bin",
298
+ "decoder.transformer.h.7.mlp.fc_out.bias": "pytorch_model-00002-of-00005.bin",
299
+ "decoder.transformer.h.7.mlp.fc_out.weight": "pytorch_model-00002-of-00005.bin",
300
+ "decoder.transformer.h.8.attn.causal_mask": "pytorch_model-00002-of-00005.bin",
301
+ "decoder.transformer.h.8.attn.out_proj.weight": "pytorch_model-00002-of-00005.bin",
302
+ "decoder.transformer.h.8.attn.qkv_proj.weight": "pytorch_model-00002-of-00005.bin",
303
+ "decoder.transformer.h.8.ln_1.bias": "pytorch_model-00002-of-00005.bin",
304
+ "decoder.transformer.h.8.ln_1.weight": "pytorch_model-00002-of-00005.bin",
305
+ "decoder.transformer.h.8.mlp.fc_in.bias": "pytorch_model-00002-of-00005.bin",
306
+ "decoder.transformer.h.8.mlp.fc_in.weight": "pytorch_model-00002-of-00005.bin",
307
+ "decoder.transformer.h.8.mlp.fc_out.bias": "pytorch_model-00002-of-00005.bin",
308
+ "decoder.transformer.h.8.mlp.fc_out.weight": "pytorch_model-00002-of-00005.bin",
309
+ "decoder.transformer.h.9.attn.causal_mask": "pytorch_model-00002-of-00005.bin",
310
+ "decoder.transformer.h.9.attn.out_proj.weight": "pytorch_model-00002-of-00005.bin",
311
+ "decoder.transformer.h.9.attn.qkv_proj.weight": "pytorch_model-00002-of-00005.bin",
312
+ "decoder.transformer.h.9.ln_1.bias": "pytorch_model-00002-of-00005.bin",
313
+ "decoder.transformer.h.9.ln_1.weight": "pytorch_model-00002-of-00005.bin",
314
+ "decoder.transformer.h.9.mlp.fc_in.bias": "pytorch_model-00002-of-00005.bin",
315
+ "decoder.transformer.h.9.mlp.fc_in.weight": "pytorch_model-00002-of-00005.bin",
316
+ "decoder.transformer.h.9.mlp.fc_out.bias": "pytorch_model-00002-of-00005.bin",
317
+ "decoder.transformer.h.9.mlp.fc_out.weight": "pytorch_model-00002-of-00005.bin",
318
+ "decoder.transformer.ln_f.bias": "pytorch_model-00005-of-00005.bin",
319
+ "decoder.transformer.ln_f.weight": "pytorch_model-00005-of-00005.bin",
320
+ "decoder.transformer.wte.weight": "pytorch_model-00001-of-00005.bin",
321
+ "enc_to_dec_proj.bias": "pytorch_model-00005-of-00005.bin",
322
+ "enc_to_dec_proj.weight": "pytorch_model-00005-of-00005.bin",
323
+ "encoder.h.0.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
324
+ "encoder.h.0.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
325
+ "encoder.h.0.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
326
+ "encoder.h.0.ln_1.bias": "pytorch_model-00001-of-00005.bin",
327
+ "encoder.h.0.ln_1.weight": "pytorch_model-00001-of-00005.bin",
328
+ "encoder.h.0.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
329
+ "encoder.h.0.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
330
+ "encoder.h.0.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
331
+ "encoder.h.0.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
332
+ "encoder.h.1.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
333
+ "encoder.h.1.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
334
+ "encoder.h.1.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
335
+ "encoder.h.1.ln_1.bias": "pytorch_model-00001-of-00005.bin",
336
+ "encoder.h.1.ln_1.weight": "pytorch_model-00001-of-00005.bin",
337
+ "encoder.h.1.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
338
+ "encoder.h.1.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
339
+ "encoder.h.1.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
340
+ "encoder.h.1.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
341
+ "encoder.h.10.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
342
+ "encoder.h.10.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
343
+ "encoder.h.10.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
344
+ "encoder.h.10.ln_1.bias": "pytorch_model-00001-of-00005.bin",
345
+ "encoder.h.10.ln_1.weight": "pytorch_model-00001-of-00005.bin",
346
+ "encoder.h.10.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
347
+ "encoder.h.10.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
348
+ "encoder.h.10.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
349
+ "encoder.h.10.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
350
+ "encoder.h.11.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
351
+ "encoder.h.11.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
352
+ "encoder.h.11.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
353
+ "encoder.h.11.ln_1.bias": "pytorch_model-00001-of-00005.bin",
354
+ "encoder.h.11.ln_1.weight": "pytorch_model-00001-of-00005.bin",
355
+ "encoder.h.11.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
356
+ "encoder.h.11.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
357
+ "encoder.h.11.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
358
+ "encoder.h.11.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
359
+ "encoder.h.12.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
360
+ "encoder.h.12.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
361
+ "encoder.h.12.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
362
+ "encoder.h.12.ln_1.bias": "pytorch_model-00001-of-00005.bin",
363
+ "encoder.h.12.ln_1.weight": "pytorch_model-00001-of-00005.bin",
364
+ "encoder.h.12.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
365
+ "encoder.h.12.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
366
+ "encoder.h.12.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
367
+ "encoder.h.12.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
368
+ "encoder.h.13.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
369
+ "encoder.h.13.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
370
+ "encoder.h.13.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
371
+ "encoder.h.13.ln_1.bias": "pytorch_model-00001-of-00005.bin",
372
+ "encoder.h.13.ln_1.weight": "pytorch_model-00001-of-00005.bin",
373
+ "encoder.h.13.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
374
+ "encoder.h.13.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
375
+ "encoder.h.13.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
376
+ "encoder.h.13.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
377
+ "encoder.h.14.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
378
+ "encoder.h.14.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
379
+ "encoder.h.14.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
380
+ "encoder.h.14.ln_1.bias": "pytorch_model-00001-of-00005.bin",
381
+ "encoder.h.14.ln_1.weight": "pytorch_model-00001-of-00005.bin",
382
+ "encoder.h.14.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
383
+ "encoder.h.14.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
384
+ "encoder.h.14.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
385
+ "encoder.h.14.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
386
+ "encoder.h.15.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
387
+ "encoder.h.15.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
388
+ "encoder.h.15.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
389
+ "encoder.h.15.ln_1.bias": "pytorch_model-00001-of-00005.bin",
390
+ "encoder.h.15.ln_1.weight": "pytorch_model-00001-of-00005.bin",
391
+ "encoder.h.15.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
392
+ "encoder.h.15.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
393
+ "encoder.h.15.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
394
+ "encoder.h.15.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
395
+ "encoder.h.16.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
396
+ "encoder.h.16.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
397
+ "encoder.h.16.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
398
+ "encoder.h.16.ln_1.bias": "pytorch_model-00001-of-00005.bin",
399
+ "encoder.h.16.ln_1.weight": "pytorch_model-00001-of-00005.bin",
400
+ "encoder.h.16.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
401
+ "encoder.h.16.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
402
+ "encoder.h.16.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
403
+ "encoder.h.16.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
404
+ "encoder.h.17.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
405
+ "encoder.h.17.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
406
+ "encoder.h.17.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
407
+ "encoder.h.17.ln_1.bias": "pytorch_model-00001-of-00005.bin",
408
+ "encoder.h.17.ln_1.weight": "pytorch_model-00001-of-00005.bin",
409
+ "encoder.h.17.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
410
+ "encoder.h.17.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
411
+ "encoder.h.17.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
412
+ "encoder.h.17.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
413
+ "encoder.h.18.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
414
+ "encoder.h.18.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
415
+ "encoder.h.18.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
416
+ "encoder.h.18.ln_1.bias": "pytorch_model-00001-of-00005.bin",
417
+ "encoder.h.18.ln_1.weight": "pytorch_model-00001-of-00005.bin",
418
+ "encoder.h.18.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
419
+ "encoder.h.18.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
420
+ "encoder.h.18.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
421
+ "encoder.h.18.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
422
+ "encoder.h.19.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
423
+ "encoder.h.19.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
424
+ "encoder.h.19.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
425
+ "encoder.h.19.ln_1.bias": "pytorch_model-00001-of-00005.bin",
426
+ "encoder.h.19.ln_1.weight": "pytorch_model-00001-of-00005.bin",
427
+ "encoder.h.19.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
428
+ "encoder.h.19.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
429
+ "encoder.h.19.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
430
+ "encoder.h.19.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
431
+ "encoder.h.2.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
432
+ "encoder.h.2.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
433
+ "encoder.h.2.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
434
+ "encoder.h.2.ln_1.bias": "pytorch_model-00001-of-00005.bin",
435
+ "encoder.h.2.ln_1.weight": "pytorch_model-00001-of-00005.bin",
436
+ "encoder.h.2.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
437
+ "encoder.h.2.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
438
+ "encoder.h.2.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
439
+ "encoder.h.2.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
440
+ "encoder.h.3.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
441
+ "encoder.h.3.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
442
+ "encoder.h.3.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
443
+ "encoder.h.3.ln_1.bias": "pytorch_model-00001-of-00005.bin",
444
+ "encoder.h.3.ln_1.weight": "pytorch_model-00001-of-00005.bin",
445
+ "encoder.h.3.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
446
+ "encoder.h.3.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
447
+ "encoder.h.3.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
448
+ "encoder.h.3.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
449
+ "encoder.h.4.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
450
+ "encoder.h.4.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
451
+ "encoder.h.4.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
452
+ "encoder.h.4.ln_1.bias": "pytorch_model-00001-of-00005.bin",
453
+ "encoder.h.4.ln_1.weight": "pytorch_model-00001-of-00005.bin",
454
+ "encoder.h.4.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
455
+ "encoder.h.4.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
456
+ "encoder.h.4.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
457
+ "encoder.h.4.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
458
+ "encoder.h.5.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
459
+ "encoder.h.5.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
460
+ "encoder.h.5.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
461
+ "encoder.h.5.ln_1.bias": "pytorch_model-00001-of-00005.bin",
462
+ "encoder.h.5.ln_1.weight": "pytorch_model-00001-of-00005.bin",
463
+ "encoder.h.5.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
464
+ "encoder.h.5.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
465
+ "encoder.h.5.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
466
+ "encoder.h.5.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
467
+ "encoder.h.6.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
468
+ "encoder.h.6.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
469
+ "encoder.h.6.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
470
+ "encoder.h.6.ln_1.bias": "pytorch_model-00001-of-00005.bin",
471
+ "encoder.h.6.ln_1.weight": "pytorch_model-00001-of-00005.bin",
472
+ "encoder.h.6.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
473
+ "encoder.h.6.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
474
+ "encoder.h.6.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
475
+ "encoder.h.6.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
476
+ "encoder.h.7.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
477
+ "encoder.h.7.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
478
+ "encoder.h.7.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
479
+ "encoder.h.7.ln_1.bias": "pytorch_model-00001-of-00005.bin",
480
+ "encoder.h.7.ln_1.weight": "pytorch_model-00001-of-00005.bin",
481
+ "encoder.h.7.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
482
+ "encoder.h.7.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
483
+ "encoder.h.7.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
484
+ "encoder.h.7.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
485
+ "encoder.h.8.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
486
+ "encoder.h.8.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
487
+ "encoder.h.8.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
488
+ "encoder.h.8.ln_1.bias": "pytorch_model-00001-of-00005.bin",
489
+ "encoder.h.8.ln_1.weight": "pytorch_model-00001-of-00005.bin",
490
+ "encoder.h.8.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
491
+ "encoder.h.8.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
492
+ "encoder.h.8.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
493
+ "encoder.h.8.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
494
+ "encoder.h.9.attn.causal_mask": "pytorch_model-00001-of-00005.bin",
495
+ "encoder.h.9.attn.out_proj.weight": "pytorch_model-00001-of-00005.bin",
496
+ "encoder.h.9.attn.qkv_proj.weight": "pytorch_model-00001-of-00005.bin",
497
+ "encoder.h.9.ln_1.bias": "pytorch_model-00001-of-00005.bin",
498
+ "encoder.h.9.ln_1.weight": "pytorch_model-00001-of-00005.bin",
499
+ "encoder.h.9.mlp.fc_in.bias": "pytorch_model-00001-of-00005.bin",
500
+ "encoder.h.9.mlp.fc_in.weight": "pytorch_model-00001-of-00005.bin",
501
+ "encoder.h.9.mlp.fc_out.bias": "pytorch_model-00001-of-00005.bin",
502
+ "encoder.h.9.mlp.fc_out.weight": "pytorch_model-00001-of-00005.bin",
503
+ "encoder.ln_f.bias": "pytorch_model-00001-of-00005.bin",
504
+ "encoder.ln_f.weight": "pytorch_model-00001-of-00005.bin",
505
+ "encoder.wte.weight": "pytorch_model-00001-of-00005.bin"
506
+ }
507
+ }
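pytorch_model.bin.index.json follows the standard Transformers sharded-checkpoint layout: metadata.total_size gives the combined parameter size in bytes (about 33.2 GB here), and weight_map assigns every tensor name to one of the five shards. from_pretrained consumes this index automatically, but it can also be inspected by hand; a minimal sketch, assuming a local clone of the repo:

import json
from collections import defaultdict

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])  # 33218048000 bytes for this checkpoint

# Count how many tensors each shard holds.
tensors_per_shard = defaultdict(int)
for name, shard in index["weight_map"].items():
    tensors_per_shard[shard] += 1
for shard in sorted(tensors_per_shard):
    print(shard, tensors_per_shard[shard], "tensors")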
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "bos_token": "<|endoftext|>",
3
+ "eos_token": "<|endoftext|>",
4
+ "pad_token": "<|endoftext|>",
5
+ "unk_token": "<|endoftext|>"
6
+ }
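All four special tokens point at the same <|endoftext|> string, so padding, unknown handling, and sequence boundaries all reuse a single token id. A quick hedged check, assuming the same repo id as above:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Salesforce/instructcodet5p-16b")  # assumed repo id
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)  # all "<|endoftext|>"
print(tok.pad_token_id == tok.eos_token_id)  # True, since padding reuses the eos token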
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,10 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "bos_token": "<|endoftext|>",
4
+ "eos_token": "<|endoftext|>",
5
+ "model_max_length": 2048,
6
+ "name_or_path": "Salesforce/codegen-350M-mono",
7
+ "special_tokens_map_file": null,
8
+ "tokenizer_class": "CodeGenTokenizer",
9
+ "unk_token": "<|endoftext|>"
10
+ }
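The tokenizer is the CodeGen BPE tokenizer (name_or_path points at Salesforce/codegen-350M-mono) with a model_max_length of 2048 tokens and add_prefix_space disabled. A short hedged round-trip sketch, again assuming the repo id used above:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Salesforce/instructcodet5p-16b")  # assumed repo id
snippet = "def add(a, b):\n    return a + b"
ids = tok(snippet, truncation=True, max_length=2048).input_ids
print(len(ids), tok.decode(ids))  # decodes back to the original snippet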
vocab.json ADDED
The diff for this file is too large to render. See raw diff
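Taken together, the files in this commit form a complete instruction-tuned CodeT5+ checkpoint. A hedged end-to-end generation sketch in the spirit of the CodeT5+ usage examples; the repo id, dtype settings, and instruction string are assumptions, and the full 16B model needs a GPU with sufficient memory:

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

checkpoint = "Salesforce/instructcodet5p-16b"  # assumed repo id
device = "cuda"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(
    checkpoint, torch_dtype=torch.float16, low_cpu_mem_usage=True, trust_remote_code=True
).to(device)

prompt = "Write a Python function that reverses a string."  # placeholder instruction
encoding = tokenizer(prompt, return_tensors="pt").to(device)
encoding["decoder_input_ids"] = encoding["input_ids"].clone()  # prompt-fed decoder, as in CodeT5+ examples
outputs = model.generate(**encoding, max_length=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))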