lukasmoeller committed
Commit: 82a66d3
Parent(s): d722848

Upload ReplitLM
config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_name_or_path": "replit/replit-code-v1-3b",
+   "alibi": true,
+   "alibi_bias_max": 8,
+   "architectures": [
+     "ReplitLM"
+   ],
+   "attn_clip_qkv": null,
+   "attn_impl": "torch",
+   "attn_pdrop": 0,
+   "attn_qk_ln": false,
+   "attn_uses_sequence_id": false,
+   "auto_map": {
+     "AutoConfig": "configuration_replit_lm.ReplitLMConfig",
+     "AutoModelForCausalLM": "replit_lm.ReplitLM"
+   },
+   "d_model": 2560,
+   "emb_init_std": null,
+   "emb_init_uniform_lim": null,
+   "emb_pdrop": 0,
+   "embedding_fraction": 1.0,
+   "fan_mode": "fan_in",
+   "init_device": "cpu",
+   "init_div_is_residual": true,
+   "init_gain": 0,
+   "init_nonlinearity": "relu",
+   "init_std": 0.02,
+   "logit_scale": null,
+   "low_precision_layernorm": true,
+   "max_seq_len": 2048,
+   "mlp_ratio": 4,
+   "model_type": "replit_lm",
+   "n_heads": 32,
+   "n_layers": 32,
+   "no_bias": true,
+   "param_init_fn": "kaiming_normal_",
+   "prefix_lm": false,
+   "resid_pdrop": 0,
+   "softmax_scale": null,
+   "tokenizer_name": "replit/replit-code-v1-3b",
+   "torch_dtype": "float32",
+   "transformers_version": "4.28.1",
+   "use_cache": false,
+   "verbose": 0,
+   "vocab_size": 32768
+ }
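
The auto_map block above wires AutoConfig and AutoModelForCausalLM to the custom classes uploaded in this commit, so the checkpoint has to be loaded with trust_remote_code=True. A minimal loading sketch (weights are float32 per torch_dtype; no device placement is assumed):

from transformers import AutoModelForCausalLM, AutoTokenizer

# trust_remote_code=True lets transformers import the classes named in
# auto_map (configuration_replit_lm.ReplitLMConfig, replit_lm.ReplitLM)
# from this repository instead of a built-in architecture.
tokenizer = AutoTokenizer.from_pretrained(
    "replit/replit-code-v1-3b", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "replit/replit-code-v1-3b", trust_remote_code=True)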
configuration_replit_lm.py ADDED
@@ -0,0 +1,168 @@
+ # Copyright 2022 MosaicML Examples authors
+ # SPDX-License-Identifier: Apache-2.0
+
+ """A HuggingFace-style model configuration, forked for ReplitLM."""
+
+ from typing import Optional, Tuple, Union
+
+ from transformers import PretrainedConfig
+
+
+ class ReplitLMConfig(PretrainedConfig):
+     model_type = 'replit_lm'
+
+     def __init__(
+         self,
+         d_model: int = 2048,
+         n_heads: int = 16,
+         n_layers: int = 24,
+         mlp_ratio: int = 4,
+         max_seq_len: int = 2048,
+         vocab_size: int = 50368,
+         attn_pdrop: float = 0.0,
+         resid_pdrop: float = 0.0,
+         emb_pdrop: float = 0.0,
+         attn_impl: str = 'triton',
+         attn_qk_ln: bool = False,
+         attn_clip_qkv: Optional[float] = None,
+         softmax_scale: Optional[float] = None,
+         prefix_lm: Optional[bool] = False,
+         attn_uses_sequence_id: Optional[bool] = False,
+         alibi: bool = False,
+         alibi_bias_max: int = 8,
+         init_device: str = 'cpu',
+         logit_scale: Optional[Union[float, str]] = None,
+         no_bias: bool = False,
+         verbose: int = 0,
+         param_init_fn: str = 'kaiming_normal_',
+         init_div_is_residual: Union[int, float, str, bool] = True,
+         init_std: float = 0.02,
+         emb_init_std: Optional[float] = None,
+         emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
+         init_gain: float = 0,
+         fan_mode: str = 'fan_in',
+         init_nonlinearity: str = 'relu',
+         embedding_fraction: float = 1.0,
+         low_precision_layernorm: bool = True,
+         use_cache: bool = False,
+         **kwargs,
+     ):
+         """The ReplitLM configuration class.
+
+         Args:
+             d_model (int): The size of the embedding dimension of the model.
+             n_heads (int): The number of attention heads.
+             n_layers (int): The number of layers in the model.
+             mlp_ratio (int): The ratio of the up/down scale in the MLP.
+             max_seq_len (int): The maximum sequence length of the model.
+             vocab_size (int): The size of the vocabulary.
+             attn_pdrop (float): The dropout probability for the attention layers.
+             resid_pdrop (float): The dropout probability applied to the attention output before combining with the residual.
+             emb_pdrop (float): The dropout probability for the embedding layer.
+             attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.
+             attn_qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.
+             attn_clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to this value.
+             softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None, use the default scale of ``1/sqrt(d_keys)``.
+             prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix can attend to one another bi-directionally; tokens outside the prefix use causal attention.
+             attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id. When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates which sub-sequence each token belongs to. Defaults to ``False``, meaning any provided `sequence_id` is ignored.
+             alibi (bool): Whether to use the ALiBi bias instead of position embeddings.
+             alibi_bias_max (int): The maximum value of the ALiBi bias.
+             init_device (str): The device to use for parameter initialization.
+             logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.
+             no_bias (bool): Whether to omit the bias terms in all layers.
+             verbose (int): The verbosity level. 0 is silent.
+             param_init_fn (str): The parameter initialization scheme to use. One of 'default_', 'baseline_', 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or 'xavier_normal_'.
+             init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.
+             init_std (float): The standard deviation of the normal distribution used to initialize the model, if using the 'baseline_' parameter initialization scheme.
+             emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.
+             emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.
+             init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.
+             fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.
+             init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.
+             embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.
+             low_precision_layernorm (bool): Whether to use low-precision layer normalization.
+             use_cache (bool): Whether or not the model should return the last key/value attentions.
+         """
+         self.d_model = d_model
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.mlp_ratio = mlp_ratio
+         self.max_seq_len = max_seq_len
+         self.vocab_size = vocab_size
+         self.attn_pdrop = attn_pdrop
+         self.resid_pdrop = resid_pdrop
+         self.emb_pdrop = emb_pdrop
+         self.attn_impl = attn_impl
+         self.attn_qk_ln = attn_qk_ln
+         self.attn_clip_qkv = attn_clip_qkv
+         self.softmax_scale = softmax_scale
+         self.prefix_lm = prefix_lm
+         self.attn_uses_sequence_id = attn_uses_sequence_id
+         self.alibi = alibi
+         self.alibi_bias_max = alibi_bias_max
+         self.init_device = init_device
+         self.logit_scale = logit_scale
+         self.no_bias = no_bias
+         self.verbose = verbose
+         self.param_init_fn = param_init_fn
+         self.init_div_is_residual = init_div_is_residual
+         self.init_std = init_std
+         self.emb_init_std = emb_init_std
+         self.emb_init_uniform_lim = emb_init_uniform_lim
+         self.init_gain = init_gain
+         self.fan_mode = fan_mode
+         self.init_nonlinearity = init_nonlinearity
+         self.embedding_fraction = embedding_fraction
+         self.low_precision_layernorm = low_precision_layernorm
+         self.use_cache = use_cache
+         # Drop training-only keys that may be present in saved configs
+         # before handing the rest to PretrainedConfig.
+         kwargs.pop('name', None)
+         kwargs.pop('loss_fn', None)
+         super().__init__(**kwargs)
+
+         self._validate_config()
+
+     def _validate_config(self):
+         if self.d_model % self.n_heads != 0:
+             raise ValueError('d_model must be divisible by n_heads')
+         if any(prob < 0 or prob > 1
+                for prob in [self.attn_pdrop, self.resid_pdrop, self.emb_pdrop]):
+             raise ValueError(
+                 'attn_pdrop, resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1'
+             )
+         if self.attn_impl not in ['torch', 'flash', 'triton']:
+             raise ValueError(f'Unknown attn_impl={self.attn_impl}')
+         if self.prefix_lm and self.attn_impl not in ['torch', 'triton']:
+             raise NotImplementedError(
+                 'prefix_lm only implemented with torch and triton attention.')
+         if self.alibi and self.attn_impl not in ['torch', 'triton']:
+             raise NotImplementedError(
+                 'alibi only implemented with torch and triton attention.')
+         if self.attn_uses_sequence_id and self.attn_impl not in ['torch', 'triton']:
+             raise NotImplementedError(
+                 'attn_uses_sequence_id only implemented with torch and triton attention.'
+             )
+         if self.embedding_fraction > 1 or self.embedding_fraction <= 0:
+             raise ValueError(
+                 'model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!'
+             )
+         if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':
+             raise ValueError(
+                 f"{self.logit_scale=} is not recognized as an option; use a numeric value or 'inv_sqrt_d_model'."
+             )
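
As a quick sanity check, the class above can be instantiated directly; the values below mirror config.json in this commit, and _validate_config() rejects inconsistent settings at construction time. A minimal sketch, assuming the file is importable from the working directory:

from configuration_replit_lm import ReplitLMConfig

# Mirror the shipped config.json for the 3B model.
config = ReplitLMConfig(
    d_model=2560,
    n_heads=32,
    n_layers=32,
    max_seq_len=2048,
    vocab_size=32768,
    attn_impl='torch',
    alibi=True,
    no_bias=True,
)

# Validation runs inside __init__, so inconsistent settings fail fast.
try:
    ReplitLMConfig(d_model=2560, n_heads=33)
except ValueError as err:
    print(err)  # d_model must be divisible by n_heads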
generation_config.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "_from_model_config": true,
+   "transformers_version": "4.28.1",
+   "use_cache": false
+ }
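
Here "_from_model_config" marks the file as derived from config.json, and use_cache=false disables the key/value cache during generation by default. Reusing the model and tokenizer loaded in the earlier sketch, a hedged sampling example (the prompt and sampling parameters are illustrative, not from the repo):

inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
# Sampling settings are illustrative; use_cache follows the
# generation_config.json default unless overridden here.
out = model.generate(**inputs, max_new_tokens=64, do_sample=True, top_p=0.95)
print(tokenizer.decode(out[0]))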
pytorch_model-00001-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ace7a3b244a7802aec01e040880c6f1bd2bfb5099732ba2e30b164e9ba9c987d
+ size 9983144733
pytorch_model-00002-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:962355661d4729fa5c6529699fcea195da3b79cadd346f42dcb7678d82c96e48
+ size 419464289
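
Both .bin files are stored as Git LFS pointers: the repository itself tracks only the spec version, the sha256 oid, and the byte size (~10 GB and ~420 MB), while the payloads live in LFS storage. A sketch for verifying a downloaded shard against the recorded oid (the local path is an assumption):

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in 1 MiB chunks so the ~10 GB shard never has
    # to fit in memory at once.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "962355661d4729fa5c6529699fcea195da3b79cadd346f42dcb7678d82c96e48"
assert sha256_of("pytorch_model-00002-of-00002.bin") == expected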
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,201 @@
+ {
+   "metadata": {
+     "total_size": 10402539520
+   },
+   "weight_map": {
+     "transformer.blocks.0.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.0.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.0.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.0.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.0.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.0.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.1.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.1.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.1.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.1.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.1.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.1.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.10.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.10.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.10.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.10.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.10.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.10.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.11.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.11.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.11.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.11.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.11.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.11.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.12.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.12.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.12.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.12.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.12.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.12.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.13.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.13.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.13.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.13.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.13.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.13.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.14.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.14.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.14.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.14.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.14.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.14.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.15.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.15.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.15.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.15.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.15.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.15.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.16.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.16.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.16.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.16.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.16.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.16.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.17.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.17.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.17.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.17.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.17.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.17.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.18.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.18.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.18.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.18.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.18.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.18.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.19.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.19.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.19.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.19.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.19.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.19.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.2.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.2.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.2.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.2.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.2.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.2.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.20.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.20.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.20.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.20.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.20.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.20.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.21.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.21.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.21.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.21.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.21.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.21.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.22.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.22.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.22.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.22.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.22.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.22.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.23.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.23.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.23.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.23.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.23.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.23.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.24.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.24.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.24.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.24.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.24.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.24.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.25.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.25.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.25.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.25.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.25.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.25.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.26.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.26.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.26.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.26.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.26.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.26.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.27.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.27.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.27.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.27.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.27.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.27.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.28.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.28.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.28.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.28.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.28.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.28.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.29.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.29.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.29.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.29.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.29.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.29.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.3.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.3.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.3.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.3.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.3.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.3.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.30.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.30.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.30.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.30.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.30.mlp.mlp_down.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.blocks.30.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.31.attn.Wqkv.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.blocks.31.attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.blocks.31.ln_1.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.blocks.31.ln_2.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.blocks.31.mlp.mlp_down.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.blocks.31.mlp.mlp_up.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.blocks.4.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.4.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.4.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.4.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.4.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.4.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.5.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.5.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.5.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.5.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.5.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.5.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.6.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.6.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.6.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.6.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.6.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.6.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.7.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.7.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.7.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.7.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.7.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.7.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.8.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.8.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.8.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.8.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.8.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.8.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.9.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.9.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.9.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.9.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.9.mlp.mlp_down.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.blocks.9.mlp.mlp_up.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.ln_f.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.wte.weight": "pytorch_model-00001-of-00002.bin"
+   }
+ }
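
This index file is what lets transformers load a checkpoint split across multiple .bin shards: "total_size" records the combined byte count and "weight_map" assigns every parameter tensor to its shard. A small inspection sketch (the local filename is an assumption):

import json
from collections import Counter

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])   # 10402539520 bytes (~10.4 GB)
print(Counter(index["weight_map"].values()))
# Almost every tensor sits in shard 00001; only block 30's mlp_down,
# all of block 31, and the final layer norm (ln_f) spill into shard 00002.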