xiaowu0162 committed
Commit dbf7aed
1 Parent(s): f830a83

Upload 7 files

config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "/mnt/efs/people/diwun/models/bigcode_starcoder-3b/",
+   "activation_function": "gelu_pytorch_tanh",
+   "architectures": [
+     "GPTBigCodeForCausalLM"
+   ],
+   "attention_softmax_in_fp32": true,
+   "attn_pdrop": 0.1,
+   "bos_token_id": 0,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 0,
+   "inference_runner": 0,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "max_batch_size": null,
+   "max_sequence_length": null,
+   "model_type": "gpt_bigcode",
+   "multi_query": true,
+   "n_embd": 2816,
+   "n_head": 22,
+   "n_inner": 11264,
+   "n_layer": 36,
+   "n_positions": 8192,
+   "pad_key_length": true,
+   "pre_allocate_kv_cache": false,
+   "resid_pdrop": 0.1,
+   "scale_attention_softmax_in_fp32": true,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.28.0",
+   "use_cache": true,
+   "validate_runner_input": true,
+   "vocab_size": 49154
+ }
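Note: this config.json describes a GPT-BigCode model in the StarCoderBase-3B class: 36 layers, hidden size 2816, 22 heads with multi-query attention, an 8192-token context, and a 49154-entry vocabulary, stored in float32. A minimal loading sketch follows; the Hub repo id is a hypothetical placeholder, and transformers >= 4.28 is assumed, matching the transformers_version pinned above (the first release with GPTBigCode support).

    from transformers import AutoConfig, AutoModelForCausalLM

    config = AutoConfig.from_pretrained("<user>/<repo>")           # hypothetical repo id
    assert config.model_type == "gpt_bigcode" and config.multi_query
    model = AutoModelForCausalLM.from_pretrained("<user>/<repo>")  # resolves both .bin shards via the index below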
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 0,
+   "transformers_version": "4.28.0"
+ }
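Note: generation_config.json pins both bos_token_id and eos_token_id to token id 0, which in the StarCoder tokenizer family is <|endoftext|>; generate() reads this file automatically at load time. A sketch, assuming the hypothetical repo id from above and a compatible tokenizer (no tokenizer files are part of this commit, so one is borrowed from a base StarCoder repo here):

    from transformers import AutoModelForCausalLM, AutoTokenizer

    model = AutoModelForCausalLM.from_pretrained("<user>/<repo>")  # hypothetical repo id
    tok = AutoTokenizer.from_pretrained("bigcode/starcoderbase")   # assumed-compatible tokenizer
    ids = tok("def fib(n):", return_tensors="pt").input_ids
    out = model.generate(ids, max_new_tokens=64)                   # stops on eos_token_id=0 from this file
    print(tok.decode(out[0]))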
latest ADDED
@@ -0,0 +1 @@
+ checkpoint
pytorch_model-00001-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:664718676440069f36d9d240ced5e31fa6b99186e61db175f64cb10d4fd7f614
+ size 9998336057
pytorch_model-00002-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ff44d42bdbd82a968f6670309a2e9368f6bca37fea990bc8703933e93965689
+ size 2728747037
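Note: these two .bin entries are Git LFS pointer files, not the weights themselves; the actual shards (roughly 10.0 GB and 2.7 GB per the size fields) live in LFS storage and are fetched by git lfs pull or the Hub download APIs. The sha256 oid in each pointer allows an integrity check after download; a standard-library-only sketch:

    import hashlib

    def sha256_of(path, chunk=1 << 20):
        # stream the file so a 10 GB shard never has to fit in memory
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for block in iter(lambda: f.read(chunk), b""):
                h.update(block)
        return h.hexdigest()

    # oid copied from the first pointer above
    assert sha256_of("pytorch_model-00001-of-00002.bin") == "664718676440069f36d9d240ced5e31fa6b99186e61db175f64cb10d4fd7f614"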
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,444 @@
+ {
+   "metadata": {
+     "total_size": 12726937600
+   },
+   "weight_map": {
+     "lm_head.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.0.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.20.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.20.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.20.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.20.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.20.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.20.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.20.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.20.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.20.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.20.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.20.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.20.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.21.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.21.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.21.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.21.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.21.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.21.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.21.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.21.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.21.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.21.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.21.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.21.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.22.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.22.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.22.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.22.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.22.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.22.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.22.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.22.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.22.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.22.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.22.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.22.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.23.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.23.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.23.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.23.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.23.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.23.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.23.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.23.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.23.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.23.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.23.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.23.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.24.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.24.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.24.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.24.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.24.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.24.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.24.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.24.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.24.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.24.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.24.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.24.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.25.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.25.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.25.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.25.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.25.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.25.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.25.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.25.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.25.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.25.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.25.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.25.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.26.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.26.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.26.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.26.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.26.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.26.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.26.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.26.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.26.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.26.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.26.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.26.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.27.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.27.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.27.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.27.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.27.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.27.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.27.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.27.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.27.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.27.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.27.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.27.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.28.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.28.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.28.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.28.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.28.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.28.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.28.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.28.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.28.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.28.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.28.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.28.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.29.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.29.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.29.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.29.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.29.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.29.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.29.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.29.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.29.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.29.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.29.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.29.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.3.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.3.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.3.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.3.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.3.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.3.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.3.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.3.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.3.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.3.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.3.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.3.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.30.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.30.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.30.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.30.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.30.ln_1.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.30.ln_1.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.30.ln_2.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.30.ln_2.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.30.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.30.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.30.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.30.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.31.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.31.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.31.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.31.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.31.ln_1.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.31.ln_1.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.31.ln_2.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.31.ln_2.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.31.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.31.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.31.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.31.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.32.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.32.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.32.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.32.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.32.ln_1.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.32.ln_1.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.32.ln_2.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.32.ln_2.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.32.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.32.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.32.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.32.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.33.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.33.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.33.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.33.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.33.ln_1.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.33.ln_1.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.33.ln_2.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.33.ln_2.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.33.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.33.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.33.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.33.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.34.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.34.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.34.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.34.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.34.ln_1.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.34.ln_1.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.34.ln_2.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.34.ln_2.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.34.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.34.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.34.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.34.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.35.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.35.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.35.attn.c_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.35.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.35.ln_1.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.35.ln_1.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.35.ln_2.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.35.ln_2.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.35.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.35.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.35.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.35.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.4.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.4.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.4.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.4.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.4.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.4.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.4.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.4.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.4.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.4.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.4.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.4.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.5.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.5.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.5.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.5.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.5.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.5.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.5.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.5.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.5.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.5.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.5.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.5.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.6.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.6.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.6.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.6.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.6.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.6.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.6.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.6.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.6.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.6.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.6.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.6.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.7.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.7.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.7.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.7.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.7.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.7.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.7.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.7.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.7.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.7.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.7.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.7.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.8.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.8.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.8.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.8.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.8.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.8.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.8.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.8.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.8.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.8.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.8.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.8.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.9.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.9.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.9.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.9.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.9.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.9.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.9.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.9.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.9.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.9.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.9.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.9.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.ln_f.bias": "pytorch_model-00002-of-00002.bin",
+     "transformer.ln_f.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.wpe.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.wte.weight": "pytorch_model-00001-of-00002.bin"
+   }
+ }
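Note: the index maps every tensor name to the shard that holds it; transformers reads this file to resolve and load the two .bin files on demand. The split falls inside layer 29 (its attention and layer norms in shard 1, its MLP in shard 2), with layers 30-35, ln_f, and lm_head in shard 2 and the embeddings (wte, wpe) in shard 1. The declared total_size of 12,726,937,600 bytes at 4 bytes per float32 parameter works out to roughly 3.18B parameters, consistent with a 3B model. A quick inspection sketch, run from a checkout where the index file is present:

    import json
    from collections import Counter

    with open("pytorch_model.bin.index.json") as f:
        index = json.load(f)

    print(index["metadata"]["total_size"])        # 12726937600
    print(Counter(index["weight_map"].values()))  # tensor count per .bin shard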
zero_to_fp32.py ADDED
@@ -0,0 +1,484 @@
+ #!/usr/bin/env python
+
+ # This script extracts fp32 consolidated weights from ZeRO 2 and 3 DeepSpeed checkpoints. It gets
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+ # application.
+ #
+ # example: python zero_to_fp32.py . pytorch_model.bin
+
+ import argparse
+ import torch
+ import glob
+ import math
+ import os
+ import re
+ from collections import OrderedDict
+
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+ # DeepSpeed data structures it has to be available in the current python environment.
+ import deepspeed
+ from deepspeed.utils import logger
+ from deepspeed.checkpoint.constants import (DS_VERSION,
+                                             OPTIMIZER_STATE_DICT,
+                                             PARAM_SHAPES,
+                                             SINGLE_PARTITION_OF_FP32_GROUPS,
+                                             FP32_FLAT_GROUPS,
+                                             ZERO_STAGE,
+                                             PARTITION_COUNT,
+                                             PARAM_SHAPES,
+                                             BUFFER_NAMES)
+
+ debug = 0
+
+ # load to cpu
+ device = torch.device('cpu')
+
+
+ def atoi(text):
+     return int(text) if text.isdigit() else text
+
+
+ def natural_keys(text):
+     '''
+     alist.sort(key=natural_keys) sorts in human order
+     http://nedbatchelder.com/blog/200712/human_sorting.html
+     (See Toothy's implementation in the comments)
+     '''
+     return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+ def get_model_state_file(checkpoint_dir, zero_stage):
+     if not os.path.isdir(checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+     # there should be only one file
+     if zero_stage == 2:
+         file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+     elif zero_stage == 3:
+         file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+     if not os.path.exists(file):
+         raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+     return file
+
+
+ def get_optim_files(checkpoint_dir):
+     # XXX: need to test that this simple glob rule works for multi-node setup too
+     optim_files = sorted(glob.glob(os.path.join(checkpoint_dir,
+                                                 "*_optim_states.pt")),
+                          key=natural_keys)
+
+     if len(optim_files) == 0:
+         raise FileNotFoundError(
+             f"can't find '*_optim_states.pt' files in directory '{checkpoint_dir}'")
+
+     return optim_files
+
+
+ def parse_model_state(file):
+     state_dict = torch.load(file, map_location=device)
+
+     if BUFFER_NAMES not in state_dict:
+         raise ValueError(f"{file} is not a model state checkpoint")
+     buffer_names = state_dict[BUFFER_NAMES]
+     if debug:
+         print("Found buffers:", buffer_names)
+
+     # recover just the buffers while restoring them to fp32 if they were saved in fp16
+     buffers = {
+         k: v.float()
+         for k,
+         v in state_dict["module"].items() if k in buffer_names
+     }
+     param_shapes = state_dict[PARAM_SHAPES]
+
+     ds_version = state_dict.get(DS_VERSION, None)
+
+     return buffers, param_shapes, ds_version
+
+
+ def parse_optim_states(files, ds_checkpoint_dir):
+
+     total_files = len(files)
+     state_dicts = []
+     for f in files:
+         state_dicts.append(torch.load(f, map_location=device))
+
+     if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+         raise ValueError(f"{files[0]} is not a zero checkpoint")
+     zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+     world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+     # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+     # parameters can be different from data parallelism for non-expert parameters. So we can just
+     # use the max of the partition_count to get the dp world_size.
+
+     if type(world_size) is list:
+         world_size = max(world_size)
+
+     if world_size != total_files:
+         raise ValueError(
+             f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+             "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+         )
+
+     # the groups are named differently in each stage
+     if zero_stage == 2:
+         fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+     elif zero_stage == 3:
+         fp32_groups_key = FP32_FLAT_GROUPS
+     else:
+         raise ValueError(f"unknown zero stage {zero_stage}")
+
+     if zero_stage == 2:
+         fp32_flat_groups = [
+             state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key]
+             for i in range(len(state_dicts))
+         ]
+     elif zero_stage == 3:
+         # if there is more than one param group, there will be multiple flattened tensors - one
+         # flattened tensor per group - for simplicity merge them into a single tensor
+         #
+         # XXX: could make the script more memory efficient for when there are multiple groups - it
+         # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+         fp32_flat_groups = [
+             torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key],
+                       0) for i in range(len(state_dicts))
+         ]
+
+     return zero_stage, world_size, fp32_flat_groups
+
+
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+     """
+     Returns fp32 state_dict reconstructed from ds checkpoint
+
+     Args:
+         - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+     """
+     print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+     optim_files = get_optim_files(ds_checkpoint_dir)
+     zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+     print(
+         f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+     model_file = get_model_state_file(ds_checkpoint_dir, zero_stage)
+     buffers, param_shapes, ds_version = parse_model_state(model_file)
+     print(f'Parsing checkpoint created by deepspeed=={ds_version}')
+
+     if zero_stage == 2:
+         return _get_fp32_state_dict_from_zero2_checkpoint(world_size,
+                                                           param_shapes,
+                                                           fp32_flat_groups,
+                                                           buffers)
+     elif zero_stage == 3:
+         return _get_fp32_state_dict_from_zero3_checkpoint(world_size,
+                                                           param_shapes,
+                                                           fp32_flat_groups,
+                                                           buffers)
+
+
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size,
+                                                param_shapes,
+                                                fp32_flat_groups,
+                                                buffers):
+
+     # Reconstruction protocol:
+     #
+     # XXX: document this
+
+     if debug:
+         for i in range(world_size):
+             for j in range(len(fp32_flat_groups[0])):
+                 print(
+                     f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+     # XXX: memory usage doubles here (zero2)
+     num_param_groups = len(fp32_flat_groups[0])
+     merged_single_partition_of_fp32_groups = []
+     for i in range(num_param_groups):
+         merged_partitions = [sd[i] for sd in fp32_flat_groups]
+         full_single_fp32_vector = torch.cat(merged_partitions, 0)
+         merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+     avail_numel = sum([
+         full_single_fp32_vector.numel()
+         for full_single_fp32_vector in merged_single_partition_of_fp32_groups
+     ])
+
+     if debug:
+         wanted_params = sum([len(shapes) for shapes in param_shapes])
+         wanted_numel = sum(
+             [sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+         # not asserting if there is a mismatch due to possible padding
+         print(f"Have {avail_numel} numels to process.")
+         print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+     state_dict = OrderedDict()
+
+     # buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     total_numel = 0
+     total_params = 0
+     for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+         offset = 0
+         avail_numel = full_single_fp32_vector.numel()
+         for name, shape in shapes.items():
+
+             unpartitioned_numel = shape.numel()
+             total_numel += unpartitioned_numel
+             total_params += 1
+
+             if debug:
+                 print(
+                     f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} "
+                 )
+             state_dict[name] = full_single_fp32_vector.narrow(
+                 0,
+                 offset,
+                 unpartitioned_numel).view(shape)
+             offset += unpartitioned_numel
+
+         # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+         # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+         # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+         # live optimizer object, so we are checking that the numbers are within the right range
+         align_to = 2 * world_size
+
+         def zero2_align(x):
+             return align_to * math.ceil(x / align_to)
+
+         if debug:
+             print(f"original offset={offset}, avail_numel={avail_numel}")
+
+         offset = zero2_align(offset)
+         avail_numel = zero2_align(avail_numel)
+
+         if debug:
+             print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+         # Sanity check
+         if offset != avail_numel:
+             raise ValueError(
+                 f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(
+         f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
+     )
+
+     return state_dict
+
+
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+     remainder = unpartitioned_numel % world_size
+     padding_numel = (world_size - remainder) if remainder else 0
+     partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+     return partitioned_numel, padding_numel
+
+
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size,
+                                                param_shapes,
+                                                fp32_flat_groups,
+                                                buffers):
+
+     # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+     # param, re-consolidating each param, while dealing with padding if any
+
+     avail_numel = fp32_flat_groups[0].numel() * world_size
+     # merge list of dicts, preserving order
+     param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+     if debug:
+         for i in range(world_size):
+             print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+         wanted_params = len(param_shapes)
+         wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+         # not asserting if there is a mismatch due to possible padding
+         print(f"Have {avail_numel} numels to process.")
+         print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+     state_dict = OrderedDict()
+
+     # buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     offset = 0
+     total_numel = 0
+     total_params = 0
+     for name, shape in param_shapes.items():
+
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+         total_params += 1
+
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"{total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
+         # XXX: memory usage doubles here
+         state_dict[name] = torch.cat(
+             tuple(fp32_flat_groups[i].narrow(0,
+                                              offset,
+                                              partitioned_numel)
+                   for i in range(world_size)),
+             0).narrow(0,
+                       0,
+                       unpartitioned_numel).view(shape)
+         offset += partitioned_numel
+
+     offset *= world_size
+
+     # Sanity check
+     if offset != avail_numel:
+         raise ValueError(
+             f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(
+         f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
+     )
+
+     return state_dict
+
+
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+     ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+     via a model hub.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag in the 'latest' file. e.g., ``global_step14``
+
+     Returns:
+         - pytorch ``state_dict``
+
+     Note: this approach may not work if your application doesn't have sufficient free CPU memory and
+     you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+     the checkpoint.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         # do the training and checkpoint saving
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+         model = model.cpu() # move to cpu
+         model.load_state_dict(state_dict)
+         # submit to model hub or save the model to share with others
+
+     In this example the ``model`` will no longer be usable in the deepspeed context of the same
+     application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+     """
+     if tag is None:
+         latest_path = os.path.join(checkpoint_dir, 'latest')
+         if os.path.isfile(latest_path):
+             with open(latest_path, 'r') as fd:
+                 tag = fd.read().strip()
+         else:
+             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+     if not os.path.isdir(ds_checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+     return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+     """
+
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+     print(f"Saving fp32 state dict to {output_file}")
+     torch.save(state_dict, output_file)
+
+
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model to cpu
+     2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+     Returns:
+         - ``model``: modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+     conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note that once this is run, the ``model`` will no longer be usable in the deepspeed context
+     of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     """
+     logger.info(f"Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info(f"Overwriting model with fp32 weights")
+     model = model.cpu()
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
+ if __name__ == "__main__":
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "checkpoint_dir",
+         type=str,
+         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+     parser.add_argument(
+         "output_file",
+         type=str,
+         help=
+         "path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)"
+     )
+     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+     args = parser.parse_args()
+
+     debug = args.debug
+
+     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
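Note: zero_to_fp32.py is the stock consolidation utility that DeepSpeed copies into every checkpoint folder. The latest file uploaded above names the tag checkpoint, so running the script from the repo root would look for the ZeRO partitions under ./checkpoint/. A usage sketch; it assumes deepspeed is installed (the script imports it) and that the tag folder with its *_optim_states.pt and model-states files is present locally, which this upload does not include:

    # equivalent to the CLI form: python zero_to_fp32.py . pytorch_model.bin
    from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

    convert_zero_checkpoint_to_fp32_state_dict(".", "pytorch_model.bin")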