malteos committed
Commit 69cfb5c
1 Parent(s): 52fe250
config.json ADDED
@@ -0,0 +1,18 @@
+ {
+ "apply_residual_connection_post_layernorm": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_dropout": 0.0,
+ "hidden_size": 64,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "bloom",
+ "n_head": 8,
+ "n_layer": 2,
+ "pretraining_tp": 1,
+ "slow_but_exact": false,
+ "transformers_version": "4.24.0.dev0",
+ "use_cache": true,
+ "vocab_size": 250880
+ }
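The config above describes a BLOOM-architecture checkpoint (model_type "bloom", 2 layers, 8 heads, hidden size 64, vocabulary of 250880 tokens). A minimal sketch of inspecting it with Hugging Face transformers, assuming the repository is cloned to a local directory named ./bloom-checkpoint (hypothetical path):

    from transformers import AutoConfig

    # Reads config.json from the local clone; no network access needed.
    config = AutoConfig.from_pretrained("./bloom-checkpoint")
    print(config.model_type)                                   # "bloom"
    print(config.n_layer, config.n_head, config.hidden_size)   # 2 8 64, per the config above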
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,372 @@
+ {
+ "metadata": {
+ "total_size": 12494913536
+ },
+ "weight_map": {
+ "h.0.input_layernorm.bias": "pytorch_model_00002-of-00032.bin",
+ "h.0.input_layernorm.weight": "pytorch_model_00002-of-00032.bin",
+ "h.0.mlp.dense_4h_to_h.bias": "pytorch_model_00002-of-00032.bin",
+ "h.0.mlp.dense_4h_to_h.weight": "pytorch_model_00002-of-00032.bin",
+ "h.0.mlp.dense_h_to_4h.bias": "pytorch_model_00002-of-00032.bin",
+ "h.0.mlp.dense_h_to_4h.weight": "pytorch_model_00002-of-00032.bin",
+ "h.0.post_attention_layernorm.bias": "pytorch_model_00002-of-00032.bin",
+ "h.0.post_attention_layernorm.weight": "pytorch_model_00002-of-00032.bin",
+ "h.0.self_attention.dense.bias": "pytorch_model_00002-of-00032.bin",
+ "h.0.self_attention.dense.weight": "pytorch_model_00002-of-00032.bin",
+ "h.0.self_attention.query_key_value.bias": "pytorch_model_00002-of-00032.bin",
+ "h.0.self_attention.query_key_value.weight": "pytorch_model_00002-of-00032.bin",
+ "h.1.input_layernorm.bias": "pytorch_model_00003-of-00032.bin",
+ "h.1.input_layernorm.weight": "pytorch_model_00003-of-00032.bin",
+ "h.1.mlp.dense_4h_to_h.bias": "pytorch_model_00003-of-00032.bin",
+ "h.1.mlp.dense_4h_to_h.weight": "pytorch_model_00003-of-00032.bin",
+ "h.1.mlp.dense_h_to_4h.bias": "pytorch_model_00003-of-00032.bin",
+ "h.1.mlp.dense_h_to_4h.weight": "pytorch_model_00003-of-00032.bin",
+ "h.1.post_attention_layernorm.bias": "pytorch_model_00003-of-00032.bin",
+ "h.1.post_attention_layernorm.weight": "pytorch_model_00003-of-00032.bin",
+ "h.1.self_attention.dense.bias": "pytorch_model_00003-of-00032.bin",
+ "h.1.self_attention.dense.weight": "pytorch_model_00003-of-00032.bin",
+ "h.1.self_attention.query_key_value.bias": "pytorch_model_00003-of-00032.bin",
+ "h.1.self_attention.query_key_value.weight": "pytorch_model_00003-of-00032.bin",
+ "h.10.input_layernorm.bias": "pytorch_model_00012-of-00032.bin",
+ "h.10.input_layernorm.weight": "pytorch_model_00012-of-00032.bin",
+ "h.10.mlp.dense_4h_to_h.bias": "pytorch_model_00012-of-00032.bin",
+ "h.10.mlp.dense_4h_to_h.weight": "pytorch_model_00012-of-00032.bin",
+ "h.10.mlp.dense_h_to_4h.bias": "pytorch_model_00012-of-00032.bin",
+ "h.10.mlp.dense_h_to_4h.weight": "pytorch_model_00012-of-00032.bin",
+ "h.10.post_attention_layernorm.bias": "pytorch_model_00012-of-00032.bin",
+ "h.10.post_attention_layernorm.weight": "pytorch_model_00012-of-00032.bin",
+ "h.10.self_attention.dense.bias": "pytorch_model_00012-of-00032.bin",
+ "h.10.self_attention.dense.weight": "pytorch_model_00012-of-00032.bin",
+ "h.10.self_attention.query_key_value.bias": "pytorch_model_00012-of-00032.bin",
+ "h.10.self_attention.query_key_value.weight": "pytorch_model_00012-of-00032.bin",
+ "h.11.input_layernorm.bias": "pytorch_model_00013-of-00032.bin",
+ "h.11.input_layernorm.weight": "pytorch_model_00013-of-00032.bin",
+ "h.11.mlp.dense_4h_to_h.bias": "pytorch_model_00013-of-00032.bin",
+ "h.11.mlp.dense_4h_to_h.weight": "pytorch_model_00013-of-00032.bin",
+ "h.11.mlp.dense_h_to_4h.bias": "pytorch_model_00013-of-00032.bin",
+ "h.11.mlp.dense_h_to_4h.weight": "pytorch_model_00013-of-00032.bin",
+ "h.11.post_attention_layernorm.bias": "pytorch_model_00013-of-00032.bin",
+ "h.11.post_attention_layernorm.weight": "pytorch_model_00013-of-00032.bin",
+ "h.11.self_attention.dense.bias": "pytorch_model_00013-of-00032.bin",
+ "h.11.self_attention.dense.weight": "pytorch_model_00013-of-00032.bin",
+ "h.11.self_attention.query_key_value.bias": "pytorch_model_00013-of-00032.bin",
+ "h.11.self_attention.query_key_value.weight": "pytorch_model_00013-of-00032.bin",
+ "h.12.input_layernorm.bias": "pytorch_model_00014-of-00032.bin",
+ "h.12.input_layernorm.weight": "pytorch_model_00014-of-00032.bin",
+ "h.12.mlp.dense_4h_to_h.bias": "pytorch_model_00014-of-00032.bin",
+ "h.12.mlp.dense_4h_to_h.weight": "pytorch_model_00014-of-00032.bin",
+ "h.12.mlp.dense_h_to_4h.bias": "pytorch_model_00014-of-00032.bin",
+ "h.12.mlp.dense_h_to_4h.weight": "pytorch_model_00014-of-00032.bin",
+ "h.12.post_attention_layernorm.bias": "pytorch_model_00014-of-00032.bin",
+ "h.12.post_attention_layernorm.weight": "pytorch_model_00014-of-00032.bin",
+ "h.12.self_attention.dense.bias": "pytorch_model_00014-of-00032.bin",
+ "h.12.self_attention.dense.weight": "pytorch_model_00014-of-00032.bin",
+ "h.12.self_attention.query_key_value.bias": "pytorch_model_00014-of-00032.bin",
+ "h.12.self_attention.query_key_value.weight": "pytorch_model_00014-of-00032.bin",
+ "h.13.input_layernorm.bias": "pytorch_model_00015-of-00032.bin",
+ "h.13.input_layernorm.weight": "pytorch_model_00015-of-00032.bin",
+ "h.13.mlp.dense_4h_to_h.bias": "pytorch_model_00015-of-00032.bin",
+ "h.13.mlp.dense_4h_to_h.weight": "pytorch_model_00015-of-00032.bin",
+ "h.13.mlp.dense_h_to_4h.bias": "pytorch_model_00015-of-00032.bin",
+ "h.13.mlp.dense_h_to_4h.weight": "pytorch_model_00015-of-00032.bin",
+ "h.13.post_attention_layernorm.bias": "pytorch_model_00015-of-00032.bin",
+ "h.13.post_attention_layernorm.weight": "pytorch_model_00015-of-00032.bin",
+ "h.13.self_attention.dense.bias": "pytorch_model_00015-of-00032.bin",
+ "h.13.self_attention.dense.weight": "pytorch_model_00015-of-00032.bin",
+ "h.13.self_attention.query_key_value.bias": "pytorch_model_00015-of-00032.bin",
+ "h.13.self_attention.query_key_value.weight": "pytorch_model_00015-of-00032.bin",
+ "h.14.input_layernorm.bias": "pytorch_model_00016-of-00032.bin",
+ "h.14.input_layernorm.weight": "pytorch_model_00016-of-00032.bin",
+ "h.14.mlp.dense_4h_to_h.bias": "pytorch_model_00016-of-00032.bin",
+ "h.14.mlp.dense_4h_to_h.weight": "pytorch_model_00016-of-00032.bin",
+ "h.14.mlp.dense_h_to_4h.bias": "pytorch_model_00016-of-00032.bin",
+ "h.14.mlp.dense_h_to_4h.weight": "pytorch_model_00016-of-00032.bin",
+ "h.14.post_attention_layernorm.bias": "pytorch_model_00016-of-00032.bin",
+ "h.14.post_attention_layernorm.weight": "pytorch_model_00016-of-00032.bin",
+ "h.14.self_attention.dense.bias": "pytorch_model_00016-of-00032.bin",
+ "h.14.self_attention.dense.weight": "pytorch_model_00016-of-00032.bin",
+ "h.14.self_attention.query_key_value.bias": "pytorch_model_00016-of-00032.bin",
+ "h.14.self_attention.query_key_value.weight": "pytorch_model_00016-of-00032.bin",
+ "h.15.input_layernorm.bias": "pytorch_model_00017-of-00032.bin",
+ "h.15.input_layernorm.weight": "pytorch_model_00017-of-00032.bin",
+ "h.15.mlp.dense_4h_to_h.bias": "pytorch_model_00017-of-00032.bin",
+ "h.15.mlp.dense_4h_to_h.weight": "pytorch_model_00017-of-00032.bin",
+ "h.15.mlp.dense_h_to_4h.bias": "pytorch_model_00017-of-00032.bin",
+ "h.15.mlp.dense_h_to_4h.weight": "pytorch_model_00017-of-00032.bin",
+ "h.15.post_attention_layernorm.bias": "pytorch_model_00017-of-00032.bin",
+ "h.15.post_attention_layernorm.weight": "pytorch_model_00017-of-00032.bin",
+ "h.15.self_attention.dense.bias": "pytorch_model_00017-of-00032.bin",
+ "h.15.self_attention.dense.weight": "pytorch_model_00017-of-00032.bin",
+ "h.15.self_attention.query_key_value.bias": "pytorch_model_00017-of-00032.bin",
+ "h.15.self_attention.query_key_value.weight": "pytorch_model_00017-of-00032.bin",
+ "h.16.input_layernorm.bias": "pytorch_model_00018-of-00032.bin",
+ "h.16.input_layernorm.weight": "pytorch_model_00018-of-00032.bin",
+ "h.16.mlp.dense_4h_to_h.bias": "pytorch_model_00018-of-00032.bin",
+ "h.16.mlp.dense_4h_to_h.weight": "pytorch_model_00018-of-00032.bin",
+ "h.16.mlp.dense_h_to_4h.bias": "pytorch_model_00018-of-00032.bin",
+ "h.16.mlp.dense_h_to_4h.weight": "pytorch_model_00018-of-00032.bin",
+ "h.16.post_attention_layernorm.bias": "pytorch_model_00018-of-00032.bin",
+ "h.16.post_attention_layernorm.weight": "pytorch_model_00018-of-00032.bin",
+ "h.16.self_attention.dense.bias": "pytorch_model_00018-of-00032.bin",
+ "h.16.self_attention.dense.weight": "pytorch_model_00018-of-00032.bin",
+ "h.16.self_attention.query_key_value.bias": "pytorch_model_00018-of-00032.bin",
+ "h.16.self_attention.query_key_value.weight": "pytorch_model_00018-of-00032.bin",
+ "h.17.input_layernorm.bias": "pytorch_model_00019-of-00032.bin",
+ "h.17.input_layernorm.weight": "pytorch_model_00019-of-00032.bin",
+ "h.17.mlp.dense_4h_to_h.bias": "pytorch_model_00019-of-00032.bin",
+ "h.17.mlp.dense_4h_to_h.weight": "pytorch_model_00019-of-00032.bin",
+ "h.17.mlp.dense_h_to_4h.bias": "pytorch_model_00019-of-00032.bin",
+ "h.17.mlp.dense_h_to_4h.weight": "pytorch_model_00019-of-00032.bin",
+ "h.17.post_attention_layernorm.bias": "pytorch_model_00019-of-00032.bin",
+ "h.17.post_attention_layernorm.weight": "pytorch_model_00019-of-00032.bin",
+ "h.17.self_attention.dense.bias": "pytorch_model_00019-of-00032.bin",
+ "h.17.self_attention.dense.weight": "pytorch_model_00019-of-00032.bin",
+ "h.17.self_attention.query_key_value.bias": "pytorch_model_00019-of-00032.bin",
+ "h.17.self_attention.query_key_value.weight": "pytorch_model_00019-of-00032.bin",
+ "h.18.input_layernorm.bias": "pytorch_model_00020-of-00032.bin",
+ "h.18.input_layernorm.weight": "pytorch_model_00020-of-00032.bin",
+ "h.18.mlp.dense_4h_to_h.bias": "pytorch_model_00020-of-00032.bin",
+ "h.18.mlp.dense_4h_to_h.weight": "pytorch_model_00020-of-00032.bin",
+ "h.18.mlp.dense_h_to_4h.bias": "pytorch_model_00020-of-00032.bin",
+ "h.18.mlp.dense_h_to_4h.weight": "pytorch_model_00020-of-00032.bin",
+ "h.18.post_attention_layernorm.bias": "pytorch_model_00020-of-00032.bin",
+ "h.18.post_attention_layernorm.weight": "pytorch_model_00020-of-00032.bin",
+ "h.18.self_attention.dense.bias": "pytorch_model_00020-of-00032.bin",
+ "h.18.self_attention.dense.weight": "pytorch_model_00020-of-00032.bin",
+ "h.18.self_attention.query_key_value.bias": "pytorch_model_00020-of-00032.bin",
+ "h.18.self_attention.query_key_value.weight": "pytorch_model_00020-of-00032.bin",
+ "h.19.input_layernorm.bias": "pytorch_model_00021-of-00032.bin",
+ "h.19.input_layernorm.weight": "pytorch_model_00021-of-00032.bin",
+ "h.19.mlp.dense_4h_to_h.bias": "pytorch_model_00021-of-00032.bin",
+ "h.19.mlp.dense_4h_to_h.weight": "pytorch_model_00021-of-00032.bin",
+ "h.19.mlp.dense_h_to_4h.bias": "pytorch_model_00021-of-00032.bin",
+ "h.19.mlp.dense_h_to_4h.weight": "pytorch_model_00021-of-00032.bin",
+ "h.19.post_attention_layernorm.bias": "pytorch_model_00021-of-00032.bin",
+ "h.19.post_attention_layernorm.weight": "pytorch_model_00021-of-00032.bin",
+ "h.19.self_attention.dense.bias": "pytorch_model_00021-of-00032.bin",
+ "h.19.self_attention.dense.weight": "pytorch_model_00021-of-00032.bin",
+ "h.19.self_attention.query_key_value.bias": "pytorch_model_00021-of-00032.bin",
+ "h.19.self_attention.query_key_value.weight": "pytorch_model_00021-of-00032.bin",
+ "h.2.input_layernorm.bias": "pytorch_model_00004-of-00032.bin",
+ "h.2.input_layernorm.weight": "pytorch_model_00004-of-00032.bin",
+ "h.2.mlp.dense_4h_to_h.bias": "pytorch_model_00004-of-00032.bin",
+ "h.2.mlp.dense_4h_to_h.weight": "pytorch_model_00004-of-00032.bin",
+ "h.2.mlp.dense_h_to_4h.bias": "pytorch_model_00004-of-00032.bin",
+ "h.2.mlp.dense_h_to_4h.weight": "pytorch_model_00004-of-00032.bin",
+ "h.2.post_attention_layernorm.bias": "pytorch_model_00004-of-00032.bin",
+ "h.2.post_attention_layernorm.weight": "pytorch_model_00004-of-00032.bin",
+ "h.2.self_attention.dense.bias": "pytorch_model_00004-of-00032.bin",
+ "h.2.self_attention.dense.weight": "pytorch_model_00004-of-00032.bin",
+ "h.2.self_attention.query_key_value.bias": "pytorch_model_00004-of-00032.bin",
+ "h.2.self_attention.query_key_value.weight": "pytorch_model_00004-of-00032.bin",
+ "h.20.input_layernorm.bias": "pytorch_model_00022-of-00032.bin",
+ "h.20.input_layernorm.weight": "pytorch_model_00022-of-00032.bin",
+ "h.20.mlp.dense_4h_to_h.bias": "pytorch_model_00022-of-00032.bin",
+ "h.20.mlp.dense_4h_to_h.weight": "pytorch_model_00022-of-00032.bin",
+ "h.20.mlp.dense_h_to_4h.bias": "pytorch_model_00022-of-00032.bin",
+ "h.20.mlp.dense_h_to_4h.weight": "pytorch_model_00022-of-00032.bin",
+ "h.20.post_attention_layernorm.bias": "pytorch_model_00022-of-00032.bin",
+ "h.20.post_attention_layernorm.weight": "pytorch_model_00022-of-00032.bin",
+ "h.20.self_attention.dense.bias": "pytorch_model_00022-of-00032.bin",
+ "h.20.self_attention.dense.weight": "pytorch_model_00022-of-00032.bin",
+ "h.20.self_attention.query_key_value.bias": "pytorch_model_00022-of-00032.bin",
+ "h.20.self_attention.query_key_value.weight": "pytorch_model_00022-of-00032.bin",
+ "h.21.input_layernorm.bias": "pytorch_model_00023-of-00032.bin",
+ "h.21.input_layernorm.weight": "pytorch_model_00023-of-00032.bin",
+ "h.21.mlp.dense_4h_to_h.bias": "pytorch_model_00023-of-00032.bin",
+ "h.21.mlp.dense_4h_to_h.weight": "pytorch_model_00023-of-00032.bin",
+ "h.21.mlp.dense_h_to_4h.bias": "pytorch_model_00023-of-00032.bin",
+ "h.21.mlp.dense_h_to_4h.weight": "pytorch_model_00023-of-00032.bin",
+ "h.21.post_attention_layernorm.bias": "pytorch_model_00023-of-00032.bin",
+ "h.21.post_attention_layernorm.weight": "pytorch_model_00023-of-00032.bin",
+ "h.21.self_attention.dense.bias": "pytorch_model_00023-of-00032.bin",
+ "h.21.self_attention.dense.weight": "pytorch_model_00023-of-00032.bin",
+ "h.21.self_attention.query_key_value.bias": "pytorch_model_00023-of-00032.bin",
+ "h.21.self_attention.query_key_value.weight": "pytorch_model_00023-of-00032.bin",
+ "h.22.input_layernorm.bias": "pytorch_model_00024-of-00032.bin",
+ "h.22.input_layernorm.weight": "pytorch_model_00024-of-00032.bin",
+ "h.22.mlp.dense_4h_to_h.bias": "pytorch_model_00024-of-00032.bin",
+ "h.22.mlp.dense_4h_to_h.weight": "pytorch_model_00024-of-00032.bin",
+ "h.22.mlp.dense_h_to_4h.bias": "pytorch_model_00024-of-00032.bin",
+ "h.22.mlp.dense_h_to_4h.weight": "pytorch_model_00024-of-00032.bin",
+ "h.22.post_attention_layernorm.bias": "pytorch_model_00024-of-00032.bin",
+ "h.22.post_attention_layernorm.weight": "pytorch_model_00024-of-00032.bin",
+ "h.22.self_attention.dense.bias": "pytorch_model_00024-of-00032.bin",
+ "h.22.self_attention.dense.weight": "pytorch_model_00024-of-00032.bin",
+ "h.22.self_attention.query_key_value.bias": "pytorch_model_00024-of-00032.bin",
+ "h.22.self_attention.query_key_value.weight": "pytorch_model_00024-of-00032.bin",
+ "h.23.input_layernorm.bias": "pytorch_model_00025-of-00032.bin",
+ "h.23.input_layernorm.weight": "pytorch_model_00025-of-00032.bin",
+ "h.23.mlp.dense_4h_to_h.bias": "pytorch_model_00025-of-00032.bin",
+ "h.23.mlp.dense_4h_to_h.weight": "pytorch_model_00025-of-00032.bin",
+ "h.23.mlp.dense_h_to_4h.bias": "pytorch_model_00025-of-00032.bin",
+ "h.23.mlp.dense_h_to_4h.weight": "pytorch_model_00025-of-00032.bin",
+ "h.23.post_attention_layernorm.bias": "pytorch_model_00025-of-00032.bin",
+ "h.23.post_attention_layernorm.weight": "pytorch_model_00025-of-00032.bin",
+ "h.23.self_attention.dense.bias": "pytorch_model_00025-of-00032.bin",
+ "h.23.self_attention.dense.weight": "pytorch_model_00025-of-00032.bin",
+ "h.23.self_attention.query_key_value.bias": "pytorch_model_00025-of-00032.bin",
+ "h.23.self_attention.query_key_value.weight": "pytorch_model_00025-of-00032.bin",
+ "h.24.input_layernorm.bias": "pytorch_model_00026-of-00032.bin",
+ "h.24.input_layernorm.weight": "pytorch_model_00026-of-00032.bin",
+ "h.24.mlp.dense_4h_to_h.bias": "pytorch_model_00026-of-00032.bin",
+ "h.24.mlp.dense_4h_to_h.weight": "pytorch_model_00026-of-00032.bin",
+ "h.24.mlp.dense_h_to_4h.bias": "pytorch_model_00026-of-00032.bin",
+ "h.24.mlp.dense_h_to_4h.weight": "pytorch_model_00026-of-00032.bin",
+ "h.24.post_attention_layernorm.bias": "pytorch_model_00026-of-00032.bin",
+ "h.24.post_attention_layernorm.weight": "pytorch_model_00026-of-00032.bin",
+ "h.24.self_attention.dense.bias": "pytorch_model_00026-of-00032.bin",
+ "h.24.self_attention.dense.weight": "pytorch_model_00026-of-00032.bin",
+ "h.24.self_attention.query_key_value.bias": "pytorch_model_00026-of-00032.bin",
+ "h.24.self_attention.query_key_value.weight": "pytorch_model_00026-of-00032.bin",
+ "h.25.input_layernorm.bias": "pytorch_model_00027-of-00032.bin",
+ "h.25.input_layernorm.weight": "pytorch_model_00027-of-00032.bin",
+ "h.25.mlp.dense_4h_to_h.bias": "pytorch_model_00027-of-00032.bin",
+ "h.25.mlp.dense_4h_to_h.weight": "pytorch_model_00027-of-00032.bin",
+ "h.25.mlp.dense_h_to_4h.bias": "pytorch_model_00027-of-00032.bin",
+ "h.25.mlp.dense_h_to_4h.weight": "pytorch_model_00027-of-00032.bin",
+ "h.25.post_attention_layernorm.bias": "pytorch_model_00027-of-00032.bin",
+ "h.25.post_attention_layernorm.weight": "pytorch_model_00027-of-00032.bin",
+ "h.25.self_attention.dense.bias": "pytorch_model_00027-of-00032.bin",
+ "h.25.self_attention.dense.weight": "pytorch_model_00027-of-00032.bin",
+ "h.25.self_attention.query_key_value.bias": "pytorch_model_00027-of-00032.bin",
+ "h.25.self_attention.query_key_value.weight": "pytorch_model_00027-of-00032.bin",
+ "h.26.input_layernorm.bias": "pytorch_model_00028-of-00032.bin",
+ "h.26.input_layernorm.weight": "pytorch_model_00028-of-00032.bin",
+ "h.26.mlp.dense_4h_to_h.bias": "pytorch_model_00028-of-00032.bin",
+ "h.26.mlp.dense_4h_to_h.weight": "pytorch_model_00028-of-00032.bin",
+ "h.26.mlp.dense_h_to_4h.bias": "pytorch_model_00028-of-00032.bin",
+ "h.26.mlp.dense_h_to_4h.weight": "pytorch_model_00028-of-00032.bin",
+ "h.26.post_attention_layernorm.bias": "pytorch_model_00028-of-00032.bin",
+ "h.26.post_attention_layernorm.weight": "pytorch_model_00028-of-00032.bin",
+ "h.26.self_attention.dense.bias": "pytorch_model_00028-of-00032.bin",
+ "h.26.self_attention.dense.weight": "pytorch_model_00028-of-00032.bin",
+ "h.26.self_attention.query_key_value.bias": "pytorch_model_00028-of-00032.bin",
+ "h.26.self_attention.query_key_value.weight": "pytorch_model_00028-of-00032.bin",
+ "h.27.input_layernorm.bias": "pytorch_model_00029-of-00032.bin",
+ "h.27.input_layernorm.weight": "pytorch_model_00029-of-00032.bin",
+ "h.27.mlp.dense_4h_to_h.bias": "pytorch_model_00029-of-00032.bin",
+ "h.27.mlp.dense_4h_to_h.weight": "pytorch_model_00029-of-00032.bin",
+ "h.27.mlp.dense_h_to_4h.bias": "pytorch_model_00029-of-00032.bin",
+ "h.27.mlp.dense_h_to_4h.weight": "pytorch_model_00029-of-00032.bin",
+ "h.27.post_attention_layernorm.bias": "pytorch_model_00029-of-00032.bin",
+ "h.27.post_attention_layernorm.weight": "pytorch_model_00029-of-00032.bin",
+ "h.27.self_attention.dense.bias": "pytorch_model_00029-of-00032.bin",
+ "h.27.self_attention.dense.weight": "pytorch_model_00029-of-00032.bin",
+ "h.27.self_attention.query_key_value.bias": "pytorch_model_00029-of-00032.bin",
+ "h.27.self_attention.query_key_value.weight": "pytorch_model_00029-of-00032.bin",
+ "h.28.input_layernorm.bias": "pytorch_model_00030-of-00032.bin",
+ "h.28.input_layernorm.weight": "pytorch_model_00030-of-00032.bin",
+ "h.28.mlp.dense_4h_to_h.bias": "pytorch_model_00030-of-00032.bin",
+ "h.28.mlp.dense_4h_to_h.weight": "pytorch_model_00030-of-00032.bin",
+ "h.28.mlp.dense_h_to_4h.bias": "pytorch_model_00030-of-00032.bin",
+ "h.28.mlp.dense_h_to_4h.weight": "pytorch_model_00030-of-00032.bin",
+ "h.28.post_attention_layernorm.bias": "pytorch_model_00030-of-00032.bin",
+ "h.28.post_attention_layernorm.weight": "pytorch_model_00030-of-00032.bin",
+ "h.28.self_attention.dense.bias": "pytorch_model_00030-of-00032.bin",
+ "h.28.self_attention.dense.weight": "pytorch_model_00030-of-00032.bin",
+ "h.28.self_attention.query_key_value.bias": "pytorch_model_00030-of-00032.bin",
+ "h.28.self_attention.query_key_value.weight": "pytorch_model_00030-of-00032.bin",
+ "h.29.input_layernorm.bias": "pytorch_model_00031-of-00032.bin",
+ "h.29.input_layernorm.weight": "pytorch_model_00031-of-00032.bin",
+ "h.29.mlp.dense_4h_to_h.bias": "pytorch_model_00031-of-00032.bin",
+ "h.29.mlp.dense_4h_to_h.weight": "pytorch_model_00031-of-00032.bin",
+ "h.29.mlp.dense_h_to_4h.bias": "pytorch_model_00031-of-00032.bin",
+ "h.29.mlp.dense_h_to_4h.weight": "pytorch_model_00031-of-00032.bin",
+ "h.29.post_attention_layernorm.bias": "pytorch_model_00031-of-00032.bin",
+ "h.29.post_attention_layernorm.weight": "pytorch_model_00031-of-00032.bin",
+ "h.29.self_attention.dense.bias": "pytorch_model_00031-of-00032.bin",
+ "h.29.self_attention.dense.weight": "pytorch_model_00031-of-00032.bin",
+ "h.29.self_attention.query_key_value.bias": "pytorch_model_00031-of-00032.bin",
+ "h.29.self_attention.query_key_value.weight": "pytorch_model_00031-of-00032.bin",
+ "h.3.input_layernorm.bias": "pytorch_model_00005-of-00032.bin",
+ "h.3.input_layernorm.weight": "pytorch_model_00005-of-00032.bin",
+ "h.3.mlp.dense_4h_to_h.bias": "pytorch_model_00005-of-00032.bin",
+ "h.3.mlp.dense_4h_to_h.weight": "pytorch_model_00005-of-00032.bin",
+ "h.3.mlp.dense_h_to_4h.bias": "pytorch_model_00005-of-00032.bin",
+ "h.3.mlp.dense_h_to_4h.weight": "pytorch_model_00005-of-00032.bin",
+ "h.3.post_attention_layernorm.bias": "pytorch_model_00005-of-00032.bin",
+ "h.3.post_attention_layernorm.weight": "pytorch_model_00005-of-00032.bin",
+ "h.3.self_attention.dense.bias": "pytorch_model_00005-of-00032.bin",
+ "h.3.self_attention.dense.weight": "pytorch_model_00005-of-00032.bin",
+ "h.3.self_attention.query_key_value.bias": "pytorch_model_00005-of-00032.bin",
+ "h.3.self_attention.query_key_value.weight": "pytorch_model_00005-of-00032.bin",
+ "h.4.input_layernorm.bias": "pytorch_model_00006-of-00032.bin",
+ "h.4.input_layernorm.weight": "pytorch_model_00006-of-00032.bin",
+ "h.4.mlp.dense_4h_to_h.bias": "pytorch_model_00006-of-00032.bin",
+ "h.4.mlp.dense_4h_to_h.weight": "pytorch_model_00006-of-00032.bin",
+ "h.4.mlp.dense_h_to_4h.bias": "pytorch_model_00006-of-00032.bin",
+ "h.4.mlp.dense_h_to_4h.weight": "pytorch_model_00006-of-00032.bin",
+ "h.4.post_attention_layernorm.bias": "pytorch_model_00006-of-00032.bin",
+ "h.4.post_attention_layernorm.weight": "pytorch_model_00006-of-00032.bin",
+ "h.4.self_attention.dense.bias": "pytorch_model_00006-of-00032.bin",
+ "h.4.self_attention.dense.weight": "pytorch_model_00006-of-00032.bin",
+ "h.4.self_attention.query_key_value.bias": "pytorch_model_00006-of-00032.bin",
+ "h.4.self_attention.query_key_value.weight": "pytorch_model_00006-of-00032.bin",
+ "h.5.input_layernorm.bias": "pytorch_model_00007-of-00032.bin",
+ "h.5.input_layernorm.weight": "pytorch_model_00007-of-00032.bin",
+ "h.5.mlp.dense_4h_to_h.bias": "pytorch_model_00007-of-00032.bin",
+ "h.5.mlp.dense_4h_to_h.weight": "pytorch_model_00007-of-00032.bin",
+ "h.5.mlp.dense_h_to_4h.bias": "pytorch_model_00007-of-00032.bin",
+ "h.5.mlp.dense_h_to_4h.weight": "pytorch_model_00007-of-00032.bin",
+ "h.5.post_attention_layernorm.bias": "pytorch_model_00007-of-00032.bin",
+ "h.5.post_attention_layernorm.weight": "pytorch_model_00007-of-00032.bin",
+ "h.5.self_attention.dense.bias": "pytorch_model_00007-of-00032.bin",
+ "h.5.self_attention.dense.weight": "pytorch_model_00007-of-00032.bin",
+ "h.5.self_attention.query_key_value.bias": "pytorch_model_00007-of-00032.bin",
+ "h.5.self_attention.query_key_value.weight": "pytorch_model_00007-of-00032.bin",
+ "h.6.input_layernorm.bias": "pytorch_model_00008-of-00032.bin",
+ "h.6.input_layernorm.weight": "pytorch_model_00008-of-00032.bin",
+ "h.6.mlp.dense_4h_to_h.bias": "pytorch_model_00008-of-00032.bin",
+ "h.6.mlp.dense_4h_to_h.weight": "pytorch_model_00008-of-00032.bin",
+ "h.6.mlp.dense_h_to_4h.bias": "pytorch_model_00008-of-00032.bin",
+ "h.6.mlp.dense_h_to_4h.weight": "pytorch_model_00008-of-00032.bin",
+ "h.6.post_attention_layernorm.bias": "pytorch_model_00008-of-00032.bin",
+ "h.6.post_attention_layernorm.weight": "pytorch_model_00008-of-00032.bin",
+ "h.6.self_attention.dense.bias": "pytorch_model_00008-of-00032.bin",
+ "h.6.self_attention.dense.weight": "pytorch_model_00008-of-00032.bin",
+ "h.6.self_attention.query_key_value.bias": "pytorch_model_00008-of-00032.bin",
+ "h.6.self_attention.query_key_value.weight": "pytorch_model_00008-of-00032.bin",
+ "h.7.input_layernorm.bias": "pytorch_model_00009-of-00032.bin",
+ "h.7.input_layernorm.weight": "pytorch_model_00009-of-00032.bin",
+ "h.7.mlp.dense_4h_to_h.bias": "pytorch_model_00009-of-00032.bin",
+ "h.7.mlp.dense_4h_to_h.weight": "pytorch_model_00009-of-00032.bin",
+ "h.7.mlp.dense_h_to_4h.bias": "pytorch_model_00009-of-00032.bin",
+ "h.7.mlp.dense_h_to_4h.weight": "pytorch_model_00009-of-00032.bin",
+ "h.7.post_attention_layernorm.bias": "pytorch_model_00009-of-00032.bin",
+ "h.7.post_attention_layernorm.weight": "pytorch_model_00009-of-00032.bin",
+ "h.7.self_attention.dense.bias": "pytorch_model_00009-of-00032.bin",
+ "h.7.self_attention.dense.weight": "pytorch_model_00009-of-00032.bin",
+ "h.7.self_attention.query_key_value.bias": "pytorch_model_00009-of-00032.bin",
+ "h.7.self_attention.query_key_value.weight": "pytorch_model_00009-of-00032.bin",
+ "h.8.input_layernorm.bias": "pytorch_model_00010-of-00032.bin",
+ "h.8.input_layernorm.weight": "pytorch_model_00010-of-00032.bin",
+ "h.8.mlp.dense_4h_to_h.bias": "pytorch_model_00010-of-00032.bin",
+ "h.8.mlp.dense_4h_to_h.weight": "pytorch_model_00010-of-00032.bin",
+ "h.8.mlp.dense_h_to_4h.bias": "pytorch_model_00010-of-00032.bin",
+ "h.8.mlp.dense_h_to_4h.weight": "pytorch_model_00010-of-00032.bin",
+ "h.8.post_attention_layernorm.bias": "pytorch_model_00010-of-00032.bin",
+ "h.8.post_attention_layernorm.weight": "pytorch_model_00010-of-00032.bin",
+ "h.8.self_attention.dense.bias": "pytorch_model_00010-of-00032.bin",
+ "h.8.self_attention.dense.weight": "pytorch_model_00010-of-00032.bin",
+ "h.8.self_attention.query_key_value.bias": "pytorch_model_00010-of-00032.bin",
+ "h.8.self_attention.query_key_value.weight": "pytorch_model_00010-of-00032.bin",
+ "h.9.input_layernorm.bias": "pytorch_model_00011-of-00032.bin",
+ "h.9.input_layernorm.weight": "pytorch_model_00011-of-00032.bin",
+ "h.9.mlp.dense_4h_to_h.bias": "pytorch_model_00011-of-00032.bin",
+ "h.9.mlp.dense_4h_to_h.weight": "pytorch_model_00011-of-00032.bin",
+ "h.9.mlp.dense_h_to_4h.bias": "pytorch_model_00011-of-00032.bin",
+ "h.9.mlp.dense_h_to_4h.weight": "pytorch_model_00011-of-00032.bin",
+ "h.9.post_attention_layernorm.bias": "pytorch_model_00011-of-00032.bin",
+ "h.9.post_attention_layernorm.weight": "pytorch_model_00011-of-00032.bin",
+ "h.9.self_attention.dense.bias": "pytorch_model_00011-of-00032.bin",
+ "h.9.self_attention.dense.weight": "pytorch_model_00011-of-00032.bin",
+ "h.9.self_attention.query_key_value.bias": "pytorch_model_00011-of-00032.bin",
+ "h.9.self_attention.query_key_value.weight": "pytorch_model_00011-of-00032.bin",
+ "ln_f.bias": "pytorch_model_00032-of-00032.bin",
+ "ln_f.weight": "pytorch_model_00032-of-00032.bin",
+ "word_embeddings.weight": "pytorch_model_00001-of-00032.bin",
+ "word_embeddings_layernorm.bias": "pytorch_model_00001-of-00032.bin",
+ "word_embeddings_layernorm.weight": "pytorch_model_00001-of-00032.bin"
+ }
+ }
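pytorch_model.bin.index.json maps every parameter name to the shard file that stores it, and metadata.total_size (12494913536 bytes, roughly 12.5 GB) is the combined size of all weights; this is the index transformers consults to pull in the right shards at load time. A minimal sketch, assuming the index file has been downloaded to the working directory, that groups parameters by shard:

    import json
    from collections import defaultdict

    with open("pytorch_model.bin.index.json") as f:
        index = json.load(f)

    print(index["metadata"]["total_size"])   # 12494913536

    # Group parameter names by the shard file that holds them.
    shard_to_params = defaultdict(list)
    for name, shard in index["weight_map"].items():
        shard_to_params[shard].append(name)

    for shard, names in sorted(shard_to_params.items()):
        print(shard, len(names))             # 12 tensors per transformer block shard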
pytorch_model_00001-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f87df470ca847afd30dbbedf51f26dc78e16b181cdf27bc5b90e9852ffbdf552
+ size 412107939
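Each .bin entry in this commit is a Git LFS pointer (spec version, sha256 oid, byte size) rather than the binary itself; git lfs pull replaces the pointer with the actual shard. A minimal sketch, assuming the first shard has already been fetched into the working directory, that checks the download against the pointer values above:

    import hashlib
    import os

    path = "pytorch_model_00001-of-00032.bin"
    expected_oid = "f87df470ca847afd30dbbedf51f26dc78e16b181cdf27bc5b90e9852ffbdf552"
    expected_size = 412107939

    # Hash the file in 1 MiB chunks to avoid loading ~400 MB into memory at once.
    sha = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)

    assert os.path.getsize(path) == expected_size
    assert sha.hexdigest() == expected_oid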
pytorch_model_00002-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c87d4714a2d13ff875e09858aec6e2311ed6af8c7b88dd9e5fcdd4731ac62a97
+ size 402762945
pytorch_model_00003-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32ea55e05038880ec77868765f901509b31d807818dff13c22535391aa435cf0
+ size 402762945
pytorch_model_00004-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4841f44c4e306dcc63d728e1f01b688003d30930a9f5e0033b9c553715d5bad6
+ size 402762945
pytorch_model_00005-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:022b4d9710a0d935e5160f80969572673dd72bc6e9704fee3746f24716ad0885
+ size 402762945
pytorch_model_00006-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:136925c731167d1bcee5504a59baedb2b8c8fd35ddd6175a58ffbac4d300ec7f
+ size 402762945
pytorch_model_00007-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b3996d9f17491876c6389a4a4fdd421357ea09bac32d733c77595c146d0ba5e
+ size 402762945
pytorch_model_00008-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21823e5e2b4d553ae47ba2e44856d5ee080725e700638380176b7923ee9141f1
+ size 402762945
pytorch_model_00009-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62d05f68092c3e1feaf61a11560e8251a6051d922e653c5a3ae6f986d229a1d3
+ size 402762945
pytorch_model_00010-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc55b367f7c705bfeca567939e3891cd78fda2ba1287dc688b9cb0a2ff68c1e7
+ size 402762945
pytorch_model_00011-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b931f6b34bf61cf85e2b1e816140a23a0aca7966be0e752fce7239d5bd7a5dd
+ size 402762945
pytorch_model_00012-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89bb9b8c81d6b44a9c8dae1396f87ca75693ef3d76cd695e32c452cf6b644f7c
+ size 402762945
pytorch_model_00013-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:074289143c81abfe9f764c390213a09be6e746f1e1d2fb7b7120e383bed34ec9
+ size 402762945
pytorch_model_00014-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc6b37f1a244d4ecd71d4240d4db0cd4a7b128737dbe3a526ff58ae2770b8fdd
+ size 402762945
pytorch_model_00015-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ecd83aa78126ab1fb8c40a9b9bbce7acd0550c81b4d324e1025ee30c4d69cce
+ size 402762945
pytorch_model_00016-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce1885fda2328fa734d0ea62b1d726601064b5edbb3cd2b462ef6441e7089ff4
+ size 402762945
pytorch_model_00017-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aed49a398729b3da23ebd5379b704876fa50bda3a030ae5b74b4729461c2f99b
+ size 402762945
pytorch_model_00018-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70d501e78e2d83d4f578e1344f72cba92353e5513b3225a502d74d5290234dba
+ size 402762945
pytorch_model_00019-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e8616f0a7cd61b8f39aedcafc943d609c86b9c93a7a05cb859daababb8c494d
+ size 402762945
pytorch_model_00020-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7840383337a61f8c1e41c41ed151bf98fc3a83d7a31167f3993265edcb422115
+ size 402762945
pytorch_model_00021-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:483117090b37d42241290d2bd8cec03c73f66c096213b379633c83c60f6711e7
+ size 402762945
pytorch_model_00022-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8843fabe2718f27ed62f85d8a2086f95358141231d2b38fcda14958f13f786b4
+ size 402762945
pytorch_model_00023-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c26e8cae004a660d0bbb547fa2a1be800cc1e1d62751526b99a2ca6641f0169
+ size 402762945
pytorch_model_00024-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9feedc0454fdac63e3fd1ac07344d94a5c23cb1f766915f3f5e4489b241b0fa8
+ size 402762945
pytorch_model_00025-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6b6b80768f151a8031353485b70400a1d63cb3929e053e317bd8e81363bab65
+ size 402762945
pytorch_model_00026-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a473c4b47c48dfa074f5bcf96caa5765e1fc8cded7a37579795b80547ccd588
+ size 402762945
pytorch_model_00027-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8769ee0049527de780e7d61329df46fe9198e56ac83d68b04e058e2a1f44877e
+ size 402762945
pytorch_model_00028-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e941cd0bc7bdaf5bbfe3ef027f4a734e9d4af5a252cc4258c1d30c7cb1558497
+ size 402762945
pytorch_model_00029-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64a262fec32fe30801678b195077961206322790105ce0594d56a57fa90a2e39
+ size 402762945
pytorch_model_00030-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39ddd58c9a32909b3512b9f9393377708c702a3456f9789e4c21483a7184d04f
+ size 402762945
pytorch_model_00031-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8209a216e145a53d713f6e3e60b55521c6db833eb89778beae01a5b53b231b3b
+ size 402762945
pytorch_model_00032-of-00032.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53428bf97cc29cf3c2ad2c759fe0afb712936f5fb41ccdb6948ff517cec0453c
+ size 17319
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "bos_token": "<|endoftext|>",
+ "eos_token": "<|endoftext|>",
+ "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "add_prefix_space": false,
+ "bos_token": "<|endoftext|>",
+ "eos_token": "<|endoftext|>",
+ "name_or_path": "/data/datasets/huggingface_transformers/pytorch/bloom-1b7-twc-german",
+ "special_tokens_map_file": null,
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "<|endoftext|>"
+ }
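special_tokens_map.json and tokenizer_config.json describe a GPT2-style tokenizer that uses <|endoftext|> as the BOS, EOS, and UNK token. A minimal sketch of loading it, assuming the repository (including tokenizer.json, vocab.json, and merges.txt) is cloned to ./bloom-checkpoint (hypothetical path):

    from transformers import AutoTokenizer

    # Picks up tokenizer_config.json and the vocabulary files from the local clone.
    tokenizer = AutoTokenizer.from_pretrained("./bloom-checkpoint")

    ids = tokenizer("Ein kurzer Test").input_ids
    print(ids)
    print(tokenizer.decode(ids))   # "Ein kurzer Test"
    print(tokenizer.eos_token)     # "<|endoftext|>"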
vocab.json ADDED
The diff for this file is too large to render. See raw diff