jssky committed on
Commit 891774d · verified · 1 Parent(s): 0cbe6b4

Training in progress, step 25, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -20,10 +20,12 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "gate_up_proj",
- "down_proj",
  "o_proj",
- "qkv_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,

  "rank_pattern": {},
  "revision": null,
  "target_modules": [
+ "qkv_proj",
  "o_proj",
+ "v_proj",
+ "q_proj",
+ "gate_up_proj",
+ "down_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:30b1197f8c001e648cc21c3a2f52b5b9ab5847d7e3faf96a98c982ef353d7c3d
  size 116744

  version https://git-lfs.github.com/spec/v1
+ oid sha256:4fed3a1af013c2f1ffea05423e3d954d625f76227a66765d63ef9fcf839c9118
  size 116744
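The binary checkpoint files below are stored as Git LFS pointers, so each diff only swaps the object hash (the recorded size stays the same except for training_args.bin). A minimal sketch for verifying a locally pulled copy against the new pointer; the local path and the expected hash are taken from the adapter_model.safetensors diff above, and this assumes the real weights, not just the pointer, have been downloaded.

```python
import hashlib

def sha256_of(path: str) -> str:
    # Hash the file in 1 MiB chunks to avoid loading it all into memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected oid from the new LFS pointer in the diff above.
expected = "4fed3a1af013c2f1ffea05423e3d954d625f76227a66765d63ef9fcf839c9118"
assert sha256_of("last-checkpoint/adapter_model.safetensors") == expected
```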
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cbc360a986d0eff8fdd066b9cb6c6d86407649747baf0e222099d0f9c1f7ca1e
  size 243310

  version https://git-lfs.github.com/spec/v1
+ oid sha256:aab980519daf7fb45ae3422fc203e3059108e016bd76025feee28fcbff3cfcdd
  size 243310
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f711d6320cc2a93c650d6d729c8971d0d773b8cb171c5051d93708acc92dd09a
  size 14512

  version https://git-lfs.github.com/spec/v1
+ oid sha256:5becd69b928f760cbad8d38057cc5adccf0f9da02cc5e9a3fcde2020e3171777
  size 14512
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e55d5b446e9b4b532831f55f45365216765b37b2e45bc9fff9db5e2344a3754c
  size 14512

  version https://git-lfs.github.com/spec/v1
+ oid sha256:97853ccc6263aab55e6cbb8496f1e99b9abe2985c3208a54a97fa0b4b3fd6113
  size 14512
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b1df0528620c07325b8faa7567e59b0c1e86a1f1ee6af1245a69c6c0463fe4e2
  size 1064

  version https://git-lfs.github.com/spec/v1
+ oid sha256:f37b2aa490ccb1598b01e14cda36e9081f7ce646deab4d3c2d03de0d2169a755
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,386 +1,203 @@
  {
- "best_metric": 10.364721298217773,
- "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.0655307994757536,
  "eval_steps": 25,
- "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 0.001310615989515072,
- "grad_norm": 0.0908861756324768,
  "learning_rate": 5e-05,
- "loss": 10.381,
  "step": 1
  },
  {
- "epoch": 0.001310615989515072,
- "eval_loss": 10.379949569702148,
- "eval_runtime": 7.1449,
- "eval_samples_per_second": 359.838,
- "eval_steps_per_second": 89.994,
  "step": 1
  },
  {
- "epoch": 0.002621231979030144,
- "grad_norm": 0.09772881865501404,
  "learning_rate": 0.0001,
- "loss": 10.3794,
  "step": 2
  },
  {
- "epoch": 0.003931847968545216,
- "grad_norm": 0.09483335167169571,
  "learning_rate": 9.989294616193017e-05,
- "loss": 10.3793,
  "step": 3
  },
  {
- "epoch": 0.005242463958060288,
- "grad_norm": 0.09991876780986786,
  "learning_rate": 9.957224306869053e-05,
- "loss": 10.3806,
  "step": 4
  },
  {
- "epoch": 0.00655307994757536,
- "grad_norm": 0.10192015022039413,
  "learning_rate": 9.903926402016153e-05,
- "loss": 10.3799,
  "step": 5
  },
  {
- "epoch": 0.007863695937090432,
- "grad_norm": 0.10369470715522766,
  "learning_rate": 9.829629131445342e-05,
- "loss": 10.3785,
  "step": 6
  },
  {
- "epoch": 0.009174311926605505,
- "grad_norm": 0.11331947892904282,
  "learning_rate": 9.73465064747553e-05,
- "loss": 10.3775,
  "step": 7
  },
  {
- "epoch": 0.010484927916120577,
- "grad_norm": 0.09781312942504883,
  "learning_rate": 9.619397662556435e-05,
- "loss": 10.3755,
  "step": 8
  },
  {
- "epoch": 0.011795543905635648,
- "grad_norm": 0.1051904484629631,
  "learning_rate": 9.484363707663442e-05,
- "loss": 10.3756,
  "step": 9
  },
  {
- "epoch": 0.01310615989515072,
- "grad_norm": 0.11857935786247253,
  "learning_rate": 9.330127018922194e-05,
- "loss": 10.3732,
  "step": 10
  },
  {
- "epoch": 0.014416775884665793,
- "grad_norm": 0.10414807498455048,
  "learning_rate": 9.157348061512727e-05,
- "loss": 10.3749,
  "step": 11
  },
  {
- "epoch": 0.015727391874180863,
- "grad_norm": 0.11369787156581879,
  "learning_rate": 8.966766701456177e-05,
- "loss": 10.3737,
  "step": 12
  },
  {
- "epoch": 0.01703800786369594,
- "grad_norm": 0.1203140988945961,
  "learning_rate": 8.759199037394887e-05,
- "loss": 10.3731,
  "step": 13
  },
  {
- "epoch": 0.01834862385321101,
- "grad_norm": 0.11466053873300552,
  "learning_rate": 8.535533905932738e-05,
- "loss": 10.3747,
  "step": 14
  },
  {
- "epoch": 0.019659239842726082,
- "grad_norm": 0.1231091246008873,
  "learning_rate": 8.296729075500344e-05,
- "loss": 10.3732,
  "step": 15
  },
  {
- "epoch": 0.020969855832241154,
- "grad_norm": 0.1282581388950348,
  "learning_rate": 8.043807145043604e-05,
- "loss": 10.3709,
  "step": 16
  },
  {
- "epoch": 0.022280471821756225,
- "grad_norm": 0.13268981873989105,
  "learning_rate": 7.777851165098012e-05,
- "loss": 10.3722,
  "step": 17
  },
  {
- "epoch": 0.023591087811271297,
- "grad_norm": 0.12877680361270905,
  "learning_rate": 7.500000000000001e-05,
- "loss": 10.376,
  "step": 18
  },
  {
- "epoch": 0.02490170380078637,
- "grad_norm": 0.1347285956144333,
  "learning_rate": 7.211443451095007e-05,
- "loss": 10.3757,
  "step": 19
  },
  {
- "epoch": 0.02621231979030144,
- "grad_norm": 0.1698066145181656,
  "learning_rate": 6.91341716182545e-05,
- "loss": 10.3733,
  "step": 20
  },
  {
- "epoch": 0.027522935779816515,
- "grad_norm": 0.1721210777759552,
  "learning_rate": 6.607197326515808e-05,
- "loss": 10.369,
  "step": 21
  },
  {
- "epoch": 0.028833551769331587,
- "grad_norm": 0.2135317325592041,
  "learning_rate": 6.294095225512603e-05,
- "loss": 10.3695,
  "step": 22
  },
  {
- "epoch": 0.03014416775884666,
- "grad_norm": 0.24202318489551544,
  "learning_rate": 5.9754516100806423e-05,
- "loss": 10.367,
  "step": 23
  },
  {
- "epoch": 0.03145478374836173,
- "grad_norm": 0.3067170977592468,
  "learning_rate": 5.6526309611002594e-05,
- "loss": 10.3656,
  "step": 24
  },
  {
- "epoch": 0.0327653997378768,
- "grad_norm": 0.3440342843532562,
  "learning_rate": 5.327015646150716e-05,
  "loss": 10.3763,
  "step": 25
  },
- {
- "epoch": 0.0327653997378768,
- "eval_loss": 10.368788719177246,
- "eval_runtime": 7.1928,
- "eval_samples_per_second": 357.442,
- "eval_steps_per_second": 89.395,
- "step": 25
- },
- {
- "epoch": 0.03407601572739188,
- "grad_norm": 0.14859028160572052,
- "learning_rate": 5e-05,
- "loss": 10.3677,
- "step": 26
- },
- {
- "epoch": 0.035386631716906945,
- "grad_norm": 0.14715676009655,
- "learning_rate": 4.6729843538492847e-05,
- "loss": 10.3674,
- "step": 27
- },
- {
- "epoch": 0.03669724770642202,
- "grad_norm": 0.1489713191986084,
- "learning_rate": 4.347369038899744e-05,
- "loss": 10.369,
- "step": 28
- },
- {
- "epoch": 0.03800786369593709,
- "grad_norm": 0.15279723703861237,
- "learning_rate": 4.0245483899193595e-05,
- "loss": 10.3671,
- "step": 29
- },
- {
- "epoch": 0.039318479685452164,
- "grad_norm": 0.14576803147792816,
- "learning_rate": 3.705904774487396e-05,
- "loss": 10.3687,
- "step": 30
- },
- {
- "epoch": 0.04062909567496723,
- "grad_norm": 0.1643356829881668,
- "learning_rate": 3.392802673484193e-05,
- "loss": 10.3666,
- "step": 31
- },
- {
- "epoch": 0.04193971166448231,
- "grad_norm": 0.15099547803401947,
- "learning_rate": 3.086582838174551e-05,
- "loss": 10.3666,
- "step": 32
- },
- {
- "epoch": 0.04325032765399738,
- "grad_norm": 0.1730622947216034,
- "learning_rate": 2.7885565489049946e-05,
- "loss": 10.3648,
- "step": 33
- },
- {
- "epoch": 0.04456094364351245,
- "grad_norm": 0.15695245563983917,
- "learning_rate": 2.500000000000001e-05,
- "loss": 10.366,
- "step": 34
- },
- {
- "epoch": 0.045871559633027525,
- "grad_norm": 0.1533222794532776,
- "learning_rate": 2.2221488349019903e-05,
- "loss": 10.3665,
- "step": 35
- },
- {
- "epoch": 0.047182175622542594,
- "grad_norm": 0.1350749284029007,
- "learning_rate": 1.9561928549563968e-05,
- "loss": 10.366,
- "step": 36
- },
- {
- "epoch": 0.04849279161205767,
- "grad_norm": 0.1510956883430481,
- "learning_rate": 1.703270924499656e-05,
- "loss": 10.3639,
- "step": 37
- },
- {
- "epoch": 0.04980340760157274,
- "grad_norm": 0.15642888844013214,
- "learning_rate": 1.4644660940672627e-05,
- "loss": 10.3667,
- "step": 38
- },
- {
- "epoch": 0.05111402359108781,
- "grad_norm": 0.17887574434280396,
- "learning_rate": 1.2408009626051137e-05,
- "loss": 10.3621,
- "step": 39
- },
- {
- "epoch": 0.05242463958060288,
- "grad_norm": 0.17263266444206238,
- "learning_rate": 1.0332332985438248e-05,
- "loss": 10.3623,
- "step": 40
- },
- {
- "epoch": 0.053735255570117955,
- "grad_norm": 0.15901875495910645,
- "learning_rate": 8.426519384872733e-06,
- "loss": 10.3657,
- "step": 41
- },
- {
- "epoch": 0.05504587155963303,
- "grad_norm": 0.1725824922323227,
- "learning_rate": 6.698729810778065e-06,
- "loss": 10.3687,
- "step": 42
- },
- {
- "epoch": 0.0563564875491481,
- "grad_norm": 0.17225879430770874,
- "learning_rate": 5.156362923365588e-06,
- "loss": 10.3635,
- "step": 43
- },
- {
- "epoch": 0.057667103538663174,
- "grad_norm": 0.19542855024337769,
- "learning_rate": 3.8060233744356633e-06,
- "loss": 10.3608,
- "step": 44
- },
- {
- "epoch": 0.05897771952817824,
- "grad_norm": 0.16884516179561615,
- "learning_rate": 2.653493525244721e-06,
- "loss": 10.3693,
- "step": 45
- },
- {
- "epoch": 0.06028833551769332,
- "grad_norm": 0.19233977794647217,
- "learning_rate": 1.70370868554659e-06,
- "loss": 10.3623,
- "step": 46
- },
- {
- "epoch": 0.061598951507208385,
- "grad_norm": 0.204484760761261,
- "learning_rate": 9.607359798384785e-07,
- "loss": 10.3616,
- "step": 47
- },
- {
- "epoch": 0.06290956749672345,
- "grad_norm": 0.23938828706741333,
- "learning_rate": 4.277569313094809e-07,
- "loss": 10.3619,
- "step": 48
- },
- {
- "epoch": 0.06422018348623854,
- "grad_norm": 0.2926636338233948,
- "learning_rate": 1.0705383806982606e-07,
- "loss": 10.364,
- "step": 49
- },
  {
  "epoch": 0.0655307994757536,
- "grad_norm": 0.3798704445362091,
- "learning_rate": 0.0,
- "loss": 10.3716,
- "step": 50
- },
- {
- "epoch": 0.0655307994757536,
- "eval_loss": 10.364721298217773,
- "eval_runtime": 7.1814,
- "eval_samples_per_second": 358.006,
- "eval_steps_per_second": 89.536,
- "step": 50
  }
  ],
  "logging_steps": 1,

  {
+ "best_metric": 10.368563652038574,
+ "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.0655307994757536,
  "eval_steps": 25,
+ "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
+ "epoch": 0.002621231979030144,
+ "grad_norm": 0.0983472689986229,
  "learning_rate": 5e-05,
+ "loss": 10.3797,
  "step": 1
  },
  {
+ "epoch": 0.002621231979030144,
+ "eval_loss": 10.380081176757812,
+ "eval_runtime": 8.9595,
+ "eval_samples_per_second": 286.959,
+ "eval_steps_per_second": 143.535,
  "step": 1
  },
  {
+ "epoch": 0.005242463958060288,
+ "grad_norm": 0.09700549393892288,
  "learning_rate": 0.0001,
+ "loss": 10.3796,
  "step": 2
  },
  {
+ "epoch": 0.007863695937090432,
+ "grad_norm": 0.09336814284324646,
  "learning_rate": 9.989294616193017e-05,
+ "loss": 10.3799,
  "step": 3
  },
  {
+ "epoch": 0.010484927916120577,
+ "grad_norm": 0.09867435693740845,
  "learning_rate": 9.957224306869053e-05,
+ "loss": 10.3801,
  "step": 4
  },
  {
+ "epoch": 0.01310615989515072,
+ "grad_norm": 0.09715370833873749,
  "learning_rate": 9.903926402016153e-05,
+ "loss": 10.3797,
  "step": 5
  },
  {
+ "epoch": 0.015727391874180863,
+ "grad_norm": 0.10556021332740784,
  "learning_rate": 9.829629131445342e-05,
+ "loss": 10.3784,
  "step": 6
  },
  {
+ "epoch": 0.01834862385321101,
+ "grad_norm": 0.10574877262115479,
  "learning_rate": 9.73465064747553e-05,
+ "loss": 10.3773,
  "step": 7
  },
  {
+ "epoch": 0.020969855832241154,
+ "grad_norm": 0.10671475529670715,
  "learning_rate": 9.619397662556435e-05,
+ "loss": 10.3762,
  "step": 8
  },
  {
+ "epoch": 0.023591087811271297,
+ "grad_norm": 0.10671234875917435,
  "learning_rate": 9.484363707663442e-05,
+ "loss": 10.3758,
  "step": 9
  },
  {
+ "epoch": 0.02621231979030144,
+ "grad_norm": 0.11303173005580902,
  "learning_rate": 9.330127018922194e-05,
+ "loss": 10.3748,
  "step": 10
  },
  {
+ "epoch": 0.028833551769331587,
+ "grad_norm": 0.10760895162820816,
  "learning_rate": 9.157348061512727e-05,
+ "loss": 10.3747,
  "step": 11
  },
  {
+ "epoch": 0.03145478374836173,
+ "grad_norm": 0.10080239921808243,
  "learning_rate": 8.966766701456177e-05,
+ "loss": 10.3732,
  "step": 12
  },
  {
+ "epoch": 0.03407601572739188,
+ "grad_norm": 0.1232990175485611,
  "learning_rate": 8.759199037394887e-05,
+ "loss": 10.3735,
  "step": 13
  },
  {
+ "epoch": 0.03669724770642202,
+ "grad_norm": 0.13050110638141632,
  "learning_rate": 8.535533905932738e-05,
+ "loss": 10.3727,
  "step": 14
  },
  {
+ "epoch": 0.039318479685452164,
+ "grad_norm": 0.12770453095436096,
  "learning_rate": 8.296729075500344e-05,
+ "loss": 10.3726,
  "step": 15
  },
  {
+ "epoch": 0.04193971166448231,
+ "grad_norm": 0.12612299621105194,
  "learning_rate": 8.043807145043604e-05,
+ "loss": 10.3723,
  "step": 16
  },
  {
+ "epoch": 0.04456094364351245,
+ "grad_norm": 0.12636294960975647,
  "learning_rate": 7.777851165098012e-05,
+ "loss": 10.3726,
  "step": 17
  },
  {
+ "epoch": 0.047182175622542594,
+ "grad_norm": 0.1317187249660492,
  "learning_rate": 7.500000000000001e-05,
+ "loss": 10.3704,
  "step": 18
  },
  {
+ "epoch": 0.04980340760157274,
+ "grad_norm": 0.13668107986450195,
  "learning_rate": 7.211443451095007e-05,
+ "loss": 10.3773,
  "step": 19
  },
  {
+ "epoch": 0.05242463958060288,
+ "grad_norm": 0.14118903875350952,
  "learning_rate": 6.91341716182545e-05,
+ "loss": 10.3728,
  "step": 20
  },
  {
+ "epoch": 0.05504587155963303,
+ "grad_norm": 0.16648933291435242,
  "learning_rate": 6.607197326515808e-05,
+ "loss": 10.3702,
  "step": 21
  },
  {
+ "epoch": 0.057667103538663174,
+ "grad_norm": 0.1857651323080063,
  "learning_rate": 6.294095225512603e-05,
+ "loss": 10.368,
  "step": 22
  },
  {
+ "epoch": 0.06028833551769332,
+ "grad_norm": 0.21679675579071045,
  "learning_rate": 5.9754516100806423e-05,
+ "loss": 10.3668,
  "step": 23
  },
  {
+ "epoch": 0.06290956749672345,
+ "grad_norm": 0.26074954867362976,
  "learning_rate": 5.6526309611002594e-05,
+ "loss": 10.3705,
  "step": 24
  },
  {
+ "epoch": 0.0655307994757536,
+ "grad_norm": 0.32028478384017944,
  "learning_rate": 5.327015646150716e-05,
  "loss": 10.3763,
  "step": 25
  },
  {
  "epoch": 0.0655307994757536,
+ "eval_loss": 10.368563652038574,
+ "eval_runtime": 8.8771,
+ "eval_samples_per_second": 289.621,
+ "eval_steps_per_second": 144.867,
+ "step": 25
  }
  ],
  "logging_steps": 1,
@@ -404,7 +221,7 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": true
  },
  "attributes": {}
  }

  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
+ "should_training_stop": false
  },
  "attributes": {}
  }
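Since trainer_state.json is plain JSON, the rolled-back bookkeeping above (global_step 25, best checkpoint now at step 25) can be checked directly once the checkpoint directory is downloaded; a minimal sketch, assuming a local copy at last-checkpoint/:

```python
import json

# Inspect the checkpoint's bookkeeping; expected values come from the diff above.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"])            # 25
print(state["best_metric"])            # 10.368563652038574
print(state["best_model_checkpoint"])  # miner_id_24/checkpoint-25
print(len(state["log_history"]))       # 25 training entries + 2 eval entries
```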
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:504ac2c5b0969df886318cf624f9532e920824fe14de9f2e2950d1c3d6e2a41b
- size 6776

  version https://git-lfs.github.com/spec/v1
+ oid sha256:dbc8ad6344141c65c086264a78841e7f4627d66d48aed40b5efe95944405944b
+ size 6840