nlparabic committed
Commit d7d19eb
1 parent: c8c87e8

Training in progress, epoch 1

config.json CHANGED
@@ -35,5 +35,5 @@
   "torch_dtype": "float32",
   "transformers_version": "4.45.0.dev0",
   "use_cache": true,
-  "vocab_size": 64005
+  "vocab_size": 64006
 }
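
The vocab_size bump from 64005 to 64006 lines up with the new `<sep>` token registered at id 64005 in tokenizer.json below. A minimal sketch of the step that would produce this config change, assuming local paths and the standard transformers API (the training script itself is not part of this commit):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical local checkpoint path; the run's output_dir in the log
# below is /home/iais_marenpielka/Bouthaina/res_nw_dj.
tokenizer = AutoTokenizer.from_pretrained("res_nw_dj")
model = AutoModelForCausalLM.from_pretrained("res_nw_dj")

# Registering one extra special token takes len(tokenizer) to 64006;
# resizing grows the embedding matrix by one row and updates
# config.json's vocab_size accordingly.
tokenizer.add_special_tokens({"additional_special_tokens": ["<sep>"]})
model.resize_token_embeddings(len(tokenizer))
```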
egy_training_log.txt CHANGED
@@ -70,311 +70,7 @@ local_rank=0,
 log_level=passive,
 log_level_replica=warning,
 log_on_each_node=True,
-logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_dj/runs/Aug30_21-28-25_lmgpu-node-02,
-logging_first_step=False,
-logging_nan_inf_filter=True,
-logging_steps=500,
-logging_strategy=IntervalStrategy.EPOCH,
-lr_scheduler_kwargs={},
-lr_scheduler_type=SchedulerType.LINEAR,
-max_grad_norm=1.0,
-max_steps=-1,
-metric_for_best_model=loss,
-mp_parameters=,
-neftune_noise_alpha=None,
-no_cuda=False,
-num_train_epochs=3.0,
-optim=OptimizerNames.ADAMW_TORCH,
-optim_args=None,
-optim_target_modules=None,
-output_dir=/home/iais_marenpielka/Bouthaina/res_nw_dj,
-overwrite_output_dir=False,
-past_index=-1,
-per_device_eval_batch_size=8,
-per_device_train_batch_size=8,
-prediction_loss_only=False,
-push_to_hub=True,
-push_to_hub_model_id=None,
-push_to_hub_organization=None,
-push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
-ray_scope=last,
-remove_unused_columns=True,
-report_to=[],
-restore_callback_states_from_checkpoint=False,
-resume_from_checkpoint=None,
-run_name=/home/iais_marenpielka/Bouthaina/res_nw_dj,
-save_on_each_node=False,
-save_only_model=False,
-save_safetensors=True,
-save_steps=500,
-save_strategy=IntervalStrategy.EPOCH,
-save_total_limit=None,
-seed=42,
-skip_memory_metrics=True,
-split_batches=None,
-tf32=None,
-torch_compile=False,
-torch_compile_backend=None,
-torch_compile_mode=None,
-torch_empty_cache_steps=None,
-torchdynamo=None,
-tpu_metrics_debug=False,
-tpu_num_cores=None,
-use_cpu=False,
-use_ipex=False,
-use_legacy_prediction_loop=False,
-use_mps_device=False,
-warmup_ratio=0.0,
-warmup_steps=500,
-weight_decay=0.0,
-)
-INFO:datasets.builder:Using custom data configuration default-2166f9793d70674b
-INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
-INFO:datasets.builder:Generating dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-2166f9793d70674b/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
-INFO:datasets.builder:Downloading and preparing dataset text/default to /home/iais_marenpielka/.cache/huggingface/datasets/text/default-2166f9793d70674b/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101...
-INFO:datasets.download.download_manager:Downloading took 0.0 min
-INFO:datasets.download.download_manager:Checksum Computation took 0.0 min
-INFO:datasets.builder:Generating train split
-INFO:datasets.builder:Generating validation split
-INFO:datasets.utils.info_utils:Unable to verify splits sizes.
-INFO:datasets.builder:Dataset text downloaded and prepared to /home/iais_marenpielka/.cache/huggingface/datasets/text/default-2166f9793d70674b/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101. Subsequent calls will reuse this data.
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-2166f9793d70674b/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-026bde2ce1e4f1eb.arrow
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-2166f9793d70674b/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-f24696270c2f70a2.arrow
-WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=768 instead. You can change that default value by passing --block_size xxx.
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-2166f9793d70674b/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-4ebbc72089ee9723.arrow
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-2166f9793d70674b/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-5e8574b46c8155fd.arrow
-WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
-INFO:root:Epoch 1.0: Train Loss = None, Eval Loss = None
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 2.0: Train Loss = 1.2513, Eval Loss = 0.7111806869506836
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 3.0: Train Loss = 0.6462, Eval Loss = 0.6569304466247559
-INFO:absl:Using default tokenizer.
-INFO:__main__:*** Evaluate ***
-INFO:absl:Using default tokenizer.
-WARNING:__main__:Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
-INFO:__main__:Training/evaluation parameters TrainingArguments(
-_n_gpu=1,
-accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
-adafactor=False,
-adam_beta1=0.9,
-adam_beta2=0.999,
-adam_epsilon=1e-08,
-auto_find_batch_size=False,
-batch_eval_metrics=False,
-bf16=False,
-bf16_full_eval=False,
-data_seed=None,
-dataloader_drop_last=False,
-dataloader_num_workers=0,
-dataloader_persistent_workers=False,
-dataloader_pin_memory=True,
-dataloader_prefetch_factor=None,
-ddp_backend=None,
-ddp_broadcast_buffers=None,
-ddp_bucket_cap_mb=None,
-ddp_find_unused_parameters=None,
-ddp_timeout=1800,
-debug=[],
-deepspeed=None,
-disable_tqdm=False,
-dispatch_batches=None,
-do_eval=True,
-do_predict=False,
-do_train=True,
-eval_accumulation_steps=None,
-eval_delay=0,
-eval_do_concat_batches=True,
-eval_on_start=False,
-eval_steps=None,
-eval_strategy=IntervalStrategy.EPOCH,
-eval_use_gather_object=False,
-evaluation_strategy=epoch,
-fp16=False,
-fp16_backend=auto,
-fp16_full_eval=False,
-fp16_opt_level=O1,
-fsdp=[],
-fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
-fsdp_min_num_params=0,
-fsdp_transformer_layer_cls_to_wrap=None,
-full_determinism=False,
-gradient_accumulation_steps=1,
-gradient_checkpointing=False,
-gradient_checkpointing_kwargs=None,
-greater_is_better=False,
-group_by_length=False,
-half_precision_backend=auto,
-hub_always_push=False,
-hub_model_id=None,
-hub_private_repo=False,
-hub_strategy=HubStrategy.EVERY_SAVE,
-hub_token=<HUB_TOKEN>,
-ignore_data_skip=False,
-include_inputs_for_metrics=False,
-include_num_input_tokens_seen=False,
-include_tokens_per_second=False,
-jit_mode_eval=False,
-label_names=None,
-label_smoothing_factor=0.0,
-learning_rate=5e-05,
-length_column_name=length,
-load_best_model_at_end=True,
-local_rank=0,
-log_level=passive,
-log_level_replica=warning,
-log_on_each_node=True,
-logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_dj/runs/Aug31_10-25-38_lmgpu-node-02,
-logging_first_step=False,
-logging_nan_inf_filter=True,
-logging_steps=500,
-logging_strategy=IntervalStrategy.EPOCH,
-lr_scheduler_kwargs={},
-lr_scheduler_type=SchedulerType.LINEAR,
-max_grad_norm=1.0,
-max_steps=-1,
-metric_for_best_model=loss,
-mp_parameters=,
-neftune_noise_alpha=None,
-no_cuda=False,
-num_train_epochs=3.0,
-optim=OptimizerNames.ADAMW_TORCH,
-optim_args=None,
-optim_target_modules=None,
-output_dir=/home/iais_marenpielka/Bouthaina/res_nw_dj,
-overwrite_output_dir=False,
-past_index=-1,
-per_device_eval_batch_size=8,
-per_device_train_batch_size=8,
-prediction_loss_only=False,
-push_to_hub=True,
-push_to_hub_model_id=None,
-push_to_hub_organization=None,
-push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
-ray_scope=last,
-remove_unused_columns=True,
-report_to=[],
-restore_callback_states_from_checkpoint=False,
-resume_from_checkpoint=None,
-run_name=/home/iais_marenpielka/Bouthaina/res_nw_dj,
-save_on_each_node=False,
-save_only_model=False,
-save_safetensors=True,
-save_steps=500,
-save_strategy=IntervalStrategy.EPOCH,
-save_total_limit=None,
-seed=42,
-skip_memory_metrics=True,
-split_batches=None,
-tf32=None,
-torch_compile=False,
-torch_compile_backend=None,
-torch_compile_mode=None,
-torch_empty_cache_steps=None,
-torchdynamo=None,
-tpu_metrics_debug=False,
-tpu_num_cores=None,
-use_cpu=False,
-use_ipex=False,
-use_legacy_prediction_loop=False,
-use_mps_device=False,
-warmup_ratio=0.0,
-warmup_steps=500,
-weight_decay=0.0,
-)
-INFO:__main__:Checkpoint detected, resuming training at /home/iais_marenpielka/Bouthaina/res_nw_dj/checkpoint-8109. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.
-INFO:datasets.builder:Using custom data configuration default-98487e126fdb56c7
-INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
-INFO:datasets.builder:Generating dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
-INFO:datasets.builder:Downloading and preparing dataset text/default to /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101...
-INFO:datasets.download.download_manager:Downloading took 0.0 min
-INFO:datasets.download.download_manager:Checksum Computation took 0.0 min
-INFO:datasets.builder:Generating train split
-INFO:datasets.builder:Generating validation split
-INFO:datasets.utils.info_utils:Unable to verify splits sizes.
-INFO:datasets.builder:Dataset text downloaded and prepared to /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101. Subsequent calls will reuse this data.
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-22cb64ac8b531f65.arrow
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-7840ce8488671e31.arrow
-WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=768 instead. You can change that default value by passing --block_size xxx.
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-1f7e65ff1cef4012.arrow
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-cb43c016a75212ff.arrow
-WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
-INFO:__main__:*** Evaluate ***
-INFO:absl:Using default tokenizer.
-WARNING:root:No losses were recorded, so the loss graph was not generated.
-WARNING:__main__:Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
-INFO:__main__:Training/evaluation parameters TrainingArguments(
-_n_gpu=1,
-accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
-adafactor=False,
-adam_beta1=0.9,
-adam_beta2=0.999,
-adam_epsilon=1e-08,
-auto_find_batch_size=False,
-batch_eval_metrics=False,
-bf16=False,
-bf16_full_eval=False,
-data_seed=None,
-dataloader_drop_last=False,
-dataloader_num_workers=0,
-dataloader_persistent_workers=False,
-dataloader_pin_memory=True,
-dataloader_prefetch_factor=None,
-ddp_backend=None,
-ddp_broadcast_buffers=None,
-ddp_bucket_cap_mb=None,
-ddp_find_unused_parameters=None,
-ddp_timeout=1800,
-debug=[],
-deepspeed=None,
-disable_tqdm=False,
-dispatch_batches=None,
-do_eval=True,
-do_predict=False,
-do_train=True,
-eval_accumulation_steps=None,
-eval_delay=0,
-eval_do_concat_batches=True,
-eval_on_start=False,
-eval_steps=None,
-eval_strategy=IntervalStrategy.EPOCH,
-eval_use_gather_object=False,
-evaluation_strategy=epoch,
-fp16=False,
-fp16_backend=auto,
-fp16_full_eval=False,
-fp16_opt_level=O1,
-fsdp=[],
-fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
-fsdp_min_num_params=0,
-fsdp_transformer_layer_cls_to_wrap=None,
-full_determinism=False,
-gradient_accumulation_steps=1,
-gradient_checkpointing=False,
-gradient_checkpointing_kwargs=None,
-greater_is_better=False,
-group_by_length=False,
-half_precision_backend=auto,
-hub_always_push=False,
-hub_model_id=None,
-hub_private_repo=False,
-hub_strategy=HubStrategy.EVERY_SAVE,
-hub_token=<HUB_TOKEN>,
-ignore_data_skip=False,
-include_inputs_for_metrics=False,
-include_num_input_tokens_seen=False,
-include_tokens_per_second=False,
-jit_mode_eval=False,
-label_names=None,
-label_smoothing_factor=0.0,
-learning_rate=5e-05,
-length_column_name=length,
-load_best_model_at_end=True,
-local_rank=0,
-log_level=passive,
-log_level_replica=warning,
-log_on_each_node=True,
-logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_dj/runs/Aug31_11-16-58_lmgpu-node-02,
+logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_dj/runs/Sep01_13-56-27_lmgpu-node-07,
 logging_first_step=False,
 logging_nan_inf_filter=True,
 logging_steps=500,
@@ -432,187 +128,17 @@ warmup_ratio=0.0,
 warmup_steps=500,
 weight_decay=0.0,
 )
-INFO:__main__:Checkpoint detected, resuming training at /home/iais_marenpielka/Bouthaina/res_nw_dj/checkpoint-8109. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.
-INFO:datasets.builder:Using custom data configuration default-98487e126fdb56c7
+INFO:datasets.builder:Using custom data configuration default-70891baac37034df
 INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
 INFO:datasets.builder:Overwrite dataset info from restored data version if exists.
-INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
-INFO:datasets.builder:Found cached dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
-INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-22cb64ac8b531f65.arrow
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-7840ce8488671e31.arrow
+INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-70891baac37034df/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+INFO:datasets.builder:Found cached dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-70891baac37034df/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
+INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-70891baac37034df/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-70891baac37034df/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-0c0084b2e51c93f9.arrow
+INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-70891baac37034df/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-77487910604ea5b4.arrow
 WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=768 instead. You can change that default value by passing --block_size xxx.
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-1f7e65ff1cef4012.arrow
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-cb43c016a75212ff.arrow
+INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-70891baac37034df/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-f0985db2addeab61.arrow
+INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-70891baac37034df/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-555eefcdfe4bb35c.arrow
 WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
-INFO:root:Epoch 4.0: Train Loss = 0.5673, Eval Loss = 0.6498554348945618
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 5.0: Train Loss = 0.556, Eval Loss = 0.63295978307724
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 6.0: Train Loss = 0.4786, Eval Loss = 0.6265950798988342
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 7.0: Train Loss = 0.4123, Eval Loss = 0.630312979221344
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 8.0: Train Loss = 0.3573, Eval Loss = 0.6372247338294983
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 9.0: Train Loss = 0.3108, Eval Loss = 0.646577000617981
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 10.0: Train Loss = 0.2719, Eval Loss = 0.6550981998443604
-INFO:absl:Using default tokenizer.
-INFO:__main__:*** Evaluate ***
-INFO:absl:Using default tokenizer.
-WARNING:__main__:Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
-INFO:__main__:Training/evaluation parameters TrainingArguments(
-_n_gpu=1,
-accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
-adafactor=False,
-adam_beta1=0.9,
-adam_beta2=0.999,
-adam_epsilon=1e-08,
-auto_find_batch_size=False,
-batch_eval_metrics=False,
-bf16=False,
-bf16_full_eval=False,
-data_seed=None,
-dataloader_drop_last=False,
-dataloader_num_workers=0,
-dataloader_persistent_workers=False,
-dataloader_pin_memory=True,
-dataloader_prefetch_factor=None,
-ddp_backend=None,
-ddp_broadcast_buffers=None,
-ddp_bucket_cap_mb=None,
-ddp_find_unused_parameters=None,
-ddp_timeout=1800,
-debug=[],
-deepspeed=None,
-disable_tqdm=False,
-dispatch_batches=None,
-do_eval=True,
-do_predict=False,
-do_train=True,
-eval_accumulation_steps=None,
-eval_delay=0,
-eval_do_concat_batches=True,
-eval_on_start=False,
-eval_steps=None,
-eval_strategy=IntervalStrategy.EPOCH,
-eval_use_gather_object=False,
-evaluation_strategy=epoch,
-fp16=False,
-fp16_backend=auto,
-fp16_full_eval=False,
-fp16_opt_level=O1,
-fsdp=[],
-fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
-fsdp_min_num_params=0,
-fsdp_transformer_layer_cls_to_wrap=None,
-full_determinism=False,
-gradient_accumulation_steps=1,
-gradient_checkpointing=False,
-gradient_checkpointing_kwargs=None,
-greater_is_better=False,
-group_by_length=False,
-half_precision_backend=auto,
-hub_always_push=False,
-hub_model_id=None,
-hub_private_repo=False,
-hub_strategy=HubStrategy.EVERY_SAVE,
-hub_token=<HUB_TOKEN>,
-ignore_data_skip=False,
-include_inputs_for_metrics=False,
-include_num_input_tokens_seen=False,
-include_tokens_per_second=False,
-jit_mode_eval=False,
-label_names=None,
-label_smoothing_factor=0.0,
-learning_rate=5e-05,
-length_column_name=length,
-load_best_model_at_end=True,
-local_rank=0,
-log_level=passive,
-log_level_replica=warning,
-log_on_each_node=True,
-logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_dj/runs/Sep01_08-31-45_lmgpu-node-09,
-logging_first_step=False,
-logging_nan_inf_filter=True,
-logging_steps=500,
-logging_strategy=IntervalStrategy.EPOCH,
-lr_scheduler_kwargs={},
-lr_scheduler_type=SchedulerType.LINEAR,
-max_grad_norm=1.0,
-max_steps=-1,
-metric_for_best_model=loss,
-mp_parameters=,
-neftune_noise_alpha=None,
-no_cuda=False,
-num_train_epochs=20.0,
-optim=OptimizerNames.ADAMW_TORCH,
-optim_args=None,
-optim_target_modules=None,
-output_dir=/home/iais_marenpielka/Bouthaina/res_nw_dj,
-overwrite_output_dir=False,
-past_index=-1,
-per_device_eval_batch_size=8,
-per_device_train_batch_size=8,
-prediction_loss_only=False,
-push_to_hub=True,
-push_to_hub_model_id=None,
-push_to_hub_organization=None,
-push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
-ray_scope=last,
-remove_unused_columns=True,
-report_to=[],
-restore_callback_states_from_checkpoint=False,
-resume_from_checkpoint=None,
-run_name=/home/iais_marenpielka/Bouthaina/res_nw_dj,
-save_on_each_node=False,
-save_only_model=False,
-save_safetensors=True,
-save_steps=500,
-save_strategy=IntervalStrategy.EPOCH,
-save_total_limit=None,
-seed=42,
-skip_memory_metrics=True,
-split_batches=None,
-tf32=None,
-torch_compile=False,
-torch_compile_backend=None,
-torch_compile_mode=None,
-torch_empty_cache_steps=None,
-torchdynamo=None,
-tpu_metrics_debug=False,
-tpu_num_cores=None,
-use_cpu=False,
-use_ipex=False,
-use_legacy_prediction_loop=False,
-use_mps_device=False,
-warmup_ratio=0.0,
-warmup_steps=500,
-weight_decay=0.0,
-)
-INFO:__main__:Checkpoint detected, resuming training at /home/iais_marenpielka/Bouthaina/res_nw_dj/checkpoint-27030. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.
-INFO:datasets.builder:Using custom data configuration default-98487e126fdb56c7
-INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
-INFO:datasets.builder:Overwrite dataset info from restored data version if exists.
-INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
-INFO:datasets.builder:Found cached dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
-INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-22cb64ac8b531f65.arrow
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-7840ce8488671e31.arrow
-WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=768 instead. You can change that default value by passing --block_size xxx.
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-1f7e65ff1cef4012.arrow
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-98487e126fdb56c7/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-cb43c016a75212ff.arrow
-WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
-INFO:root:Epoch 11.0: Train Loss = 0.2396, Eval Loss = 0.665830671787262
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 12.0: Train Loss = 0.2129, Eval Loss = 0.6767598390579224
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 13.0: Train Loss = 0.191, Eval Loss = 0.6870447993278503
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 14.0: Train Loss = 0.1733, Eval Loss = 0.696670651435852
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 15.0: Train Loss = 0.1593, Eval Loss = 0.7063180208206177
-INFO:absl:Using default tokenizer.
-INFO:__main__:*** Evaluate ***
+INFO:root:Epoch 1.0: Train Loss = None, Eval Loss = None
 INFO:absl:Using default tokenizer.
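
The `INFO:root:Epoch N: Train Loss = ..., Eval Loss = ...` lines come from the repo's own training script, which is not part of this commit. A hedged sketch of a TrainerCallback that would produce them (all names here are assumed); it also suggests why a freshly resumed run can print `Train Loss = None`: with logging_strategy=epoch there may simply be no entries in `state.log_history` yet when the epoch ends.

```python
import logging
from transformers import TrainerCallback

class EpochLossLogger(TrainerCallback):
    """Assumed reconstruction of the callback behind the INFO:root lines."""

    def on_epoch_end(self, args, state, control, **kwargs):
        train_loss = eval_loss = None
        # log_history holds dicts like {"loss": ...} and {"eval_loss": ...};
        # keep the most recent value of each, if any were recorded so far.
        for record in state.log_history:
            train_loss = record.get("loss", train_loss)
            eval_loss = record.get("eval_loss", eval_loss)
        logging.info(
            f"Epoch {state.epoch}: Train Loss = {train_loss}, Eval Loss = {eval_loss}"
        )
```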
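
For reference, the repeated TrainingArguments dumps above correspond, up to defaults, to a construction like the following. This is a sketch, not the repo's actual launcher: only values visible in the log are spelled out, and `eval_strategy` is the 4.45-era spelling of the older `evaluation_strategy` flag.

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="/home/iais_marenpielka/Bouthaina/res_nw_dj",
    do_train=True,
    do_eval=True,
    num_train_epochs=20.0,        # earlier runs in this log used 3.0
    learning_rate=5e-05,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    warmup_steps=500,
    eval_strategy="epoch",
    logging_strategy="epoch",
    save_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="loss",
    push_to_hub=True,
    seed=42,
)
```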
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bfe3c622bc3f36c295706ec7e3fd153d28f79268fcf6e11f09f6a77f69068610
-size 539221632
+oid sha256:e5633632cfa93c82e5f9c7a20da12a6109f68ad46e93939e3bc45b311171d6e4
+size 539224704
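
The weight file grows by exactly 3072 bytes, which is what one extra float32 embedding row would cost at a hidden size of 768. The hidden size is an assumption here (it is not stated in this diff), but the arithmetic is consistent with the single token added at id 64005:

```python
old_size, new_size = 539221632, 539224704
delta = new_size - old_size  # 3072 bytes
print(delta // 4)            # 768 float32 values: one new embedding row
```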
special_tokens_map.json CHANGED
@@ -1,29 +1,22 @@
 {
   "additional_special_tokens": [
     {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    },
-    {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    },
-    {
-      "content": "[sep]",
+      "content": "<sep>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false
     }
   ],
+  "bos_token": {
+    "content": "<|bos|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
   "eos_token": {
-    "content": "<EOS>",
+    "content": "<|endoftext|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -35,5 +28,12 @@
     "normalized": false,
     "rstrip": false,
     "single_word": false
+  },
+  "unk_token": {
+    "content": "<|unk|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
   }
 }
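
A minimal sketch of the retokenization this commit records, using the standard `add_special_tokens` API (local path assumed; the flags mirror the `lstrip`/`rstrip`/`normalized`/`single_word` fields above, which are all false):

```python
from tokenizers import AddedToken
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("res_nw_dj")  # hypothetical local path

def special(content):
    # All four flags are false in the map above.
    return AddedToken(content, lstrip=False, rstrip=False,
                      normalized=False, single_word=False)

tok.add_special_tokens({
    "bos_token": special("<|bos|>"),
    "eos_token": special("<|endoftext|>"),
    "unk_token": special("<|unk|>"),
    "additional_special_tokens": [special("<sep>")],
})
tok.save_pretrained("res_nw_dj")  # rewrites special_tokens_map.json et al.
```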
tokenizer.json CHANGED
@@ -46,7 +46,7 @@
     },
     {
       "id": 64002,
-      "content": "<s>",
+      "content": "<|bos|>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
@@ -55,7 +55,7 @@
     },
     {
       "id": 64003,
-      "content": "</s>",
+      "content": "<|endoftext|>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
@@ -64,7 +64,16 @@
     },
     {
       "id": 64004,
-      "content": "[sep]",
+      "content": "<|unk|>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 64005,
+      "content": "<sep>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
tokenizer_config.json CHANGED
@@ -25,7 +25,7 @@
     "special": true
   },
   "64002": {
-    "content": "<s>",
+    "content": "<|bos|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -33,7 +33,7 @@
     "special": true
   },
   "64003": {
-    "content": "</s>",
+    "content": "<|endoftext|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -41,7 +41,15 @@
     "special": true
   },
   "64004": {
-    "content": "[sep]",
+    "content": "<|unk|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false,
+    "special": true
+  },
+  "64005": {
+    "content": "<sep>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -50,13 +58,13 @@
     }
   },
   "additional_special_tokens": [
-    "<s>",
-    "</s>",
-    "[sep]"
+    "<sep>"
   ],
+  "bos_token": "<|bos|>",
   "clean_up_tokenization_spaces": true,
-  "eos_token": "<EOS>",
+  "eos_token": "<|endoftext|>",
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<EOS>",
-  "tokenizer_class": "PreTrainedTokenizerFast"
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "unk_token": "<|unk|>"
 }
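
Note that `pad_token` still reads `<EOS>` even though `eos_token` is now `<|endoftext|>`; whether that string still resolves to a real token id after the remap is worth checking. A hedged sanity check, assuming a local copy of the repo:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("res_nw_dj")  # hypothetical local path
print(tok.bos_token)  # <|bos|>
print(tok.eos_token)  # <|endoftext|>
print(tok.unk_token)  # <|unk|>
print(tok.pad_token)  # <EOS>, unchanged by this commit
print(tok.convert_tokens_to_ids(tok.pad_token))  # should not be the unk id
```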
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f941b19a4bdd74821701732fc2412174ac26cea61752479531dfffb1551ec828
+oid sha256:87284e9c550cf2a7848755d719d0d88168c066eb32f4022b467068d60eb7b769
 size 5240
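
training_args.bin is a pickled TrainingArguments object (same 5240-byte size, new hash). A sketch for inspecting it locally; `weights_only=False` is needed on recent torch versions because this file is an arbitrary pickle, not a tensor checkpoint:

```python
import torch

# Assumes a local download of training_args.bin from this commit.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.warmup_steps)
```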