nlparabic committed (verified)
Commit db7ca90
1 parent: f4e3add

Training in progress, epoch 8

Files changed (4)
  1. egy_training_log.txt +288 -0
  2. model.safetensors +1 -1
  3. tokenizer.json +2 -16
  4. training_args.bin +1 -1
egy_training_log.txt CHANGED
@@ -570,3 +570,291 @@ INFO:root:Epoch 7.0: Train Loss = 0.0164, Eval Loss = 0.05724157765507698
  INFO:absl:Using default tokenizer.
  INFO:__main__:*** Evaluate ***
  INFO:absl:Using default tokenizer.
+ WARNING:__main__:Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
+ INFO:__main__:Training/evaluation parameters TrainingArguments(
+ _n_gpu=1,
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ auto_find_batch_size=False,
+ batch_eval_metrics=False,
+ bf16=False,
+ bf16_full_eval=False,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_persistent_workers=False,
+ dataloader_pin_memory=True,
+ dataloader_prefetch_factor=None,
+ ddp_backend=None,
+ ddp_broadcast_buffers=None,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800,
+ debug=[],
+ deepspeed=None,
+ disable_tqdm=False,
+ dispatch_batches=None,
+ do_eval=True,
+ do_predict=False,
+ do_train=True,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_do_concat_batches=True,
+ eval_on_start=False,
+ eval_steps=None,
+ eval_strategy=IntervalStrategy.EPOCH,
+ eval_use_gather_object=False,
+ evaluation_strategy=epoch,
+ fp16=False,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=False,
+ gradient_checkpointing_kwargs=None,
+ greater_is_better=False,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_always_push=False,
+ hub_model_id=None,
+ hub_private_repo=False,
+ hub_strategy=HubStrategy.EVERY_SAVE,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_inputs_for_metrics=False,
+ include_num_input_tokens_seen=False,
+ include_tokens_per_second=False,
+ jit_mode_eval=False,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=5e-05,
+ length_column_name=length,
+ load_best_model_at_end=True,
+ local_rank=0,
+ log_level=passive,
+ log_level_replica=warning,
+ log_on_each_node=True,
+ logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_gulf_aragpt2-large/runs/Sep08_18-44-04_lmgpu-node-07,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=500,
+ logging_strategy=IntervalStrategy.EPOCH,
+ lr_scheduler_kwargs={},
+ lr_scheduler_type=SchedulerType.LINEAR,
+ max_grad_norm=1.0,
+ max_steps=-1,
+ metric_for_best_model=loss,
+ mp_parameters=,
+ neftune_noise_alpha=None,
+ no_cuda=False,
+ num_train_epochs=20.0,
+ optim=OptimizerNames.ADAMW_TORCH,
+ optim_args=None,
+ optim_target_modules=None,
+ output_dir=/home/iais_marenpielka/Bouthaina/res_nw_gulf_aragpt2-large,
+ overwrite_output_dir=False,
+ past_index=-1,
+ per_device_eval_batch_size=4,
+ per_device_train_batch_size=4,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ remove_unused_columns=True,
+ report_to=[],
+ restore_callback_states_from_checkpoint=False,
+ resume_from_checkpoint=None,
+ run_name=/home/iais_marenpielka/Bouthaina/res_nw_gulf_aragpt2-large,
+ save_on_each_node=False,
+ save_only_model=False,
+ save_safetensors=True,
+ save_steps=500,
+ save_strategy=IntervalStrategy.EPOCH,
+ save_total_limit=None,
+ seed=42,
+ skip_memory_metrics=True,
+ split_batches=None,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torch_empty_cache_steps=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_cpu=False,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_mps_device=False,
+ warmup_ratio=0.0,
+ warmup_steps=500,
+ weight_decay=0.0,
+ )
+ INFO:__main__:Checkpoint detected, resuming training at /home/iais_marenpielka/Bouthaina/res_nw_gulf_aragpt2-large/checkpoint-11704. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.
+ INFO:datasets.builder:Using custom data configuration default-481a1a6a0ca4575e
+ INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
+ INFO:datasets.builder:Overwrite dataset info from restored data version if exists.
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-481a1a6a0ca4575e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ INFO:datasets.builder:Found cached dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-481a1a6a0ca4575e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-481a1a6a0ca4575e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-481a1a6a0ca4575e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-798e68ee1238406a.arrow
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-481a1a6a0ca4575e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-08141c170f1669b9.arrow
+ WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=1024 instead. You can change that default value by passing --block_size xxx.
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-481a1a6a0ca4575e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-2ba0b834485aeaf5.arrow
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-481a1a6a0ca4575e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-7a0a05502f2d2ac6.arrow
+ WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
+ WARNING:__main__:Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
+ INFO:__main__:Training/evaluation parameters TrainingArguments(
+ _n_gpu=1,
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ auto_find_batch_size=False,
+ batch_eval_metrics=False,
+ bf16=False,
+ bf16_full_eval=False,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_persistent_workers=False,
+ dataloader_pin_memory=True,
+ dataloader_prefetch_factor=None,
+ ddp_backend=None,
+ ddp_broadcast_buffers=None,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800,
+ debug=[],
+ deepspeed=None,
+ disable_tqdm=False,
+ dispatch_batches=None,
+ do_eval=True,
+ do_predict=False,
+ do_train=True,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_do_concat_batches=True,
+ eval_on_start=False,
+ eval_steps=None,
+ eval_strategy=IntervalStrategy.EPOCH,
+ eval_use_gather_object=False,
+ evaluation_strategy=epoch,
+ fp16=False,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=False,
+ gradient_checkpointing_kwargs=None,
+ greater_is_better=False,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_always_push=False,
+ hub_model_id=None,
+ hub_private_repo=False,
+ hub_strategy=HubStrategy.EVERY_SAVE,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_inputs_for_metrics=False,
+ include_num_input_tokens_seen=False,
+ include_tokens_per_second=False,
+ jit_mode_eval=False,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=5e-05,
+ length_column_name=length,
+ load_best_model_at_end=True,
+ local_rank=0,
+ log_level=passive,
+ log_level_replica=warning,
+ log_on_each_node=True,
+ logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_gulf_aragpt2-large/runs/Sep08_18-48-04_lmgpu-node-07,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=500,
+ logging_strategy=IntervalStrategy.EPOCH,
+ lr_scheduler_kwargs={},
+ lr_scheduler_type=SchedulerType.LINEAR,
+ max_grad_norm=1.0,
+ max_steps=-1,
+ metric_for_best_model=loss,
+ mp_parameters=,
+ neftune_noise_alpha=None,
+ no_cuda=False,
+ num_train_epochs=20.0,
+ optim=OptimizerNames.ADAMW_TORCH,
+ optim_args=None,
+ optim_target_modules=None,
+ output_dir=/home/iais_marenpielka/Bouthaina/res_nw_gulf_aragpt2-large,
+ overwrite_output_dir=False,
+ past_index=-1,
+ per_device_eval_batch_size=4,
+ per_device_train_batch_size=4,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ remove_unused_columns=True,
+ report_to=[],
+ restore_callback_states_from_checkpoint=False,
+ resume_from_checkpoint=None,
+ run_name=/home/iais_marenpielka/Bouthaina/res_nw_gulf_aragpt2-large,
+ save_on_each_node=False,
+ save_only_model=False,
+ save_safetensors=True,
+ save_steps=500,
+ save_strategy=IntervalStrategy.EPOCH,
+ save_total_limit=None,
+ seed=42,
+ skip_memory_metrics=True,
+ split_batches=None,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torch_empty_cache_steps=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_cpu=False,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_mps_device=False,
+ warmup_ratio=0.0,
+ warmup_steps=500,
+ weight_decay=0.0,
+ )
+ INFO:__main__:Checkpoint detected, resuming training at /home/iais_marenpielka/Bouthaina/res_nw_gulf_aragpt2-large/checkpoint-11704. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.
+ INFO:datasets.builder:Using custom data configuration default-481a1a6a0ca4575e
+ INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
+ INFO:datasets.builder:Overwrite dataset info from restored data version if exists.
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-481a1a6a0ca4575e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ INFO:datasets.builder:Found cached dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-481a1a6a0ca4575e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-481a1a6a0ca4575e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-481a1a6a0ca4575e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-798e68ee1238406a.arrow
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-481a1a6a0ca4575e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-08141c170f1669b9.arrow
+ WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=1024 instead. You can change that default value by passing --block_size xxx.
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-481a1a6a0ca4575e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-2ba0b834485aeaf5.arrow
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-481a1a6a0ca4575e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-7a0a05502f2d2ac6.arrow
+ WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
+ INFO:root:Epoch 8.0: Train Loss = 0.0148, Eval Loss = 0.05825383961200714
+ INFO:absl:Using default tokenizer.
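Note on the log above: the TrainingArguments dump appears twice because the run was launched twice about four minutes apart (see the two logging_dir timestamps); each launch detected checkpoint-11704 in output_dir and resumed from it, since overwrite_output_dir=False. As a reference, here is a minimal Python sketch of the key values from that dump; the script context and variable names are assumptions (this is not the repo's actual training code), and it assumes a transformers version recent enough to accept eval_strategy:

    # Minimal sketch: key TrainingArguments values copied from the log dump above.
    from transformers import TrainingArguments

    args = TrainingArguments(
        output_dir="/home/iais_marenpielka/Bouthaina/res_nw_gulf_aragpt2-large",
        do_train=True,
        do_eval=True,
        eval_strategy="epoch",          # evaluate at each epoch boundary
        logging_strategy="epoch",
        save_strategy="epoch",
        learning_rate=5e-05,
        num_train_epochs=20.0,
        per_device_train_batch_size=4,
        per_device_eval_batch_size=4,
        warmup_steps=500,
        load_best_model_at_end=True,    # pairs with metric_for_best_model
        metric_for_best_model="loss",
        greater_is_better=False,
        seed=42,
        push_to_hub=True,               # hub_strategy=EVERY_SAVE in the dump
        report_to=[],
    )

With these settings the Trainer evaluates, logs, and saves once per epoch, which is why each "Training in progress, epoch N" commit pushes a fresh checkpoint to this repo.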
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:45e0cf31bee1b8b8387fb0f29745e522f94fc53235332e2b661c0c8c7023676e
+ oid sha256:72428eb693eb4400f6f9ebc2b541739b8e512b7964d04dea4072bfce7197fd14
  size 3166550552
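model.safetensors (like training_args.bin below) is stored through Git LFS, so the diff shows only the pointer file: oid is the SHA-256 digest of the file's raw bytes and size is its byte count. The weights changed this epoch, so the oid changed while the size stayed 3166550552. A small sketch of how such a pointer is derived; the helper name is hypothetical:

    # Sketch: derive the Git LFS pointer fields for a file.
    # "oid" is the SHA-256 of the raw contents, "size" the byte length.
    import hashlib
    import os

    def lfs_pointer(path: str) -> str:
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
                digest.update(chunk)
        return (
            "version https://git-lfs.github.com/spec/v1\n"
            f"oid sha256:{digest.hexdigest()}\n"
            f"size {os.path.getsize(path)}\n"
        )

    # e.g. lfs_pointer("model.safetensors") should reproduce the new pointer above.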
tokenizer.json CHANGED
@@ -1,21 +1,7 @@
  {
  "version": "1.0",
- "truncation": {
- "direction": "Right",
- "max_length": 1024,
- "strategy": "LongestFirst",
- "stride": 0
- },
- "padding": {
- "strategy": {
- "Fixed": 1024
- },
- "direction": "Right",
- "pad_to_multiple_of": null,
- "pad_id": 64002,
- "pad_type_id": 0,
- "pad_token": "[PAD]"
- },
+ "truncation": null,
+ "padding": null,
  "added_tokens": [
  {
  "id": 0,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c626dac860f540845e1264968171d09c78ff900fa0e28bb6553c1e6488baadbb
+ oid sha256:e35118fac8928c2503b72682aeecba0516bf0a7b8ce487ee06590491631f00dc
  size 5240