Jingmei committed on
Commit 82a7576
1 Parent(s): ba92c22

Training in progress, step 130

adapter_config.json CHANGED
@@ -20,8 +20,8 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "q_proj",
- "v_proj"
+ "v_proj",
+ "q_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ca18d6d2f7f0c409581aa17556d38b08359251fe53e40611071317ac6a3d07ac
+ oid sha256:bffba4d73516fe9b4096aadb07965617d7cbcb77687d6299c8030a6575d9531e
  size 16794200
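The adapter weights themselves did change: the Git LFS pointer records a new sha256 oid for adapter_model.safetensors while the size (16,794,200 bytes) stays the same. As a small, hypothetical check, assuming the file has been downloaded locally, the pointer oid can be compared against the file's own sha256:

import hashlib

EXPECTED_OID = "bffba4d73516fe9b4096aadb07965617d7cbcb77687d6299c8030a6575d9531e"

def sha256_of(path):
    # Hash the file in 1 MiB chunks so large checkpoints need not fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# The local path is an assumption; adjust to wherever the file was downloaded.
actual = sha256_of("adapter_model.safetensors")
print("match" if actual == EXPECTED_OID else "mismatch: " + actual)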
trainer_peft.log CHANGED
@@ -632,3 +632,65 @@
  2024-06-01 21:25 - Setup optimizer
  2024-06-01 21:25 - Continue training!!
  2024-06-01 21:25 - Continue training!!
+ 2024-06-02 08:00 - Cuda check
+ 2024-06-02 08:00 - True
+ 2024-06-02 08:00 - 2
+ 2024-06-02 08:00 - Configue Model and tokenizer
+ 2024-06-02 08:00 - Cuda check
+ 2024-06-02 08:00 - True
+ 2024-06-02 08:00 - 2
+ 2024-06-02 08:00 - Configue Model and tokenizer
+ 2024-06-02 08:00 - Memory usage in 0.00 GB
+ 2024-06-02 08:00 - Memory usage in 0.00 GB
+ 2024-06-02 08:00 - Dataset loaded successfully:
+     train-Jingmei/Pandemic_ECDC
+     test -Jingmei/Pandemic
+ 2024-06-02 08:00 - Tokenize data: DatasetDict({
+     train: Dataset({
+         features: ['input_ids', 'attention_mask'],
+         num_rows: 7008
+     })
+     test: Dataset({
+         features: ['input_ids', 'attention_mask'],
+         num_rows: 8264
+     })
+ })
+ 2024-06-02 08:00 - Split data into chunks:DatasetDict({
+     train: Dataset({
+         features: ['input_ids', 'attention_mask'],
+         num_rows: 103936
+     })
+     test: Dataset({
+         features: ['input_ids', 'attention_mask'],
+         num_rows: 198964
+     })
+ })
+ 2024-06-02 08:00 - Setup PEFT
+ 2024-06-02 08:00 - Dataset loaded successfully:
+     train-Jingmei/Pandemic_ECDC
+     test -Jingmei/Pandemic
+ 2024-06-02 08:00 - Tokenize data: DatasetDict({
+     train: Dataset({
+         features: ['input_ids', 'attention_mask'],
+         num_rows: 7008
+     })
+     test: Dataset({
+         features: ['input_ids', 'attention_mask'],
+         num_rows: 8264
+     })
+ })
+ 2024-06-02 08:00 - Setup optimizer
+ 2024-06-02 08:00 - Split data into chunks:DatasetDict({
+     train: Dataset({
+         features: ['input_ids', 'attention_mask'],
+         num_rows: 103936
+     })
+     test: Dataset({
+         features: ['input_ids', 'attention_mask'],
+         num_rows: 198964
+     })
+ })
+ 2024-06-02 08:00 - Setup PEFT
+ 2024-06-02 08:00 - Setup optimizer
+ 2024-06-02 08:00 - Continue training!!
+ 2024-06-02 08:00 - Continue training!!
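The new log lines describe the resume run behind this checkpoint: a CUDA check (available, 2 devices), model and tokenizer configuration, loading Jingmei/Pandemic_ECDC for training and Jingmei/Pandemic for evaluation, tokenizing (7,008 / 8,264 rows), regrouping the token streams into fixed-length chunks (103,936 / 198,964 rows), then PEFT and optimizer setup before training continues; most entries appear twice, consistent with one process per GPU. The following is only a rough sketch of that flow under stated assumptions, not the repository's training script: the base model, the text column name, the block size, and all hyperparameters are placeholders.

import torch
from datasets import load_dataset
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments

# "Cuda check" / "True" / "2": availability and device count.
print(torch.cuda.is_available(), torch.cuda.device_count())

# "Configue Model and tokenizer" -- the base model is an assumed placeholder; any
# causal LM with q_proj/v_proj attention projections would fit the adapter config.
BASE = "facebook/opt-125m"
tokenizer = AutoTokenizer.from_pretrained(BASE)
model = AutoModelForCausalLM.from_pretrained(BASE)

# "Dataset loaded successfully": train from Jingmei/Pandemic_ECDC, eval from Jingmei/Pandemic.
train_raw = load_dataset("Jingmei/Pandemic_ECDC", split="train")
test_raw = load_dataset("Jingmei/Pandemic", split="train")  # split name assumed

# "Tokenize data": yields the input_ids / attention_mask columns seen in the log.
def tokenize(batch):
    return tokenizer(batch["text"])  # "text" column name is an assumption

train_tok = train_raw.map(tokenize, batched=True, remove_columns=train_raw.column_names)
test_tok = test_raw.map(tokenize, batched=True, remove_columns=test_raw.column_names)

# "Split data into chunks": concatenate the token streams and cut fixed-length
# blocks, which is how 7,008 / 8,264 documents become 103,936 / 198,964 rows.
BLOCK = 512  # assumed block size

def chunk(batch):
    ids = sum(batch["input_ids"], [])
    mask = sum(batch["attention_mask"], [])
    total = (len(ids) // BLOCK) * BLOCK
    out = {
        "input_ids": [ids[i:i + BLOCK] for i in range(0, total, BLOCK)],
        "attention_mask": [mask[i:i + BLOCK] for i in range(0, total, BLOCK)],
    }
    out["labels"] = [list(x) for x in out["input_ids"]]  # causal LM: labels mirror inputs
    return out

train_ds = train_tok.map(chunk, batched=True)
test_ds = test_tok.map(chunk, batched=True)

# "Setup PEFT": LoRA on q_proj / v_proj, matching adapter_config.json above.
model = get_peft_model(model, LoraConfig(task_type="CAUSAL_LM",
                                         target_modules=["q_proj", "v_proj"]))

# "Setup optimizer" / "Continue training!!": the Trainer builds the optimizer and
# resumes from the latest checkpoint in output_dir.
trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="out", per_device_train_batch_size=1),
    train_dataset=train_ds,
    eval_dataset=test_ds,
)
trainer.train(resume_from_checkpoint=True)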
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5c85faa6f1388329053303a670f52dcb9eed815eaa7a522050fc6b2eaeb02c67
+ oid sha256:3a1d89ea8316291bc76327283ce8bbd2251c3c731ccf47246fcc211e7fe9a4d2
  size 5176
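training_args.bin is a pickled TrainingArguments object, so only its hash changes here and the diff shows nothing human-readable. If the actual hyperparameters are of interest, a local copy can be inspected along these lines (a sketch; the attributes printed are just examples):

import torch

# weights_only=False is needed on recent PyTorch, because the file stores a
# pickled TrainingArguments object rather than plain tensors.
args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # typically "TrainingArguments"
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)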