gsarti committed on
Commit 7bfdcbc
1 Parent(s): ffa055c

Initial commit

README.md CHANGED
@@ -1,3 +1,88 @@
  ---
- license: apache-2.0
+ license: mit
+ tags:
+ - generated_from_trainer
+ datasets:
+ - it5/datasets
+ metrics:
+ - rouge
+ model-index:
+ - name: it5-efficient-small-el32-st_r2g-0.0003
+   results:
+   - task:
+       name: Summarization
+       type: summarization
+     dataset:
+       name: it5/datasets st_r2g
+       type: it5/datasets
+       args: st_r2g
+     metrics:
+     - name: Rouge1
+       type: rouge
+       value: 30.0502
  ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # it5-efficient-small-el32-st_r2g-0.0003
+
+ This model is a fine-tuned version of [stefan-it/it5-efficient-small-el32](https://huggingface.co/stefan-it/it5-efficient-small-el32) on the it5/datasets st_r2g dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 2.6135
+ - Rouge1: 30.0502
+ - Rouge2: 11.5687
+ - Rougel: 26.5953
+ - Rougelsum: 27.0402
+ - Gen Len: 16.9578
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0003
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 10.0
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
+ |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:|
+ | 3.1265 | 0.74 | 5000 | 2.7247 | 26.8378 | 9.3464 | 23.9521 | 24.2837 | 15.5914 |
+ | 2.8786 | 1.49 | 10000 | 2.6532 | 27.5869 | 10.0861 | 24.7406 | 25.0245 | 15.3272 |
+ | 2.6587 | 2.23 | 15000 | 2.6080 | 28.2336 | 10.5229 | 25.3053 | 25.6716 | 15.4338 |
+ | 2.664 | 2.98 | 20000 | 2.5630 | 28.6673 | 10.8421 | 25.7032 | 26.0245 | 15.6255 |
+ | 2.4896 | 3.72 | 25000 | 2.5679 | 28.842 | 10.885 | 25.6757 | 26.0633 | 16.1841 |
+ | 2.34 | 4.47 | 30000 | 2.5564 | 29.3246 | 11.1981 | 26.1637 | 26.5392 | 15.7826 |
+ | 2.2204 | 5.21 | 35000 | 2.5744 | 29.5545 | 11.3806 | 26.3237 | 26.6993 | 15.8374 |
+ | 2.2301 | 5.96 | 40000 | 2.5614 | 29.5872 | 11.4227 | 26.3139 | 26.7196 | 15.7213 |
+ | 2.1219 | 6.7 | 45000 | 2.5617 | 29.8256 | 11.3702 | 26.4156 | 26.8465 | 15.936 |
+ | 2.007 | 7.45 | 50000 | 2.6014 | 29.743 | 11.4336 | 26.38 | 26.772 | 15.7144 |
+ | 1.9398 | 8.19 | 55000 | 2.6080 | 29.9478 | 11.4801 | 26.5352 | 26.9746 | 15.9308 |
+ | 1.9426 | 8.94 | 60000 | 2.6022 | 30.097 | 11.5602 | 26.705 | 27.1092 | 15.8598 |
+ | 1.8853 | 9.68 | 65000 | 2.6138 | 30.1588 | 11.5823 | 26.6984 | 27.1371 | 15.803 |
+
+
+ ### Framework versions
+
+ - Transformers 4.15.0
+ - Pytorch 1.10.0+cu102
+ - Datasets 1.17.0
+ - Tokenizers 0.10.3
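The card above leaves "Intended uses & limitations" unfilled. A minimal inference sketch for this checkpoint follows; it assumes a local clone of the repository (the final Hub repo id is not stated in this commit) and assumes no task prefix is required, which should be checked against the IT5 training setup.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# "." stands for a local clone of this repository (placeholder path; the Hub
# repo id is not given in this commit).
model_dir = "."
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForSeq2SeqLM.from_pretrained(model_dir)

# Placeholder Italian input; the card declares the task as Summarization on the
# st_r2g split of it5/datasets, with an average generation length of ~17 tokens.
source_text = "Le autorità hanno annunciato oggi un nuovo piano per la mobilità urbana della capitale."
inputs = tokenizer(source_text, return_tensors="pt", truncation=True, max_length=512)

# No task prefix is added here; beam search with a short length budget.
summary_ids = model.generate(**inputs, num_beams=4, max_length=64)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```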
all_results.json ADDED
@@ -0,0 +1,18 @@
1
+ {
2
+ "epoch": 10.0,
3
+ "eval_gen_len": 16.9578,
4
+ "eval_loss": 2.6134610176086426,
5
+ "eval_rouge1": 30.0502,
6
+ "eval_rouge2": 11.5687,
7
+ "eval_rougeL": 26.5953,
8
+ "eval_rougeLsum": 27.0402,
9
+ "eval_runtime": 582.0642,
10
+ "eval_samples": 10000,
11
+ "eval_samples_per_second": 17.18,
12
+ "eval_steps_per_second": 2.148,
13
+ "train_loss": 2.3876063485090873,
14
+ "train_runtime": 39696.1157,
15
+ "train_samples": 53701,
16
+ "train_samples_per_second": 13.528,
17
+ "train_steps_per_second": 1.691
18
+ }
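The throughput fields above are derived from the sample counts and runtimes in the same file, plus the batch size of 8 from the card; a quick cross-check:

```python
# Cross-check the derived throughput values in all_results.json.
eval_samples, eval_runtime = 10000, 582.0642
train_samples, train_runtime, epochs = 53701, 39696.1157, 10
eval_batch_size = 8  # from the training hyperparameters in the README

print(round(eval_samples / eval_runtime, 2))                    # 17.18 samples/s
print(round(eval_samples / eval_batch_size / eval_runtime, 3))  # 2.148 steps/s
print(round(train_samples * epochs / train_runtime, 3))         # 13.528 samples/s
```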
config.json ADDED
@@ -0,0 +1,29 @@
1
+ {
2
+ "_name_or_path": ".",
3
+ "architectures": [
4
+ "T5ForConditionalGeneration"
5
+ ],
6
+ "d_ff": 2048,
7
+ "d_kv": 64,
8
+ "d_model": 512,
9
+ "decoder_start_token_id": 0,
10
+ "dropout_rate": 0.1,
11
+ "eos_token_id": 1,
12
+ "feed_forward_proj": "relu",
13
+ "initializer_factor": 1.0,
14
+ "is_encoder_decoder": true,
15
+ "layer_norm_epsilon": 1e-06,
16
+ "model_type": "t5",
17
+ "n_positions": 512,
18
+ "num_decoder_layers": 6,
19
+ "num_heads": 8,
20
+ "num_layers": 32,
21
+ "output_past": true,
22
+ "pad_token_id": 0,
23
+ "relative_attention_max_distance": 128,
24
+ "relative_attention_num_buckets": 32,
25
+ "torch_dtype": "float32",
26
+ "transformers_version": "4.15.0",
27
+ "use_cache": true,
28
+ "vocab_size": 32100
29
+ }
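The configuration shows the deep-narrow layout of the EL32 "efficient" base checkpoint: 32 encoder layers paired with only 6 decoder layers at d_model 512. A small inspection sketch, assuming a local clone of the repository:

```python
from transformers import T5Config

cfg = T5Config.from_pretrained(".")  # "." = local clone of this repo (assumed)

print(cfg.num_layers)          # 32 encoder blocks
print(cfg.num_decoder_layers)  # 6 decoder blocks
print(cfg.d_model, cfg.d_ff)   # 512, 2048
print(cfg.vocab_size)          # 32100
```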
eval_results.json ADDED
@@ -0,0 +1,13 @@
1
+ {
2
+ "epoch": 10.0,
3
+ "eval_gen_len": 16.9578,
4
+ "eval_loss": 2.6134610176086426,
5
+ "eval_rouge1": 30.0502,
6
+ "eval_rouge2": 11.5687,
7
+ "eval_rougeL": 26.5953,
8
+ "eval_rougeLsum": 27.0402,
9
+ "eval_runtime": 582.0642,
10
+ "eval_samples": 10000,
11
+ "eval_samples_per_second": 17.18,
12
+ "eval_steps_per_second": 2.148
13
+ }
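The Rouge values are F-measures scaled to percentages, as produced by the summarization fine-tuning scripts of this Transformers/Datasets generation. A sketch of that computation with datasets 1.17.0 (the version pinned above); predictions and references are placeholders:

```python
from datasets import load_metric

rouge = load_metric("rouge")

# Placeholders: in practice these are the decoded generations and the gold
# targets for the st_r2g evaluation split.
predictions = ["testo generato dal modello"]
references = ["testo di riferimento"]

scores = rouge.compute(predictions=predictions, references=references)
# Each entry is an aggregate; the reported numbers correspond to the mid
# F-measure multiplied by 100 (e.g. eval_rouge1 = 30.0502).
print(round(scores["rouge1"].mid.fmeasure * 100, 4))
```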
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f32f90aca865f75083753513b2b24474e246018fdbeeb3bc535db1c5be83edf5
3
+ size 569246164
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6efe540637765018901b6e84d1ac7e5b5607937125de42cd0c9532f3ef1858f
3
+ size 569387035
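flax_model.msgpack and pytorch_model.bin are tracked with Git LFS, so what is committed here are three-line pointer files (spec version, sha256 of the object, size in bytes) rather than the roughly 570 MB weight files themselves. A sketch for fetching and loading the PyTorch weights once the repo is on the Hub; the repo id is a placeholder:

```python
import torch
from huggingface_hub import hf_hub_download

# Placeholder repo id: the actual Hub location is not stated in this commit.
weights_path = hf_hub_download(
    repo_id="<namespace>/it5-efficient-small-el32-st_r2g-0.0003",
    filename="pytorch_model.bin",
)

# pytorch_model.bin is a plain state dict (~569 MB per the LFS pointer above).
state_dict = torch.load(weights_path, map_location="cpu")
print(len(state_dict), "weight tensors")
```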
runs/Apr28_20-05-28_pg-gpu26/1651169174.545433/events.out.tfevents.1651169174.pg-gpu26.22885.1 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:920b4fd18410a4c0e175a4ebc37de893944eee6ca540d12ae522a036b2e0c9f7
3
+ size 5139
runs/Apr28_20-05-28_pg-gpu26/events.out.tfevents.1651169174.pg-gpu26.22885.0 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3098766f0e1c89dfd596e1d071d3fb4baa4dadf3e53c794664fc65fadf20e8c6
3
+ size 32267
runs/Apr28_20-05-28_pg-gpu26/events.out.tfevents.1651209455.pg-gpu26.22885.2 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:954a9731b77ac33087e982339d55d0df962d750b70255902ef6092b831b40d3e
3
+ size 575
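The runs/ directory contains the TensorBoard event files written on pg-gpu26 during this run. They can be read programmatically; a sketch using TensorBoard's EventAccumulator (the scalar tag names such as "train/loss" are assumptions and depend on the Trainer version):

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Apr28_20-05-28_pg-gpu26")  # run directory from this commit
acc.Reload()

print(acc.Tags()["scalars"])           # available scalar tags (names assumed, e.g. "train/loss")
for event in acc.Scalars("train/loss")[:5]:
    print(event.step, event.value)
```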
special_tokens_map.json ADDED
@@ -0,0 +1 @@
1
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
spiece.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2dffd01fc009b7e92d98eddff8853983e271b41302ed0d363000e8581df12000
3
+ size 817200
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c4d6d87fd311c59ea51057e9f43ebe541e155cf587b4fb3806011b806897348e
3
+ size 569947488
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
1
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "special_tokens_map_file": null, "name_or_path": "stefan-it/it5-efficient-small-el32", "sp_model_kwargs": {}, "tokenizer_class": "T5Tokenizer"}
train_results.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "epoch": 10.0,
3
+ "train_loss": 2.3876063485090873,
4
+ "train_runtime": 39696.1157,
5
+ "train_samples": 53701,
6
+ "train_samples_per_second": 13.528,
7
+ "train_steps_per_second": 1.691
8
+ }
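train_results.json ties back to the step counts reported elsewhere: with 53701 samples and a batch size of 8, an epoch is ceil(53701 / 8) = 6713 optimizer steps, and 10 epochs give the 67130 steps recorded in trainer_state.json below.

```python
import math

train_samples, batch_size, epochs = 53701, 8, 10
steps_per_epoch = math.ceil(train_samples / batch_size)
print(steps_per_epoch)           # 6713
print(steps_per_epoch * epochs)  # 67130, matching global_step / max_steps
```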
trainer_state.json ADDED
@@ -0,0 +1,998 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 10.0,
5
+ "global_step": 67130,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.07,
12
+ "learning_rate": 0.000297765529569492,
13
+ "loss": 4.1249,
14
+ "step": 500
15
+ },
16
+ {
17
+ "epoch": 0.15,
18
+ "learning_rate": 0.00029553105913898405,
19
+ "loss": 3.6267,
20
+ "step": 1000
21
+ },
22
+ {
23
+ "epoch": 0.22,
24
+ "learning_rate": 0.00029329658870847603,
25
+ "loss": 3.4522,
26
+ "step": 1500
27
+ },
28
+ {
29
+ "epoch": 0.3,
30
+ "learning_rate": 0.00029106211827796807,
31
+ "loss": 3.3665,
32
+ "step": 2000
33
+ },
34
+ {
35
+ "epoch": 0.37,
36
+ "learning_rate": 0.0002888276478474601,
37
+ "loss": 3.3154,
38
+ "step": 2500
39
+ },
40
+ {
41
+ "epoch": 0.45,
42
+ "learning_rate": 0.0002865931774169522,
43
+ "loss": 3.2553,
44
+ "step": 3000
45
+ },
46
+ {
47
+ "epoch": 0.52,
48
+ "learning_rate": 0.0002843587069864442,
49
+ "loss": 3.2225,
50
+ "step": 3500
51
+ },
52
+ {
53
+ "epoch": 0.6,
54
+ "learning_rate": 0.0002821242365559362,
55
+ "loss": 3.1854,
56
+ "step": 4000
57
+ },
58
+ {
59
+ "epoch": 0.67,
60
+ "learning_rate": 0.00027988976612542825,
61
+ "loss": 3.1867,
62
+ "step": 4500
63
+ },
64
+ {
65
+ "epoch": 0.74,
66
+ "learning_rate": 0.0002776552956949203,
67
+ "loss": 3.1265,
68
+ "step": 5000
69
+ },
70
+ {
71
+ "epoch": 0.74,
72
+ "eval_gen_len": 15.5914,
73
+ "eval_loss": 2.7246766090393066,
74
+ "eval_rouge1": 26.8378,
75
+ "eval_rouge2": 9.3464,
76
+ "eval_rougeL": 23.9521,
77
+ "eval_rougeLsum": 24.2837,
78
+ "eval_runtime": 527.2879,
79
+ "eval_samples_per_second": 18.965,
80
+ "eval_steps_per_second": 2.371,
81
+ "step": 5000
82
+ },
83
+ {
84
+ "epoch": 0.82,
85
+ "learning_rate": 0.0002754208252644123,
86
+ "loss": 3.1287,
87
+ "step": 5500
88
+ },
89
+ {
90
+ "epoch": 0.89,
91
+ "learning_rate": 0.0002731863548339043,
92
+ "loss": 3.1078,
93
+ "step": 6000
94
+ },
95
+ {
96
+ "epoch": 0.97,
97
+ "learning_rate": 0.00027095188440339634,
98
+ "loss": 3.0787,
99
+ "step": 6500
100
+ },
101
+ {
102
+ "epoch": 1.04,
103
+ "learning_rate": 0.0002687174139728884,
104
+ "loss": 3.0054,
105
+ "step": 7000
106
+ },
107
+ {
108
+ "epoch": 1.12,
109
+ "learning_rate": 0.0002664829435423804,
110
+ "loss": 2.8889,
111
+ "step": 7500
112
+ },
113
+ {
114
+ "epoch": 1.19,
115
+ "learning_rate": 0.00026424847311187245,
116
+ "loss": 2.9183,
117
+ "step": 8000
118
+ },
119
+ {
120
+ "epoch": 1.27,
121
+ "learning_rate": 0.0002620140026813645,
122
+ "loss": 2.8848,
123
+ "step": 8500
124
+ },
125
+ {
126
+ "epoch": 1.34,
127
+ "learning_rate": 0.00025977953225085653,
128
+ "loss": 2.872,
129
+ "step": 9000
130
+ },
131
+ {
132
+ "epoch": 1.42,
133
+ "learning_rate": 0.00025754506182034857,
134
+ "loss": 2.898,
135
+ "step": 9500
136
+ },
137
+ {
138
+ "epoch": 1.49,
139
+ "learning_rate": 0.0002553105913898406,
140
+ "loss": 2.8786,
141
+ "step": 10000
142
+ },
143
+ {
144
+ "epoch": 1.49,
145
+ "eval_gen_len": 15.3272,
146
+ "eval_loss": 2.653170347213745,
147
+ "eval_rouge1": 27.5869,
148
+ "eval_rouge2": 10.0861,
149
+ "eval_rougeL": 24.7406,
150
+ "eval_rougeLsum": 25.0245,
151
+ "eval_runtime": 532.273,
152
+ "eval_samples_per_second": 18.787,
153
+ "eval_steps_per_second": 2.348,
154
+ "step": 10000
155
+ },
156
+ {
157
+ "epoch": 1.56,
158
+ "learning_rate": 0.0002530761209593326,
159
+ "loss": 2.8664,
160
+ "step": 10500
161
+ },
162
+ {
163
+ "epoch": 1.64,
164
+ "learning_rate": 0.0002508416505288246,
165
+ "loss": 2.873,
166
+ "step": 11000
167
+ },
168
+ {
169
+ "epoch": 1.71,
170
+ "learning_rate": 0.00024860718009831666,
171
+ "loss": 2.8685,
172
+ "step": 11500
173
+ },
174
+ {
175
+ "epoch": 1.79,
176
+ "learning_rate": 0.0002463727096678087,
177
+ "loss": 2.8713,
178
+ "step": 12000
179
+ },
180
+ {
181
+ "epoch": 1.86,
182
+ "learning_rate": 0.00024413823923730073,
183
+ "loss": 2.8558,
184
+ "step": 12500
185
+ },
186
+ {
187
+ "epoch": 1.94,
188
+ "learning_rate": 0.00024190376880679277,
189
+ "loss": 2.8361,
190
+ "step": 13000
191
+ },
192
+ {
193
+ "epoch": 2.01,
194
+ "learning_rate": 0.0002396692983762848,
195
+ "loss": 2.8026,
196
+ "step": 13500
197
+ },
198
+ {
199
+ "epoch": 2.09,
200
+ "learning_rate": 0.00023743482794577684,
201
+ "loss": 2.6527,
202
+ "step": 14000
203
+ },
204
+ {
205
+ "epoch": 2.16,
206
+ "learning_rate": 0.00023520035751526885,
207
+ "loss": 2.6452,
208
+ "step": 14500
209
+ },
210
+ {
211
+ "epoch": 2.23,
212
+ "learning_rate": 0.0002329658870847609,
213
+ "loss": 2.6587,
214
+ "step": 15000
215
+ },
216
+ {
217
+ "epoch": 2.23,
218
+ "eval_gen_len": 15.4338,
219
+ "eval_loss": 2.6080288887023926,
220
+ "eval_rouge1": 28.2336,
221
+ "eval_rouge2": 10.5229,
222
+ "eval_rougeL": 25.3053,
223
+ "eval_rougeLsum": 25.6716,
224
+ "eval_runtime": 535.3754,
225
+ "eval_samples_per_second": 18.678,
226
+ "eval_steps_per_second": 2.335,
227
+ "step": 15000
228
+ },
229
+ {
230
+ "epoch": 2.31,
231
+ "learning_rate": 0.00023073141665425293,
232
+ "loss": 2.6667,
233
+ "step": 15500
234
+ },
235
+ {
236
+ "epoch": 2.38,
237
+ "learning_rate": 0.00022849694622374496,
238
+ "loss": 2.6723,
239
+ "step": 16000
240
+ },
241
+ {
242
+ "epoch": 2.46,
243
+ "learning_rate": 0.000226262475793237,
244
+ "loss": 2.6652,
245
+ "step": 16500
246
+ },
247
+ {
248
+ "epoch": 2.53,
249
+ "learning_rate": 0.000224028005362729,
250
+ "loss": 2.6577,
251
+ "step": 17000
252
+ },
253
+ {
254
+ "epoch": 2.61,
255
+ "learning_rate": 0.00022179353493222105,
256
+ "loss": 2.6673,
257
+ "step": 17500
258
+ },
259
+ {
260
+ "epoch": 2.68,
261
+ "learning_rate": 0.00021955906450171308,
262
+ "loss": 2.6533,
263
+ "step": 18000
264
+ },
265
+ {
266
+ "epoch": 2.76,
267
+ "learning_rate": 0.00021732459407120512,
268
+ "loss": 2.6571,
269
+ "step": 18500
270
+ },
271
+ {
272
+ "epoch": 2.83,
273
+ "learning_rate": 0.00021509012364069713,
274
+ "loss": 2.6636,
275
+ "step": 19000
276
+ },
277
+ {
278
+ "epoch": 2.9,
279
+ "learning_rate": 0.00021285565321018917,
280
+ "loss": 2.6579,
281
+ "step": 19500
282
+ },
283
+ {
284
+ "epoch": 2.98,
285
+ "learning_rate": 0.0002106211827796812,
286
+ "loss": 2.664,
287
+ "step": 20000
288
+ },
289
+ {
290
+ "epoch": 2.98,
291
+ "eval_gen_len": 15.6255,
292
+ "eval_loss": 2.5629758834838867,
293
+ "eval_rouge1": 28.6673,
294
+ "eval_rouge2": 10.8421,
295
+ "eval_rougeL": 25.7032,
296
+ "eval_rougeLsum": 26.0245,
297
+ "eval_runtime": 536.8291,
298
+ "eval_samples_per_second": 18.628,
299
+ "eval_steps_per_second": 2.328,
300
+ "step": 20000
301
+ },
302
+ {
303
+ "epoch": 3.05,
304
+ "learning_rate": 0.00020838671234917324,
305
+ "loss": 2.512,
306
+ "step": 20500
307
+ },
308
+ {
309
+ "epoch": 3.13,
310
+ "learning_rate": 0.00020615224191866528,
311
+ "loss": 2.4748,
312
+ "step": 21000
313
+ },
314
+ {
315
+ "epoch": 3.2,
316
+ "learning_rate": 0.0002039177714881573,
317
+ "loss": 2.4773,
318
+ "step": 21500
319
+ },
320
+ {
321
+ "epoch": 3.28,
322
+ "learning_rate": 0.00020168330105764932,
323
+ "loss": 2.4694,
324
+ "step": 22000
325
+ },
326
+ {
327
+ "epoch": 3.35,
328
+ "learning_rate": 0.00019944883062714136,
329
+ "loss": 2.4928,
330
+ "step": 22500
331
+ },
332
+ {
333
+ "epoch": 3.43,
334
+ "learning_rate": 0.0001972143601966334,
335
+ "loss": 2.4941,
336
+ "step": 23000
337
+ },
338
+ {
339
+ "epoch": 3.5,
340
+ "learning_rate": 0.0001949798897661254,
341
+ "loss": 2.4867,
342
+ "step": 23500
343
+ },
344
+ {
345
+ "epoch": 3.58,
346
+ "learning_rate": 0.00019274541933561744,
347
+ "loss": 2.4978,
348
+ "step": 24000
349
+ },
350
+ {
351
+ "epoch": 3.65,
352
+ "learning_rate": 0.00019051094890510948,
353
+ "loss": 2.5118,
354
+ "step": 24500
355
+ },
356
+ {
357
+ "epoch": 3.72,
358
+ "learning_rate": 0.00018827647847460152,
359
+ "loss": 2.4896,
360
+ "step": 25000
361
+ },
362
+ {
363
+ "epoch": 3.72,
364
+ "eval_gen_len": 16.1841,
365
+ "eval_loss": 2.567906141281128,
366
+ "eval_rouge1": 28.842,
367
+ "eval_rouge2": 10.885,
368
+ "eval_rougeL": 25.6757,
369
+ "eval_rougeLsum": 26.0633,
370
+ "eval_runtime": 528.6739,
371
+ "eval_samples_per_second": 18.915,
372
+ "eval_steps_per_second": 2.364,
373
+ "step": 25000
374
+ },
375
+ {
376
+ "epoch": 3.8,
377
+ "learning_rate": 0.00018604200804409355,
378
+ "loss": 2.5141,
379
+ "step": 25500
380
+ },
381
+ {
382
+ "epoch": 3.87,
383
+ "learning_rate": 0.00018380753761358556,
384
+ "loss": 2.5001,
385
+ "step": 26000
386
+ },
387
+ {
388
+ "epoch": 3.95,
389
+ "learning_rate": 0.0001815730671830776,
390
+ "loss": 2.4924,
391
+ "step": 26500
392
+ },
393
+ {
394
+ "epoch": 4.02,
395
+ "learning_rate": 0.00017933859675256964,
396
+ "loss": 2.4512,
397
+ "step": 27000
398
+ },
399
+ {
400
+ "epoch": 4.1,
401
+ "learning_rate": 0.00017710412632206167,
402
+ "loss": 2.3092,
403
+ "step": 27500
404
+ },
405
+ {
406
+ "epoch": 4.17,
407
+ "learning_rate": 0.00017486965589155368,
408
+ "loss": 2.3359,
409
+ "step": 28000
410
+ },
411
+ {
412
+ "epoch": 4.25,
413
+ "learning_rate": 0.00017263518546104572,
414
+ "loss": 2.3522,
415
+ "step": 28500
416
+ },
417
+ {
418
+ "epoch": 4.32,
419
+ "learning_rate": 0.00017040071503053776,
420
+ "loss": 2.3723,
421
+ "step": 29000
422
+ },
423
+ {
424
+ "epoch": 4.39,
425
+ "learning_rate": 0.0001681662446000298,
426
+ "loss": 2.3489,
427
+ "step": 29500
428
+ },
429
+ {
430
+ "epoch": 4.47,
431
+ "learning_rate": 0.00016593177416952183,
432
+ "loss": 2.34,
433
+ "step": 30000
434
+ },
435
+ {
436
+ "epoch": 4.47,
437
+ "eval_gen_len": 15.7826,
438
+ "eval_loss": 2.5564002990722656,
439
+ "eval_rouge1": 29.3246,
440
+ "eval_rouge2": 11.1981,
441
+ "eval_rougeL": 26.1637,
442
+ "eval_rougeLsum": 26.5392,
443
+ "eval_runtime": 536.1351,
444
+ "eval_samples_per_second": 18.652,
445
+ "eval_steps_per_second": 2.332,
446
+ "step": 30000
447
+ },
448
+ {
449
+ "epoch": 4.54,
450
+ "learning_rate": 0.00016369730373901384,
451
+ "loss": 2.3463,
452
+ "step": 30500
453
+ },
454
+ {
455
+ "epoch": 4.62,
456
+ "learning_rate": 0.00016146283330850588,
457
+ "loss": 2.3537,
458
+ "step": 31000
459
+ },
460
+ {
461
+ "epoch": 4.69,
462
+ "learning_rate": 0.00015922836287799791,
463
+ "loss": 2.345,
464
+ "step": 31500
465
+ },
466
+ {
467
+ "epoch": 4.77,
468
+ "learning_rate": 0.00015699389244748995,
469
+ "loss": 2.345,
470
+ "step": 32000
471
+ },
472
+ {
473
+ "epoch": 4.84,
474
+ "learning_rate": 0.00015475942201698196,
475
+ "loss": 2.3586,
476
+ "step": 32500
477
+ },
478
+ {
479
+ "epoch": 4.92,
480
+ "learning_rate": 0.000152524951586474,
481
+ "loss": 2.3495,
482
+ "step": 33000
483
+ },
484
+ {
485
+ "epoch": 4.99,
486
+ "learning_rate": 0.00015029048115596604,
487
+ "loss": 2.3716,
488
+ "step": 33500
489
+ },
490
+ {
491
+ "epoch": 5.06,
492
+ "learning_rate": 0.00014805601072545804,
493
+ "loss": 2.216,
494
+ "step": 34000
495
+ },
496
+ {
497
+ "epoch": 5.14,
498
+ "learning_rate": 0.00014582154029495008,
499
+ "loss": 2.211,
500
+ "step": 34500
501
+ },
502
+ {
503
+ "epoch": 5.21,
504
+ "learning_rate": 0.00014358706986444212,
505
+ "loss": 2.2204,
506
+ "step": 35000
507
+ },
508
+ {
509
+ "epoch": 5.21,
510
+ "eval_gen_len": 15.8374,
511
+ "eval_loss": 2.5744452476501465,
512
+ "eval_rouge1": 29.5545,
513
+ "eval_rouge2": 11.3806,
514
+ "eval_rougeL": 26.3237,
515
+ "eval_rougeLsum": 26.6993,
516
+ "eval_runtime": 531.1942,
517
+ "eval_samples_per_second": 18.826,
518
+ "eval_steps_per_second": 2.353,
519
+ "step": 35000
520
+ },
521
+ {
522
+ "epoch": 5.29,
523
+ "learning_rate": 0.00014135259943393416,
524
+ "loss": 2.2043,
525
+ "step": 35500
526
+ },
527
+ {
528
+ "epoch": 5.36,
529
+ "learning_rate": 0.0001391181290034262,
530
+ "loss": 2.2349,
531
+ "step": 36000
532
+ },
533
+ {
534
+ "epoch": 5.44,
535
+ "learning_rate": 0.0001368836585729182,
536
+ "loss": 2.2155,
537
+ "step": 36500
538
+ },
539
+ {
540
+ "epoch": 5.51,
541
+ "learning_rate": 0.00013464918814241024,
542
+ "loss": 2.2326,
543
+ "step": 37000
544
+ },
545
+ {
546
+ "epoch": 5.59,
547
+ "learning_rate": 0.00013241471771190228,
548
+ "loss": 2.2205,
549
+ "step": 37500
550
+ },
551
+ {
552
+ "epoch": 5.66,
553
+ "learning_rate": 0.0001301802472813943,
554
+ "loss": 2.2288,
555
+ "step": 38000
556
+ },
557
+ {
558
+ "epoch": 5.74,
559
+ "learning_rate": 0.00012794577685088632,
560
+ "loss": 2.228,
561
+ "step": 38500
562
+ },
563
+ {
564
+ "epoch": 5.81,
565
+ "learning_rate": 0.00012571130642037836,
566
+ "loss": 2.2365,
567
+ "step": 39000
568
+ },
569
+ {
570
+ "epoch": 5.88,
571
+ "learning_rate": 0.00012347683598987037,
572
+ "loss": 2.2164,
573
+ "step": 39500
574
+ },
575
+ {
576
+ "epoch": 5.96,
577
+ "learning_rate": 0.00012124236555936242,
578
+ "loss": 2.2301,
579
+ "step": 40000
580
+ },
581
+ {
582
+ "epoch": 5.96,
583
+ "eval_gen_len": 15.7213,
584
+ "eval_loss": 2.5613908767700195,
585
+ "eval_rouge1": 29.5872,
586
+ "eval_rouge2": 11.4227,
587
+ "eval_rougeL": 26.3139,
588
+ "eval_rougeLsum": 26.7196,
589
+ "eval_runtime": 535.5579,
590
+ "eval_samples_per_second": 18.672,
591
+ "eval_steps_per_second": 2.334,
592
+ "step": 40000
593
+ },
594
+ {
595
+ "epoch": 6.03,
596
+ "learning_rate": 0.00011900789512885446,
597
+ "loss": 2.1538,
598
+ "step": 40500
599
+ },
600
+ {
601
+ "epoch": 6.11,
602
+ "learning_rate": 0.00011677342469834648,
603
+ "loss": 2.0981,
604
+ "step": 41000
605
+ },
606
+ {
607
+ "epoch": 6.18,
608
+ "learning_rate": 0.00011453895426783852,
609
+ "loss": 2.0973,
610
+ "step": 41500
611
+ },
612
+ {
613
+ "epoch": 6.26,
614
+ "learning_rate": 0.00011230448383733054,
615
+ "loss": 2.0959,
616
+ "step": 42000
617
+ },
618
+ {
619
+ "epoch": 6.33,
620
+ "learning_rate": 0.00011007001340682258,
621
+ "loss": 2.1013,
622
+ "step": 42500
623
+ },
624
+ {
625
+ "epoch": 6.41,
626
+ "learning_rate": 0.0001078355429763146,
627
+ "loss": 2.1126,
628
+ "step": 43000
629
+ },
630
+ {
631
+ "epoch": 6.48,
632
+ "learning_rate": 0.00010560107254580664,
633
+ "loss": 2.097,
634
+ "step": 43500
635
+ },
636
+ {
637
+ "epoch": 6.55,
638
+ "learning_rate": 0.00010336660211529866,
639
+ "loss": 2.1278,
640
+ "step": 44000
641
+ },
642
+ {
643
+ "epoch": 6.63,
644
+ "learning_rate": 0.0001011321316847907,
645
+ "loss": 2.118,
646
+ "step": 44500
647
+ },
648
+ {
649
+ "epoch": 6.7,
650
+ "learning_rate": 9.889766125428273e-05,
651
+ "loss": 2.1219,
652
+ "step": 45000
653
+ },
654
+ {
655
+ "epoch": 6.7,
656
+ "eval_gen_len": 15.936,
657
+ "eval_loss": 2.5617337226867676,
658
+ "eval_rouge1": 29.8256,
659
+ "eval_rouge2": 11.3702,
660
+ "eval_rougeL": 26.4156,
661
+ "eval_rougeLsum": 26.8465,
662
+ "eval_runtime": 532.4007,
663
+ "eval_samples_per_second": 18.783,
664
+ "eval_steps_per_second": 2.348,
665
+ "step": 45000
666
+ },
667
+ {
668
+ "epoch": 6.78,
669
+ "learning_rate": 9.666319082377476e-05,
670
+ "loss": 2.128,
671
+ "step": 45500
672
+ },
673
+ {
674
+ "epoch": 6.85,
675
+ "learning_rate": 9.44287203932668e-05,
676
+ "loss": 2.1422,
677
+ "step": 46000
678
+ },
679
+ {
680
+ "epoch": 6.93,
681
+ "learning_rate": 9.219424996275882e-05,
682
+ "loss": 2.1481,
683
+ "step": 46500
684
+ },
685
+ {
686
+ "epoch": 7.0,
687
+ "learning_rate": 8.995977953225085e-05,
688
+ "loss": 2.1266,
689
+ "step": 47000
690
+ },
691
+ {
692
+ "epoch": 7.08,
693
+ "learning_rate": 8.772530910174288e-05,
694
+ "loss": 2.0216,
695
+ "step": 47500
696
+ },
697
+ {
698
+ "epoch": 7.15,
699
+ "learning_rate": 8.549083867123491e-05,
700
+ "loss": 2.0065,
701
+ "step": 48000
702
+ },
703
+ {
704
+ "epoch": 7.22,
705
+ "learning_rate": 8.325636824072694e-05,
706
+ "loss": 2.0364,
707
+ "step": 48500
708
+ },
709
+ {
710
+ "epoch": 7.3,
711
+ "learning_rate": 8.102189781021897e-05,
712
+ "loss": 2.0222,
713
+ "step": 49000
714
+ },
715
+ {
716
+ "epoch": 7.37,
717
+ "learning_rate": 7.8787427379711e-05,
718
+ "loss": 2.0166,
719
+ "step": 49500
720
+ },
721
+ {
722
+ "epoch": 7.45,
723
+ "learning_rate": 7.655295694920303e-05,
724
+ "loss": 2.007,
725
+ "step": 50000
726
+ },
727
+ {
728
+ "epoch": 7.45,
729
+ "eval_gen_len": 15.7144,
730
+ "eval_loss": 2.6014492511749268,
731
+ "eval_rouge1": 29.743,
732
+ "eval_rouge2": 11.4336,
733
+ "eval_rougeL": 26.38,
734
+ "eval_rougeLsum": 26.772,
735
+ "eval_runtime": 532.2148,
736
+ "eval_samples_per_second": 18.789,
737
+ "eval_steps_per_second": 2.349,
738
+ "step": 50000
739
+ },
740
+ {
741
+ "epoch": 7.52,
742
+ "learning_rate": 7.431848651869506e-05,
743
+ "loss": 2.0252,
744
+ "step": 50500
745
+ },
746
+ {
747
+ "epoch": 7.6,
748
+ "learning_rate": 7.20840160881871e-05,
749
+ "loss": 2.0376,
750
+ "step": 51000
751
+ },
752
+ {
753
+ "epoch": 7.67,
754
+ "learning_rate": 6.984954565767912e-05,
755
+ "loss": 2.0143,
756
+ "step": 51500
757
+ },
758
+ {
759
+ "epoch": 7.75,
760
+ "learning_rate": 6.761507522717115e-05,
761
+ "loss": 2.0242,
762
+ "step": 52000
763
+ },
764
+ {
765
+ "epoch": 7.82,
766
+ "learning_rate": 6.538060479666319e-05,
767
+ "loss": 2.0094,
768
+ "step": 52500
769
+ },
770
+ {
771
+ "epoch": 7.9,
772
+ "learning_rate": 6.314613436615521e-05,
773
+ "loss": 2.03,
774
+ "step": 53000
775
+ },
776
+ {
777
+ "epoch": 7.97,
778
+ "learning_rate": 6.091166393564725e-05,
779
+ "loss": 2.02,
780
+ "step": 53500
781
+ },
782
+ {
783
+ "epoch": 8.04,
784
+ "learning_rate": 5.867719350513928e-05,
785
+ "loss": 1.972,
786
+ "step": 54000
787
+ },
788
+ {
789
+ "epoch": 8.12,
790
+ "learning_rate": 5.6442723074631304e-05,
791
+ "loss": 1.9319,
792
+ "step": 54500
793
+ },
794
+ {
795
+ "epoch": 8.19,
796
+ "learning_rate": 5.4208252644123334e-05,
797
+ "loss": 1.9398,
798
+ "step": 55000
799
+ },
800
+ {
801
+ "epoch": 8.19,
802
+ "eval_gen_len": 15.9308,
803
+ "eval_loss": 2.608029365539551,
804
+ "eval_rouge1": 29.9478,
805
+ "eval_rouge2": 11.4801,
806
+ "eval_rougeL": 26.5352,
807
+ "eval_rougeLsum": 26.9746,
808
+ "eval_runtime": 532.8851,
809
+ "eval_samples_per_second": 18.766,
810
+ "eval_steps_per_second": 2.346,
811
+ "step": 55000
812
+ },
813
+ {
814
+ "epoch": 8.27,
815
+ "learning_rate": 5.1973782213615364e-05,
816
+ "loss": 1.9379,
817
+ "step": 55500
818
+ },
819
+ {
820
+ "epoch": 8.34,
821
+ "learning_rate": 4.9739311783107394e-05,
822
+ "loss": 1.9406,
823
+ "step": 56000
824
+ },
825
+ {
826
+ "epoch": 8.42,
827
+ "learning_rate": 4.7504841352599425e-05,
828
+ "loss": 1.9487,
829
+ "step": 56500
830
+ },
831
+ {
832
+ "epoch": 8.49,
833
+ "learning_rate": 4.5270370922091455e-05,
834
+ "loss": 1.9554,
835
+ "step": 57000
836
+ },
837
+ {
838
+ "epoch": 8.57,
839
+ "learning_rate": 4.303590049158349e-05,
840
+ "loss": 1.9453,
841
+ "step": 57500
842
+ },
843
+ {
844
+ "epoch": 8.64,
845
+ "learning_rate": 4.080143006107552e-05,
846
+ "loss": 1.9522,
847
+ "step": 58000
848
+ },
849
+ {
850
+ "epoch": 8.71,
851
+ "learning_rate": 3.856695963056755e-05,
852
+ "loss": 1.958,
853
+ "step": 58500
854
+ },
855
+ {
856
+ "epoch": 8.79,
857
+ "learning_rate": 3.633248920005958e-05,
858
+ "loss": 1.9495,
859
+ "step": 59000
860
+ },
861
+ {
862
+ "epoch": 8.86,
863
+ "learning_rate": 3.409801876955161e-05,
864
+ "loss": 1.9276,
865
+ "step": 59500
866
+ },
867
+ {
868
+ "epoch": 8.94,
869
+ "learning_rate": 3.186354833904364e-05,
870
+ "loss": 1.9426,
871
+ "step": 60000
872
+ },
873
+ {
874
+ "epoch": 8.94,
875
+ "eval_gen_len": 15.8598,
876
+ "eval_loss": 2.6021728515625,
877
+ "eval_rouge1": 30.097,
878
+ "eval_rouge2": 11.5602,
879
+ "eval_rougeL": 26.705,
880
+ "eval_rougeLsum": 27.1092,
881
+ "eval_runtime": 531.6176,
882
+ "eval_samples_per_second": 18.811,
883
+ "eval_steps_per_second": 2.351,
884
+ "step": 60000
885
+ },
886
+ {
887
+ "epoch": 9.01,
888
+ "learning_rate": 2.9629077908535672e-05,
889
+ "loss": 1.9438,
890
+ "step": 60500
891
+ },
892
+ {
893
+ "epoch": 9.09,
894
+ "learning_rate": 2.7394607478027705e-05,
895
+ "loss": 1.8807,
896
+ "step": 61000
897
+ },
898
+ {
899
+ "epoch": 9.16,
900
+ "learning_rate": 2.5160137047519735e-05,
901
+ "loss": 1.8708,
902
+ "step": 61500
903
+ },
904
+ {
905
+ "epoch": 9.24,
906
+ "learning_rate": 2.2925666617011765e-05,
907
+ "loss": 1.8727,
908
+ "step": 62000
909
+ },
910
+ {
911
+ "epoch": 9.31,
912
+ "learning_rate": 2.0691196186503795e-05,
913
+ "loss": 1.8911,
914
+ "step": 62500
915
+ },
916
+ {
917
+ "epoch": 9.38,
918
+ "learning_rate": 1.845672575599583e-05,
919
+ "loss": 1.8895,
920
+ "step": 63000
921
+ },
922
+ {
923
+ "epoch": 9.46,
924
+ "learning_rate": 1.622225532548786e-05,
925
+ "loss": 1.8851,
926
+ "step": 63500
927
+ },
928
+ {
929
+ "epoch": 9.53,
930
+ "learning_rate": 1.3987784894979889e-05,
931
+ "loss": 1.9082,
932
+ "step": 64000
933
+ },
934
+ {
935
+ "epoch": 9.61,
936
+ "learning_rate": 1.1753314464471919e-05,
937
+ "loss": 1.8864,
938
+ "step": 64500
939
+ },
940
+ {
941
+ "epoch": 9.68,
942
+ "learning_rate": 9.518844033963949e-06,
943
+ "loss": 1.8853,
944
+ "step": 65000
945
+ },
946
+ {
947
+ "epoch": 9.68,
948
+ "eval_gen_len": 15.803,
949
+ "eval_loss": 2.6137681007385254,
950
+ "eval_rouge1": 30.1588,
951
+ "eval_rouge2": 11.5823,
952
+ "eval_rougeL": 26.6984,
953
+ "eval_rougeLsum": 27.1371,
954
+ "eval_runtime": 529.3936,
955
+ "eval_samples_per_second": 18.89,
956
+ "eval_steps_per_second": 2.361,
957
+ "step": 65000
958
+ },
959
+ {
960
+ "epoch": 9.76,
961
+ "learning_rate": 7.284373603455981e-06,
962
+ "loss": 1.8973,
963
+ "step": 65500
964
+ },
965
+ {
966
+ "epoch": 9.83,
967
+ "learning_rate": 5.04990317294801e-06,
968
+ "loss": 1.8832,
969
+ "step": 66000
970
+ },
971
+ {
972
+ "epoch": 9.91,
973
+ "learning_rate": 2.815432742440042e-06,
974
+ "loss": 1.8678,
975
+ "step": 66500
976
+ },
977
+ {
978
+ "epoch": 9.98,
979
+ "learning_rate": 5.809623119320721e-07,
980
+ "loss": 1.9084,
981
+ "step": 67000
982
+ },
983
+ {
984
+ "epoch": 10.0,
985
+ "step": 67130,
986
+ "total_flos": 2.0764683296037274e+17,
987
+ "train_loss": 2.3876063485090873,
988
+ "train_runtime": 39696.1157,
989
+ "train_samples_per_second": 13.528,
990
+ "train_steps_per_second": 1.691
991
+ }
992
+ ],
993
+ "max_steps": 67130,
994
+ "num_train_epochs": 10,
995
+ "total_flos": 2.0764683296037274e+17,
996
+ "trial_name": null,
997
+ "trial_params": null
998
+ }
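trainer_state.json stores the full log_history, so the evaluation curve can be re-read without TensorBoard; note that best_metric and best_model_checkpoint are null, i.e. no best-model selection was active during training. A short parsing sketch:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic evaluation entries (every 5000 steps in this run).
evals = [entry for entry in state["log_history"] if "eval_rouge1" in entry]
for entry in evals:
    print(entry["step"], round(entry["eval_loss"], 4), entry["eval_rouge1"])

best = max(evals, key=lambda entry: entry["eval_rouge1"])
print("highest eval_rouge1:", best["eval_rouge1"], "at step", best["step"])
```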
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:954d8abdde4e1fcf58261d750d3e24dcdaabe264a9f1d1333d58551e441ddd3f
3
+ size 3183
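training_args.bin is the TrainingArguments object serialized with torch.save by the Trainer (here presumably Seq2SeqTrainingArguments, given the summarization setup). Loading it back recovers the exact run configuration; since this executes pickle, only do it for files you trust:

```python
import torch

# Requires a compatible transformers install (the run used 4.15.0).
args = torch.load("training_args.bin")
print(args.learning_rate)                # 0.0003
print(args.num_train_epochs)             # 10.0
print(args.per_device_train_batch_size)  # 8
```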