nour4286 committed
Commit 4c9bba9 (1 parent: 288619c)

Training in progress, epoch 1, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b513f65f071693a24c26f6acfe08868c10cef392b47b5791724718504c59b11c
+oid sha256:0321845aa554c15389dd24dff69e92c7373fc5b04cb68327a41cdc98bbcd31ba
 size 557116312
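
Only the LFS pointer changes here: the safetensors payload lives in Git LFS, and the pointer records its SHA-256 (oid) and byte size. A minimal Python sketch, using only the standard library and the path shown in this diff, to check that a downloaded checkpoint matches the new oid:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so large checkpoints don't need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected to print the oid recorded in the new pointer above:
# 0321845aa554c15389dd24dff69e92c7373fc5b04cb68327a41cdc98bbcd31ba
print(sha256_of("last-checkpoint/model.safetensors"))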
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:76f7e75f38e31f3caf6295d2c7a27b005bc90135c402a683980ad863b9088acd
+oid sha256:62afe7d04b548ec8eec3d1689d7d8c3e8d969ad62a70b280216e862ba19c8547
 size 1113991930
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:61452000e70e2b50dbac9e487e1828eeb816725960673eeb14278e12e0cf6cb5
3
  size 14244
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:59624509e89bbc9b94c591ed4735982e33caa7e6875690c4ff16d1a97717d87c
3
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67e029cb676b84aed4d4462a36ff279a302dfdcc098c603c675801d47eb56104
+oid sha256:9ac9643ef164647dd74d17c68b79347a9f51627588cd7ec3dd1a18bf83f7e68f
 size 1064
last-checkpoint/tokenizer.json CHANGED
@@ -1,6 +1,11 @@
1
  {
2
  "version": "1.0",
3
- "truncation": null,
 
 
 
 
 
4
  "padding": null,
5
  "added_tokens": [
6
  {
 
1
  {
2
  "version": "1.0",
3
+ "truncation": {
4
+ "direction": "Right",
5
+ "max_length": 128,
6
+ "strategy": "LongestFirst",
7
+ "stride": 0
8
+ },
9
  "padding": null,
10
  "added_tokens": [
11
  {
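
The only change to the tokenizer is that truncation goes from null to an explicit right-side, longest-first truncation at 128 tokens. A minimal sketch of how the same block could be produced with the tokenizers library (the direction keyword assumes a reasonably recent tokenizers release; file names are taken from this checkpoint):

from tokenizers import Tokenizer

# Load the serialized tokenizer from the checkpoint and enable the same
# truncation settings that appear in the new tokenizer.json above.
tok = Tokenizer.from_file("last-checkpoint/tokenizer.json")
tok.enable_truncation(max_length=128, stride=0,
                      strategy="longest_first", direction="right")
tok.save("tokenizer-with-truncation.json")  # re-serializes the truncation block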
last-checkpoint/trainer_state.json CHANGED
@@ -1,146 +1,50 @@
 {
-  "best_metric": 26.4112,
-  "best_model_checkpoint": "my-model/checkpoint-6112",
-  "epoch": 4.0,
+  "best_metric": 25.1709,
+  "best_model_checkpoint": "my-model/checkpoint-1528",
+  "epoch": 1.0,
   "eval_steps": 500,
-  "global_step": 6112,
+  "global_step": 1528,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 0.32722513089005234,
-      "grad_norm": 1.6467379331588745,
-      "learning_rate": 4.5909685863874345e-05,
-      "loss": 0.0168,
+      "grad_norm": 0.9616150856018066,
+      "learning_rate": 4.454624781849913e-05,
+      "loss": 0.0158,
       "step": 500
     },
     {
       "epoch": 0.6544502617801047,
-      "grad_norm": 1.3730037212371826,
-      "learning_rate": 4.181937172774869e-05,
-      "loss": 0.0246,
+      "grad_norm": 2.0808584690093994,
+      "learning_rate": 3.909249563699826e-05,
+      "loss": 0.0223,
       "step": 1000
     },
     {
       "epoch": 0.981675392670157,
-      "grad_norm": 1.1599562168121338,
-      "learning_rate": 3.7729057591623044e-05,
-      "loss": 0.0308,
+      "grad_norm": 1.145504117012024,
+      "learning_rate": 3.3638743455497386e-05,
+      "loss": 0.0271,
       "step": 1500
     },
     {
       "epoch": 1.0,
-      "eval_loss": 5.495022296905518,
-      "eval_rouge1": 24.8013,
-      "eval_rouge2": 9.5895,
-      "eval_rougeL": 21.5936,
-      "eval_runtime": 434.7617,
-      "eval_samples_per_second": 5.049,
-      "eval_steps_per_second": 0.506,
+      "eval_loss": 5.483295917510986,
+      "eval_rouge1": 25.1709,
+      "eval_rouge2": 10.0024,
+      "eval_rougeL": 21.9926,
+      "eval_runtime": 372.8877,
+      "eval_samples_per_second": 5.886,
+      "eval_steps_per_second": 0.59,
       "step": 1528
-    },
-    {
-      "epoch": 1.3089005235602094,
-      "grad_norm": 3.42366361618042,
-      "learning_rate": 3.3638743455497386e-05,
-      "loss": 0.0539,
-      "step": 2000
-    },
-    {
-      "epoch": 1.6361256544502618,
-      "grad_norm": 3.776289701461792,
-      "learning_rate": 2.954842931937173e-05,
-      "loss": 0.0727,
-      "step": 2500
-    },
-    {
-      "epoch": 1.9633507853403143,
-      "grad_norm": 5.072182655334473,
-      "learning_rate": 2.545811518324607e-05,
-      "loss": 0.1352,
-      "step": 3000
-    },
-    {
-      "epoch": 2.0,
-      "eval_loss": 5.219298839569092,
-      "eval_rouge1": 24.8743,
-      "eval_rouge2": 9.8439,
-      "eval_rougeL": 21.6411,
-      "eval_runtime": 433.0859,
-      "eval_samples_per_second": 5.068,
-      "eval_steps_per_second": 0.508,
-      "step": 3056
-    },
-    {
-      "epoch": 2.2905759162303667,
-      "grad_norm": 6.1271209716796875,
-      "learning_rate": 2.136780104712042e-05,
-      "loss": 0.4727,
-      "step": 3500
-    },
-    {
-      "epoch": 2.6178010471204187,
-      "grad_norm": 5.5732316970825195,
-      "learning_rate": 1.7277486910994763e-05,
-      "loss": 0.6959,
-      "step": 4000
-    },
-    {
-      "epoch": 2.945026178010471,
-      "grad_norm": 5.73837423324585,
-      "learning_rate": 1.3187172774869111e-05,
-      "loss": 0.9382,
-      "step": 4500
-    },
-    {
-      "epoch": 3.0,
-      "eval_loss": 3.44022798538208,
-      "eval_rouge1": 26.0341,
-      "eval_rouge2": 10.6222,
-      "eval_rougeL": 22.7685,
-      "eval_runtime": 417.9997,
-      "eval_samples_per_second": 5.251,
-      "eval_steps_per_second": 0.526,
-      "step": 4584
-    },
-    {
-      "epoch": 3.2722513089005236,
-      "grad_norm": 5.353453159332275,
-      "learning_rate": 9.096858638743457e-06,
-      "loss": 0.9969,
-      "step": 5000
-    },
-    {
-      "epoch": 3.599476439790576,
-      "grad_norm": 5.323329925537109,
-      "learning_rate": 5.006544502617801e-06,
-      "loss": 1.0924,
-      "step": 5500
-    },
-    {
-      "epoch": 3.9267015706806285,
-      "grad_norm": 6.1474175453186035,
-      "learning_rate": 9.162303664921465e-07,
-      "loss": 1.2208,
-      "step": 6000
-    },
-    {
-      "epoch": 4.0,
-      "eval_loss": 3.21282958984375,
-      "eval_rouge1": 26.4112,
-      "eval_rouge2": 10.9605,
-      "eval_rougeL": 23.0258,
-      "eval_runtime": 417.2936,
-      "eval_samples_per_second": 5.26,
-      "eval_steps_per_second": 0.527,
-      "step": 6112
     }
   ],
   "logging_steps": 500,
-  "max_steps": 6112,
+  "max_steps": 4584,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 4,
+  "num_train_epochs": 3,
   "save_steps": 500,
   "stateful_callbacks": {
     "TrainerControl": {
@@ -149,12 +53,12 @@
       "should_evaluate": false,
       "should_log": false,
       "should_save": true,
-      "should_training_stop": true
+      "should_training_stop": false
     },
     "attributes": {}
   }
 },
-  "total_flos": 2.930549272296653e+16,
+  "total_flos": 7327420304523264.0,
   "train_batch_size": 10,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:408bb8b25e44f1ecf1923d9e22dabb6474162771a082c1a83b55da4bd5267fc0
+oid sha256:9454715524d57fa2a7a3e1d17cb27d9ba1a09c66d019637bdd23107b92238a57
 size 6968