ramdhanfirdaus committed on
Commit
d07a7e7
1 Parent(s): bfe029f

Training in progress, step 600, checkpoint

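For orientation, the values recorded in trainer_state.json below (learning rate 0.0002, evaluation/saving/logging every 100 steps, 3 epochs, checkpoints under ./outputs) are consistent with a transformers Trainer run. The following is a minimal, hypothetical sketch reconstructed from those logged values only; the repo's actual training script is not shown in this commit.

```python
# Hypothetical sketch: TrainingArguments rebuilt from values visible in
# trainer_state.json. Not the author's actual training script.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="./outputs",       # matches "best_model_checkpoint": "./outputs/checkpoint-600"
    num_train_epochs=3,           # "num_train_epochs": 3
    learning_rate=2e-4,           # "learning_rate": 0.0002 in every log entry
    evaluation_strategy="steps",  # spelled eval_strategy in newer transformers releases
    eval_steps=100,               # "eval_steps": 100
    save_steps=100,               # "save_steps": 100
    logging_steps=100,            # "logging_steps": 100
)
print(args)
```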
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:69041f32214c8333f95989146c92f75a08e5cbc350a6de5b44b932abdd1f9351
+oid sha256:4176108829b074af49b2cbf5e1b5db56126615b9a9ef0d10ffd44d24ae921005
 size 9444296
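These pointer files hold only Git LFS metadata; the oid is the SHA-256 hash of the binary actually stored in LFS. A small standard-library sketch (local path illustrative) for checking a downloaded file against the new pointer above:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return its hex SHA-256 (the value after 'oid sha256:' in an LFS pointer)."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Illustrative local path; compare against the "+oid" line in the diff above.
expected = "4176108829b074af49b2cbf5e1b5db56126615b9a9ef0d10ffd44d24ae921005"
print(sha256_of("last-checkpoint/adapter_model.safetensors") == expected)
```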
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7259a25650860d8bb8215f9796edd94667c2fd2ff8f4af3912ab231ded3d3fc3
+oid sha256:47e4e1fdc77242198cba37756e1e5633076a59ce7633bdd049c8ccef117014bd
 size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67bc3393a9340b81770a2f56f26e7c6954acce785b42d5c7c1a980acfd38e481
+oid sha256:c5e1dc5f769c4cbaa0ba676908c39cfb7cd49352afc3941d90d0685280366309
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3d8d6be7898f87772ccbc5c732e900fe63a643c4595ce6af3d6bc6f811ba4b65
+oid sha256:9c1a2ac1c11599601eeac95feb1dbfd49ec5c625e61dcce18b3f094491f9cf2d
 size 627
last-checkpoint/tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5d81d9b2c9d9db79ea02c00d4c7e79bb77a718dc57ab01f5f3b1cd6649f08993
-size 14500569
+oid sha256:17a208233d2ee8d8c83b23bc214df737c44806a1919f444e89b31e586cd956ba
+size 14500471
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 2.341019868850708,
-  "best_model_checkpoint": "./outputs/checkpoint-1200",
-  "epoch": 0.8743169398907104,
+  "best_metric": 2.475057601928711,
+  "best_model_checkpoint": "./outputs/checkpoint-600",
+  "epoch": 0.4371584699453552,
   "eval_steps": 100,
-  "global_step": 1200,
+  "global_step": 600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -11,177 +11,93 @@
     {
       "epoch": 0.07,
       "learning_rate": 0.0002,
-      "loss": 2.7399,
+      "loss": 2.7401,
       "step": 100
     },
     {
       "epoch": 0.07,
-      "eval_loss": 2.6418099403381348,
-      "eval_runtime": 347.6157,
-      "eval_samples_per_second": 18.049,
-      "eval_steps_per_second": 2.258,
+      "eval_loss": 2.6424810886383057,
+      "eval_runtime": 206.4108,
+      "eval_samples_per_second": 30.396,
+      "eval_steps_per_second": 3.803,
       "step": 100
     },
     {
       "epoch": 0.15,
       "learning_rate": 0.0002,
-      "loss": 2.6052,
+      "loss": 2.6061,
       "step": 200
     },
     {
       "epoch": 0.15,
-      "eval_loss": 2.5918312072753906,
-      "eval_runtime": 333.731,
-      "eval_samples_per_second": 18.8,
-      "eval_steps_per_second": 2.352,
+      "eval_loss": 2.5929574966430664,
+      "eval_runtime": 206.2177,
+      "eval_samples_per_second": 30.424,
+      "eval_steps_per_second": 3.807,
       "step": 200
     },
     {
       "epoch": 0.22,
       "learning_rate": 0.0002,
-      "loss": 2.5622,
+      "loss": 2.5643,
       "step": 300
     },
     {
       "epoch": 0.22,
-      "eval_loss": 2.551574468612671,
-      "eval_runtime": 204.9306,
-      "eval_samples_per_second": 30.615,
-      "eval_steps_per_second": 3.831,
+      "eval_loss": 2.5578970909118652,
+      "eval_runtime": 206.509,
+      "eval_samples_per_second": 30.381,
+      "eval_steps_per_second": 3.801,
       "step": 300
     },
     {
       "epoch": 0.29,
       "learning_rate": 0.0002,
-      "loss": 2.5366,
+      "loss": 2.5383,
       "step": 400
     },
     {
       "epoch": 0.29,
-      "eval_loss": 2.517575263977051,
-      "eval_runtime": 204.3925,
-      "eval_samples_per_second": 30.696,
-      "eval_steps_per_second": 3.841,
+      "eval_loss": 2.5257716178894043,
+      "eval_runtime": 206.1349,
+      "eval_samples_per_second": 30.436,
+      "eval_steps_per_second": 3.808,
       "step": 400
     },
     {
       "epoch": 0.36,
       "learning_rate": 0.0002,
-      "loss": 2.4946,
+      "loss": 2.4959,
       "step": 500
     },
     {
       "epoch": 0.36,
-      "eval_loss": 2.4924821853637695,
-      "eval_runtime": 204.4035,
-      "eval_samples_per_second": 30.694,
-      "eval_steps_per_second": 3.84,
+      "eval_loss": 2.5014102458953857,
+      "eval_runtime": 282.6461,
+      "eval_samples_per_second": 22.197,
+      "eval_steps_per_second": 2.777,
       "step": 500
     },
     {
       "epoch": 0.44,
       "learning_rate": 0.0002,
-      "loss": 2.4686,
+      "loss": 2.4693,
       "step": 600
     },
     {
       "epoch": 0.44,
-      "eval_loss": 2.4666266441345215,
-      "eval_runtime": 207.3453,
-      "eval_samples_per_second": 30.259,
-      "eval_steps_per_second": 3.786,
+      "eval_loss": 2.475057601928711,
+      "eval_runtime": 205.7759,
+      "eval_samples_per_second": 30.489,
+      "eval_steps_per_second": 3.815,
       "step": 600
-    },
-    {
-      "epoch": 0.51,
-      "learning_rate": 0.0002,
-      "loss": 2.4503,
-      "step": 700
-    },
-    {
-      "epoch": 0.51,
-      "eval_loss": 2.4440107345581055,
-      "eval_runtime": 205.5485,
-      "eval_samples_per_second": 30.523,
-      "eval_steps_per_second": 3.819,
-      "step": 700
-    },
-    {
-      "epoch": 0.58,
-      "learning_rate": 0.0002,
-      "loss": 2.4271,
-      "step": 800
-    },
-    {
-      "epoch": 0.58,
-      "eval_loss": 2.4231719970703125,
-      "eval_runtime": 204.3763,
-      "eval_samples_per_second": 30.698,
-      "eval_steps_per_second": 3.841,
-      "step": 800
-    },
-    {
-      "epoch": 0.66,
-      "learning_rate": 0.0002,
-      "loss": 2.3911,
-      "step": 900
-    },
-    {
-      "epoch": 0.66,
-      "eval_loss": 2.401764154434204,
-      "eval_runtime": 204.7042,
-      "eval_samples_per_second": 30.649,
-      "eval_steps_per_second": 3.835,
-      "step": 900
-    },
-    {
-      "epoch": 0.73,
-      "learning_rate": 0.0002,
-      "loss": 2.3881,
-      "step": 1000
-    },
-    {
-      "epoch": 0.73,
-      "eval_loss": 2.3822004795074463,
-      "eval_runtime": 204.6048,
-      "eval_samples_per_second": 30.664,
-      "eval_steps_per_second": 3.837,
-      "step": 1000
-    },
-    {
-      "epoch": 0.8,
-      "learning_rate": 0.0002,
-      "loss": 2.3811,
-      "step": 1100
-    },
-    {
-      "epoch": 0.8,
-      "eval_loss": 2.360255479812622,
-      "eval_runtime": 283.1616,
-      "eval_samples_per_second": 22.157,
-      "eval_steps_per_second": 2.772,
-      "step": 1100
-    },
-    {
-      "epoch": 0.87,
-      "learning_rate": 0.0002,
-      "loss": 2.3378,
-      "step": 1200
-    },
-    {
-      "epoch": 0.87,
-      "eval_loss": 2.341019868850708,
-      "eval_runtime": 204.7618,
-      "eval_samples_per_second": 30.64,
-      "eval_steps_per_second": 3.834,
-      "step": 1200
     }
   ],
   "logging_steps": 100,
   "max_steps": 4116,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 3.500881324867584e+16,
+  "total_flos": 1.752346796949504e+16,
   "trial_name": null,
   "trial_params": null
 }
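The entries in the hunk above live in the trainer state's log_history list (the surrounding key sits just outside the diff context). A standard-library sketch for inspecting the logged metrics of this checkpoint, assuming the file is read from the repo's last-checkpoint/ directory:

```python
import json

# Read the checkpoint's trainer state and list train/eval loss per logged step.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print("best_metric:", state["best_metric"])  # 2.475057601928711 at step 600 after this commit
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"step {entry['step']:>4}  eval_loss  {entry['eval_loss']:.4f}")
    elif "loss" in entry:
        print(f"step {entry['step']:>4}  train_loss {entry['loss']:.4f}")
```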
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:faed5845c6fa602a9a75d6d7a3c4d37017580998e72f3835a07c1c95f579635b
+oid sha256:6a0ffd8c2b139f27950886c21bfa338a00634210331df117dcff8cd3df7ce8ad
 size 4219