duyvt6663 committed
Commit 21c0a1f
1 Parent(s): 963c4b8

Training in progress, step 350, checkpoint

checkpoint-350/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:11dccb17101f08d3586853b7160288aac8f2407582d696a9182469f6a28796ea
+ oid sha256:34526efef9c1e1463e2498e465bea5c7c949ef33cbf3990992d6cdc9fa350ba9
  size 19690328
checkpoint-350/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:73aa84d0838b32d4ef5ec66b61720cf306da28efa364dce66edc4b622fc7f105
+ oid sha256:184ede00fa33ab18a26728807e27506ef2fa52d75c5b5cdd9dda51e298a837a8
  size 38087162
checkpoint-350/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e25cd5428d8cf8cc796dddaeab3d8660264c0b651ba9796a6215b60d75f4fec8
+ oid sha256:da788b7fad54499b203a0e56669106ae69a8e367338d1dd514229c47a803131d
  size 14244
checkpoint-350/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9f5c3d7b009d5c1eca0dae12a87cb98d72259b696afa5d60c34807808d9b8b77
+ oid sha256:33e20c74e5155b37b73deda28273114b119ccbd031db46127c0eec0e00c88c16
  size 1064
checkpoint-350/special_tokens_map.json CHANGED
@@ -2,6 +2,6 @@
   "bos_token": "<s>",
   "eos_token": "</s>",
   "pad_token": "<pad>",
-  "sep_token": "\n\n",
+  "sep_token": "<s>",
   "unk_token": "<unk>"
  }
checkpoint-350/tokenizer_config.json CHANGED
@@ -39,7 +39,7 @@
   "eos_token": "</s>",
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<pad>",
-  "sep_token": "\n\n",
+  "sep_token": "<s>",
   "tokenizer_class": "BloomTokenizer",
   "unk_token": "<unk>"
  }
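
For reference, a minimal sketch (assuming the standard Hugging Face transformers AutoTokenizer API and that the committed checkpoint-350 directory is available locally) of how the sep_token change above would surface once the updated tokenizer files are loaded:

from transformers import AutoTokenizer

# Load the tokenizer from the committed checkpoint directory; the path below
# matches this repository's layout and may differ in other setups.
tokenizer = AutoTokenizer.from_pretrained("checkpoint-350")

# After this commit the separator token is "<s>" (it was "\n\n" before).
print(tokenizer.sep_token)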
checkpoint-350/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
  {
- "best_metric": 0.6207379102706909,
- "best_model_checkpoint": "output/checkpoint-100",
- "epoch": 1.4606155451225873,
+ "best_metric": 0.47385331988334656,
+ "best_model_checkpoint": "output/checkpoint-150",
+ "epoch": 0.28738581545725134,
  "eval_steps": 50,
  "global_step": 350,
  "is_hyper_param_search": false,
@@ -11,120 +11,120 @@
  {
  "epoch": 0.0,
  "learning_rate": 0.0,
- "loss": 0.4694,
+ "loss": 0.8903,
  "step": 1
  },
  {
- "epoch": 0.21,
- "learning_rate": 1.25e-05,
- "loss": 0.3006,
+ "epoch": 0.04,
+ "learning_rate": 1.2222222222222224e-05,
+ "loss": 0.7189,
  "step": 50
  },
  {
- "epoch": 0.21,
- "eval_accuracy": 0.6971428571428572,
- "eval_loss": 0.6772400140762329,
- "eval_runtime": 28.6241,
- "eval_samples_per_second": 6.114,
- "eval_steps_per_second": 1.537,
+ "epoch": 0.04,
+ "eval_accuracy": 0.8228571428571428,
+ "eval_loss": 0.5415178537368774,
+ "eval_runtime": 28.5756,
+ "eval_samples_per_second": 6.124,
+ "eval_steps_per_second": 1.54,
  "step": 50
  },
  {
- "epoch": 0.42,
- "learning_rate": 1.8597560975609757e-05,
- "loss": 0.1715,
+ "epoch": 0.08,
+ "learning_rate": 1.8658536585365855e-05,
+ "loss": 0.5723,
  "step": 100
  },
  {
- "epoch": 0.42,
- "eval_accuracy": 0.8228571428571428,
- "eval_loss": 0.6207379102706909,
- "eval_runtime": 28.9688,
- "eval_samples_per_second": 6.041,
- "eval_steps_per_second": 1.519,
+ "epoch": 0.08,
+ "eval_accuracy": 0.8,
+ "eval_loss": 0.49520450830459595,
+ "eval_runtime": 28.516,
+ "eval_samples_per_second": 6.137,
+ "eval_steps_per_second": 1.543,
  "step": 100
  },
  {
- "epoch": 0.63,
- "learning_rate": 1.554878048780488e-05,
- "loss": 0.1573,
+ "epoch": 0.12,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 0.5122,
  "step": 150
  },
  {
- "epoch": 0.63,
- "eval_accuracy": 0.76,
- "eval_loss": 0.7828966379165649,
- "eval_runtime": 26.1173,
- "eval_samples_per_second": 6.701,
- "eval_steps_per_second": 1.685,
+ "epoch": 0.12,
+ "eval_accuracy": 0.7885714285714286,
+ "eval_loss": 0.47385331988334656,
+ "eval_runtime": 28.2004,
+ "eval_samples_per_second": 6.206,
+ "eval_steps_per_second": 1.56,
  "step": 150
  },
  {
- "epoch": 0.83,
- "learning_rate": 1.25e-05,
- "loss": 0.1264,
+ "epoch": 0.16,
+ "learning_rate": 1.2560975609756098e-05,
+ "loss": 0.4831,
  "step": 200
  },
  {
- "epoch": 0.83,
- "eval_accuracy": 0.8171428571428572,
- "eval_loss": 0.7038331627845764,
- "eval_runtime": 28.7177,
- "eval_samples_per_second": 6.094,
- "eval_steps_per_second": 1.532,
+ "epoch": 0.16,
+ "eval_accuracy": 0.7942857142857143,
+ "eval_loss": 0.49147841334342957,
+ "eval_runtime": 28.6363,
+ "eval_samples_per_second": 6.111,
+ "eval_steps_per_second": 1.537,
  "step": 200
  },
  {
- "epoch": 1.04,
- "learning_rate": 9.451219512195122e-06,
- "loss": 0.1099,
+ "epoch": 0.21,
+ "learning_rate": 9.51219512195122e-06,
+ "loss": 0.5009,
  "step": 250
  },
  {
- "epoch": 1.04,
- "eval_accuracy": 0.72,
- "eval_loss": 1.0845664739608765,
- "eval_runtime": 26.1061,
- "eval_samples_per_second": 6.703,
- "eval_steps_per_second": 1.685,
+ "epoch": 0.21,
+ "eval_accuracy": 0.8,
+ "eval_loss": 0.501244068145752,
+ "eval_runtime": 27.6943,
+ "eval_samples_per_second": 6.319,
+ "eval_steps_per_second": 1.589,
  "step": 250
  },
  {
- "epoch": 1.25,
- "learning_rate": 6.402439024390244e-06,
- "loss": 0.108,
+ "epoch": 0.25,
+ "learning_rate": 6.463414634146342e-06,
+ "loss": 0.4883,
  "step": 300
  },
  {
- "epoch": 1.25,
- "eval_accuracy": 0.7428571428571429,
- "eval_loss": 1.1050784587860107,
- "eval_runtime": 28.843,
- "eval_samples_per_second": 6.067,
- "eval_steps_per_second": 1.526,
+ "epoch": 0.25,
+ "eval_accuracy": 0.7885714285714286,
+ "eval_loss": 0.49123460054397583,
+ "eval_runtime": 28.0924,
+ "eval_samples_per_second": 6.229,
+ "eval_steps_per_second": 1.566,
  "step": 300
  },
  {
- "epoch": 1.46,
- "learning_rate": 3.3536585365853664e-06,
- "loss": 0.0842,
+ "epoch": 0.29,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 0.4979,
  "step": 350
  },
  {
- "epoch": 1.46,
- "eval_accuracy": 0.7771428571428571,
- "eval_loss": 0.9869961738586426,
- "eval_runtime": 26.1189,
- "eval_samples_per_second": 6.7,
- "eval_steps_per_second": 1.685,
+ "epoch": 0.29,
+ "eval_accuracy": 0.8,
+ "eval_loss": 0.4914019703865051,
+ "eval_runtime": 28.7942,
+ "eval_samples_per_second": 6.078,
+ "eval_steps_per_second": 1.528,
  "step": 350
  }
  ],
  "logging_steps": 50,
  "max_steps": 400,
- "num_train_epochs": 2,
+ "num_train_epochs": 1,
  "save_steps": 50,
- "total_flos": 1.2984653338472448e+17,
+ "total_flos": 1.2963525211213824e+17,
  "trial_name": null,
  "trial_params": null
  }
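
Similarly, a minimal sketch (plain Python, standard library only) of reading the updated trainer_state.json to confirm the values recorded for this checkpoint; the printed values are the ones shown in the diff above:

import json

# Read the trainer state committed in this checkpoint.
with open("checkpoint-350/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"])             # 0.47385331988334656
print(state["best_model_checkpoint"])   # "output/checkpoint-150"
print(state["epoch"], state["global_step"])  # 0.28738581545725134, 350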
checkpoint-350/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5027c9d6c3122f21d433b1e532b4aebee600801d0d4723deb6d4671b22b3d2da
+ oid sha256:090099aa07c672025c60437897a7a19f63d8d7bd4284af2562fc632695431956
  size 4600