duyvt6663 committed
Commit 43bbbd0 (1 parent: 6f21032)

Training in progress, step 400, checkpoint

checkpoint-400/README.md CHANGED
@@ -216,4 +216,4 @@ The following `bitsandbytes` quantization config was used during training:
  ### Framework versions


- - PEFT 0.6.0.dev0
+ - PEFT 0.6.0
checkpoint-400/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:451033f6a193662abde5e691d971ed971f377f383365f702cf335ee0448d986e
+ size 9859800
checkpoint-400/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2765f7ecc3c9853b8cd6f6b5619b326032c6eb89d56c5aca981e370accd23d1b
+ oid sha256:c5c20482c194aa84f2e29a80e18293aac830b8f9e5bad94706a4cb7538aa2a60
  size 42788
checkpoint-400/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:94cf04def08d1529bbdde5d3ecd3383aaa07e7e13d40d9bff68b23a599407791
+ oid sha256:45cf883df8e79e6c7702a28be1273b99ee3b881fdd5537e39d36822a89342a46
  size 14244
checkpoint-400/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1b12f69cba539ef7162900377a9e7c168038d643a5ca8ec3f033fd12ca09a416
+ oid sha256:1188b319a1f64b76a0989754f9071329edf4edd9191e1df28b9cbe671cd49a8c
  size 1064
checkpoint-400/special_tokens_map.json CHANGED
@@ -2,6 +2,6 @@
  "bos_token": "<s>",
  "eos_token": "</s>",
  "pad_token": "<pad>",
- "sep_token": "[SEP]",
+ "sep_token": "<s>",
  "unk_token": "<unk>"
  }
checkpoint-400/tokenizer_config.json CHANGED
@@ -39,7 +39,7 @@
  "eos_token": "</s>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<pad>",
- "sep_token": "[SEP]",
+ "sep_token": "<s>",
  "tokenizer_class": "BloomTokenizer",
  "unk_token": "<unk>"
  }
checkpoint-400/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
  {
- "best_metric": 0.5725014805793762,
- "best_model_checkpoint": "output/checkpoint-100",
- "epoch": 7.547169811320755,
+ "best_metric": 0.5551439523696899,
+ "best_model_checkpoint": "output/checkpoint-400",
+ "epoch": 0.32086633911561213,
  "eval_steps": 50,
  "global_step": 400,
  "is_hyper_param_search": false,
@@ -9,137 +9,137 @@
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 0.02,
- "learning_rate": 1.234567901234568e-06,
- "loss": 0.4437,
+ "epoch": 0.0,
+ "learning_rate": 4.444444444444445e-07,
+ "loss": 1.0919,
  "step": 1
  },
  {
- "epoch": 0.94,
- "learning_rate": 5.679012345679012e-05,
- "loss": 0.455,
+ "epoch": 0.04,
+ "learning_rate": 2e-05,
+ "loss": 0.7661,
  "step": 50
  },
  {
- "epoch": 0.94,
- "eval_accuracy": 0.68,
- "eval_loss": 0.6171127557754517,
- "eval_runtime": 29.0494,
- "eval_samples_per_second": 6.024,
- "eval_steps_per_second": 1.515,
+ "epoch": 0.04,
+ "eval_accuracy": 0.7142857142857143,
+ "eval_loss": 0.6954500079154968,
+ "eval_runtime": 28.4403,
+ "eval_samples_per_second": 6.153,
+ "eval_steps_per_second": 1.547,
  "step": 50
  },
  {
- "epoch": 1.89,
- "learning_rate": 9.59349593495935e-05,
- "loss": 0.4555,
+ "epoch": 0.08,
+ "learning_rate": 4.222222222222222e-05,
+ "loss": 0.6119,
  "step": 100
  },
  {
- "epoch": 1.89,
- "eval_accuracy": 0.7257142857142858,
- "eval_loss": 0.5725014805793762,
- "eval_runtime": 28.7986,
- "eval_samples_per_second": 6.077,
- "eval_steps_per_second": 1.528,
+ "epoch": 0.08,
+ "eval_accuracy": 0.7314285714285714,
+ "eval_loss": 0.5819395780563354,
+ "eval_runtime": 28.3595,
+ "eval_samples_per_second": 6.171,
+ "eval_steps_per_second": 1.552,
  "step": 100
  },
  {
- "epoch": 2.83,
- "learning_rate": 8.265582655826559e-05,
- "loss": 0.455,
+ "epoch": 0.12,
+ "learning_rate": 6.444444444444446e-05,
+ "loss": 0.5869,
  "step": 150
  },
  {
- "epoch": 2.83,
+ "epoch": 0.12,
  "eval_accuracy": 0.7371428571428571,
- "eval_loss": 0.5729417204856873,
- "eval_runtime": 28.9054,
- "eval_samples_per_second": 6.054,
- "eval_steps_per_second": 1.522,
+ "eval_loss": 0.6004045605659485,
+ "eval_runtime": 28.3043,
+ "eval_samples_per_second": 6.183,
+ "eval_steps_per_second": 1.555,
  "step": 150
  },
  {
- "epoch": 3.77,
- "learning_rate": 6.910569105691057e-05,
- "loss": 0.4718,
+ "epoch": 0.16,
+ "learning_rate": 8.666666666666667e-05,
+ "loss": 0.588,
  "step": 200
  },
  {
- "epoch": 3.77,
- "eval_accuracy": 0.6685714285714286,
- "eval_loss": 0.6322054862976074,
- "eval_runtime": 28.8878,
- "eval_samples_per_second": 6.058,
- "eval_steps_per_second": 1.523,
+ "epoch": 0.16,
+ "eval_accuracy": 0.76,
+ "eval_loss": 0.5582989454269409,
+ "eval_runtime": 28.3002,
+ "eval_samples_per_second": 6.184,
+ "eval_steps_per_second": 1.555,
  "step": 200
  },
  {
- "epoch": 4.72,
- "learning_rate": 5.555555555555556e-05,
- "loss": 0.4348,
+ "epoch": 0.2,
+ "learning_rate": 9.80411361410382e-05,
+ "loss": 0.5701,
  "step": 250
  },
  {
- "epoch": 4.72,
- "eval_accuracy": 0.7257142857142858,
- "eval_loss": 0.5733612179756165,
- "eval_runtime": 28.5713,
- "eval_samples_per_second": 6.125,
- "eval_steps_per_second": 1.54,
+ "epoch": 0.2,
+ "eval_accuracy": 0.7142857142857143,
+ "eval_loss": 0.6456161141395569,
+ "eval_runtime": 28.2527,
+ "eval_samples_per_second": 6.194,
+ "eval_steps_per_second": 1.557,
  "step": 250
  },
  {
- "epoch": 5.66,
- "learning_rate": 4.2005420054200545e-05,
- "loss": 0.4353,
+ "epoch": 0.24,
+ "learning_rate": 9.31439764936337e-05,
+ "loss": 0.5652,
  "step": 300
  },
  {
- "epoch": 5.66,
- "eval_accuracy": 0.6628571428571428,
- "eval_loss": 0.6271094679832458,
- "eval_runtime": 26.5252,
- "eval_samples_per_second": 6.597,
- "eval_steps_per_second": 1.659,
+ "epoch": 0.24,
+ "eval_accuracy": 0.72,
+ "eval_loss": 0.6802676916122437,
+ "eval_runtime": 28.3389,
+ "eval_samples_per_second": 6.175,
+ "eval_steps_per_second": 1.553,
  "step": 300
  },
  {
- "epoch": 6.6,
- "learning_rate": 2.8455284552845528e-05,
- "loss": 0.4261,
+ "epoch": 0.28,
+ "learning_rate": 8.82468168462292e-05,
+ "loss": 0.5936,
  "step": 350
  },
  {
- "epoch": 6.6,
- "eval_accuracy": 0.6685714285714286,
- "eval_loss": 0.6181718111038208,
- "eval_runtime": 26.5098,
- "eval_samples_per_second": 6.601,
- "eval_steps_per_second": 1.66,
+ "epoch": 0.28,
+ "eval_accuracy": 0.7542857142857143,
+ "eval_loss": 0.5668751001358032,
+ "eval_runtime": 28.2611,
+ "eval_samples_per_second": 6.192,
+ "eval_steps_per_second": 1.557,
  "step": 350
  },
  {
- "epoch": 7.55,
- "learning_rate": 1.4905149051490516e-05,
- "loss": 0.4213,
+ "epoch": 0.32,
+ "learning_rate": 8.334965719882468e-05,
+ "loss": 0.5961,
  "step": 400
  },
  {
- "epoch": 7.55,
- "eval_accuracy": 0.6628571428571428,
- "eval_loss": 0.6409146785736084,
- "eval_runtime": 26.5165,
- "eval_samples_per_second": 6.6,
- "eval_steps_per_second": 1.659,
+ "epoch": 0.32,
+ "eval_accuracy": 0.76,
+ "eval_loss": 0.5551439523696899,
+ "eval_runtime": 26.2906,
+ "eval_samples_per_second": 6.656,
+ "eval_steps_per_second": 1.674,
  "step": 400
  }
  ],
  "logging_steps": 50,
- "max_steps": 450,
- "num_train_epochs": 9,
+ "max_steps": 1246,
+ "num_train_epochs": 1,
  "save_steps": 50,
- "total_flos": 1.5313215392907264e+17,
+ "total_flos": 1.4812082691207168e+17,
  "trial_name": null,
  "trial_params": null
  }
checkpoint-400/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d91c1e3ec7fc28d5ed2a9d783b7ded0a7affbbd029319e299d9244ac0f3fbc29
- size 4472
+ oid sha256:b25ecbcf37659c2f593e1603dab54c3eaea43862c7f192a7e6df4c4dcc86d20d
+ size 4600
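
The files above form a standard PEFT/LoRA checkpoint written by the Hugging Face Trainer. As a minimal usage sketch (not part of the commit): the adapter saved in `checkpoint-400/adapter_model.safetensors` can be loaded back onto its base model with `peft` 0.6.0. The base model ID and task head below are assumptions; the diff only shows that a `BloomTokenizer` is used and that `eval_accuracy` is tracked. In practice the correct values come from `checkpoint-400/adapter_config.json` (`base_model_name_or_path`, `task_type`), which is not shown in this diff.

```python
# Hedged sketch: "bigscience/bloom-560m" and the sequence-classification head are
# placeholders, not recorded in this commit; read adapter_config.json for the real ones.
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import PeftModel

base = AutoModelForSequenceClassification.from_pretrained("bigscience/bloom-560m")  # assumed base model
tokenizer = AutoTokenizer.from_pretrained("checkpoint-400")   # BloomTokenizer config from this commit
model = PeftModel.from_pretrained(base, "checkpoint-400")     # loads adapter_model.safetensors
model.eval()
```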