duyvt6663 committed on
Commit ac7f951
1 Parent(s): 4c66440

Training in progress, step 250, checkpoint

checkpoint-250/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9c636431f8bb1916e4882abc33c6ee51adb973f16453eb8f363bdabc0e3f3e97
+ oid sha256:6e54b33e013e1b3cd063430a918263c46d2858ab73115a2fcb3b5683b37fd44d
  size 19690328
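The safetensors file above is a PEFT adapter (about 19.7 MB) rather than full model weights. The sketch below shows one way such an adapter is typically loaded; the base model id (`bigscience/bloom-560m`) and the sequence-classification head (guessed from the `eval_accuracy` entries in `trainer_state.json` further down) are assumptions, not something this commit states.

```python
# Minimal sketch, not the author's script: load the adapter in this checkpoint
# on top of a BLOOM-family base model. The base model id, num_labels, and the
# classification head are assumptions; the local path assumes the
# checkpoint-250 folder has been downloaded.
from transformers import AutoModelForSequenceClassification
from peft import PeftModel

base = AutoModelForSequenceClassification.from_pretrained(
    "bigscience/bloom-560m",  # assumed base model, not named in this diff
    num_labels=2,             # assumed label count
)
model = PeftModel.from_pretrained(base, "checkpoint-250")  # reads adapter_model.safetensors
model.eval()
```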
checkpoint-250/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:29331ba174ff69b6f50a5639e62c3d0c55abf12a38acde12a14c8d634621b6a0
+ oid sha256:f335ce02bf4bea0621e8bd9644a82247c47d4cb5de566eef566e50093c9ace4f
  size 38087098
checkpoint-250/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:38842789eae2d7dcc97932d202488f929c55a07a37b3753309f61c35397549a6
+ oid sha256:fc8b382f7cf010ef688c530c5ae0227323d217404c3512a00e23770bb748bd02
  size 14244
checkpoint-250/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7bfbf44261ca80de42866b3a89abd8900d1e1d93dc66cc561b4c9937f306c29a
+ oid sha256:881770bbac0685d544d8ade73ef836ad9af0e0cc408e58649697975bffefe748
  size 1064
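optimizer.pt, rng_state.pth, and scheduler.pt are the bookkeeping the transformers Trainer needs to resume this run exactly where it stopped (via `resume_from_checkpoint`). A hedged sketch for inspecting them locally:

```python
# Minimal inspection sketch (local paths assumed). These files are Trainer
# state, not model weights; weights_only=False trusts the pickle, so only
# load files you trust.
import torch

optimizer_state = torch.load("checkpoint-250/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load("checkpoint-250/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("checkpoint-250/rng_state.pth", map_location="cpu", weights_only=False)

print(list(optimizer_state.keys()))  # optimizer param groups and per-parameter state
print(scheduler_state)               # learning-rate scheduler state dict
print(list(rng_state.keys()))        # RNG snapshots used to reproduce data shuffling
```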
checkpoint-250/special_tokens_map.json CHANGED
@@ -2,6 +2,6 @@
  "bos_token": "<s>",
  "eos_token": "</s>",
  "pad_token": "<pad>",
- "sep_token": "\n\n",
+ "sep_token": "<s>",
  "unk_token": "<unk>"
  }
checkpoint-250/tokenizer_config.json CHANGED
@@ -39,7 +39,7 @@
  "eos_token": "</s>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<pad>",
- "sep_token": "\n\n",
+ "sep_token": "<s>",
  "tokenizer_class": "BloomTokenizer",
  "unk_token": "<unk>"
  }
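The only substantive change in both tokenizer files is the separator token, which moves from the literal two-newline string "\n\n" to the "<s>" special token. A minimal sketch of how such an override is typically produced; the base tokenizer id is a placeholder, not taken from this commit:

```python
# Minimal sketch, assuming the tokenizer files were regenerated by overriding
# sep_token before saving; "bigscience/bloom-560m" is an assumed base id.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
tok.add_special_tokens({"sep_token": "<s>"})  # was "\n\n" in the previous checkpoint
tok.save_pretrained("checkpoint-250")         # rewrites special_tokens_map.json and tokenizer_config.json
```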
checkpoint-250/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
  {
- "best_metric": 0.6207379102706909,
- "best_model_checkpoint": "output/checkpoint-100",
- "epoch": 1.0432968179447053,
+ "best_metric": 0.47385331988334656,
+ "best_model_checkpoint": "output/checkpoint-150",
+ "epoch": 0.20527558246946526,
  "eval_steps": 50,
  "global_step": 250,
  "is_hyper_param_search": false,
@@ -11,90 +11,90 @@
  {
  "epoch": 0.0,
  "learning_rate": 0.0,
- "loss": 0.4694,
+ "loss": 0.8903,
  "step": 1
  },
  {
- "epoch": 0.21,
- "learning_rate": 1.25e-05,
- "loss": 0.3006,
+ "epoch": 0.04,
+ "learning_rate": 1.2222222222222224e-05,
+ "loss": 0.7189,
  "step": 50
  },
  {
- "epoch": 0.21,
- "eval_accuracy": 0.6971428571428572,
- "eval_loss": 0.6772400140762329,
- "eval_runtime": 28.6241,
- "eval_samples_per_second": 6.114,
- "eval_steps_per_second": 1.537,
+ "epoch": 0.04,
+ "eval_accuracy": 0.8228571428571428,
+ "eval_loss": 0.5415178537368774,
+ "eval_runtime": 28.5756,
+ "eval_samples_per_second": 6.124,
+ "eval_steps_per_second": 1.54,
  "step": 50
  },
  {
- "epoch": 0.42,
- "learning_rate": 1.8597560975609757e-05,
- "loss": 0.1715,
+ "epoch": 0.08,
+ "learning_rate": 1.8658536585365855e-05,
+ "loss": 0.5723,
  "step": 100
  },
  {
- "epoch": 0.42,
- "eval_accuracy": 0.8228571428571428,
- "eval_loss": 0.6207379102706909,
- "eval_runtime": 28.9688,
- "eval_samples_per_second": 6.041,
- "eval_steps_per_second": 1.519,
+ "epoch": 0.08,
+ "eval_accuracy": 0.8,
+ "eval_loss": 0.49520450830459595,
+ "eval_runtime": 28.516,
+ "eval_samples_per_second": 6.137,
+ "eval_steps_per_second": 1.543,
  "step": 100
  },
  {
- "epoch": 0.63,
- "learning_rate": 1.554878048780488e-05,
- "loss": 0.1573,
+ "epoch": 0.12,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 0.5122,
  "step": 150
  },
  {
- "epoch": 0.63,
- "eval_accuracy": 0.76,
- "eval_loss": 0.7828966379165649,
- "eval_runtime": 26.1173,
- "eval_samples_per_second": 6.701,
- "eval_steps_per_second": 1.685,
+ "epoch": 0.12,
+ "eval_accuracy": 0.7885714285714286,
+ "eval_loss": 0.47385331988334656,
+ "eval_runtime": 28.2004,
+ "eval_samples_per_second": 6.206,
+ "eval_steps_per_second": 1.56,
  "step": 150
  },
  {
- "epoch": 0.83,
- "learning_rate": 1.25e-05,
- "loss": 0.1264,
+ "epoch": 0.16,
+ "learning_rate": 1.2560975609756098e-05,
+ "loss": 0.4831,
  "step": 200
  },
  {
- "epoch": 0.83,
- "eval_accuracy": 0.8171428571428572,
- "eval_loss": 0.7038331627845764,
- "eval_runtime": 28.7177,
- "eval_samples_per_second": 6.094,
- "eval_steps_per_second": 1.532,
+ "epoch": 0.16,
+ "eval_accuracy": 0.7942857142857143,
+ "eval_loss": 0.49147841334342957,
+ "eval_runtime": 28.6363,
+ "eval_samples_per_second": 6.111,
+ "eval_steps_per_second": 1.537,
  "step": 200
  },
  {
- "epoch": 1.04,
- "learning_rate": 9.451219512195122e-06,
- "loss": 0.1099,
+ "epoch": 0.21,
+ "learning_rate": 9.51219512195122e-06,
+ "loss": 0.5009,
  "step": 250
  },
  {
- "epoch": 1.04,
- "eval_accuracy": 0.72,
- "eval_loss": 1.0845664739608765,
- "eval_runtime": 26.1061,
- "eval_samples_per_second": 6.703,
- "eval_steps_per_second": 1.685,
+ "epoch": 0.21,
+ "eval_accuracy": 0.8,
+ "eval_loss": 0.501244068145752,
+ "eval_runtime": 27.6943,
+ "eval_samples_per_second": 6.319,
+ "eval_steps_per_second": 1.589,
  "step": 250
  }
  ],
  "logging_steps": 50,
  "max_steps": 400,
- "num_train_epochs": 2,
+ "num_train_epochs": 1,
  "save_steps": 50,
- "total_flos": 9.277053010132992e+16,
+ "total_flos": 9.260539919806464e+16,
  "trial_name": null,
  "trial_params": null
  }
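trainer_state.json now records a best eval_loss of about 0.474 at output/checkpoint-150 and much slower epoch progress (0.21 epochs at step 250, versus 1.04 before), which suggests the run was restarted with a larger dataset or a different batch configuration. A small sketch for reading the logged curve locally:

```python
# Minimal sketch (local path assumed): print the evaluation history recorded
# in this checkpoint's trainer_state.json.
import json

with open("checkpoint-250/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"], state["best_model_checkpoint"])
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(entry["step"], entry["eval_loss"], entry.get("eval_accuracy"))
```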
checkpoint-250/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5027c9d6c3122f21d433b1e532b4aebee600801d0d4723deb6d4671b22b3d2da
+ oid sha256:090099aa07c672025c60437897a7a19f63d8d7bd4284af2562fc632695431956
  size 4600
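training_args.bin is the pickled TrainingArguments for the run; only its hash changed here (the size is identical), so some hyperparameter differs from the previous upload. A hedged way to inspect it locally:

```python
# Minimal sketch (local path assumed): training_args.bin is a pickled
# TrainingArguments object, so loading it executes the pickle -- only do
# this for files you trust.
import torch

args = torch.load("checkpoint-250/training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```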