duyvt6663 committed
Commit 7e3b1cc
1 Parent(s): 8507bf2

Training in progress, step 200, checkpoint

checkpoint-200/README.md CHANGED
@@ -216,4 +216,23 @@ The following `bitsandbytes` quantization config was used during training:
 ### Framework versions
 
 
+- PEFT 0.6.0
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: True
+- load_in_4bit: False
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float32
+
+### Framework versions
+
+
 - PEFT 0.6.0
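The quantization settings recorded in the README above map onto `transformers.BitsAndBytesConfig`. Below is a minimal sketch of recreating that config and attaching the checkpoint's PEFT adapter; the base-model name and the `AutoModelForSequenceClassification` head are assumptions, since this diff does not name the base model or task (the `quant_method: bitsandbytes` field is implied by the config class itself and is not a constructor argument).

```python
from transformers import AutoModelForSequenceClassification, BitsAndBytesConfig
from peft import PeftModel

# Mirror the quantization config listed in checkpoint-200/README.md
# (8-bit loading; the bnb_4bit_* fields are the unused defaults).
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype="float32",
)

# "base-model-name" is a placeholder: the diff does not identify the base model.
base = AutoModelForSequenceClassification.from_pretrained(
    "base-model-name",
    quantization_config=bnb_config,
    device_map="auto",
)

# Attach the PEFT 0.6.0 adapter saved in this checkpoint directory.
model = PeftModel.from_pretrained(base, "checkpoint-200")
```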
checkpoint-200/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d17c363cd05be312b15770b466c1338fec2b141ac20ae3fe7485fd24b8318d8c
+oid sha256:dff98b7eb0d073a715b62e6df5759c50443fc67bdcecc94f7ecfdc034dcdba12
 size 19690328
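Each binary file in this commit is stored through Git LFS, so the diff only touches the pointer: the blob's SHA-256 (`oid`) and its size in bytes. A small sketch for checking a locally downloaded copy of `adapter_model.safetensors` against the new pointer, assuming the file has already been pulled:

```python
import hashlib
import os

# Local copy of the file referenced by the LFS pointer above (assumed path).
path = "checkpoint-200/adapter_model.safetensors"

# Hash the blob in chunks and compare with the oid and size in the pointer.
digest = hashlib.sha256()
with open(path, "rb") as fh:
    for chunk in iter(lambda: fh.read(1 << 20), b""):
        digest.update(chunk)

print("oid sha256:" + digest.hexdigest())  # compare with the "+oid" line above
print("size", os.path.getsize(path))       # expected: 19690328
```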
checkpoint-200/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0962560544030656e61dc01e1d9860980dbf6790ba53119ffc396e82401a5857
+oid sha256:39ceec45f3a10d8fa37d8a0c5bc8ee99ffac785fa4c0f43e5f969dedd176fad4
 size 38087098
checkpoint-200/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6b698a3ffed4744d0125b17418b2a3d7497f5d9d4c57f4d5e997a2cb6ffd50f2
+oid sha256:eb1952519fd1553759d558392a47f35c529236b7568c03d6c6851620ee48a413
 size 14244
checkpoint-200/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f5b43ac6655c6879b737216c959393eb2c7f9aec4594023f8e587c864aeb04e5
+oid sha256:1bc034746d61c122e35ebcec68ba024674bd5039ad1df2f10fb31340e9444bca
 size 1064
checkpoint-200/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 0.4239814877510071,
-  "best_model_checkpoint": "output/checkpoint-50",
-  "epoch": 2.888086642599278,
+  "best_metric": 0.4308854341506958,
+  "best_model_checkpoint": "output/checkpoint-100",
+  "epoch": 0.5414551607445008,
   "eval_steps": 50,
   "global_step": 200,
   "is_hyper_param_search": false,
@@ -9,77 +9,77 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.01,
+      "epoch": 0.0,
       "learning_rate": 0.0,
-      "loss": 0.3991,
+      "loss": 0.9268,
       "step": 1
     },
     {
-      "epoch": 0.72,
-      "learning_rate": 1.8902439024390243e-05,
-      "loss": 0.3172,
+      "epoch": 0.14,
+      "learning_rate": 6.111111111111112e-06,
+      "loss": 0.5897,
       "step": 50
     },
     {
-      "epoch": 0.72,
-      "eval_accuracy": 0.84,
-      "eval_loss": 0.4239814877510071,
-      "eval_runtime": 28.6347,
-      "eval_samples_per_second": 6.111,
-      "eval_steps_per_second": 1.537,
+      "epoch": 0.14,
+      "eval_accuracy": 0.8628571428571429,
+      "eval_loss": 0.4395545721054077,
+      "eval_runtime": 26.2003,
+      "eval_samples_per_second": 6.679,
+      "eval_steps_per_second": 1.679,
       "step": 50
     },
     {
-      "epoch": 1.44,
-      "learning_rate": 1.2804878048780488e-05,
-      "loss": 0.1954,
+      "epoch": 0.27,
+      "learning_rate": 1.3055555555555557e-05,
+      "loss": 0.4516,
       "step": 100
     },
     {
-      "epoch": 1.44,
-      "eval_accuracy": 0.8514285714285714,
-      "eval_loss": 0.5004297494888306,
-      "eval_runtime": 28.6111,
-      "eval_samples_per_second": 6.117,
-      "eval_steps_per_second": 1.538,
+      "epoch": 0.27,
+      "eval_accuracy": 0.8457142857142858,
+      "eval_loss": 0.4308854341506958,
+      "eval_runtime": 28.5089,
+      "eval_samples_per_second": 6.138,
+      "eval_steps_per_second": 1.543,
       "step": 100
     },
     {
-      "epoch": 2.17,
-      "learning_rate": 6.707317073170733e-06,
-      "loss": 0.1457,
+      "epoch": 0.41,
+      "learning_rate": 2e-05,
+      "loss": 0.4086,
       "step": 150
     },
     {
-      "epoch": 2.17,
-      "eval_accuracy": 0.8628571428571429,
-      "eval_loss": 0.5306527614593506,
-      "eval_runtime": 28.6321,
-      "eval_samples_per_second": 6.112,
-      "eval_steps_per_second": 1.537,
+      "epoch": 0.41,
+      "eval_accuracy": 0.8114285714285714,
+      "eval_loss": 0.45158952474594116,
+      "eval_runtime": 28.4711,
+      "eval_samples_per_second": 6.147,
+      "eval_steps_per_second": 1.545,
       "step": 150
     },
     {
-      "epoch": 2.89,
-      "learning_rate": 6.097560975609757e-07,
-      "loss": 0.1312,
+      "epoch": 0.54,
+      "learning_rate": 1.8475609756097565e-05,
+      "loss": 0.4227,
       "step": 200
     },
     {
-      "epoch": 2.89,
-      "eval_accuracy": 0.8514285714285714,
-      "eval_loss": 0.6020106077194214,
-      "eval_runtime": 28.6498,
-      "eval_samples_per_second": 6.108,
-      "eval_steps_per_second": 1.536,
+      "epoch": 0.54,
+      "eval_accuracy": 0.8114285714285714,
+      "eval_loss": 0.4496140480041504,
+      "eval_runtime": 26.1937,
+      "eval_samples_per_second": 6.681,
+      "eval_steps_per_second": 1.68,
       "step": 200
     }
   ],
   "logging_steps": 50,
-  "max_steps": 200,
+  "max_steps": 800,
   "num_train_epochs": 3,
   "save_steps": 50,
-  "total_flos": 7.64381811965952e+16,
+  "total_flos": 7.441466063671296e+16,
   "trial_name": null,
   "trial_params": null
 }
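The trainer state above is plain JSON, so the new best checkpoint and the latest evaluation metrics can be read back with the standard library; a small sketch, assuming the checkpoint directory sits at the repository root:

```python
import json
from pathlib import Path

# Load the state saved with this checkpoint (path assumed relative to the repo root).
state = json.loads(Path("checkpoint-200/trainer_state.json").read_text())

# Best checkpoint so far according to the tracked metric.
print(state["best_model_checkpoint"], state["best_metric"])

# Last logged evaluation entry (step 200 in this commit).
evals = [entry for entry in state["log_history"] if "eval_loss" in entry]
print(evals[-1]["step"], evals[-1]["eval_loss"], evals[-1]["eval_accuracy"])
```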
checkpoint-200/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d6e1226be91d4f2bcdcde81b49645b5882b397e67c3f85a88d2c0a49245bf8c5
+oid sha256:eab90039203ebde7fc5537527df9e43bcc8971062162dee256a3fd06a20fcd44
 size 4600