duyvt6663 committed
Commit b10bd41
1 Parent(s): 61493dc

Training in progress, step 350, checkpoint

checkpoint-350/README.md CHANGED
@@ -216,4 +216,23 @@ The following `bitsandbytes` quantization config was used during training:
216
  ### Framework versions
217
 
218
 
219
  - PEFT 0.6.0.dev0
 
216
  ### Framework versions
217
 
218
 
219
+ - PEFT 0.6.0.dev0
220
+ ## Training procedure
221
+
222
+
223
+ The following `bitsandbytes` quantization config was used during training:
224
+ - quant_method: bitsandbytes
225
+ - load_in_8bit: True
226
+ - load_in_4bit: False
227
+ - llm_int8_threshold: 6.0
228
+ - llm_int8_skip_modules: None
229
+ - llm_int8_enable_fp32_cpu_offload: False
230
+ - llm_int8_has_fp16_weight: False
231
+ - bnb_4bit_quant_type: fp4
232
+ - bnb_4bit_use_double_quant: False
233
+ - bnb_4bit_compute_dtype: float32
234
+
235
+ ### Framework versions
236
+
237
+
238
  - PEFT 0.6.0.dev0
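
The quantization settings listed in the new "Training procedure" section map one-to-one onto a `transformers` `BitsAndBytesConfig`. A minimal sketch of reloading this 8-bit adapter checkpoint follows; the base model id and the model class are assumptions, since the commit does not record the base model or task:

```python
# Sketch only: rebuild the quantization config listed above and attach the
# LoRA adapter from this checkpoint. "base-model-id" and the model class are
# placeholders, not recorded in this commit.
from transformers import AutoModelForSequenceClassification, BitsAndBytesConfig
from peft import PeftModel

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,                       # load_in_8bit: True
    load_in_4bit=False,                      # load_in_4bit: False
    llm_int8_threshold=6.0,                  # llm_int8_threshold: 6.0
    llm_int8_skip_modules=None,              # llm_int8_skip_modules: None
    llm_int8_enable_fp32_cpu_offload=False,  # llm_int8_enable_fp32_cpu_offload: False
    llm_int8_has_fp16_weight=False,          # llm_int8_has_fp16_weight: False
    bnb_4bit_quant_type="fp4",               # 4-bit fields are inert when load_in_8bit=True
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype="float32",
)

base = AutoModelForSequenceClassification.from_pretrained(
    "base-model-id",                         # placeholder
    quantization_config=bnb_config,
)
model = PeftModel.from_pretrained(base, "checkpoint-350")  # reads adapter_model.bin
```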
checkpoint-350/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7555e30aa42692b57cadf4f85c3747eca4196876d5bec8211bf4ebe2255728ae
3
  size 9873829
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:072a1bb016febd596c985d21de00e4d37069e78d3e8fc3a1377a65c7f74d2b88
3
  size 9873829
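
The `.bin`/`.pt` entries in this commit are git-lfs pointer files: `oid sha256:…` is the SHA-256 of the actual file contents and `size` is its length in bytes, so a downloaded file can be checked against its pointer. A small sketch, using the new adapter_model.bin pointer above:

```python
# Sketch: verify a downloaded LFS object against its pointer
# (oid = SHA-256 of the file contents, size = byte length).
import hashlib
import os

def matches_pointer(path: str, oid: str, size: int) -> bool:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == oid and os.path.getsize(path) == size

print(matches_pointer(
    "checkpoint-350/adapter_model.bin",
    "072a1bb016febd596c985d21de00e4d37069e78d3e8fc3a1377a65c7f74d2b88",
    9873829,
))
```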
checkpoint-350/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c13a98ef9b2d51b7ea0b0b2e95cd4e22374b9af80a47bb46d7251336048aa398
3
  size 42788
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:179bdfcba6dd304058e7e6ca1a883eb8da929965e94dcefc374651047da060ef
3
  size 42788
checkpoint-350/trainer_state.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
- "best_metric": 0.622471034526825,
3
- "best_model_checkpoint": "output/checkpoint-300",
4
  "epoch": 0.2805892373985369,
5
  "eval_steps": 50,
6
  "global_step": 350,
@@ -11,112 +11,112 @@
11
  {
12
  "epoch": 0.0,
13
  "learning_rate": 4.444444444444445e-07,
14
- "loss": 0.5558,
15
  "step": 1
16
  },
17
  {
18
  "epoch": 0.04,
19
  "learning_rate": 2e-05,
20
- "loss": 0.5679,
21
  "step": 50
22
  },
23
  {
24
  "epoch": 0.04,
25
- "eval_accuracy": 0.6776119402985075,
26
- "eval_loss": 0.6410807371139526,
27
- "eval_runtime": 61.731,
28
- "eval_samples_per_second": 5.427,
29
- "eval_steps_per_second": 1.361,
30
  "step": 50
31
  },
32
  {
33
  "epoch": 0.08,
34
  "learning_rate": 4.222222222222222e-05,
35
- "loss": 0.6108,
36
  "step": 100
37
  },
38
  {
39
  "epoch": 0.08,
40
- "eval_accuracy": 0.6925373134328359,
41
- "eval_loss": 0.6733226776123047,
42
- "eval_runtime": 56.5238,
43
- "eval_samples_per_second": 5.927,
44
- "eval_steps_per_second": 1.486,
45
  "step": 100
46
  },
47
  {
48
  "epoch": 0.12,
49
  "learning_rate": 6.444444444444446e-05,
50
- "loss": 0.5865,
51
  "step": 150
52
  },
53
  {
54
  "epoch": 0.12,
55
- "eval_accuracy": 0.6865671641791045,
56
- "eval_loss": 0.7122625708580017,
57
- "eval_runtime": 61.5948,
58
- "eval_samples_per_second": 5.439,
59
- "eval_steps_per_second": 1.364,
60
  "step": 150
61
  },
62
  {
63
  "epoch": 0.16,
64
  "learning_rate": 8.666666666666667e-05,
65
- "loss": 0.5752,
66
  "step": 200
67
  },
68
  {
69
  "epoch": 0.16,
70
- "eval_accuracy": 0.6835820895522388,
71
- "eval_loss": 0.6697484254837036,
72
- "eval_runtime": 61.5178,
73
- "eval_samples_per_second": 5.446,
74
- "eval_steps_per_second": 1.365,
75
  "step": 200
76
  },
77
  {
78
  "epoch": 0.2,
79
  "learning_rate": 9.80430528375734e-05,
80
- "loss": 0.5826,
81
  "step": 250
82
  },
83
  {
84
  "epoch": 0.2,
85
- "eval_accuracy": 0.6805970149253732,
86
- "eval_loss": 0.6812018752098083,
87
- "eval_runtime": 61.5335,
88
- "eval_samples_per_second": 5.444,
89
- "eval_steps_per_second": 1.365,
90
  "step": 250
91
  },
92
  {
93
  "epoch": 0.24,
94
  "learning_rate": 9.315068493150684e-05,
95
- "loss": 0.593,
96
  "step": 300
97
  },
98
  {
99
  "epoch": 0.24,
100
- "eval_accuracy": 0.6865671641791045,
101
- "eval_loss": 0.622471034526825,
102
- "eval_runtime": 61.6449,
103
- "eval_samples_per_second": 5.434,
104
- "eval_steps_per_second": 1.363,
105
  "step": 300
106
  },
107
  {
108
  "epoch": 0.28,
109
  "learning_rate": 8.825831702544032e-05,
110
- "loss": 0.5945,
111
  "step": 350
112
  },
113
  {
114
  "epoch": 0.28,
115
- "eval_accuracy": 0.6686567164179105,
116
- "eval_loss": 0.6458173394203186,
117
- "eval_runtime": 61.6102,
118
- "eval_samples_per_second": 5.437,
119
- "eval_steps_per_second": 1.363,
120
  "step": 350
121
  }
122
  ],
 
1
  {
2
+ "best_metric": 0.6430063247680664,
3
+ "best_model_checkpoint": "output/checkpoint-50",
4
  "epoch": 0.2805892373985369,
5
  "eval_steps": 50,
6
  "global_step": 350,
 
11
  {
12
  "epoch": 0.0,
13
  "learning_rate": 4.444444444444445e-07,
14
+ "loss": 0.5542,
15
  "step": 1
16
  },
17
  {
18
  "epoch": 0.04,
19
  "learning_rate": 2e-05,
20
+ "loss": 0.554,
21
  "step": 50
22
  },
23
  {
24
  "epoch": 0.04,
25
+ "eval_accuracy": 0.6626865671641791,
26
+ "eval_loss": 0.6430063247680664,
27
+ "eval_runtime": 61.7692,
28
+ "eval_samples_per_second": 5.423,
29
+ "eval_steps_per_second": 1.36,
30
  "step": 50
31
  },
32
  {
33
  "epoch": 0.08,
34
  "learning_rate": 4.222222222222222e-05,
35
+ "loss": 0.5957,
36
  "step": 100
37
  },
38
  {
39
  "epoch": 0.08,
40
+ "eval_accuracy": 0.6686567164179105,
41
+ "eval_loss": 0.6705919504165649,
42
+ "eval_runtime": 60.4388,
43
+ "eval_samples_per_second": 5.543,
44
+ "eval_steps_per_second": 1.39,
45
  "step": 100
46
  },
47
  {
48
  "epoch": 0.12,
49
  "learning_rate": 6.444444444444446e-05,
50
+ "loss": 0.5668,
51
  "step": 150
52
  },
53
  {
54
  "epoch": 0.12,
55
+ "eval_accuracy": 0.6805970149253732,
56
+ "eval_loss": 0.7224913835525513,
57
+ "eval_runtime": 56.4882,
58
+ "eval_samples_per_second": 5.93,
59
+ "eval_steps_per_second": 1.487,
60
  "step": 150
61
  },
62
  {
63
  "epoch": 0.16,
64
  "learning_rate": 8.666666666666667e-05,
65
+ "loss": 0.5626,
66
  "step": 200
67
  },
68
  {
69
  "epoch": 0.16,
70
+ "eval_accuracy": 0.6746268656716418,
71
+ "eval_loss": 0.7115103006362915,
72
+ "eval_runtime": 56.506,
73
+ "eval_samples_per_second": 5.929,
74
+ "eval_steps_per_second": 1.487,
75
  "step": 200
76
  },
77
  {
78
  "epoch": 0.2,
79
  "learning_rate": 9.80430528375734e-05,
80
+ "loss": 0.5604,
81
  "step": 250
82
  },
83
  {
84
  "epoch": 0.2,
85
+ "eval_accuracy": 0.6746268656716418,
86
+ "eval_loss": 0.700947105884552,
87
+ "eval_runtime": 56.4927,
88
+ "eval_samples_per_second": 5.93,
89
+ "eval_steps_per_second": 1.487,
90
  "step": 250
91
  },
92
  {
93
  "epoch": 0.24,
94
  "learning_rate": 9.315068493150684e-05,
95
+ "loss": 0.5675,
96
  "step": 300
97
  },
98
  {
99
  "epoch": 0.24,
100
+ "eval_accuracy": 0.6597014925373135,
101
+ "eval_loss": 0.662155032157898,
102
+ "eval_runtime": 56.4903,
103
+ "eval_samples_per_second": 5.93,
104
+ "eval_steps_per_second": 1.487,
105
  "step": 300
106
  },
107
  {
108
  "epoch": 0.28,
109
  "learning_rate": 8.825831702544032e-05,
110
+ "loss": 0.5725,
111
  "step": 350
112
  },
113
  {
114
  "epoch": 0.28,
115
+ "eval_accuracy": 0.6567164179104478,
116
+ "eval_loss": 0.6746364831924438,
117
+ "eval_runtime": 56.482,
118
+ "eval_samples_per_second": 5.931,
119
+ "eval_steps_per_second": 1.487,
120
  "step": 350
121
  }
122
  ],
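
The updated `best_metric` / `best_model_checkpoint` fields follow directly from `log_history`: selecting on `eval_loss` with lower-is-better (the Trainer default when `greater_is_better` is unset), the new history has its minimum at step 50, which matches `output/checkpoint-50`. A sketch of recomputing that from the checkpoint file:

```python
# Sketch: recompute the best eval step from trainer_state.json, assuming the
# run selects on eval_loss with lower-is-better, consistent with the updated
# best_metric / best_model_checkpoint fields in this commit.
import json

with open("checkpoint-350/trainer_state.json") as f:
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_loss" in e]
best = min(evals, key=lambda e: e["eval_loss"])
print(best["step"], best["eval_loss"], best["eval_accuracy"])
# -> 50 0.6430063247680664 0.6626865671641791 for the new history above
```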
checkpoint-350/training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e2cf806244b71c753bc4b76d2695973243d02590ea5a24f377343d4ee219e984
3
  size 4472
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87bd30bd4b21b4a0c66f1fd545858ef6e2d33b5984a8d292e79011acc3affd6b
3
  size 4472
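
training_args.bin is opaque in the diff, but in Trainer checkpoints it is typically a torch-pickled `TrainingArguments` object; if needed, it can be inspected roughly as below (an assumption about this particular run; recent PyTorch versions require `weights_only=False` to load non-tensor pickles):

```python
# Sketch: inspect the serialized TrainingArguments in this checkpoint.
# Only load files you trust; this is a full pickle, not a plain tensor file.
import torch

args = torch.load("checkpoint-350/training_args.bin", weights_only=False)
print(args.learning_rate, args.eval_steps, args.per_device_eval_batch_size)
```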