duyvt6663 committed on
Commit 2a6708c
1 Parent(s): 0b7fd2a

Training in progress, step 300, checkpoint

checkpoint-300/README.md CHANGED
@@ -216,23 +216,4 @@ The following `bitsandbytes` quantization config was used during training:
  ### Framework versions


- - PEFT 0.6.0.dev0
- ## Training procedure
-
-
- The following `bitsandbytes` quantization config was used during training:
- - quant_method: bitsandbytes
- - load_in_8bit: True
- - load_in_4bit: False
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: fp4
- - bnb_4bit_use_double_quant: False
- - bnb_4bit_compute_dtype: float32
-
- ### Framework versions
-
-
  - PEFT 0.6.0.dev0
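
The `bitsandbytes` list removed above duplicated the settings that remain earlier in the README. For reference, those settings map onto a `transformers` `BitsAndBytesConfig`; a minimal sketch, not taken from this repository (8-bit loading is what is active here, so the fp4 fields are present but unused):

```python
import torch
from transformers import BitsAndBytesConfig

# Mirrors the quantization section of the README: load in 8-bit,
# keep the 4-bit (fp4) options at their listed but inactive values.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)
print(bnb_config)
```

The config would be passed as `quantization_config=bnb_config` to `from_pretrained`; the base model is not named in this diff, so no load call is shown.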
checkpoint-300/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:55654e42312c847791cfb6f74067eb2f8d6c5d15f9cf2f21db438a1a9c6da39d
+ oid sha256:ede70a090e3eac28d20a8c6f0960c1bda226099c68d78c86811b33a5be2d94f1
  size 9873829
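
The `oid` in a Git LFS pointer is the SHA-256 of the stored file, so a downloaded adapter can be checked against the new pointer above. A minimal sketch, assuming the file has already been fetched into a local `checkpoint-300/` directory:

```python
import hashlib

# SHA-256 recorded in the new LFS pointer for adapter_model.bin (above).
expected = "ede70a090e3eac28d20a8c6f0960c1bda226099c68d78c86811b33a5be2d94f1"

h = hashlib.sha256()
with open("checkpoint-300/adapter_model.bin", "rb") as f:
    # Hash in 1 MiB chunks to avoid loading the whole file into memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == expected, "adapter_model.bin does not match the LFS pointer"
```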
checkpoint-300/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0a57a3c708b15215829b666986c530da7e8be0ca4a2f5ce45cf7fcd948b5d435
+ oid sha256:dfa8585ce99626cb0e6a2f4614337bbefb86a58dec22d8be09ff37cb9a1bb7dc
  size 42788
checkpoint-300/trainer_state.json CHANGED
@@ -1,6 +1,6 @@
  {
- "best_metric": 0.622471034526825,
- "best_model_checkpoint": "output/checkpoint-300",
+ "best_metric": 0.6430063247680664,
+ "best_model_checkpoint": "output/checkpoint-50",
  "epoch": 0.24050506062731736,
  "eval_steps": 50,
  "global_step": 300,
@@ -11,97 +11,97 @@
  {
  "epoch": 0.0,
  "learning_rate": 4.444444444444445e-07,
- "loss": 0.5558,
+ "loss": 0.5542,
  "step": 1
  },
  {
  "epoch": 0.04,
  "learning_rate": 2e-05,
- "loss": 0.5679,
+ "loss": 0.554,
  "step": 50
  },
  {
  "epoch": 0.04,
- "eval_accuracy": 0.6776119402985075,
- "eval_loss": 0.6410807371139526,
- "eval_runtime": 61.731,
- "eval_samples_per_second": 5.427,
- "eval_steps_per_second": 1.361,
+ "eval_accuracy": 0.6626865671641791,
+ "eval_loss": 0.6430063247680664,
+ "eval_runtime": 61.7692,
+ "eval_samples_per_second": 5.423,
+ "eval_steps_per_second": 1.36,
  "step": 50
  },
  {
  "epoch": 0.08,
  "learning_rate": 4.222222222222222e-05,
- "loss": 0.6108,
+ "loss": 0.5957,
  "step": 100
  },
  {
  "epoch": 0.08,
- "eval_accuracy": 0.6925373134328359,
- "eval_loss": 0.6733226776123047,
- "eval_runtime": 56.5238,
- "eval_samples_per_second": 5.927,
- "eval_steps_per_second": 1.486,
+ "eval_accuracy": 0.6686567164179105,
+ "eval_loss": 0.6705919504165649,
+ "eval_runtime": 60.4388,
+ "eval_samples_per_second": 5.543,
+ "eval_steps_per_second": 1.39,
  "step": 100
  },
  {
  "epoch": 0.12,
  "learning_rate": 6.444444444444446e-05,
- "loss": 0.5865,
+ "loss": 0.5668,
  "step": 150
  },
  {
  "epoch": 0.12,
- "eval_accuracy": 0.6865671641791045,
- "eval_loss": 0.7122625708580017,
- "eval_runtime": 61.5948,
- "eval_samples_per_second": 5.439,
- "eval_steps_per_second": 1.364,
+ "eval_accuracy": 0.6805970149253732,
+ "eval_loss": 0.7224913835525513,
+ "eval_runtime": 56.4882,
+ "eval_samples_per_second": 5.93,
+ "eval_steps_per_second": 1.487,
  "step": 150
  },
  {
  "epoch": 0.16,
  "learning_rate": 8.666666666666667e-05,
- "loss": 0.5752,
+ "loss": 0.5626,
  "step": 200
  },
  {
  "epoch": 0.16,
- "eval_accuracy": 0.6835820895522388,
- "eval_loss": 0.6697484254837036,
- "eval_runtime": 61.5178,
- "eval_samples_per_second": 5.446,
- "eval_steps_per_second": 1.365,
+ "eval_accuracy": 0.6746268656716418,
+ "eval_loss": 0.7115103006362915,
+ "eval_runtime": 56.506,
+ "eval_samples_per_second": 5.929,
+ "eval_steps_per_second": 1.487,
  "step": 200
  },
  {
  "epoch": 0.2,
  "learning_rate": 9.80430528375734e-05,
- "loss": 0.5826,
+ "loss": 0.5604,
  "step": 250
  },
  {
  "epoch": 0.2,
- "eval_accuracy": 0.6805970149253732,
- "eval_loss": 0.6812018752098083,
- "eval_runtime": 61.5335,
- "eval_samples_per_second": 5.444,
- "eval_steps_per_second": 1.365,
+ "eval_accuracy": 0.6746268656716418,
+ "eval_loss": 0.700947105884552,
+ "eval_runtime": 56.4927,
+ "eval_samples_per_second": 5.93,
+ "eval_steps_per_second": 1.487,
  "step": 250
  },
  {
  "epoch": 0.24,
  "learning_rate": 9.315068493150684e-05,
- "loss": 0.593,
+ "loss": 0.5675,
  "step": 300
  },
  {
  "epoch": 0.24,
- "eval_accuracy": 0.6865671641791045,
- "eval_loss": 0.622471034526825,
- "eval_runtime": 61.6449,
- "eval_samples_per_second": 5.434,
- "eval_steps_per_second": 1.363,
+ "eval_accuracy": 0.6597014925373135,
+ "eval_loss": 0.662155032157898,
+ "eval_runtime": 56.4903,
+ "eval_samples_per_second": 5.93,
+ "eval_steps_per_second": 1.487,
  "step": 300
  }
  ],
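
The change in `best_metric` and `best_model_checkpoint` (step 50 now beats step 300) is easiest to confirm by reading the state file directly. A short sketch, assuming the standard `Trainer` layout in which these records live under `log_history` and the checkpoint has been downloaded locally:

```python
import json

# Hypothetical local path; point this at the downloaded checkpoint directory.
with open("checkpoint-300/trainer_state.json") as f:
    state = json.load(f)

print("best metric:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# Evaluation entries are logged every eval_steps (50) steps.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(entry["step"], entry["eval_loss"], entry["eval_accuracy"])
```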
checkpoint-300/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e2cf806244b71c753bc4b76d2695973243d02590ea5a24f377343d4ee219e984
+ oid sha256:87bd30bd4b21b4a0c66f1fd545858ef6e2d33b5984a8d292e79011acc3affd6b
  size 4472
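
`training_args.bin` is a pickled `TrainingArguments` object saved by `Trainer`, so the run's hyperparameters can be inspected once the file is downloaded. A hedged sketch (`weights_only=False` is needed on recent PyTorch because this is a pickle rather than a tensor file, and a compatible `transformers` install is required to unpickle the class):

```python
import torch

# Restore the TrainingArguments object saved alongside the checkpoint.
args = torch.load("checkpoint-300/training_args.bin", weights_only=False)

# Standard TrainingArguments fields; names are the library's, not this repo's.
print(args.learning_rate, args.eval_steps, args.save_steps)
```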