duyvt6663 committed
Commit 66a7072
1 Parent(s): a72e62d

Training in progress, step 100, checkpoint

checkpoint-100/README.md CHANGED
@@ -216,23 +216,4 @@ The following `bitsandbytes` quantization config was used during training:
  ### Framework versions


- - PEFT 0.6.0.dev0
- ## Training procedure
-
-
- The following `bitsandbytes` quantization config was used during training:
- - quant_method: bitsandbytes
- - load_in_8bit: True
- - load_in_4bit: False
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: fp4
- - bnb_4bit_use_double_quant: False
- - bnb_4bit_compute_dtype: float32
-
- ### Framework versions
-
-
  - PEFT 0.6.0.dev0
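
The README hunk above removes a duplicated "Training procedure" section; the 8-bit `bitsandbytes` settings it lists map onto `transformers.BitsAndBytesConfig` roughly as in the sketch below. This is a minimal illustration, not the repo's actual loading code: the base model id and the causal-LM head are placeholders, and only the quantization flags come from the README.

```python
# Sketch: the quantization flags listed in the README, expressed as a
# BitsAndBytesConfig. The base model id is a placeholder -- the diff does
# not show which base model this PEFT adapter was trained on.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",            # only relevant when load_in_4bit=True
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)

base = AutoModelForCausalLM.from_pretrained(
    "base-model-id",                      # placeholder, not from this repo
    quantization_config=bnb_config,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "checkpoint-100")
```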
checkpoint-100/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f68ef74d88fadddbf3434d91fee124280668f80a340fb0f5cfd4c60a0f462b00
+ oid sha256:3f9ede17267dcaedaf243ee0c9dfaec7a9839db83a912ad3db13a8a8fd47f65c
  size 9873829
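
adapter_model.bin (like optimizer.pt and training_args.bin below) is a Git LFS pointer: the file tracked in the repo only records the blob's SHA-256 and size, and this commit swaps the oid to point at the newly trained weights. A generic way to check that a downloaded copy matches the new pointer, assuming the file has been pulled to `checkpoint-100/adapter_model.bin`:

```python
# Sketch: verify a downloaded LFS object against the oid in its pointer file.
import hashlib

EXPECTED_OID = "3f9ede17267dcaedaf243ee0c9dfaec7a9839db83a912ad3db13a8a8fd47f65c"

sha = hashlib.sha256()
with open("checkpoint-100/adapter_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert sha.hexdigest() == EXPECTED_OID, "adapter_model.bin does not match its LFS pointer"
```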
checkpoint-100/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:79b16eb3ee3d773dc55a3001899da84ed5cfc088b7b1ff81f75ed29f468b81be
+ oid sha256:7a261d85a9162739496c39778cc079ee8abc4d3488d8fdc09bc4103dda68bda2
  size 42724
checkpoint-100/trainer_state.json CHANGED
@@ -1,5 +1,5 @@
  {
- "best_metric": 0.6410807371139526,
+ "best_metric": 0.6430063247680664,
  "best_model_checkpoint": "output/checkpoint-50",
  "epoch": 0.08016835354243912,
  "eval_steps": 50,
@@ -11,37 +11,37 @@
  {
  "epoch": 0.0,
  "learning_rate": 4.444444444444445e-07,
- "loss": 0.5558,
+ "loss": 0.5542,
  "step": 1
  },
  {
  "epoch": 0.04,
  "learning_rate": 2e-05,
- "loss": 0.5679,
+ "loss": 0.554,
  "step": 50
  },
  {
  "epoch": 0.04,
- "eval_accuracy": 0.6776119402985075,
- "eval_loss": 0.6410807371139526,
- "eval_runtime": 61.731,
- "eval_samples_per_second": 5.427,
- "eval_steps_per_second": 1.361,
+ "eval_accuracy": 0.6626865671641791,
+ "eval_loss": 0.6430063247680664,
+ "eval_runtime": 61.7692,
+ "eval_samples_per_second": 5.423,
+ "eval_steps_per_second": 1.36,
  "step": 50
  },
  {
  "epoch": 0.08,
  "learning_rate": 4.222222222222222e-05,
- "loss": 0.6108,
+ "loss": 0.5957,
  "step": 100
  },
  {
  "epoch": 0.08,
- "eval_accuracy": 0.6925373134328359,
- "eval_loss": 0.6733226776123047,
- "eval_runtime": 56.5238,
- "eval_samples_per_second": 5.927,
- "eval_steps_per_second": 1.486,
+ "eval_accuracy": 0.6686567164179105,
+ "eval_loss": 0.6705919504165649,
+ "eval_runtime": 60.4388,
+ "eval_samples_per_second": 5.543,
+ "eval_steps_per_second": 1.39,
  "step": 100
  }
  ],
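
The trainer_state.json hunks simply refresh the metrics for the rerun: best_metric moves from 0.6410807371139526 to 0.6430063247680664 (still pointing at output/checkpoint-50), and the step-50/step-100 loss and eval records are replaced. The records shown above are stored in the standard `transformers` `TrainerState` layout, where the log entries live under the `log_history` key (that key name is not visible in the diff and is assumed here), so the history can be inspected as in this sketch:

```python
# Sketch: inspect the updated trainer_state.json from this checkpoint.
# Assumes the standard transformers TrainerState layout, where the records
# shown in the diff live under the "log_history" key.
import json

with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

print("best_metric:", state["best_metric"])                 # 0.6430063247680664 after this commit
print("best checkpoint:", state["best_model_checkpoint"])   # output/checkpoint-50

for record in state["log_history"]:
    if "eval_loss" in record:
        print(f'step {record["step"]}: '
              f'eval_loss={record["eval_loss"]:.4f}, '
              f'eval_accuracy={record["eval_accuracy"]:.4f}')
```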
checkpoint-100/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e2cf806244b71c753bc4b76d2695973243d02590ea5a24f377343d4ee219e984
+ oid sha256:87bd30bd4b21b4a0c66f1fd545858ef6e2d33b5984a8d292e79011acc3affd6b
  size 4472
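
training_args.bin is the pickled `TrainingArguments` object that `transformers.Trainer` saves alongside each checkpoint; only its LFS oid changes here. To see the hyperparameters behind this run, it can be loaded with `torch.load` (a sketch; `weights_only=False` is needed on recent PyTorch because the file is a pickled object, not a tensor checkpoint, and the printed fields are standard `TrainingArguments` attributes rather than values shown in this diff):

```python
# Sketch: inspect the pickled TrainingArguments stored in training_args.bin.
import torch

args = torch.load("checkpoint-100/training_args.bin", weights_only=False)
# A few standard TrainingArguments fields; eval_steps should match the
# "eval_steps": 50 visible in trainer_state.json above.
print(args.learning_rate, args.per_device_train_batch_size, args.eval_steps)
```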