duyvt6663 committed
Commit: 61ae91d
Parent: e8628f3

Training in progress, step 200, checkpoint

checkpoint-200/README.md CHANGED
@@ -216,23 +216,4 @@ The following `bitsandbytes` quantization config was used during training:
  ### Framework versions


- - PEFT 0.6.0.dev0
- ## Training procedure
-
-
- The following `bitsandbytes` quantization config was used during training:
- - quant_method: bitsandbytes
- - load_in_8bit: True
- - load_in_4bit: False
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: fp4
- - bnb_4bit_use_double_quant: False
- - bnb_4bit_compute_dtype: float32
-
- ### Framework versions
-
-
  - PEFT 0.6.0.dev0
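The diff above removes a duplicated "Training procedure" block from the checkpoint README; the listed fields map directly onto transformers' BitsAndBytesConfig. The following is a minimal sketch of that mapping, written as an assumption for illustration — the repo's actual training script is not part of this commit:

```python
# Sketch only: reconstructs the quantization config listed in the README.
# The field names/values come from the README; everything else is assumed.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)
```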
checkpoint-200/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:edb1b225c0134aac43e3c85e9808f6260f3d7e63728759d3eec9f999a6c9ae10
+ oid sha256:df156c15f1a54b7f026004fa1f67e111a709e0d6acd21d6cbbd98f69fcc78a18
  size 9873829
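The binary files in this commit are stored as Git LFS pointers, which record only the blob's SHA-256 ("oid sha256:...") and byte size. A minimal sketch, not part of this commit, of checking a locally downloaded file against the new pointer above:

```python
# Sketch only: verify a downloaded LFS blob against its pointer metadata.
import hashlib
import os

def verify_lfs_blob(path: str, expected_sha256: str, expected_size: int) -> bool:
    # Cheap check first: the pointer's recorded size must match.
    if os.path.getsize(path) != expected_size:
        return False
    # Then hash the file in chunks and compare against the recorded oid.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_sha256

# Values taken from the updated pointer for checkpoint-200/adapter_model.bin:
print(verify_lfs_blob(
    "checkpoint-200/adapter_model.bin",
    "df156c15f1a54b7f026004fa1f67e111a709e0d6acd21d6cbbd98f69fcc78a18",
    9873829,
))
```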
checkpoint-200/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:403833291baed9912c7b5c2d2066aa2d595b16beaa00bc14c9f7b0497f466693
+ oid sha256:065f253474063fef6dd5b47f6acfebbdc3f9af82c35df048a6049d9f0cb110e4
  size 42724
checkpoint-200/trainer_state.json CHANGED
@@ -1,5 +1,5 @@
  {
- "best_metric": 0.6410807371139526,
+ "best_metric": 0.6430063247680664,
  "best_model_checkpoint": "output/checkpoint-50",
  "epoch": 0.16033670708487824,
  "eval_steps": 50,
@@ -11,67 +11,67 @@
  {
  "epoch": 0.0,
  "learning_rate": 4.444444444444445e-07,
- "loss": 0.5558,
+ "loss": 0.5542,
  "step": 1
  },
  {
  "epoch": 0.04,
  "learning_rate": 2e-05,
- "loss": 0.5679,
+ "loss": 0.554,
  "step": 50
  },
  {
  "epoch": 0.04,
- "eval_accuracy": 0.6776119402985075,
- "eval_loss": 0.6410807371139526,
- "eval_runtime": 61.731,
- "eval_samples_per_second": 5.427,
- "eval_steps_per_second": 1.361,
+ "eval_accuracy": 0.6626865671641791,
+ "eval_loss": 0.6430063247680664,
+ "eval_runtime": 61.7692,
+ "eval_samples_per_second": 5.423,
+ "eval_steps_per_second": 1.36,
  "step": 50
  },
  {
  "epoch": 0.08,
  "learning_rate": 4.222222222222222e-05,
- "loss": 0.6108,
+ "loss": 0.5957,
  "step": 100
  },
  {
  "epoch": 0.08,
- "eval_accuracy": 0.6925373134328359,
- "eval_loss": 0.6733226776123047,
- "eval_runtime": 56.5238,
- "eval_samples_per_second": 5.927,
- "eval_steps_per_second": 1.486,
+ "eval_accuracy": 0.6686567164179105,
+ "eval_loss": 0.6705919504165649,
+ "eval_runtime": 60.4388,
+ "eval_samples_per_second": 5.543,
+ "eval_steps_per_second": 1.39,
  "step": 100
  },
  {
  "epoch": 0.12,
  "learning_rate": 6.444444444444446e-05,
- "loss": 0.5865,
+ "loss": 0.5668,
  "step": 150
  },
  {
  "epoch": 0.12,
- "eval_accuracy": 0.6865671641791045,
- "eval_loss": 0.7122625708580017,
- "eval_runtime": 61.5948,
- "eval_samples_per_second": 5.439,
- "eval_steps_per_second": 1.364,
+ "eval_accuracy": 0.6805970149253732,
+ "eval_loss": 0.7224913835525513,
+ "eval_runtime": 56.4882,
+ "eval_samples_per_second": 5.93,
+ "eval_steps_per_second": 1.487,
  "step": 150
  },
  {
  "epoch": 0.16,
  "learning_rate": 8.666666666666667e-05,
- "loss": 0.5752,
+ "loss": 0.5626,
  "step": 200
  },
  {
  "epoch": 0.16,
- "eval_accuracy": 0.6835820895522388,
- "eval_loss": 0.6697484254837036,
- "eval_runtime": 61.5178,
- "eval_samples_per_second": 5.446,
- "eval_steps_per_second": 1.365,
+ "eval_accuracy": 0.6746268656716418,
+ "eval_loss": 0.7115103006362915,
+ "eval_runtime": 56.506,
+ "eval_samples_per_second": 5.929,
+ "eval_steps_per_second": 1.487,
  "step": 200
  }
  ],
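The fields visible in this diff (best_metric, best_model_checkpoint, and the per-step log_history with eval_loss/eval_accuracy) can be read back from trainer_state.json directly; best_metric appears to track eval_loss here, since it equals the step-50 eval_loss, and checkpoint-50 remains the best checkpoint. A minimal sketch of such an inspection, written as an assumption rather than anything shipped in this repo:

```python
# Sketch only: read back the trainer state shown in the diff above.
import json

with open("checkpoint-200/trainer_state.json") as f:
    state = json.load(f)

print("best metric (eval_loss):", state["best_metric"])        # 0.6430...
print("best checkpoint:", state["best_model_checkpoint"])       # "output/checkpoint-50"

# Evaluation entries are the log_history items that carry eval_* keys.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(entry["step"], entry["eval_loss"], entry["eval_accuracy"])
```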
checkpoint-200/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e2cf806244b71c753bc4b76d2695973243d02590ea5a24f377343d4ee219e984
+ oid sha256:87bd30bd4b21b4a0c66f1fd545858ef6e2d33b5984a8d292e79011acc3affd6b
  size 4472
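training_args.bin is the pickled TrainingArguments object that transformers' Trainer saves alongside each checkpoint, so it can be inspected with torch.load. A minimal sketch, assuming the file has been pulled from LFS locally:

```python
# Sketch only: training_args.bin is a pickled Python object, not a tensor file,
# so recent torch versions need weights_only=False to unpickle it.
import torch

args = torch.load("checkpoint-200/training_args.bin", weights_only=False)
print(args.learning_rate, args.eval_steps, args.per_device_eval_batch_size)
```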