duyvt6663 committed on
Commit 97fd243 · 1 Parent(s): 325b1db

Training in progress, step 100, checkpoint

checkpoint-100/README.md CHANGED
@@ -216,23 +216,4 @@ The following `bitsandbytes` quantization config was used during training:
  ### Framework versions


- - PEFT 0.6.0.dev0
- ## Training procedure
-
-
- The following `bitsandbytes` quantization config was used during training:
- - quant_method: bitsandbytes
- - load_in_8bit: True
- - load_in_4bit: False
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: fp4
- - bnb_4bit_use_double_quant: False
- - bnb_4bit_compute_dtype: float32
-
- ### Framework versions
-
-
  - PEFT 0.6.0.dev0
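The hunk above only deletes a duplicated "Training procedure" block; the same quantization settings remain earlier in the README. For reference, a minimal sketch of how those values could be expressed with the `transformers` `BitsAndBytesConfig` API (this code is not part of the commit; the variable name is illustrative):

```python
from transformers import BitsAndBytesConfig

# Sketch only: mirrors the 8-bit settings listed in the README section above.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype="float32",
)
```

With `load_in_8bit=True`, the `bnb_4bit_*` entries are inert defaults, which is why they appear with their stock values in the README.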
checkpoint-100/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:83bc67d84c2e2770e93e7dd5eb955f94295934c16a23ca55e714bba0e414d970
+ oid sha256:4d47e218815f606e6ebe353f9b2c582463327a7225a44e266eb7c3001c55a9df
  size 9873829
checkpoint-100/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2183283d09e449ecb8d1e84db01ccf734116edb2009aace2941c44a12416340c
+ oid sha256:8c0ae756dedda0958db06224330e4d4efe670ec68b7cbed0c7d6e1258e368c4b
  size 42724
checkpoint-100/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0873ef906878b4c1357d257cb94710733cd24e098664d0b4b1006ecd391a0b46
+ oid sha256:f649ff3b7c1c3e2ba75504060c5f895b20a57a154cc2364aa1c230456dc8c528
  size 14244
checkpoint-100/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
  {
- "best_metric": 0.6663276553153992,
+ "best_metric": 0.695776641368866,
  "best_model_checkpoint": "output/checkpoint-100",
- "epoch": 0.08021658477890303,
+ "epoch": 0.08018442417560388,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
@@ -11,45 +11,45 @@
  {
  "epoch": 0.0,
  "learning_rate": 1.3333333333333332e-06,
- "loss": 0.785,
+ "loss": 0.6133,
  "step": 1
  },
  {
  "epoch": 0.04,
  "learning_rate": 5.9999999999999995e-05,
- "loss": 0.6547,
+ "loss": 0.61,
  "step": 50
  },
  {
  "epoch": 0.04,
- "eval_accuracy": 0.6626865671641791,
- "eval_loss": 0.8300915956497192,
- "eval_runtime": 61.5689,
- "eval_samples_per_second": 5.441,
- "eval_steps_per_second": 1.364,
+ "eval_accuracy": 0.6895522388059702,
+ "eval_loss": 0.6718239784240723,
+ "eval_runtime": 61.4305,
+ "eval_samples_per_second": 5.453,
+ "eval_steps_per_second": 1.367,
  "step": 50
  },
  {
  "epoch": 0.08,
  "learning_rate": 0.00012666666666666666,
- "loss": 0.6654,
+ "loss": 0.6233,
  "step": 100
  },
  {
  "epoch": 0.08,
- "eval_accuracy": 0.6895522388059702,
- "eval_loss": 0.6663276553153992,
- "eval_runtime": 56.4633,
- "eval_samples_per_second": 5.933,
- "eval_steps_per_second": 1.488,
+ "eval_accuracy": 0.6835820895522388,
+ "eval_loss": 0.695776641368866,
+ "eval_runtime": 61.5801,
+ "eval_samples_per_second": 5.44,
+ "eval_steps_per_second": 1.364,
  "step": 100
  }
  ],
  "logging_steps": 50,
- "max_steps": 1246,
+ "max_steps": 1247,
  "num_train_epochs": 1,
  "save_steps": 100,
- "total_flos": 3.734879824367616e+16,
+ "total_flos": 3.729294230224896e+16,
  "trial_name": null,
  "trial_params": null
  }
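trainer_state.json is the Trainer's bookkeeping file for a checkpoint. A minimal sketch of reading the updated metrics out of this checkpoint, assuming the standard `log_history` key (not shown in full in the diff) and a local copy of the checkpoint directory:

```python
import json

# Sketch only: path and layout assumed from the standard Trainer checkpoint format.
with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"])            # 0.695776641368866 in this commit
print(state["best_model_checkpoint"])  # "output/checkpoint-100"

# Evaluation entries are the log records that contain "eval_loss".
for record in state["log_history"]:
    if "eval_loss" in record:
        print(record["step"], record["eval_loss"], record.get("eval_accuracy"))
```

A checkpoint directory like this can also be passed to `Trainer.train(resume_from_checkpoint="output/checkpoint-100")` to continue the run from step 100.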