duyvt6663 committed
Commit ad25868
Parent: 6ecfaf9

Training in progress, step 150, checkpoint

checkpoint-150/README.md CHANGED
@@ -216,4 +216,23 @@ The following `bitsandbytes` quantization config was used during training:
 ### Framework versions
 
 
+- PEFT 0.6.0
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: True
+- load_in_4bit: False
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float32
+
+### Framework versions
+
+
 - PEFT 0.6.0
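The quantization block added to the README maps one-to-one onto the keyword arguments of `transformers`' `BitsAndBytesConfig`. A minimal sketch of rebuilding the same setup when reloading the base model; the sequence-classification head is an assumption based on the `eval_accuracy` metric logged below, and the model path is the one from `adapter_config.json`:

```python
import torch
from transformers import AutoModelForSequenceClassification, BitsAndBytesConfig

# Mirrors the config recorded in checkpoint-150/README.md: pure 8-bit loading;
# the 4-bit fields are present but inactive since load_in_4bit is False.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)

model = AutoModelForSequenceClassification.from_pretrained(
    "vietcuna-3b-v2/kalapa-vietcuna-3b/",  # base-model path from adapter_config.json
    quantization_config=bnb_config,
)
```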
checkpoint-150/adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "/kaggle/input/vietcuna-for-binary-classification/kalapa-vietcuna-3b",
+  "base_model_name_or_path": "vietcuna-3b-v2/kalapa-vietcuna-3b/",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -9,10 +9,10 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "lora_alpha": 32,
-  "lora_dropout": 0.1,
+  "lora_dropout": 0.05,
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 8,
+  "r": 16,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
checkpoint-150/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:953b5c0a434279eedbdc0b0b8a9aa74658e382b2c83dca7011218ad7a0902776
-size 9859800
+oid sha256:dddcd65ff6df9d9f677eb4f14a43db2eb07482d82b13c30695cff8601f54c15e
+size 19690328
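The new adapter pointer is consistent with that rank change: `adapter_model.safetensors` roughly doubles, from 9,859,800 to 19,690,328 bytes. A LoRA adapter stores two low-rank factors per adapted weight, A (r × d_in) and B (d_out × r), so its parameter count, and hence file size, scales linearly with `r`. A back-of-the-envelope check; the hidden size of 2560 is an assumption (a BLOOM-3b-class base model), not a value taken from this repo:

```python
def lora_params(d_in: int, d_out: int, r: int) -> int:
    """Parameters LoRA adds per adapted weight matrix: A (r x d_in) + B (d_out x r)."""
    return r * (d_in + d_out)

# Assumed hidden size of 2560 -- illustrative only.
print(lora_params(2560, 2560, 8))   # 40960 at r=8
print(lora_params(2560, 2560, 16))  # 81920 at r=16, exactly 2x
```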
checkpoint-150/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8a6e29cc344423bb6f9262ac1c7ec4968cedc8ed87ac2cef49f98122da71942b
-size 42724
+oid sha256:2bf8a8e294ad91dad98cdc10260e4bc5df16015bdea464a7c37a94878393d035
+size 38087098
checkpoint-150/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7cc73ce8c65e40ac6855f7b2e3e6248898fe7fdbbaf2d613f95e2b6c995ee1bf
+oid sha256:55e71ed3f6f3ad40fcbc3c5cfb0b3bc77f998518af052b57e64a7852561eecf3
 size 14244
checkpoint-150/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9393be481ed85e8190dd814ca7d2cf441f97ed5dbde384528496c6d547e316ce
+oid sha256:c5dfe32eefef4e0fa34f6f96b3b2ec5e3e27c4f5298718cbc335d8fcca2a95dc
 size 1064
checkpoint-150/special_tokens_map.json CHANGED
@@ -2,6 +2,6 @@
   "bos_token": "<s>",
   "eos_token": "</s>",
   "pad_token": "<pad>",
-  "sep_token": "<s>",
+  "sep_token": "\n\n",
   "unk_token": "<unk>"
 }
checkpoint-150/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
checkpoint-150/tokenizer_config.json CHANGED
@@ -39,7 +39,7 @@
   "eos_token": "</s>",
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<pad>",
-  "sep_token": "<s>",
+  "sep_token": "\n\n",
   "tokenizer_class": "BloomTokenizer",
   "unk_token": "<unk>"
 }
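Both tokenizer files record the same edit: `sep_token` switches from `<s>` to a literal blank line (`"\n\n"`). A minimal sketch of making the equivalent change on a loaded tokenizer; the local path is illustrative:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-150")  # illustrative path

# Same effect as the edits to special_tokens_map.json and tokenizer_config.json:
# separate segments with a blank line instead of the <s> token.
tokenizer.add_special_tokens({"sep_token": "\n\n"})
print(repr(tokenizer.sep_token))  # '\n\n'
```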
checkpoint-150/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 0.522564172744751,
-  "best_model_checkpoint": "output/checkpoint-50",
-  "epoch": 1.0471204188481675,
+  "best_metric": 1.2631328105926514,
+  "best_model_checkpoint": "output/checkpoint-150",
+  "epoch": 0.7657945118059988,
   "eval_steps": 50,
   "global_step": 150,
   "is_hyper_param_search": false,
@@ -10,61 +10,61 @@
   "log_history": [
     {
       "epoch": 0.01,
-      "learning_rate": 1.0000000000000001e-07,
-      "loss": 0.4196,
+      "learning_rate": 2.777777777777778e-07,
+      "loss": 9.5569,
       "step": 1
     },
     {
-      "epoch": 0.35,
-      "learning_rate": 4.6e-06,
-      "loss": 0.4279,
+      "epoch": 0.26,
+      "learning_rate": 1.25e-05,
+      "loss": 6.958,
       "step": 50
     },
     {
-      "epoch": 0.35,
-      "eval_accuracy": 0.7657142857142857,
-      "eval_loss": 0.522564172744751,
-      "eval_runtime": 29.0326,
-      "eval_samples_per_second": 6.028,
-      "eval_steps_per_second": 1.516,
+      "epoch": 0.26,
+      "eval_accuracy": 0.42857142857142855,
+      "eval_loss": 6.581231117248535,
+      "eval_runtime": 28.2966,
+      "eval_samples_per_second": 6.184,
+      "eval_steps_per_second": 1.555,
       "step": 50
     },
     {
-      "epoch": 0.7,
-      "learning_rate": 8.86829268292683e-06,
-      "loss": 0.4089,
+      "epoch": 0.51,
+      "learning_rate": 2.6388888888888892e-05,
+      "loss": 1.6141,
       "step": 100
     },
     {
-      "epoch": 0.7,
-      "eval_accuracy": 0.7428571428571429,
-      "eval_loss": 0.5401451587677002,
-      "eval_runtime": 29.0003,
-      "eval_samples_per_second": 6.034,
-      "eval_steps_per_second": 1.517,
+      "epoch": 0.51,
+      "eval_accuracy": 0.5028571428571429,
+      "eval_loss": 1.6640697717666626,
+      "eval_runtime": 28.1139,
+      "eval_samples_per_second": 6.225,
+      "eval_steps_per_second": 1.565,
       "step": 100
     },
     {
-      "epoch": 1.05,
-      "learning_rate": 7.792682926829268e-06,
-      "loss": 0.4018,
+      "epoch": 0.77,
+      "learning_rate": 4.027777777777778e-05,
+      "loss": 0.8533,
       "step": 150
     },
     {
-      "epoch": 1.05,
-      "eval_accuracy": 0.7428571428571429,
-      "eval_loss": 0.5425500273704529,
-      "eval_runtime": 28.9813,
-      "eval_samples_per_second": 6.038,
-      "eval_steps_per_second": 1.518,
+      "epoch": 0.77,
+      "eval_accuracy": 0.6342857142857142,
+      "eval_loss": 1.2631328105926514,
+      "eval_runtime": 26.205,
+      "eval_samples_per_second": 6.678,
+      "eval_steps_per_second": 1.679,
       "step": 150
     }
   ],
   "logging_steps": 50,
-  "max_steps": 500,
-  "num_train_epochs": 4,
+  "max_steps": 1000,
+  "num_train_epochs": 6,
   "save_steps": 50,
-  "total_flos": 5.643009513984e+16,
+  "total_flos": 5.595400201863168e+16,
   "trial_name": null,
   "trial_params": null
 }
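The `trainer_state.json` diff records a new run rather than a continuation of the old one: the epoch counter resets at the same `global_step`, the learning-rate schedule restarts (still warming up at step 150, already above the old run's peak), and the step-1 loss jumps from 0.42 to 9.56. `best_metric` now tracks the new run's `eval_loss`, which improves monotonically (6.58 → 1.66 → 1.26), so the best checkpoint is `checkpoint-150` itself. A small sketch for pulling that trajectory out of the state file:

```python
import json

with open("checkpoint-150/trainer_state.json") as f:
    state = json.load(f)

print("best:", state["best_metric"], "at", state["best_model_checkpoint"])
for entry in state["log_history"]:
    if "eval_loss" in entry:  # keep only the evaluation records
        print(f'step {entry["step"]:>3}: '
              f'eval_loss={entry["eval_loss"]:.4f} '
              f'eval_accuracy={entry["eval_accuracy"]:.4f}')
```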
checkpoint-150/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b1b0a94354c0f3dfee9c93e225dbd48ad6f856f98977ef38ff3415910328e64a
+oid sha256:5a0ef36d7c31265f68a3edc8ae53ce9272dcc90a79f261e29a2a46e040c77a33
 size 4600