osbm committed on
Commit d66234e
1 Parent(s): a417fba

Upload 19 files

README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: QuantizationMethod.BITS_AND_BYTES
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+
+ - PEFT 0.5.0
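
The list above corresponds one-to-one with a `transformers` `BitsAndBytesConfig`. A minimal sketch of how the base model was presumably loaded for this 4-bit (QLoRA-style) run; the base model id comes from `adapter_config.json` below, and `device_map="auto"` is an assumption:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the quantization config listed in the README above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # load_in_4bit: True
    bnb_4bit_quant_type="nf4",             # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=True,        # bnb_4bit_use_double_quant: True
    bnb_4bit_compute_dtype=torch.float16,  # bnb_4bit_compute_dtype: float16
    llm_int8_threshold=6.0,                # llm_int8_threshold: 6.0
)

# Base model taken from adapter_config.json; device_map="auto" is an assumption.
model = AutoModelForCausalLM.from_pretrained(
    "huggyllama/llama-13b",
    quantization_config=bnb_config,
    device_map="auto",
)
```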
adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "huggyllama/llama-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
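
The adapter is a rank-8 LoRA (`lora_alpha: 32`, dropout 0.1) on the attention `q_proj` and `v_proj` modules of `huggyllama/llama-13b`. A minimal sketch of attaching it for inference, reusing the quantized `model` from the snippet above; `ADAPTER_PATH` is a placeholder for a local clone or the Hub id of this repo:

```python
from peft import PeftModel  # PEFT 0.5.0, per the README

# Placeholder: directory containing adapter_config.json and adapter_model.bin.
ADAPTER_PATH = "path/to/this/repo"

# Wraps the 4-bit base model with the r=8, alpha=32 LoRA adapter.
model = PeftModel.from_pretrained(model, ADAPTER_PATH)
model.eval()  # adapter_config.json sets inference_mode: true
```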
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89d68f68c57d2285a9842467dd248325fb228d9b6e738064665722a44ad36140
+ size 26272202
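
The three lines above are a Git LFS pointer, not the weights themselves: the `oid sha256:` identifies the actual `adapter_model.bin` (about 26 MB, per `size`), stored out of band. A sketch of fetching the resolved file with `huggingface_hub`; the repo id is a placeholder, since this diff does not show it:

```python
from huggingface_hub import hf_hub_download

REPO_ID = "<user>/<repo>"  # placeholder for this repo's Hub id

# Downloads the LFS-resolved ~26 MB binary, not the 3-line pointer stub.
local_path = hf_hub_download(repo_id=REPO_ID, filename="adapter_model.bin")
print(local_path)
```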
checkpoint-300/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: QuantizationMethod.BITS_AND_BYTES
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+
+ - PEFT 0.5.0
checkpoint-300/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "huggyllama/llama-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-300/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:208852bcd4c4d2e15939df532804de08a937f6aa6984d7a05cfdd0c06aea1b58
+ size 26272202
checkpoint-300/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72497e178149970851e1fef8c420af9e8947d73b6f5e58a02a84ffe012367801
+ size 52563258
checkpoint-300/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:875a034eff9c5d594adbe7a3c1a892b41caa763dfe833b6b76500f340b834ef1
+ size 14244
checkpoint-300/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8b1de87f0ba3d95cc748fb3c6441e806561235c4d56857c438a531c9bda5144
+ size 1064
checkpoint-300/trainer_state.json ADDED
@@ -0,0 +1,103 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.4423337856173677,
+ "eval_steps": 50,
+ "global_step": 300,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.41,
+ "learning_rate": 0.000980951231800518,
+ "loss": 1.7983,
+ "step": 50
+ },
+ {
+ "epoch": 0.41,
+ "eval_loss": 1.6800895929336548,
+ "eval_runtime": 79.4837,
+ "eval_samples_per_second": 10.304,
+ "eval_steps_per_second": 1.032,
+ "step": 50
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 0.0008643535534997409,
+ "loss": 1.648,
+ "step": 100
+ },
+ {
+ "epoch": 0.81,
+ "eval_loss": 1.6579192876815796,
+ "eval_runtime": 80.5139,
+ "eval_samples_per_second": 10.172,
+ "eval_steps_per_second": 1.018,
+ "step": 100
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 0.000666265691928808,
+ "loss": 1.6202,
+ "step": 150
+ },
+ {
+ "epoch": 1.22,
+ "eval_loss": 1.6476719379425049,
+ "eval_runtime": 81.2124,
+ "eval_samples_per_second": 10.085,
+ "eval_steps_per_second": 1.01,
+ "step": 150
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 0.00043099136249808665,
+ "loss": 1.5998,
+ "step": 200
+ },
+ {
+ "epoch": 1.63,
+ "eval_loss": 1.6405467987060547,
+ "eval_runtime": 80.4615,
+ "eval_samples_per_second": 10.179,
+ "eval_steps_per_second": 1.019,
+ "step": 200
+ },
+ {
+ "epoch": 2.04,
+ "learning_rate": 0.00021115129043425184,
+ "loss": 1.5781,
+ "step": 250
+ },
+ {
+ "epoch": 2.04,
+ "eval_loss": 1.6351187229156494,
+ "eval_runtime": 79.4429,
+ "eval_samples_per_second": 10.309,
+ "eval_steps_per_second": 1.032,
+ "step": 250
+ },
+ {
+ "epoch": 2.44,
+ "learning_rate": 5.591422293498632e-05,
+ "loss": 1.5429,
+ "step": 300
+ },
+ {
+ "epoch": 2.44,
+ "eval_loss": 1.6372405290603638,
+ "eval_runtime": 80.2927,
+ "eval_samples_per_second": 10.2,
+ "eval_steps_per_second": 1.021,
+ "step": 300
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 350,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 8.421150573099418e+17,
+ "trial_name": null,
+ "trial_params": null
+ }
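
`log_history` interleaves a training record (`loss`, `learning_rate`) and an eval record (`eval_loss` plus throughput) every 50 steps: eval loss falls from 1.680 at step 50 to 1.635 at step 250, then ticks up to 1.637 at step 300. A minimal sketch for splitting the two series out of this file, assuming a local clone of the repo:

```python
import json

# Path assumes the repo has been cloned locally.
with open("checkpoint-300/trainer_state.json") as f:
    state = json.load(f)

# Training and eval records are interleaved; separate them by key.
history = state["log_history"]
train_loss = [(e["step"], e["loss"]) for e in history if "loss" in e]
eval_loss = [(e["step"], e["eval_loss"]) for e in history if "eval_loss" in e]

print(train_loss)  # [(50, 1.7983), ..., (300, 1.5429)]
print(eval_loss)   # [(50, 1.6800...), ..., (300, 1.6372...)]
```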
checkpoint-300/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da97d3940bba432b5b7b3e63fb89cd8284ed694adc54fd644c3d3775609b9ebd
+ size 4408
checkpoint-350/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: QuantizationMethod.BITS_AND_BYTES
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+
+ - PEFT 0.5.0
checkpoint-350/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "huggyllama/llama-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-350/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89d68f68c57d2285a9842467dd248325fb228d9b6e738064665722a44ad36140
+ size 26272202
checkpoint-350/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:085ff8fe880f9bfbdc06c00129e893c6c1903227d9da354da01d7a868dd81e0f
+ size 52563258
checkpoint-350/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11680d9405f4b2dda071b535e466403b2d487e48706b90da952a3b982a054b1f
+ size 14244
checkpoint-350/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6dd94ce3103f26f5db9c664dceaf453ad6516fd669277d7c50dc5cef40087e2
+ size 1064
checkpoint-350/trainer_state.json ADDED
@@ -0,0 +1,117 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.8493894165535956,
+ "eval_steps": 50,
+ "global_step": 350,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.41,
+ "learning_rate": 0.000980951231800518,
+ "loss": 1.7983,
+ "step": 50
+ },
+ {
+ "epoch": 0.41,
+ "eval_loss": 1.6800895929336548,
+ "eval_runtime": 79.4837,
+ "eval_samples_per_second": 10.304,
+ "eval_steps_per_second": 1.032,
+ "step": 50
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 0.0008643535534997409,
+ "loss": 1.648,
+ "step": 100
+ },
+ {
+ "epoch": 0.81,
+ "eval_loss": 1.6579192876815796,
+ "eval_runtime": 80.5139,
+ "eval_samples_per_second": 10.172,
+ "eval_steps_per_second": 1.018,
+ "step": 100
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 0.000666265691928808,
+ "loss": 1.6202,
+ "step": 150
+ },
+ {
+ "epoch": 1.22,
+ "eval_loss": 1.6476719379425049,
+ "eval_runtime": 81.2124,
+ "eval_samples_per_second": 10.085,
+ "eval_steps_per_second": 1.01,
+ "step": 150
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 0.00043099136249808665,
+ "loss": 1.5998,
+ "step": 200
+ },
+ {
+ "epoch": 1.63,
+ "eval_loss": 1.6405467987060547,
+ "eval_runtime": 80.4615,
+ "eval_samples_per_second": 10.179,
+ "eval_steps_per_second": 1.019,
+ "step": 200
+ },
+ {
+ "epoch": 2.04,
+ "learning_rate": 0.00021115129043425184,
+ "loss": 1.5781,
+ "step": 250
+ },
+ {
+ "epoch": 2.04,
+ "eval_loss": 1.6351187229156494,
+ "eval_runtime": 79.4429,
+ "eval_samples_per_second": 10.309,
+ "eval_steps_per_second": 1.032,
+ "step": 250
+ },
+ {
+ "epoch": 2.44,
+ "learning_rate": 5.591422293498632e-05,
+ "loss": 1.5429,
+ "step": 300
+ },
+ {
+ "epoch": 2.44,
+ "eval_loss": 1.6372405290603638,
+ "eval_runtime": 80.2927,
+ "eval_samples_per_second": 10.2,
+ "eval_steps_per_second": 1.021,
+ "step": 300
+ },
+ {
+ "epoch": 2.85,
+ "learning_rate": 0.0,
+ "loss": 1.5488,
+ "step": 350
+ },
+ {
+ "epoch": 2.85,
+ "eval_loss": 1.6368159055709839,
+ "eval_runtime": 80.9154,
+ "eval_samples_per_second": 10.122,
+ "eval_steps_per_second": 1.013,
+ "step": 350
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 350,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 9.826726868965786e+17,
+ "trial_name": null,
+ "trial_params": null
+ }
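
Step 350 is the end of the run (`max_steps: 350`, learning rate annealed to 0.0), and this checkpoint's `adapter_model.bin` has the same SHA-256 as the one at the repo root, so the root adapter is the final-step adapter. The training script itself is not part of this commit; the following is a speculative sketch of re-creating a `Trainer` whose schedule matches `trainer_state.json` and resuming from this checkpoint (e.g. after raising `max_steps`). `model` is the PEFT-wrapped 4-bit model from the earlier sketches, and `train_ds`/`eval_ds` are hypothetical datasets not shown here:

```python
from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="outputs",         # assumption; not recorded in this commit
    num_train_epochs=3,           # num_train_epochs: 3
    max_steps=350,                # max_steps: 350 (raise this to train further)
    logging_steps=50,             # logging_steps: 50
    save_steps=50,                # save_steps: 50
    evaluation_strategy="steps",
    eval_steps=50,                # eval_steps: 50
    learning_rate=1e-3,           # inferred: lr is ~9.8e-4 at step 50, decaying to 0
)

trainer = Trainer(
    model=model,                  # PEFT-wrapped 4-bit model from earlier sketches
    args=args,
    train_dataset=train_ds,       # hypothetical; the dataset is not in this commit
    eval_dataset=eval_ds,
)
trainer.train(resume_from_checkpoint="checkpoint-350")
```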
checkpoint-350/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da97d3940bba432b5b7b3e63fb89cd8284ed694adc54fd644c3d3775609b9ebd
+ size 4408