DiaVio committed on
Commit 72c04ae
1 Parent(s): 129c6ee

loras commit

Files changed (30)
  1. llama2_13b_responsibility-16R-128B-4E-100S/adapter_config.json +25 -0
  2. llama2_13b_responsibility-16R-128B-4E-100S/adapter_model.bin +3 -0
  3. llama2_13b_responsibility-16R-128B-4E-100S/training_log.json +20 -0
  4. llama2_13b_responsibility-16R-128B-4E-100S/training_parameters.json +37 -0
  5. llama2_13b_responsibility-16R-128B-4E-100S/training_prompt.json +5 -0
  6. llama2_13b_type-16R-128B-4E-100S/adapter_config.json +25 -0
  7. llama2_13b_type-16R-128B-4E-100S/adapter_model.bin +3 -0
  8. llama2_13b_type-16R-128B-4E-100S/training_log.json +20 -0
  9. llama2_13b_type-16R-128B-4E-100S/training_parameters.json +37 -0
  10. llama2_13b_type-16R-128B-4E-100S/training_prompt.json +5 -0
  11. llama2_70b_responsibility-16R-128B-4E-100S/adapter_config.json +25 -0
  12. llama2_70b_responsibility-16R-128B-4E-100S/adapter_model.bin +3 -0
  13. llama2_70b_responsibility-16R-128B-4E-100S/training_log.json +15 -0
  14. llama2_70b_responsibility-16R-128B-4E-100S/training_parameters.json +37 -0
  15. llama2_70b_responsibility-16R-128B-4E-100S/training_prompt.json +5 -0
  16. llama2_70b_type-16R-128B-4E-100S/adapter_config.json +25 -0
  17. llama2_70b_type-16R-128B-4E-100S/adapter_model.bin +3 -0
  18. llama2_70b_type-16R-128B-4E-100S/training_log.json +20 -0
  19. llama2_70b_type-16R-128B-4E-100S/training_parameters.json +37 -0
  20. llama2_70b_type-16R-128B-4E-100S/training_prompt.json +5 -0
  21. llama2_7b_responsibility-16R-128B-4E-100S/adapter_config.json +25 -0
  22. llama2_7b_responsibility-16R-128B-4E-100S/adapter_model.bin +3 -0
  23. llama2_7b_responsibility-16R-128B-4E-100S/training_log.json +20 -0
  24. llama2_7b_responsibility-16R-128B-4E-100S/training_parameters.json +37 -0
  25. llama2_7b_responsibility-16R-128B-4E-100S/training_prompt.json +5 -0
  26. llama2_7b_type-16R-128B-4E-100S/adapter_config.json +25 -0
  27. llama2_7b_type-16R-128B-4E-100S/adapter_model.bin +3 -0
  28. llama2_7b_type-16R-128B-4E-100S/training_log.json +20 -0
  29. llama2_7b_type-16R-128B-4E-100S/training_parameters.json +37 -0
  30. llama2_7b_type-16R-128B-4E-100S/training_prompt.json +5 -0
llama2_13b_responsibility-16R-128B-4E-100S/adapter_config.json ADDED
@@ -0,0 +1,25 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "models/meta-llama_Llama-2-13b-chat-hf",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj",
+    "o_proj",
+    "k_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
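Not part of the committed files, but as a minimal usage sketch: an adapter with the config above can typically be attached to its base model via the PEFT library. The paths below are assumptions taken from `base_model_name_or_path` and this folder's name.

```python
# Hedged usage sketch (assumes transformers, peft and accelerate are installed,
# and that the base model and this adapter folder are available locally).
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_path = "models/meta-llama_Llama-2-13b-chat-hf"           # from adapter_config.json
adapter_path = "llama2_13b_responsibility-16R-128B-4E-100S"   # this repo folder

tokenizer = AutoTokenizer.from_pretrained(base_path)
base_model = AutoModelForCausalLM.from_pretrained(base_path, device_map="auto")

# Attach the LoRA weights (r=16, lora_alpha=32, q/k/v/o projections) to the base model.
model = PeftModel.from_pretrained(base_model, adapter_path)
model.eval()
```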
llama2_13b_responsibility-16R-128B-4E-100S/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aadeb839f1fe30a28cbace1ac0f93a138b943a83f89c1aebe858375bbe157be6
+size 104973834
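adapter_model.bin is stored via Git LFS, so the file above is only a pointer. A small sketch, assuming the real weights have been pulled locally, to check the download against the pointer's oid and size:

```python
# Hedged sketch: verify a pulled adapter_model.bin against the LFS pointer above.
import hashlib
import os

path = "llama2_13b_responsibility-16R-128B-4E-100S/adapter_model.bin"
expected_oid = "aadeb839f1fe30a28cbace1ac0f93a138b943a83f89c1aebe858375bbe157be6"
expected_size = 104973834

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("adapter_model.bin matches its LFS pointer")
```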
llama2_13b_responsibility-16R-128B-4E-100S/training_log.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name": "meta-llama_Llama-2-13b-chat-hf",
+  "base_model_class": "LlamaForCausalLM",
+  "base_loaded_in_4bit": false,
+  "base_loaded_in_8bit": false,
+  "projections": "q, v, k, o",
+  "eval_loss": 0.27035561203956604,
+  "eval_runtime": 318.5835,
+  "eval_samples_per_second": 4.85,
+  "eval_steps_per_second": 0.609,
+  "epoch": 4.0,
+  "current_steps": 6268,
+  "loss": 0.262,
+  "learning_rate": 1.5624999999999999e-06,
+  "train_runtime": 27128.8237,
+  "train_samples_per_second": 0.925,
+  "train_steps_per_second": 0.007,
+  "total_flos": 1.9848229987811328e+18,
+  "train_loss": 0.34869462008378943
+}
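Each adapter folder in this commit carries a training_log.json like the one above, so the final eval_loss values can be compared across adapters. A small sketch, assuming the repository is checked out locally with the layout listed under "Files changed":

```python
# Hedged sketch: print the recorded eval_loss for every adapter in this commit.
import json
from pathlib import Path

for log_path in sorted(Path(".").glob("llama2_*-16R-128B-4E-100S/training_log.json")):
    log = json.loads(log_path.read_text())
    print(f"{log_path.parent.name}: eval_loss={log.get('eval_loss')}")
```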
llama2_13b_responsibility-16R-128B-4E-100S/training_parameters.json ADDED
@@ -0,0 +1,37 @@
+{
+  "lora_name": "llama2_13b_responsibility-16R-128B-4E-100S",
+  "always_override": true,
+  "q_proj_en": true,
+  "v_proj_en": true,
+  "k_proj_en": true,
+  "o_proj_en": true,
+  "gate_proj_en": false,
+  "down_proj_en": false,
+  "up_proj_en": false,
+  "save_steps": 100.0,
+  "micro_batch_size": 4,
+  "batch_size": 128,
+  "epochs": 4.0,
+  "learning_rate": "3e-4",
+  "lr_scheduler_type": "linear",
+  "lora_rank": 16,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "cutoff_len": 1024,
+  "dataset": "responsibility_train",
+  "eval_dataset": "responsibility_val",
+  "format": "alpaca-format",
+  "eval_steps": 100.0,
+  "raw_text_file": "None",
+  "overlap_len": 128,
+  "newline_favor_len": 128,
+  "higher_rank_limit": false,
+  "warmup_steps": 100.0,
+  "optimizer": "adamw_torch",
+  "hard_cut_string": "\\n\\n\\n",
+  "train_only_after": "",
+  "stop_at_loss": 0,
+  "add_eos_token": false,
+  "min_chars": 0.0,
+  "report_to": "tensorboard"
+}
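A minimal sketch of how the batch settings above presumably combine (assumption: `batch_size` is the effective batch and `micro_batch_size` the per-step batch, so their ratio is the gradient-accumulation factor, as in common LoRA training UIs):

```python
# Hedged sketch: derive the implied gradient-accumulation factor from
# training_parameters.json (the interpretation of the fields is an assumption).
import json

with open("llama2_13b_responsibility-16R-128B-4E-100S/training_parameters.json") as f:
    params = json.load(f)

grad_accum = params["batch_size"] // params["micro_batch_size"]  # 128 // 4 = 32
print(f"gradient accumulation steps: {grad_accum}")
print(f"epochs: {params['epochs']}, peak lr: {params['learning_rate']}, "
      f"lora_rank: {params['lora_rank']}, cutoff_len: {params['cutoff_len']}")
```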
llama2_13b_responsibility-16R-128B-4E-100S/training_prompt.json ADDED
@@ -0,0 +1,5 @@
+{
+  "template_type": "dataset",
+  "template_1": "### Instruction:\n%instruction%\n\n### Response:\n%output%",
+  "template_2": "### Instruction:\n%instruction%\n\n### Input:\n%input%\n\n### Response:\n%output%"
+}
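The `%instruction%`, `%input%` and `%output%` placeholders suggest plain string substitution into `template_1` (no input) or `template_2` (with input). A hedged sketch of that formatting; the sample record is hypothetical:

```python
# Hedged sketch: fill the alpaca-style templates from training_prompt.json.
# The sample record below is made up for illustration only.
import json

with open("llama2_13b_responsibility-16R-128B-4E-100S/training_prompt.json") as f:
    prompt_cfg = json.load(f)

sample = {"instruction": "Example instruction.", "input": "", "output": "Example output."}

template = prompt_cfg["template_2"] if sample.get("input") else prompt_cfg["template_1"]
text = (template
        .replace("%instruction%", sample["instruction"])
        .replace("%input%", sample.get("input", ""))
        .replace("%output%", sample["output"]))
print(text)
```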
llama2_13b_type-16R-128B-4E-100S/adapter_config.json ADDED
@@ -0,0 +1,25 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "models/meta-llama_Llama-2-13b-chat-hf",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "o_proj",
+    "v_proj",
+    "k_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
llama2_13b_type-16R-128B-4E-100S/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faac9479d9c6886ff2f0ff03b8a8e342ae3f3fe4346a71891a8cb545622caef5
+size 104973834
llama2_13b_type-16R-128B-4E-100S/training_log.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name": "meta-llama_Llama-2-13b-chat-hf",
+  "base_model_class": "LlamaForCausalLM",
+  "base_loaded_in_4bit": false,
+  "base_loaded_in_8bit": false,
+  "projections": "q, v, k, o",
+  "eval_loss": 0.6852580904960632,
+  "eval_runtime": 235.3491,
+  "eval_samples_per_second": 4.844,
+  "eval_steps_per_second": 0.608,
+  "epoch": 3.98,
+  "current_steps": 4586,
+  "loss": 0.6811,
+  "learning_rate": 8.571428571428571e-06,
+  "train_runtime": 17316.2264,
+  "train_samples_per_second": 1.07,
+  "train_steps_per_second": 0.008,
+  "total_flos": 1.4576995856705126e+18,
+  "train_loss": 0.7967557840877109
+}
llama2_13b_type-16R-128B-4E-100S/training_parameters.json ADDED
@@ -0,0 +1,37 @@
+{
+  "lora_name": "llama2_13b_type-16R-128B-4E-100S",
+  "always_override": true,
+  "q_proj_en": true,
+  "v_proj_en": true,
+  "k_proj_en": true,
+  "o_proj_en": true,
+  "gate_proj_en": false,
+  "down_proj_en": false,
+  "up_proj_en": false,
+  "save_steps": 100.0,
+  "micro_batch_size": 4,
+  "batch_size": 128,
+  "epochs": 4.0,
+  "learning_rate": "3e-4",
+  "lr_scheduler_type": "linear",
+  "lora_rank": 16,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "cutoff_len": 1024,
+  "dataset": "type_train",
+  "eval_dataset": "type_val",
+  "format": "alpaca-format",
+  "eval_steps": 100.0,
+  "raw_text_file": "None",
+  "overlap_len": 128,
+  "newline_favor_len": 128,
+  "higher_rank_limit": false,
+  "warmup_steps": 100.0,
+  "optimizer": "adamw_torch",
+  "hard_cut_string": "\\n\\n\\n",
+  "train_only_after": "",
+  "stop_at_loss": 0,
+  "add_eos_token": false,
+  "min_chars": 0.0,
+  "report_to": "tensorboard"
+}
llama2_13b_type-16R-128B-4E-100S/training_prompt.json ADDED
@@ -0,0 +1,5 @@
+{
+  "template_type": "dataset",
+  "template_1": "### Instruction:\n%instruction%\n\n### Response:\n%output%",
+  "template_2": "### Instruction:\n%instruction%\n\n### Input:\n%input%\n\n### Response:\n%output%"
+}
llama2_70b_responsibility-16R-128B-4E-100S/adapter_config.json ADDED
@@ -0,0 +1,25 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "models/meta-llama_Llama-2-70b-chat-hf",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "k_proj",
+    "v_proj",
+    "o_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
llama2_70b_responsibility-16R-128B-4E-100S/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2911e088ce27e1bd3cabe145db8690212af86b05c580fba3639771db7894c296
+size 262376202
llama2_70b_responsibility-16R-128B-4E-100S/training_log.json ADDED
@@ -0,0 +1,15 @@
+{
+  "base_model_name": "meta-llama_Llama-2-70b-chat-hf",
+  "base_model_class": "LlamaForCausalLM",
+  "base_loaded_in_4bit": false,
+  "base_loaded_in_8bit": false,
+  "projections": "q, v, k, o",
+  "eval_loss": 0.3329446017742157,
+  "eval_runtime": 797.899,
+  "eval_samples_per_second": 1.936,
+  "eval_steps_per_second": 0.243,
+  "epoch": 3.99,
+  "current_steps": 2174,
+  "loss": 0.3353,
+  "learning_rate": 0.00017202797202797203
+}
llama2_70b_responsibility-16R-128B-4E-100S/training_parameters.json ADDED
@@ -0,0 +1,37 @@
+{
+  "lora_name": "llama2_70b_responsibility-16R-128B-4E-100S",
+  "always_override": true,
+  "q_proj_en": true,
+  "v_proj_en": true,
+  "k_proj_en": true,
+  "o_proj_en": true,
+  "gate_proj_en": false,
+  "down_proj_en": false,
+  "up_proj_en": false,
+  "save_steps": 100.0,
+  "micro_batch_size": 4,
+  "batch_size": 128,
+  "epochs": 4.0,
+  "learning_rate": "3e-4",
+  "lr_scheduler_type": "linear",
+  "lora_rank": 16,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "cutoff_len": 600,
+  "dataset": "responsibility_train",
+  "eval_dataset": "responsibility_val",
+  "format": "alpaca-format",
+  "eval_steps": 100.0,
+  "raw_text_file": "None",
+  "overlap_len": 128,
+  "newline_favor_len": 128,
+  "higher_rank_limit": false,
+  "warmup_steps": 100.0,
+  "optimizer": "adamw_torch",
+  "hard_cut_string": "\\n\\n\\n",
+  "train_only_after": "",
+  "stop_at_loss": 0,
+  "add_eos_token": false,
+  "min_chars": 0.0,
+  "report_to": "tensorboard"
+}
llama2_70b_responsibility-16R-128B-4E-100S/training_prompt.json ADDED
@@ -0,0 +1,5 @@
+{
+  "template_type": "dataset",
+  "template_1": "### Instruction:\n%instruction%\n\n### Response:\n%output%",
+  "template_2": "### Instruction:\n%instruction%\n\n### Input:\n%input%\n\n### Response:\n%output%"
+}
llama2_70b_type-16R-128B-4E-100S/adapter_config.json ADDED
@@ -0,0 +1,25 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "models/meta-llama_Llama-2-70b-chat-hf",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "q_proj",
+    "o_proj",
+    "k_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
llama2_70b_type-16R-128B-4E-100S/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:189dfae66be9aef6b175f6289d68d1534c1e8b301ebcf0d5987a52994fa69bde
+size 262376202
llama2_70b_type-16R-128B-4E-100S/training_log.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name": "meta-llama_Llama-2-70b-chat-hf",
+  "base_model_class": "LlamaForCausalLM",
+  "base_loaded_in_4bit": false,
+  "base_loaded_in_8bit": false,
+  "projections": "q, v, k, o",
+  "eval_loss": 0.5739039182662964,
+  "eval_runtime": 1244.9728,
+  "eval_samples_per_second": 0.916,
+  "eval_steps_per_second": 0.115,
+  "epoch": 3.99,
+  "current_steps": 2296,
+  "loss": 0.5677,
+  "learning_rate": 8.823529411764705e-06,
+  "train_runtime": 44904.3442,
+  "train_samples_per_second": 0.206,
+  "train_steps_per_second": 0.002,
+  "total_flos": 2.2812138370400256e+18,
+  "train_loss": 0.7556745294067595
+}
llama2_70b_type-16R-128B-4E-100S/training_parameters.json ADDED
@@ -0,0 +1,37 @@
+{
+  "lora_name": "llama2_70b_type-16R-128B-4E-100S",
+  "always_override": true,
+  "q_proj_en": true,
+  "v_proj_en": true,
+  "k_proj_en": true,
+  "o_proj_en": true,
+  "gate_proj_en": false,
+  "down_proj_en": false,
+  "up_proj_en": false,
+  "save_steps": 100.0,
+  "micro_batch_size": 4,
+  "batch_size": 128,
+  "epochs": 4.0,
+  "learning_rate": "3e-4",
+  "lr_scheduler_type": "linear",
+  "lora_rank": 16,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "cutoff_len": 600,
+  "dataset": "type_train",
+  "eval_dataset": "type_val",
+  "format": "alpaca-format",
+  "eval_steps": 100.0,
+  "raw_text_file": "None",
+  "overlap_len": 128,
+  "newline_favor_len": 128,
+  "higher_rank_limit": false,
+  "warmup_steps": 100.0,
+  "optimizer": "adamw_torch",
+  "hard_cut_string": "\\n\\n\\n",
+  "train_only_after": "",
+  "stop_at_loss": 0,
+  "add_eos_token": false,
+  "min_chars": 0.0,
+  "report_to": "tensorboard"
+}
llama2_70b_type-16R-128B-4E-100S/training_prompt.json ADDED
@@ -0,0 +1,5 @@
+{
+  "template_type": "dataset",
+  "template_1": "### Instruction:\n%instruction%\n\n### Response:\n%output%",
+  "template_2": "### Instruction:\n%instruction%\n\n### Input:\n%input%\n\n### Response:\n%output%"
+}
llama2_7b_responsibility-16R-128B-4E-100S/adapter_config.json ADDED
@@ -0,0 +1,25 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "models/meta-llama_Llama-2-7b-chat-hf",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "k_proj",
+    "o_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
llama2_7b_responsibility-16R-128B-4E-100S/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcd269e5a4b9108be77479e726a1cb4f7f6f5d420072113b7f8546073545986b
+size 67201802
llama2_7b_responsibility-16R-128B-4E-100S/training_log.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name": "meta-llama_Llama-2-7b-chat-hf",
+  "base_model_class": "LlamaForCausalLM",
+  "base_loaded_in_4bit": false,
+  "base_loaded_in_8bit": false,
+  "projections": "q, v, k, o",
+  "eval_loss": 0.2940330505371094,
+  "eval_runtime": 178.282,
+  "eval_samples_per_second": 8.666,
+  "eval_steps_per_second": 1.088,
+  "epoch": 4.0,
+  "current_steps": 4701,
+  "loss": 0.2961,
+  "learning_rate": 4.195804195804196e-06,
+  "train_runtime": 10950.2359,
+  "train_samples_per_second": 1.719,
+  "train_steps_per_second": 0.013,
+  "total_flos": 5.982058047995904e+17,
+  "train_loss": 0.4049693025699278
+}
llama2_7b_responsibility-16R-128B-4E-100S/training_parameters.json ADDED
@@ -0,0 +1,37 @@
+{
+  "lora_name": "llama2_7b_responsibility-16R-128B-4E-100S",
+  "always_override": true,
+  "q_proj_en": true,
+  "v_proj_en": true,
+  "k_proj_en": true,
+  "o_proj_en": true,
+  "gate_proj_en": false,
+  "down_proj_en": false,
+  "up_proj_en": false,
+  "save_steps": 100.0,
+  "micro_batch_size": 4,
+  "batch_size": 128,
+  "epochs": 4.0,
+  "learning_rate": "3e-4",
+  "lr_scheduler_type": "linear",
+  "lora_rank": 16,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "cutoff_len": 800,
+  "dataset": "responsibility_train",
+  "eval_dataset": "responsibility_val",
+  "format": "alpaca-format",
+  "eval_steps": 100.0,
+  "raw_text_file": "None",
+  "overlap_len": 128,
+  "newline_favor_len": 128,
+  "higher_rank_limit": false,
+  "warmup_steps": 100.0,
+  "optimizer": "adamw_torch",
+  "hard_cut_string": "\\n\\n\\n",
+  "train_only_after": "",
+  "stop_at_loss": 0,
+  "add_eos_token": false,
+  "min_chars": 0.0,
+  "report_to": "tensorboard"
+}
llama2_7b_responsibility-16R-128B-4E-100S/training_prompt.json ADDED
@@ -0,0 +1,5 @@
+{
+  "template_type": "dataset",
+  "template_1": "### Instruction:\n%instruction%\n\n### Response:\n%output%",
+  "template_2": "### Instruction:\n%instruction%\n\n### Input:\n%input%\n\n### Response:\n%output%"
+}
llama2_7b_type-16R-128B-4E-100S/adapter_config.json ADDED
@@ -0,0 +1,25 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "models/meta-llama_Llama-2-7b-chat-hf",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "o_proj",
+    "v_proj",
+    "k_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
llama2_7b_type-16R-128B-4E-100S/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:900dcca63d709a15775775098f1153a07990161a4bb6f9388393172d4f886f27
+size 67201802
llama2_7b_type-16R-128B-4E-100S/training_log.json ADDED
@@ -0,0 +1,20 @@
+{
+  "base_model_name": "meta-llama_Llama-2-7b-chat-hf",
+  "base_model_class": "LlamaForCausalLM",
+  "base_loaded_in_4bit": false,
+  "base_loaded_in_8bit": false,
+  "projections": "q, v, k, o",
+  "eval_loss": 0.7212664484977722,
+  "eval_runtime": 139.4052,
+  "eval_samples_per_second": 8.178,
+  "eval_steps_per_second": 1.026,
+  "epoch": 3.98,
+  "current_steps": 4586,
+  "loss": 0.7217,
+  "learning_rate": 8.571428571428571e-06,
+  "train_runtime": 11212.7879,
+  "train_samples_per_second": 1.653,
+  "train_steps_per_second": 0.013,
+  "total_flos": 7.497902781722788e+17,
+  "train_loss": 0.8539240459601084
+}
llama2_7b_type-16R-128B-4E-100S/training_parameters.json ADDED
@@ -0,0 +1,37 @@
+{
+  "lora_name": "llama2_7b_type-16R-128B-4E-100S",
+  "always_override": true,
+  "q_proj_en": true,
+  "v_proj_en": true,
+  "k_proj_en": true,
+  "o_proj_en": true,
+  "gate_proj_en": false,
+  "down_proj_en": false,
+  "up_proj_en": false,
+  "save_steps": 100.0,
+  "micro_batch_size": 4,
+  "batch_size": 128,
+  "epochs": 4.0,
+  "learning_rate": "3e-4",
+  "lr_scheduler_type": "linear",
+  "lora_rank": 16,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "cutoff_len": 1024,
+  "dataset": "type_train",
+  "eval_dataset": "type_val",
+  "format": "alpaca-format",
+  "eval_steps": 100.0,
+  "raw_text_file": "None",
+  "overlap_len": 128,
+  "newline_favor_len": 128,
+  "higher_rank_limit": false,
+  "warmup_steps": 100.0,
+  "optimizer": "adamw_torch",
+  "hard_cut_string": "\\n\\n\\n",
+  "train_only_after": "",
+  "stop_at_loss": 0,
+  "add_eos_token": false,
+  "min_chars": 0.0,
+  "report_to": "tensorboard"
+}
llama2_7b_type-16R-128B-4E-100S/training_prompt.json ADDED
@@ -0,0 +1,5 @@
+{
+  "template_type": "dataset",
+  "template_1": "### Instruction:\n%instruction%\n\n### Response:\n%output%",
+  "template_2": "### Instruction:\n%instruction%\n\n### Input:\n%input%\n\n### Response:\n%output%"
+}