bnjmnmarie committed
Commit 89201b3
1 Parent(s): 9405d21

Upload 11 files

README.md CHANGED
@@ -1,3 +1,34 @@
  ---
- license: cc-by-sa-4.0
+ library_name: peft
  ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: float16
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+ - PEFT 0.5.0
+
+ - PEFT 0.5.0
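
The quantization settings listed in the README map one-to-one onto `BitsAndBytesConfig` from `transformers`. A minimal sketch of recreating them, assuming the base model named in adapter_config.json below (the `device_map` choice is an assumption, not something recorded in the commit):

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# 4-bit NF4 quantization with double quantization and fp16 compute,
# matching the config values listed in the README above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)

# Base model taken from "base_model_name_or_path" in adapter_config.json below.
base_model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",
    quantization_config=bnb_config,
    device_map="auto",  # assumption: not recorded in this commit
)
```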
adapter_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-2-7b-hf",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "down_proj",
+     "up_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
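
Given this adapter_config.json, the LoRA adapter can be attached to the base model with `peft`. A minimal sketch, assuming the files from this commit sit in a local directory (the path is a placeholder; quantized loading as in the earlier sketch is omitted for brevity):

```python
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Base model from "base_model_name_or_path" above.
base_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")

# "path/to/adapter" is a placeholder for the directory holding
# adapter_config.json and adapter_model.bin from this commit.
model = PeftModel.from_pretrained(base_model, "path/to/adapter")
model.eval()
```

Note that this adapter targets only the MLP projections (gate_proj, down_proj, up_proj) with r=16 and lora_alpha=16, rather than the attention projections.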
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:813a0d452dc50f5945b3110658315f82ebccefb5d0de3e7498c03fd583c2d875
+ size 92867533
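
adapter_model.bin is tracked with Git LFS, so the three lines above are only a pointer (spec version, SHA-256 object id, byte size) rather than the ~93 MB weight file itself. A sketch of fetching the real file with `huggingface_hub`; the repo id is a placeholder:

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads the actual adapter weights.
# "your-username/your-adapter-repo" is a placeholder repo id.
local_path = hf_hub_download(
    repo_id="your-username/your-adapter-repo",
    filename="adapter_model.bin",
)
print(local_path)
```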
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:315915ed1ec06967ba58e01db849faaf05028f4bc1c6d8d411783b1d53473971
+ size 185759493
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67c75173d3879db635fd595ebff71b7e567970628e6d2806de9b253b3052fce3
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec75d0ea72b9443721912e1add368f17afed290ed2e66c3bde40d5a43b441ac2
+ size 627
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
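
The pad_token entry is plain `</s>`, i.e. the EOS token is reused for padding because Llama-2 ships without a dedicated pad token. A sketch of how such a mapping is typically produced before fine-tuning (an assumption about this run, not something recorded in the commit):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")

# Reuse EOS as the padding token, which reproduces the
# "pad_token": "</s>" entry shown above when the tokenizer is saved.
tokenizer.pad_token = tokenizer.eos_token
print(tokenizer.pad_token)  # </s>
```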
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "use_default_system_prompt": true
+ }
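
Loading the tokenizer from the uploaded files restores these settings (right padding, effectively unlimited model_max_length, EOS reused as pad token via special_tokens_map.json). A minimal sketch with a placeholder path:

```python
from transformers import AutoTokenizer

# "path/to/adapter" is a placeholder for the directory holding tokenizer.json,
# tokenizer_config.json and special_tokens_map.json from this commit.
tokenizer = AutoTokenizer.from_pretrained("path/to/adapter")

print(tokenizer.padding_side)  # "right", as set in tokenizer_config.json
print(tokenizer.pad_token)     # expected "</s>", taken from special_tokens_map.json
```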
trainer_state.json ADDED
@@ -0,0 +1,103 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.9993906154783668,
+   "eval_steps": 100,
+   "global_step": 615,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.16,
+       "learning_rate": 0.0004,
+       "loss": 1.3292,
+       "step": 100
+     },
+     {
+       "epoch": 0.16,
+       "eval_loss": 1.264318823814392,
+       "eval_runtime": 84.6316,
+       "eval_samples_per_second": 6.121,
+       "eval_steps_per_second": 1.536,
+       "step": 100
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 0.00036392754225221053,
+       "loss": 1.2472,
+       "step": 200
+     },
+     {
+       "epoch": 0.33,
+       "eval_loss": 1.2486543655395508,
+       "eval_runtime": 84.6693,
+       "eval_samples_per_second": 6.118,
+       "eval_steps_per_second": 1.535,
+       "step": 200
+     },
+     {
+       "epoch": 0.49,
+       "learning_rate": 0.00026872239108850267,
+       "loss": 1.2331,
+       "step": 300
+     },
+     {
+       "epoch": 0.49,
+       "eval_loss": 1.2404521703720093,
+       "eval_runtime": 84.6623,
+       "eval_samples_per_second": 6.118,
+       "eval_steps_per_second": 1.536,
+       "step": 300
+     },
+     {
+       "epoch": 0.65,
+       "learning_rate": 0.00014872738443612402,
+       "loss": 1.2188,
+       "step": 400
+     },
+     {
+       "epoch": 0.65,
+       "eval_loss": 1.2340422868728638,
+       "eval_runtime": 84.6101,
+       "eval_samples_per_second": 6.122,
+       "eval_steps_per_second": 1.536,
+       "step": 400
+     },
+     {
+       "epoch": 0.81,
+       "learning_rate": 4.722767036921105e-05,
+       "loss": 1.2173,
+       "step": 500
+     },
+     {
+       "epoch": 0.81,
+       "eval_loss": 1.2294026613235474,
+       "eval_runtime": 84.6649,
+       "eval_samples_per_second": 6.118,
+       "eval_steps_per_second": 1.535,
+       "step": 500
+     },
+     {
+       "epoch": 0.98,
+       "learning_rate": 8.366903586781493e-07,
+       "loss": 1.2025,
+       "step": 600
+     },
+     {
+       "epoch": 0.98,
+       "eval_loss": 1.2282078266143799,
+       "eval_runtime": 84.629,
+       "eval_samples_per_second": 6.121,
+       "eval_steps_per_second": 1.536,
+       "step": 600
+     }
+   ],
+   "logging_steps": 100,
+   "max_steps": 615,
+   "num_train_epochs": 1,
+   "save_steps": 100,
+   "total_flos": 8.660077507682304e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
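
trainer_state.json records a single epoch of 615 steps with logging, evaluation and checkpointing every 100 steps, a peak learning rate of 4e-4 at step 100, and a decay to roughly zero by step 615; the logged learning rates are consistent with a cosine schedule after about 100 warmup steps. A sketch of `TrainingArguments` matching those numbers; the scheduler type, warmup, batch size and output path are inferences or placeholders, not values read from training_args.bin:

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./results",            # placeholder
    num_train_epochs=1,                # num_train_epochs / max_steps=615 in the state
    learning_rate=4e-4,                # peak LR logged at step 100
    lr_scheduler_type="cosine",        # assumption: matches the logged decay shape
    warmup_steps=100,                  # assumption: LR reaches its peak at step 100
    logging_steps=100,
    eval_steps=100,
    save_steps=100,
    evaluation_strategy="steps",
    per_device_train_batch_size=4,     # assumption: not recoverable from the log
)
```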
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20469e100d2886c495c7cccfe2d1fc3156b74c166ccdd9a494eaa45d776cd255
+ size 4027
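
training_args.bin is a pickled `TrainingArguments` object, so the exact hyperparameters used for this run can be recovered by unpickling it after download. A sketch with a placeholder path; `weights_only=False` is needed on recent PyTorch because the file is not a plain tensor checkpoint:

```python
import torch

# Full unpickling is required: this file holds a TrainingArguments object,
# not a tensor state dict.
args = torch.load("training_args.bin", weights_only=False)
print(args)
```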