imdatta0 committed
Commit
65d4185
1 Parent(s): dd92b1c

End of training

README.md ADDED
@@ -0,0 +1,110 @@
+ ---
+ license: llama2
+ library_name: peft
+ tags:
+ - generated_from_trainer
+ base_model: meta-llama/Llama-2-7b-hf
+ model-index:
+ - name: llama_2_7b_Magiccoder_evol_downNupNgateNqNkNvNo_r8_lr0.0001_bg88_alpha8_0_41_reverseinit
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # llama_2_7b_Magiccoder_evol_downNupNgateNqNkNvNo_r8_lr0.0001_bg88_alpha8_0_41_reverseinit
+
+ This model is a fine-tuned version of [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.1235
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0001
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 64
+ - optimizer: Adam with betas=(0.9, 0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 0.02
+ - num_epochs: 1
+
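+ For orientation, here is a minimal sketch of how these values might map onto `transformers.TrainingArguments`. It is reconstructed from the list above, not taken from the actual training script; `output_dir` is a placeholder, and the 0.02 warmup value is interpreted as a ratio:
+
+ ```python
+ from transformers import TrainingArguments
+
+ # Hedged reconstruction of the hyperparameters listed above.
+ args = TrainingArguments(
+     output_dir="llama2_magiccoder_lora",  # placeholder, not from this repo
+     learning_rate=1e-4,
+     per_device_train_batch_size=8,
+     per_device_eval_batch_size=8,
+     seed=42,
+     gradient_accumulation_steps=8,  # 8 x 8 = effective batch size of 64
+     lr_scheduler_type="cosine",
+     warmup_ratio=0.02,  # assumption: the logged "warmup_steps: 0.02" reads as a ratio
+     num_train_epochs=1,
+ )
+ ```
+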
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:------:|:----:|:---------------:|
+ | 1.2502 | 0.0203 | 31 | 1.2151 |
+ | 1.1432 | 0.0405 | 62 | 1.1929 |
+ | 1.1409 | 0.0608 | 93 | 1.1804 |
+ | 1.1494 | 0.0810 | 124 | 1.1749 |
+ | 1.1213 | 0.1013 | 155 | 1.1669 |
+ | 1.1207 | 0.1215 | 186 | 1.1610 |
+ | 1.1488 | 0.1418 | 217 | 1.1596 |
+ | 1.1185 | 0.1620 | 248 | 1.1558 |
+ | 1.1321 | 0.1823 | 279 | 1.1539 |
+ | 1.1031 | 0.2025 | 310 | 1.1509 |
+ | 1.0976 | 0.2228 | 341 | 1.1506 |
+ | 1.1203 | 0.2431 | 372 | 1.1452 |
+ | 1.1118 | 0.2633 | 403 | 1.1472 |
+ | 1.1198 | 0.2836 | 434 | 1.1451 |
+ | 1.1149 | 0.3038 | 465 | 1.1436 |
+ | 1.1028 | 0.3241 | 496 | 1.1390 |
+ | 1.1137 | 0.3443 | 527 | 1.1387 |
+ | 1.1014 | 0.3646 | 558 | 1.1381 |
+ | 1.1078 | 0.3848 | 589 | 1.1378 |
+ | 1.0852 | 0.4051 | 620 | 1.1369 |
+ | 1.1071 | 0.4254 | 651 | 1.1370 |
+ | 1.1182 | 0.4456 | 682 | 1.1350 |
+ | 1.102 | 0.4659 | 713 | 1.1343 |
+ | 1.104 | 0.4861 | 744 | 1.1336 |
+ | 1.0855 | 0.5064 | 775 | 1.1333 |
+ | 1.083 | 0.5266 | 806 | 1.1305 |
+ | 1.0745 | 0.5469 | 837 | 1.1311 |
+ | 1.0763 | 0.5671 | 868 | 1.1295 |
+ | 1.0901 | 0.5874 | 899 | 1.1296 |
+ | 1.1007 | 0.6076 | 930 | 1.1293 |
+ | 1.0832 | 0.6279 | 961 | 1.1286 |
+ | 1.0931 | 0.6482 | 992 | 1.1261 |
+ | 1.0848 | 0.6684 | 1023 | 1.1264 |
+ | 1.1041 | 0.6887 | 1054 | 1.1263 |
+ | 1.0906 | 0.7089 | 1085 | 1.1244 |
+ | 1.0847 | 0.7292 | 1116 | 1.1257 |
+ | 1.0761 | 0.7494 | 1147 | 1.1249 |
+ | 1.0949 | 0.7697 | 1178 | 1.1243 |
+ | 1.0956 | 0.7899 | 1209 | 1.1240 |
+ | 1.0814 | 0.8102 | 1240 | 1.1240 |
+ | 1.0919 | 0.8304 | 1271 | 1.1242 |
+ | 1.0858 | 0.8507 | 1302 | 1.1240 |
+ | 1.0784 | 0.8710 | 1333 | 1.1238 |
+ | 1.0816 | 0.8912 | 1364 | 1.1236 |
+ | 1.0918 | 0.9115 | 1395 | 1.1233 |
+ | 1.1 | 0.9317 | 1426 | 1.1235 |
+ | 1.0551 | 0.9520 | 1457 | 1.1234 |
+ | 1.0643 | 0.9722 | 1488 | 1.1235 |
+ | 1.0921 | 0.9925 | 1519 | 1.1235 |
+
+ ### Framework versions
+
+ - PEFT 0.7.1
+ - Transformers 4.40.2
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.16.1
+ - Tokenizers 0.19.1
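+
+ ## How to use
+
+ A minimal loading sketch, not an official snippet from this commit: it assumes the adapter is hosted at the hypothetical repo id used below and that you have access to the gated Llama-2 base weights.
+
+ ```python
+ import torch
+ from peft import PeftModel
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ base_id = "meta-llama/Llama-2-7b-hf"
+ # Assumed adapter location; adjust to wherever this adapter is actually hosted.
+ adapter_id = "imdatta0/llama_2_7b_Magiccoder_evol_downNupNgateNqNkNvNo_r8_lr0.0001_bg88_alpha8_0_41_reverseinit"
+
+ tokenizer = AutoTokenizer.from_pretrained(adapter_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     base_id, torch_dtype=torch.float16, device_map="auto"
+ )
+ model = PeftModel.from_pretrained(model, adapter_id)  # attach the LoRA adapter
+
+ prompt = "Write a Python function that reverses a string."
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+ outputs = model.generate(**inputs, max_new_tokens=128)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```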
adapter_config.json ADDED
@@ -0,0 +1,73 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-2-7b-hf",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": [
+     0,
+     1,
+     2,
+     3,
+     4,
+     5,
+     6,
+     7,
+     8,
+     9,
+     10,
+     11,
+     12,
+     13,
+     14,
+     15,
+     16,
+     17,
+     18,
+     19,
+     20,
+     21,
+     22,
+     23,
+     24,
+     25,
+     26,
+     27,
+     28,
+     29,
+     30,
+     31,
+     32,
+     33,
+     34,
+     35,
+     36,
+     37,
+     38,
+     39,
+     40
+   ],
+   "loftq_config": {},
+   "lora_alpha": 8,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": "unsloth",
+   "target_modules": [
+     "up_proj",
+     "k_proj",
+     "v_proj",
+     "q_proj",
+     "down_proj",
+     "gate_proj",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
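For readers who want to reproduce the adapter shape, a sketch of the equivalent `peft.LoraConfig` inferred from the JSON above follows. Two caveats: the `"revision": "unsloth"` field suggests the adapter was trained against an Unsloth-patched copy of the base model, and Llama-2-7B has only 32 decoder layers, so entries 32-40 in `layers_to_transform` should match nothing.

```python
from peft import LoraConfig

# Inferred from adapter_config.json: rank-8 LoRA with alpha 8 on every
# attention and MLP projection of decoder layers 0..40 (apparently the
# "0_41" span in the model name), no dropout, no bias terms.
config = LoraConfig(
    r=8,
    lora_alpha=8,
    lora_dropout=0.0,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "up_proj", "k_proj", "v_proj", "q_proj",
        "down_proj", "gate_proj", "o_proj",
    ],
    layers_to_transform=list(range(41)),  # 0..40, as listed in the JSON
)
```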
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d38424d35ef8984eb3485f8c93c4a8275f98f2b89772ffd004469e32985549ea
+ size 80013120
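This file (like `tokenizer.model` and `training_args.bin` below) is stored via Git LFS, so the diff shows only the pointer stub: a spec version, a `sha256` object id, and the byte size. A generic sketch for checking a downloaded blob against such a pointer; the local path is hypothetical:

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(path: str, expected_sha256: str, expected_size: int) -> bool:
    """Compare a local blob against the oid/size recorded in its LFS pointer."""
    data = Path(path).read_bytes()
    return (len(data) == expected_size
            and hashlib.sha256(data).hexdigest() == expected_sha256)

# Values copied from the pointer above.
print(verify_lfs_pointer(
    "adapter_model.safetensors",
    "d38424d35ef8984eb3485f8c93c4a8275f98f2b89772ffd004469e32985549ea",
    80013120,
))
```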
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<unk>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 4096,
+   "pad_token": "<unk>",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
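This configuration pads on the right with `<unk>` (Llama 2 ships no dedicated pad token) and prepends a BOS token but appends no EOS by default. A short sketch of the resulting behavior, assuming the tokenizer is loaded from a checkout of this repository (the path is a placeholder):

```python
from transformers import AutoTokenizer

# Placeholder path: any directory containing the tokenizer files above works.
tok = AutoTokenizer.from_pretrained("./llama_2_7b_magiccoder_adapter")

print(tok.pad_token, tok.padding_side)        # <unk> right
enc = tok("def hello():")                     # add_bos_token=True, add_eos_token=False
print(enc.input_ids[0] == tok.bos_token_id)   # True: BOS prepended
print(enc.input_ids[-1] == tok.eos_token_id)  # False: no EOS appended
```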
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cdf5220c6885aef808b44cc6189aae8790dd10b0cbc53a3f5c6713d266a9f392
+ size 5304