Litzy619 committed
Commit 151f33e
1 Parent(s): e6878f8

End of training

README.md ADDED
@@ -0,0 +1,93 @@
+ ---
+ license: mit
+ base_model: microsoft/Phi-3-mini-4k-instruct
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: PHI30512HMAB18H
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # PHI30512HMAB18H
+
+ This model is a fine-tuned version of [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0740
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a reference sketch of matching `TrainingArguments` follows this list):
+ - learning_rate: 0.0003
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 16
+ - total_train_batch_size: 128
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine_with_restarts
+ - lr_scheduler_warmup_steps: 60
+ - num_epochs: 3
+ - mixed_precision_training: Native AMP
+
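As a reading aid (not part of the commit), here is a minimal sketch of the `transformers.TrainingArguments` these values map to. The `output_dir` and the `fp16` flag are assumptions, and the Adam betas/epsilon listed above are the library defaults, so they need no explicit arguments:

```python
# Hedged reconstruction of the listed hyperparameters; output_dir and
# fp16 (for "Native AMP") are assumptions, not taken from the commit.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="PHI30512HMAB18H",       # assumed
    learning_rate=3e-4,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=16,     # 8 x 16 = 128 effective batch size
    lr_scheduler_type="cosine_with_restarts",
    warmup_steps=60,
    num_train_epochs=3,
    seed=42,
    fp16=True,                          # "Native AMP" mixed precision
)
```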
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 4.2497        | 0.09  | 10   | 0.9067          |
+ | 0.4537        | 0.18  | 20   | 0.2641          |
+ | 0.3338        | 0.27  | 30   | 0.2501          |
+ | 0.2554        | 0.36  | 40   | 0.2343          |
+ | 0.2601        | 0.45  | 50   | 0.2149          |
+ | 0.2187        | 0.54  | 60   | 0.2108          |
+ | 0.2111        | 0.63  | 70   | 0.2032          |
+ | 0.1904        | 0.73  | 80   | 0.1720          |
+ | 0.1685        | 0.82  | 90   | 0.1673          |
+ | 0.1626        | 0.91  | 100  | 0.1636          |
+ | 0.1578        | 1.0   | 110  | 0.1104          |
+ | 0.1041        | 1.09  | 120  | 0.0870          |
+ | 0.0882        | 1.18  | 130  | 0.0854          |
+ | 0.0933        | 1.27  | 140  | 0.0801          |
+ | 0.0816        | 1.36  | 150  | 0.0792          |
+ | 0.0829        | 1.45  | 160  | 0.0751          |
+ | 0.0741        | 1.54  | 170  | 0.0777          |
+ | 0.0773        | 1.63  | 180  | 0.0732          |
+ | 0.0705        | 1.72  | 190  | 0.0705          |
+ | 0.0749        | 1.81  | 200  | 0.0677          |
+ | 0.0624        | 1.9   | 210  | 0.0734          |
+ | 0.0655        | 1.99  | 220  | 0.0722          |
+ | 0.0436        | 2.08  | 230  | 0.0802          |
+ | 0.0464        | 2.18  | 240  | 0.0876          |
+ | 0.0402        | 2.27  | 250  | 0.0758          |
+ | 0.0379        | 2.36  | 260  | 0.0742          |
+ | 0.0435        | 2.45  | 270  | 0.0732          |
+ | 0.0348        | 2.54  | 280  | 0.0766          |
+ | 0.0356        | 2.63  | 290  | 0.0775          |
+ | 0.0409        | 2.72  | 300  | 0.0757          |
+ | 0.0473        | 2.81  | 310  | 0.0746          |
+ | 0.0401        | 2.9   | 320  | 0.0741          |
+ | 0.0402        | 2.99  | 330  | 0.0740          |
+
+
+ ### Framework versions
+
+ - Transformers 4.36.0.dev0
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.14.6
+ - Tokenizers 0.14.0
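The files below make up the trained adapter and tokenizer. A minimal loading sketch, assuming the repo id is `Litzy619/PHI30512HMAB18H` (the committer's namespace, not confirmed by the commit) and noting that the custom `PREFIX_MA_LORA` adapter type declared in `adapter_config.json` is not an upstream `peft` type, so this standard call may require the training code's own `peft` fork:

```python
# Hypothetical loading sketch; repo id is assumed, and PREFIX_MA_LORA
# may not be loadable with stock peft.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
tokenizer = AutoTokenizer.from_pretrained("Litzy619/PHI30512HMAB18H")
model = PeftModel.from_pretrained(base, "Litzy619/PHI30512HMAB18H")
```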
adapter_config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "adaptive_ratio": 0.01,
+   "adaptive_ratio_decay": 1.005,
+   "additive_modeling": false,
+   "allow_empty_lora": false,
+   "auto_mapping": null,
+   "base_model_name_or_path": "microsoft/Phi-3-mini-4k-instruct",
+   "bias": "none",
+   "curr_learning": true,
+   "detached_training": true,
+   "dynamic_adapter_pool": true,
+   "enable_lora": null,
+   "encoder_hidden_size": 3072,
+   "fan_in_fan_out": false,
+   "hypernetwork": true,
+   "inference_mode": true,
+   "input_based_adapter_selection": true,
+   "insert_zero_lora": false,
+   "layer_to_lora": [],
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "merge_weights": false,
+   "modules_to_save": null,
+   "num_attention_heads": 32,
+   "num_layers": 32,
+   "num_prefix_set": 3,
+   "num_transformer_submodules": 1,
+   "num_virtual_tokens": 30,
+   "number_of_adapter_pre_layer": 8,
+   "ot_diversified_dispatcher": false,
+   "ot_diversified_prefix": false,
+   "peft_type": "PREFIX_MA_LORA",
+   "pool_selective_inference": true,
+   "pool_selective_training": true,
+   "prefix_projection": true,
+   "r": 8,
+   "random_routing": false,
+   "random_routing_inference": false,
+   "scale": 64,
+   "selective_num": 8,
+   "simple_hidden_matching": true,
+   "simple_instance_matching": true,
+   "target_modules": [
+     "qkv_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "token_dim": 3072
+ }
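This config describes a custom PEFT method, `PREFIX_MA_LORA`, which appears to combine prefix tuning (`num_virtual_tokens: 30`, `prefix_projection: true`) with a pool of rank-8 LoRA adapters on the fused `qkv_proj` projection. It is not shipped by upstream `peft`, but the file itself can be inspected without any fork. A minimal sketch, assuming the repo files are downloaded locally:

```python
# Read the adapter config directly; the local file path is an assumption.
import json

with open("adapter_config.json") as f:
    cfg = json.load(f)

print(cfg["peft_type"])             # PREFIX_MA_LORA (custom type)
print(cfg["target_modules"])        # ["qkv_proj"]
print(cfg["r"], cfg["lora_alpha"])  # 8, 16
print(cfg["num_virtual_tokens"])    # 30 prefix tokens
```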
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:713c6c89ef56709a20d3e84c0f74defab161ff63444805968eb1cc7cb03063b0
+ size 326557073
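The three lines above are a Git LFS pointer, not the weights themselves: the ~326 MB payload is fetched by `git lfs pull`, after which the download can be checked against the pointer's `oid`. A minimal verification sketch (local path assumed):

```python
# Verify a downloaded LFS file against the sha256 recorded in its pointer.
import hashlib

EXPECTED = "713c6c89ef56709a20d3e84c0f74defab161ff63444805968eb1cc7cb03063b0"

h = hashlib.sha256()
with open("adapter_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "checksum mismatch"
```

The same check applies to the other LFS pointers in this commit (`model.safetensors`, `tokenizer.model`, `training_args.bin`), substituting each file's `oid`.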
added_tokens.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "<|assistant|>": 32001,
+   "<|endoftext|>": 32000,
+   "<|end|>": 32007,
+   "<|placeholder1|>": 32002,
+   "<|placeholder2|>": 32003,
+   "<|placeholder3|>": 32004,
+   "<|placeholder4|>": 32005,
+   "<|placeholder5|>": 32008,
+   "<|placeholder6|>": 32009,
+   "<|system|>": 32006,
+   "<|user|>": 32010
+ }
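These entries extend the 32,000-token Llama vocabulary with Phi-3's chat-control tokens. A quick sanity check using the base model's tokenizer, which ships the same mapping (network access to the Hub assumed):

```python
# Confirm the added special-token ids against the base model's tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
print(tok.convert_tokens_to_ids("<|assistant|>"))  # expected: 32001
print(tok.convert_tokens_to_ids("<|user|>"))       # expected: 32010
```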
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:608fce663509f5d73d7871d64ce4ea134d0dfccde7ae2f93171ad4505cf888ea
+ size 7867763280
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<unk>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
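Note that `pad_token` is mapped to `<unk>` rather than a dedicated pad id, and `tokenizer_config.json` below sets `padding_side` to `left`. A short sketch of how batched inputs pad under this configuration, using the base model's tokenizer for illustration:

```python
# Left-padding with <unk> as the pad token, matching this commit's config.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
tok.pad_token = "<unk>"          # matches special_tokens_map.json
tok.padding_side = "left"        # matches tokenizer_config.json
batch = tok(["hi", "a longer prompt"], padding=True, return_tensors="pt")
print(batch["input_ids"].shape)  # both rows padded to the longer sequence
```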
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,131 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": true,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "32000": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32001": {
+       "content": "<|assistant|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32002": {
+       "content": "<|placeholder1|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32003": {
+       "content": "<|placeholder2|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32004": {
+       "content": "<|placeholder3|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32005": {
+       "content": "<|placeholder4|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32006": {
+       "content": "<|system|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32007": {
+       "content": "<|end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32008": {
+       "content": "<|placeholder5|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32009": {
+       "content": "<|placeholder6|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32010": {
+       "content": "<|user|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "legacy": false,
+   "model_max_length": 4096,
+   "pad_token": "<unk>",
+   "padding_side": "left",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
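The `chat_template` above implements the Phi-3 conversation format. A minimal rendering sketch, assuming the repo id `Litzy619/PHI30512HMAB18H` (the committer's namespace, not confirmed by the commit) so that the template applied is the one in this file:

```python
# Render the chat template from this commit; the repo id is an assumption.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Litzy619/PHI30512HMAB18H")
messages = [
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": "4."},
]
print(tok.apply_chat_template(messages, tokenize=False))
# Expected output, per the template:
# <s><|user|>
# What is 2 + 2?<|end|>
# <|assistant|>
# 4.<|end|>
```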
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:550fb5db2f6955b1d6aca206422bf8121264b8f3468c5751b2b9dfe7ad57f7fb
+ size 5240