alizaidi committed
Commit 5512abf · verified · 1 parent: aeeeaab

alizaidi/phi3-goud-lora

README.md ADDED
@@ -0,0 +1,188 @@
+ ---
+ base_model: microsoft/Phi-3-mini-4k-instruct
+ library_name: peft
+ license: mit
+ tags:
+ - trl
+ - sft
+ - generated_from_trainer
+ model-index:
+ - name: phi-3-mini-LoRA
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/alizaidi/Phi3-mini-ft-goud-summarization/runs/4r2wc9jx)
+ # phi-3-mini-LoRA
+
+ This model is a LoRA fine-tune of [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct), trained for Goud news summarization (inferred from the repository and W&B run names).
+ It achieves the following results on the evaluation set:
+ - Loss: 1.2015
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0001
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 3
+
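As a cross-check, here is a minimal sketch of how these hyperparameters might be expressed as a `transformers` `TrainingArguments`; the `output_dir` is a hypothetical placeholder, and anything not listed above is left at its default:

```python
from transformers import TrainingArguments

# Hypothetical reconstruction of the hyperparameters listed above.
# Effective batch size: 8 per device x 4 accumulation steps = 32.
args = TrainingArguments(
    output_dir="phi-3-mini-LoRA",  # placeholder, not from the card
    learning_rate=1e-4,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=4,
    seed=42,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    num_train_epochs=3,
)
```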
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:------:|:-----:|:---------------:|
+ | 1.9715 | 0.0242 | 100 | 1.9517 |
+ | 1.8637 | 0.0484 | 200 | 1.7875 |
+ | 1.7019 | 0.0725 | 300 | 1.6473 |
+ | 1.6127 | 0.0967 | 400 | 1.5828 |
+ | 1.5545 | 0.1209 | 500 | 1.5389 |
+ | 1.5144 | 0.1451 | 600 | 1.5045 |
+ | 1.4823 | 0.1693 | 700 | 1.4746 |
+ | 1.4535 | 0.1935 | 800 | 1.4502 |
+ | 1.4293 | 0.2176 | 900 | 1.4270 |
+ | 1.4132 | 0.2418 | 1000 | 1.4073 |
+ | 1.388 | 0.2660 | 1100 | 1.3880 |
+ | 1.3757 | 0.2902 | 1200 | 1.3706 |
+ | 1.3594 | 0.3144 | 1300 | 1.3543 |
+ | 1.3399 | 0.3386 | 1400 | 1.3410 |
+ | 1.3314 | 0.3627 | 1500 | 1.3284 |
+ | 1.3161 | 0.3869 | 1600 | 1.3167 |
+ | 1.3005 | 0.4111 | 1700 | 1.3084 |
+ | 1.2937 | 0.4353 | 1800 | 1.2987 |
+ | 1.2824 | 0.4595 | 1900 | 1.2920 |
+ | 1.2806 | 0.4836 | 2000 | 1.2859 |
+ | 1.2773 | 0.5078 | 2100 | 1.2793 |
+ | 1.2717 | 0.5320 | 2200 | 1.2738 |
+ | 1.2654 | 0.5562 | 2300 | 1.2692 |
+ | 1.2597 | 0.5804 | 2400 | 1.2644 |
+ | 1.2536 | 0.6046 | 2500 | 1.2601 |
+ | 1.2486 | 0.6287 | 2600 | 1.2560 |
+ | 1.2416 | 0.6529 | 2700 | 1.2527 |
+ | 1.2462 | 0.6771 | 2800 | 1.2494 |
+ | 1.2402 | 0.7013 | 2900 | 1.2465 |
+ | 1.2353 | 0.7255 | 3000 | 1.2434 |
+ | 1.2285 | 0.7497 | 3100 | 1.2410 |
+ | 1.2314 | 0.7738 | 3200 | 1.2384 |
+ | 1.2342 | 0.7980 | 3300 | 1.2357 |
+ | 1.2195 | 0.8222 | 3400 | 1.2339 |
+ | 1.2306 | 0.8464 | 3500 | 1.2316 |
+ | 1.2225 | 0.8706 | 3600 | 1.2301 |
+ | 1.2174 | 0.8947 | 3700 | 1.2281 |
+ | 1.2293 | 0.9189 | 3800 | 1.2267 |
+ | 1.2194 | 0.9431 | 3900 | 1.2250 |
+ | 1.2169 | 0.9673 | 4000 | 1.2234 |
+ | 1.2138 | 0.9915 | 4100 | 1.2224 |
+ | 1.2105 | 1.0157 | 4200 | 1.2214 |
+ | 1.2081 | 1.0398 | 4300 | 1.2201 |
+ | 1.2129 | 1.0640 | 4400 | 1.2188 |
+ | 1.1995 | 1.0882 | 4500 | 1.2177 |
+ | 1.196 | 1.1124 | 4600 | 1.2167 |
+ | 1.2041 | 1.1366 | 4700 | 1.2163 |
+ | 1.2104 | 1.1608 | 4800 | 1.2151 |
+ | 1.205 | 1.1849 | 4900 | 1.2144 |
+ | 1.2055 | 1.2091 | 5000 | 1.2135 |
+ | 1.1966 | 1.2333 | 5100 | 1.2128 |
+ | 1.2017 | 1.2575 | 5200 | 1.2120 |
+ | 1.1995 | 1.2817 | 5300 | 1.2117 |
+ | 1.2015 | 1.3058 | 5400 | 1.2108 |
+ | 1.1978 | 1.3300 | 5500 | 1.2103 |
+ | 1.2017 | 1.3542 | 5600 | 1.2098 |
+ | 1.196 | 1.3784 | 5700 | 1.2093 |
+ | 1.1976 | 1.4026 | 5800 | 1.2089 |
+ | 1.2057 | 1.4268 | 5900 | 1.2082 |
+ | 1.2012 | 1.4509 | 6000 | 1.2079 |
+ | 1.2067 | 1.4751 | 6100 | 1.2074 |
+ | 1.2048 | 1.4993 | 6200 | 1.2071 |
+ | 1.2011 | 1.5235 | 6300 | 1.2068 |
+ | 1.1911 | 1.5477 | 6400 | 1.2064 |
+ | 1.1974 | 1.5719 | 6500 | 1.2061 |
+ | 1.1934 | 1.5960 | 6600 | 1.2059 |
+ | 1.1896 | 1.6202 | 6700 | 1.2057 |
+ | 1.1895 | 1.6444 | 6800 | 1.2052 |
+ | 1.203 | 1.6686 | 6900 | 1.2051 |
+ | 1.191 | 1.6928 | 7000 | 1.2048 |
+ | 1.1995 | 1.7169 | 7100 | 1.2045 |
+ | 1.1979 | 1.7411 | 7200 | 1.2043 |
+ | 1.1918 | 1.7653 | 7300 | 1.2042 |
+ | 1.1969 | 1.7895 | 7400 | 1.2040 |
+ | 1.1869 | 1.8137 | 7500 | 1.2038 |
+ | 1.1871 | 1.8379 | 7600 | 1.2036 |
+ | 1.1988 | 1.8620 | 7700 | 1.2035 |
+ | 1.1942 | 1.8862 | 7800 | 1.2034 |
+ | 1.1931 | 1.9104 | 7900 | 1.2033 |
+ | 1.1947 | 1.9346 | 8000 | 1.2030 |
+ | 1.1932 | 1.9588 | 8100 | 1.2030 |
+ | 1.1922 | 1.9830 | 8200 | 1.2028 |
+ | 1.192 | 2.0071 | 8300 | 1.2027 |
+ | 1.1997 | 2.0313 | 8400 | 1.2027 |
+ | 1.1945 | 2.0555 | 8500 | 1.2026 |
+ | 1.1934 | 2.0797 | 8600 | 1.2026 |
+ | 1.1955 | 2.1039 | 8700 | 1.2024 |
+ | 1.1901 | 2.1280 | 8800 | 1.2024 |
+ | 1.1898 | 2.1522 | 8900 | 1.2023 |
+ | 1.186 | 2.1764 | 9000 | 1.2022 |
+ | 1.1858 | 2.2006 | 9100 | 1.2022 |
+ | 1.1965 | 2.2248 | 9200 | 1.2021 |
+ | 1.1835 | 2.2490 | 9300 | 1.2021 |
+ | 1.1983 | 2.2731 | 9400 | 1.2020 |
+ | 1.1813 | 2.2973 | 9500 | 1.2020 |
+ | 1.1903 | 2.3215 | 9600 | 1.2019 |
+ | 1.1952 | 2.3457 | 9700 | 1.2019 |
+ | 1.1899 | 2.3699 | 9800 | 1.2018 |
+ | 1.2011 | 2.3941 | 9900 | 1.2018 |
+ | 1.1936 | 2.4182 | 10000 | 1.2018 |
+ | 1.1931 | 2.4424 | 10100 | 1.2018 |
+ | 1.1991 | 2.4666 | 10200 | 1.2017 |
+ | 1.19 | 2.4908 | 10300 | 1.2017 |
+ | 1.1913 | 2.5150 | 10400 | 1.2016 |
+ | 1.1886 | 2.5391 | 10500 | 1.2017 |
+ | 1.1848 | 2.5633 | 10600 | 1.2016 |
+ | 1.1875 | 2.5875 | 10700 | 1.2016 |
+ | 1.1887 | 2.6117 | 10800 | 1.2016 |
+ | 1.1866 | 2.6359 | 10900 | 1.2016 |
+ | 1.188 | 2.6601 | 11000 | 1.2016 |
+ | 1.1952 | 2.6842 | 11100 | 1.2015 |
+ | 1.1947 | 2.7084 | 11200 | 1.2015 |
+ | 1.1905 | 2.7326 | 11300 | 1.2015 |
+ | 1.1838 | 2.7568 | 11400 | 1.2015 |
+ | 1.1893 | 2.7810 | 11500 | 1.2015 |
+ | 1.1808 | 2.8052 | 11600 | 1.2015 |
+ | 1.1909 | 2.8293 | 11700 | 1.2015 |
+ | 1.1858 | 2.8535 | 11800 | 1.2015 |
+ | 1.185 | 2.8777 | 11900 | 1.2015 |
+ | 1.1947 | 2.9019 | 12000 | 1.2015 |
+ | 1.1868 | 2.9261 | 12100 | 1.2014 |
+ | 1.1872 | 2.9502 | 12200 | 1.2015 |
+ | 1.1852 | 2.9744 | 12300 | 1.2015 |
+ | 1.185 | 2.9986 | 12400 | 1.2015 |
+
+ ### Framework versions
+
+ - PEFT 0.11.1
+ - Transformers 4.43.1
+ - Pytorch 2.3.1+cu121
+ - Datasets 2.20.0
+ - Tokenizers 0.19.1
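To use the adapter, one would load the base model and attach the LoRA weights with `peft`. A minimal inference sketch, assuming the adapter is published at `alizaidi/phi3-goud-lora` as the repository name suggests:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the base model, then attach the LoRA adapter on top of it.
base = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-4k-instruct",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("alizaidi/phi3-goud-lora")
model = PeftModel.from_pretrained(base, "alizaidi/phi3-goud-lora")

# Build a chat-formatted prompt and generate a summary.
messages = [{"role": "user", "content": "Summarize: <article text>"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(inputs, max_new_tokens=128)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))
```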
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "microsoft/Phi-3-mini-4k-instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "up_proj",
+     "o_proj",
+     "q_proj",
+     "gate_proj",
+     "k_proj",
+     "v_proj",
+     "down_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
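The same settings, expressed as a `peft` `LoraConfig` for anyone reproducing the setup (a sketch; the fields map one-to-one onto the JSON above):

```python
from peft import LoraConfig

# Mirrors adapter_config.json: rank-16 LoRA on all attention and MLP
# projections of Phi-3-mini, trained for causal language modeling.
lora_config = LoraConfig(
    r=16,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
)
```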
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:042c5144fc0553dd2dc05b024604a89cff275209f3a8cf4b6806760a05f1629a
+ size 17842848
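The 17.8 MB file holds only the LoRA deltas, not full model weights. A quick way to see what is inside (a sketch, assuming the file has been downloaded locally):

```python
from safetensors import safe_open

# List the adapter tensors and their shapes without loading the base model.
with safe_open("adapter_model.safetensors", framework="pt") as f:
    for name in f.keys():
        print(name, f.get_slice(name).get_shape())
```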
added_tokens.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "<|assistant|>": 32001,
+   "<|endoftext|>": 32000,
+   "<|end|>": 32007,
+   "<|placeholder1|>": 32002,
+   "<|placeholder2|>": 32003,
+   "<|placeholder3|>": 32004,
+   "<|placeholder4|>": 32005,
+   "<|placeholder5|>": 32008,
+   "<|placeholder6|>": 32009,
+   "<|system|>": 32006,
+   "<|user|>": 32010
+ }
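These are the chat-control tokens appended after the base SentencePiece vocabulary (ids 32000 and up). A quick sanity check that the tokenizer resolves them as listed (again assuming the published repo id):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("alizaidi/phi3-goud-lora")
# The added chat-control tokens map to the ids in added_tokens.json.
assert tok.convert_tokens_to_ids("<|user|>") == 32010
assert tok.convert_tokens_to_ids("<|assistant|>") == 32001
assert tok.convert_tokens_to_ids("<|end|>") == 32007
```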
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<unk>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,131 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": true,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "32000": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32001": {
+       "content": "<|assistant|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32002": {
+       "content": "<|placeholder1|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32003": {
+       "content": "<|placeholder2|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32004": {
+       "content": "<|placeholder3|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32005": {
+       "content": "<|placeholder4|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32006": {
+       "content": "<|system|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32007": {
+       "content": "<|end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32008": {
+       "content": "<|placeholder5|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32009": {
+       "content": "<|placeholder6|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "32010": {
+       "content": "<|user|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "legacy": false,
+   "model_max_length": 4096,
+   "pad_token": "<unk>",
+   "padding_side": "left",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
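The `chat_template` above renders each turn as `<|role|>\n...<|end|>\n` and, when a generation prompt is requested, ends with a bare `<|assistant|>\n`. A short sketch of what it produces (assuming the published repo id):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("alizaidi/phi3-goud-lora")
messages = [
    {"role": "system", "content": "You summarize news articles."},
    {"role": "user", "content": "<article text>"},
]
# Render the template to a string instead of token ids.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|system|>
# You summarize news articles.<|end|>
# <|user|>
# <article text><|end|>
# <|assistant|>
```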
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5ccef6425c51543a08fba86455fc99b6f414efc663746b8f5f1ab506d0966b3
+ size 5432