xianchaowu committed
Commit: 67059e2
Parent(s): 215c0a2

checkpoint-1500 for v2 release

Files changed: README.md (+116 -110), adapter_model.bin (+1 -1)
README.md CHANGED
@@ -8,7 +8,13 @@ license: llama2
 
 0. using the updated [Meta's LLaMA-2 models](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf).
 1. support [4-bit qlora](https://arxiv.org/abs/2305.14314), extreme GPU memory and inference time saving;
-2.
+2. comparable MMLU evaluation dataset results:
+
+| model          | eval   | test   | comp-eval | comp-test |
+|----------------|--------|--------|-----------|-----------|
+| llama2-7b-chat | 49.38% | 48.22% |           |           |
+| ckpt-1600      | 46.51% | 47.44% | -2.87%    | -0.78%    |
+| ckpt-1500      | 47.66% | 46.88% | -1.72%    | -1.33%    |
 
 ### Introduction
 Determine the rank of LoRA layers by the singular values of pretrained weight matrices.
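The "Introduction" line above states the whole idea in one sentence, so a small illustration may help. The sketch below is an assumption about how such rank selection could look, not this repository's actual code: the `rank_for` helper and the energy threshold are hypothetical; only the use of singular values of the pretrained weights comes from the README.

```python
# Hypothetical sketch of SVD-based LoRA rank selection (not the repo's code).
# Idea from the README: pick each layer's rank from the singular values of its
# pretrained weight matrix; here, keep enough values to cover `energy` of the
# total squared spectrum.
import torch

def rank_for(weight: torch.Tensor, energy: float = 0.5, max_rank: int = 64) -> int:
    s = torch.linalg.svdvals(weight.float())          # descending singular values
    cum = torch.cumsum(s ** 2, dim=0) / torch.sum(s ** 2)
    rank = int(torch.searchsorted(cum, torch.tensor(energy)).item()) + 1
    return min(rank, max_rank)

# Layers with flatter spectra get larger ranks than near-low-rank layers.
print(rank_for(torch.randn(1024, 1024)))              # stand-in for a q_proj weight
```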
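Point 1 in the feature list refers to 4-bit QLoRA. As a hedged sketch of how a 4-bit base model plus this LoRA adapter is typically loaded with `transformers`, `bitsandbytes`, and `peft`; the NF4 settings mirror the QLoRA paper and the adapter path is a placeholder, so treat both as assumptions rather than this repo's documented usage:

```python
# Sketch: load the Llama-2 base in 4 bits (QLoRA-style) and attach the adapter.
# Quantization settings follow the QLoRA paper's NF4 recipe; they are an
# assumption here, not necessarily what this checkpoint was trained with.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "meta-llama/Llama-2-7b-chat-hf"
bnb = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",                 # NormalFloat4 data type
    bnb_4bit_use_double_quant=True,            # quantize the quantization constants
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id, quantization_config=bnb, device_map="auto"
)
model = PeftModel.from_pretrained(model, "path/to/this/adapter")  # placeholder path
model.print_trainable_parameters()             # as in the README's usage snippet
```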
@@ -88,129 +94,129 @@ model.print_trainable_parameters()
 ### MMLU eval result:
 
 ```json
-{"mmlu_loss": 1.
+{"mmlu_loss": 1.44828412985973,
+"mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
+"mmlu_eval_accuracy_high_school_physics": 0.35294117647058826,
+"mmlu_eval_accuracy_elementary_mathematics": 0.24390243902439024,
+"mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
+"mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
+"mmlu_eval_accuracy_sociology": 0.6363636363636364,
+"mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
+"mmlu_eval_accuracy_professional_medicine": 0.3870967741935484,
+"mmlu_eval_accuracy_computer_security": 0.2727272727272727,
+"mmlu_eval_accuracy_astronomy": 0.375,
+"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+"mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384,
+"mmlu_eval_accuracy_high_school_psychology": 0.7166666666666667,
+"mmlu_eval_accuracy_professional_law": 0.3176470588235294,
 "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
-… (2 removed lines truncated in the diff view)
-"mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
+"mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
+"mmlu_eval_accuracy_global_facts": 0.5,
 "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
-… (1 removed line truncated in the diff view)
-"mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
-"mmlu_eval_accuracy_international_law": 0.9230769230769231,
-"mmlu_eval_accuracy_management": 0.6363636363636364,
-"mmlu_eval_accuracy_philosophy": 0.35294117647058826,
-"mmlu_eval_accuracy_high_school_geography": 0.6363636363636364,
-"mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
-"mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
-"mmlu_eval_accuracy_miscellaneous": 0.627906976744186,
-"mmlu_eval_accuracy_human_aging": 0.6956521739130435,
-"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
-"mmlu_eval_accuracy_electrical_engineering": 0.3125,
-"mmlu_eval_accuracy_high_school_biology": 0.375,
-"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
-"mmlu_eval_accuracy_moral_scenarios": 0.28,
-"mmlu_eval_accuracy_sociology": 0.6363636363636364,
-"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
-"mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
-"mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
-"mmlu_eval_accuracy_conceptual_physics": 0.3076923076923077,
-"mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
-"mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
-"mmlu_eval_accuracy_professional_psychology": 0.391304347826087,
-"mmlu_eval_accuracy_elementary_mathematics": 0.24390243902439024,
-"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
-"mmlu_eval_accuracy_high_school_physics": 0.35294117647058826,
+"mmlu_eval_accuracy_electrical_engineering": 0.25,
 "mmlu_eval_accuracy_college_chemistry": 0.375,
+"mmlu_eval_accuracy_high_school_biology": 0.4375,
+"mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
+"mmlu_eval_accuracy_public_relations": 0.5833333333333334,
+"mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
+"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+"mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
 "mmlu_eval_accuracy_college_biology": 0.375,
-… (6 removed lines truncated in the diff view)
+"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
+"mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
+"mmlu_eval_accuracy_miscellaneous": 0.627906976744186,
+"mmlu_eval_accuracy_high_school_us_history": 0.8181818181818182,
+"mmlu_eval_accuracy_virology": 0.4444444444444444,
+"mmlu_eval_accuracy_anatomy": 0.5,
+"mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
+"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
+"mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+"mmlu_eval_accuracy_philosophy": 0.38235294117647056,
 "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
-… (2 removed lines truncated in the diff view)
+"mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
+"mmlu_eval_accuracy_nutrition": 0.6363636363636364,
+"mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
+"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
+"mmlu_eval_accuracy_professional_psychology": 0.42028985507246375,
+"mmlu_eval_accuracy_prehistory": 0.5428571428571428,
+"mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
+"mmlu_eval_accuracy_management": 0.6363636363636364,
+"mmlu_eval_accuracy_marketing": 0.76,
+"mmlu_eval_accuracy_international_law": 0.9230769230769231,
+"mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
+"mmlu_eval_accuracy_moral_scenarios": 0.32,
+"mmlu_eval_accuracy_high_school_european_history": 0.5,
 "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
-… (6 removed lines truncated in the diff view)
-"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
-"mmlu_eval_accuracy_security_studies": 0.5185185185185185,
-"mmlu_eval_accuracy_astronomy": 0.4375,
-"mmlu_eval_accuracy_public_relations": 0.5,
-"mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
-"mmlu_eval_accuracy_computer_security": 0.18181818181818182,
-"mmlu_eval_accuracy_global_facts": 0.5,
-"mmlu_eval_accuracy_high_school_world_history": 0.5769230769230769,
-"mmlu_eval_accuracy": 0.46043208436613065}
+"mmlu_eval_accuracy_moral_disputes": 0.39473684210526316,
+"mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
+"mmlu_eval_accuracy_security_studies": 0.5925925925925926,
+"mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
+"mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+"mmlu_eval_accuracy": 0.4766441930115949}
 ```
 
 ### MMLU test result:
 
 ```json
-{"mmlu_loss": 1.
-… (6 removed lines truncated in the diff view)
+{"mmlu_loss": 1.4452685356679218,
+"mmlu_test_accuracy_moral_scenarios": 0.23575418994413408,
+"mmlu_test_accuracy_security_studies": 0.5020408163265306,
+"mmlu_test_accuracy_astronomy": 0.4934210526315789,
+"mmlu_test_accuracy_medical_genetics": 0.52,
+"mmlu_test_accuracy_logical_fallacies": 0.5521472392638037,
+"mmlu_test_accuracy_professional_psychology": 0.4444444444444444,
+"mmlu_test_accuracy_high_school_psychology": 0.6110091743119266,
+"mmlu_test_accuracy_high_school_us_history": 0.6372549019607843,
 "mmlu_test_accuracy_high_school_physics": 0.33112582781456956,
-… (3 removed lines truncated in the diff view)
-"mmlu_test_accuracy_machine_learning": 0.35714285714285715,
-"mmlu_test_accuracy_moral_scenarios": 0.22569832402234638,
-"mmlu_test_accuracy_jurisprudence": 0.5925925925925926,
-"mmlu_test_accuracy_professional_law": 0.3239895697522816,
-"mmlu_test_accuracy_medical_genetics": 0.48,
-"mmlu_test_accuracy_college_chemistry": 0.36,
-"mmlu_test_accuracy_high_school_geography": 0.5606060606060606,
-"mmlu_test_accuracy_prehistory": 0.5185185185185185,
-"mmlu_test_accuracy_high_school_world_history": 0.5864978902953587,
-"mmlu_test_accuracy_professional_psychology": 0.4297385620915033,
-"mmlu_test_accuracy_public_relations": 0.5272727272727272,
-"mmlu_test_accuracy_high_school_psychology": 0.6256880733944954,
-"mmlu_test_accuracy_high_school_biology": 0.5225806451612903,
-"mmlu_test_accuracy_computer_security": 0.52,
-"mmlu_test_accuracy_conceptual_physics": 0.3829787234042553,
-"mmlu_test_accuracy_elementary_mathematics": 0.30423280423280424,
-"mmlu_test_accuracy_high_school_computer_science": 0.42,
-"mmlu_test_accuracy_marketing": 0.6495726495726496,
-"mmlu_test_accuracy_college_mathematics": 0.33,
-"mmlu_test_accuracy_college_biology": 0.5138888888888888,
-"mmlu_test_accuracy_us_foreign_policy": 0.73,
-"mmlu_test_accuracy_security_studies": 0.4775510204081633,
-"mmlu_test_accuracy_high_school_european_history": 0.5393939393939394,
+"mmlu_test_accuracy_prehistory": 0.5308641975308642,
+"mmlu_test_accuracy_human_sexuality": 0.549618320610687,
+"mmlu_test_accuracy_management": 0.6213592233009708,
 "mmlu_test_accuracy_international_law": 0.6363636363636364,
-"mmlu_test_accuracy_moral_disputes": 0.
-… (5 removed lines truncated in the diff view)
-"mmlu_test_accuracy_high_school_macroeconomics": 0.
-… (13 removed lines truncated in the diff view)
-"mmlu_test_accuracy_college_medicine": 0.37572254335260113,
+"mmlu_test_accuracy_moral_disputes": 0.49421965317919075,
+"mmlu_test_accuracy_conceptual_physics": 0.4127659574468085,
+"mmlu_test_accuracy_econometrics": 0.3508771929824561,
+"mmlu_test_accuracy_college_medicine": 0.3815028901734104,
+"mmlu_test_accuracy_high_school_biology": 0.5064516129032258,
+"mmlu_test_accuracy_high_school_statistics": 0.27314814814814814,
+"mmlu_test_accuracy_high_school_macroeconomics": 0.43333333333333335,
+"mmlu_test_accuracy_college_mathematics": 0.28,
+"mmlu_test_accuracy_elementary_mathematics": 0.30687830687830686,
+"mmlu_test_accuracy_public_relations": 0.509090909090909,
+"mmlu_test_accuracy_high_school_european_history": 0.5515151515151515,
+"mmlu_test_accuracy_human_aging": 0.5381165919282511,
+"mmlu_test_accuracy_high_school_geography": 0.5555555555555556,
+"mmlu_test_accuracy_formal_logic": 0.25396825396825395,
+"mmlu_test_accuracy_miscellaneous": 0.665389527458493,
+"mmlu_test_accuracy_high_school_computer_science": 0.4,
+"mmlu_test_accuracy_global_facts": 0.33,
+"mmlu_test_accuracy_world_religions": 0.6666666666666666,
+"mmlu_test_accuracy_machine_learning": 0.33035714285714285,
+"mmlu_test_accuracy_sociology": 0.6169154228855721,
 "mmlu_test_accuracy_clinical_knowledge": 0.49433962264150944,
-… (2 removed lines truncated in the diff view)
+"mmlu_test_accuracy_virology": 0.4397590361445783,
+"mmlu_test_accuracy_high_school_government_and_politics": 0.6839378238341969,
+"mmlu_test_accuracy_high_school_world_history": 0.6329113924050633,
+"mmlu_test_accuracy_college_biology": 0.5138888888888888,
+"mmlu_test_accuracy_philosophy": 0.5627009646302251,
+"mmlu_test_accuracy_college_physics": 0.2549019607843137,
+"mmlu_test_accuracy_college_computer_science": 0.34,
+"mmlu_test_accuracy_high_school_chemistry": 0.3793103448275862,
+"mmlu_test_accuracy_nutrition": 0.5163398692810458,
+"mmlu_test_accuracy_professional_accounting": 0.35106382978723405,
+"mmlu_test_accuracy_jurisprudence": 0.5925925925925926,
+"mmlu_test_accuracy_high_school_mathematics": 0.25925925925925924,
+"mmlu_test_accuracy_marketing": 0.6923076923076923,
+"mmlu_test_accuracy_business_ethics": 0.48,
+"mmlu_test_accuracy_high_school_microeconomics": 0.4495798319327731,
+"mmlu_test_accuracy_college_chemistry": 0.3,
+"mmlu_test_accuracy_us_foreign_policy": 0.72,
+"mmlu_test_accuracy_computer_security": 0.6,
+"mmlu_test_accuracy_anatomy": 0.4740740740740741,
+"mmlu_test_accuracy_professional_law": 0.3220338983050847,
+"mmlu_test_accuracy_abstract_algebra": 0.27,
+"mmlu_test_accuracy_electrical_engineering": 0.4827586206896552,
+"mmlu_test_accuracy_professional_medicine": 0.3897058823529412,
+"mmlu_test_accuracy": 0.46883545484585126}
 ```
 
 ## License and intended use
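For reference, the comp-eval and comp-test columns in the table added above are percentage-point differences from the llama2-7b-chat baseline. A minimal sketch of that arithmetic, assuming summary JSON files shaped like the blocks above (the file names are hypothetical):

```python
# Sketch: recompute the comparison columns from MMLU summary JSONs.
# File names are hypothetical; each JSON is shaped like the blocks above.
import json

BASELINE = {"eval": 49.38, "test": 48.22}   # llama2-7b-chat row of the table

def pct(x: float) -> float:
    return round(100.0 * x, 2)

ev = json.load(open("mmlu_eval_summary.json"))["mmlu_eval_accuracy"]
te = json.load(open("mmlu_test_summary.json"))["mmlu_test_accuracy"]

print(f"eval {pct(ev)}%, comp-eval {round(pct(ev) - BASELINE['eval'], 2)}%")
print(f"test {pct(te)}%, comp-test {round(pct(te) - BASELINE['test'], 2)}%")
# With the ckpt-1500 values above: eval 47.66%, comp-eval -1.72%.
```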
adapter_model.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:5551124cbd98cda4968a723a220d63a05fec079e01bbc93ce0f88721a0971fde
 size 320063949
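Only the git-lfs pointer changes here: the file keeps its 320063949-byte size while the sha256 oid moves to the new checkpoint's content. A minimal sketch, assuming a local copy of the pointer file and the downloaded weights (both paths hypothetical), for verifying a download against such a pointer:

```python
# Sketch: verify a downloaded file against its git-lfs pointer (paths assumed).
import hashlib
import os

def parse_pointer(path: str) -> dict:
    # Pointer files are "key value" lines: version, oid sha256:<hex>, size.
    with open(path) as f:
        return dict(line.strip().split(" ", 1) for line in f if " " in line)

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

ptr = parse_pointer("adapter_model.bin.pointer")          # hypothetical path
assert ptr["oid"] == "sha256:" + sha256_of("adapter_model.bin")
assert int(ptr["size"]) == os.path.getsize("adapter_model.bin")
print("adapter_model.bin matches its LFS pointer")
```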