Initial GPTQ model commit
Browse files — convert_baichuan_to_llama.py (+14, −0)
convert_baichuan_to_llama.py
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from collections import OrderedDict
|
2 |
+
import torch
|
3 |
+
|
4 |
+
baichuan = torch.load("pytorch_model.bin")
|
5 |
+
llama = OrderedDict()
|
6 |
+
|
7 |
+
for key in baichuan:
|
8 |
+
if 'W_pack' in key:
|
9 |
+
llama[key.replace('W_pack', 'q_proj')] = baichuan[key][:4096]
|
10 |
+
llama[key.replace('W_pack', 'k_proj')] = baichuan[key][4096:4096 * 2]
|
11 |
+
llama[key.replace('W_pack', 'v_proj')] = baichuan[key][4096 * 2:]
|
12 |
+
else:
|
13 |
+
llama[key] = baichuan[key]
|
14 |
+
torch.save(llama, "pytorch_model.bin")
|