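"""Split a single consolidated checkpoint ("consolidated.00.pth") into eight
tensor-parallel shards saved under ./converted/.

Parameters listed in ``weight_parallel_dim`` are chunked along the given
dimension, expert weights are kept only on the rank whose index matches the
expert index, and everything else is replicated into every shard.
"""
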
weight_parallel_dim = {"llma.tok_embeddings.weight": 1, "llma.layers.0.attention.wq.weight": 0,
                       "llma.layers.0.attention.wq.bias": 0, "llma.layers.0.attention.wk.weight": 0,
                       "llma.layers.0.attention.wk.bias": 0, "llma.layers.0.attention.wv.weight": 0,
                       "llma.layers.0.attention.wv.bias": 0, "llma.layers.0.attention.wo.weight": 1,
                       "llma.layers.1.attention.wq.weight": 0, "llma.layers.1.attention.wq.bias": 0,
                       "llma.layers.1.attention.wk.weight": 0, "llma.layers.1.attention.wk.bias": 0,
                       "llma.layers.1.attention.wv.weight": 0, "llma.layers.1.attention.wv.bias": 0,
                       "llma.layers.1.attention.wo.weight": 1, "llma.layers.2.attention.wq.weight": 0,
                       "llma.layers.2.attention.wq.bias": 0, "llma.layers.2.attention.wk.weight": 0,
                       "llma.layers.2.attention.wk.bias": 0, "llma.layers.2.attention.wv.weight": 0,
                       "llma.layers.2.attention.wv.bias": 0, "llma.layers.2.attention.wo.weight": 1,
                       "llma.layers.3.attention.wq.weight": 0, "llma.layers.3.attention.wq.bias": 0,
                       "llma.layers.3.attention.wk.weight": 0, "llma.layers.3.attention.wk.bias": 0,
                       "llma.layers.3.attention.wv.weight": 0, "llma.layers.3.attention.wv.bias": 0,
                       "llma.layers.3.attention.wo.weight": 1, "llma.layers.4.attention.wq.weight": 0,
                       "llma.layers.4.attention.wq.bias": 0, "llma.layers.4.attention.wk.weight": 0,
                       "llma.layers.4.attention.wk.bias": 0, "llma.layers.4.attention.wv.weight": 0,
                       "llma.layers.4.attention.wv.bias": 0, "llma.layers.4.attention.wo.weight": 1,
                       "llma.layers.5.attention.wq.weight": 0, "llma.layers.5.attention.wq.bias": 0,
                       "llma.layers.5.attention.wk.weight": 0, "llma.layers.5.attention.wk.bias": 0,
                       "llma.layers.5.attention.wv.weight": 0, "llma.layers.5.attention.wv.bias": 0,
                       "llma.layers.5.attention.wo.weight": 1, "llma.layers.6.attention.wq.weight": 0,
                       "llma.layers.6.attention.wq.bias": 0, "llma.layers.6.attention.wk.weight": 0,
                       "llma.layers.6.attention.wk.bias": 0, "llma.layers.6.attention.wv.weight": 0,
                       "llma.layers.6.attention.wv.bias": 0, "llma.layers.6.attention.wo.weight": 1,
                       "llma.layers.7.attention.wq.weight": 0, "llma.layers.7.attention.wq.bias": 0,
                       "llma.layers.7.attention.wk.weight": 0, "llma.layers.7.attention.wk.bias": 0,
                       "llma.layers.7.attention.wv.weight": 0, "llma.layers.7.attention.wv.bias": 0,
                       "llma.layers.7.attention.wo.weight": 1, "llma.layers.8.attention.wq.weight": 0,
                       "llma.layers.8.attention.wq.bias": 0, "llma.layers.8.attention.wk.weight": 0,
                       "llma.layers.8.attention.wk.bias": 0, "llma.layers.8.attention.wv.weight": 0,
                       "llma.layers.8.attention.wv.bias": 0, "llma.layers.8.attention.wo.weight": 1,
                       "llma.layers.9.attention.wq.weight": 0, "llma.layers.9.attention.wq.bias": 0,
                       "llma.layers.9.attention.wk.weight": 0, "llma.layers.9.attention.wk.bias": 0,
                       "llma.layers.9.attention.wv.weight": 0, "llma.layers.9.attention.wv.bias": 0,
                       "llma.layers.9.attention.wo.weight": 1, "llma.layers.10.attention.wq.weight": 0,
                       "llma.layers.10.attention.wq.bias": 0, "llma.layers.10.attention.wk.weight": 0,
                       "llma.layers.10.attention.wk.bias": 0, "llma.layers.10.attention.wv.weight": 0,
                       "llma.layers.10.attention.wv.bias": 0, "llma.layers.10.attention.wo.weight": 1,
                       "llma.layers.11.attention.wq.weight": 0, "llma.layers.11.attention.wq.bias": 0,
                       "llma.layers.11.attention.wk.weight": 0, "llma.layers.11.attention.wk.bias": 0,
                       "llma.layers.11.attention.wv.weight": 0, "llma.layers.11.attention.wv.bias": 0,
                       "llma.layers.11.attention.wo.weight": 1, "llma.layers.12.attention.wq.weight": 0,
                       "llma.layers.12.attention.wq.bias": 0, "llma.layers.12.attention.wk.weight": 0,
                       "llma.layers.12.attention.wk.bias": 0, "llma.layers.12.attention.wv.weight": 0,
                       "llma.layers.12.attention.wv.bias": 0, "llma.layers.12.attention.wo.weight": 1,
                       "llma.layers.13.attention.wq.weight": 0, "llma.layers.13.attention.wq.bias": 0,
                       "llma.layers.13.attention.wk.weight": 0, "llma.layers.13.attention.wk.bias": 0,
                       "llma.layers.13.attention.wv.weight": 0, "llma.layers.13.attention.wv.bias": 0,
                       "llma.layers.13.attention.wo.weight": 1, "llma.layers.14.attention.wq.weight": 0,
                       "llma.layers.14.attention.wq.bias": 0, "llma.layers.14.attention.wk.weight": 0,
                       "llma.layers.14.attention.wk.bias": 0, "llma.layers.14.attention.wv.weight": 0,
                       "llma.layers.14.attention.wv.bias": 0, "llma.layers.14.attention.wo.weight": 1,
                       "llma.layers.15.attention.wq.weight": 0, "llma.layers.15.attention.wq.bias": 0,
                       "llma.layers.15.attention.wk.weight": 0, "llma.layers.15.attention.wk.bias": 0,
                       "llma.layers.15.attention.wv.weight": 0, "llma.layers.15.attention.wv.bias": 0,
                       "llma.layers.15.attention.wo.weight": 1, "llma.layers.16.attention.wq.weight": 0,
                       "llma.layers.16.attention.wq.bias": 0, "llma.layers.16.attention.wk.weight": 0,
                       "llma.layers.16.attention.wk.bias": 0, "llma.layers.16.attention.wv.weight": 0,
                       "llma.layers.16.attention.wv.bias": 0, "llma.layers.16.attention.wo.weight": 1,
                       "llma.layers.17.attention.wq.weight": 0, "llma.layers.17.attention.wq.bias": 0,
                       "llma.layers.17.attention.wk.weight": 0, "llma.layers.17.attention.wk.bias": 0,
                       "llma.layers.17.attention.wv.weight": 0, "llma.layers.17.attention.wv.bias": 0,
                       "llma.layers.17.attention.wo.weight": 1, "llma.layers.18.attention.wq.weight": 0,
                       "llma.layers.18.attention.wq.bias": 0, "llma.layers.18.attention.wk.weight": 0,
                       "llma.layers.18.attention.wk.bias": 0, "llma.layers.18.attention.wv.weight": 0,
                       "llma.layers.18.attention.wv.bias": 0, "llma.layers.18.attention.wo.weight": 1,
                       "llma.layers.19.attention.wq.weight": 0, "llma.layers.19.attention.wq.bias": 0,
                       "llma.layers.19.attention.wk.weight": 0, "llma.layers.19.attention.wk.bias": 0,
                       "llma.layers.19.attention.wv.weight": 0, "llma.layers.19.attention.wv.bias": 0,
                       "llma.layers.19.attention.wo.weight": 1, "llma.layers.20.attention.wq.weight": 0,
                       "llma.layers.20.attention.wq.bias": 0, "llma.layers.20.attention.wk.weight": 0,
                       "llma.layers.20.attention.wk.bias": 0, "llma.layers.20.attention.wv.weight": 0,
                       "llma.layers.20.attention.wv.bias": 0, "llma.layers.20.attention.wo.weight": 1,
                       "llma.layers.21.attention.wq.weight": 0, "llma.layers.21.attention.wq.bias": 0,
                       "llma.layers.21.attention.wk.weight": 0, "llma.layers.21.attention.wk.bias": 0,
                       "llma.layers.21.attention.wv.weight": 0, "llma.layers.21.attention.wv.bias": 0,
                       "llma.layers.21.attention.wo.weight": 1, "llma.layers.22.attention.wq.weight": 0,
                       "llma.layers.22.attention.wq.bias": 0, "llma.layers.22.attention.wk.weight": 0,
                       "llma.layers.22.attention.wk.bias": 0, "llma.layers.22.attention.wv.weight": 0,
                       "llma.layers.22.attention.wv.bias": 0, "llma.layers.22.attention.wo.weight": 1,
                       "llma.layers.23.attention.wq.weight": 0, "llma.layers.23.attention.wq.bias": 0,
                       "llma.layers.23.attention.wk.weight": 0, "llma.layers.23.attention.wk.bias": 0,
                       "llma.layers.23.attention.wv.weight": 0, "llma.layers.23.attention.wv.bias": 0,
                       "llma.layers.23.attention.wo.weight": 1, "llma.layers.24.attention.wq.weight": 0,
                       "llma.layers.24.attention.wq.bias": 0, "llma.layers.24.attention.wk.weight": 0,
                       "llma.layers.24.attention.wk.bias": 0, "llma.layers.24.attention.wv.weight": 0,
                       "llma.layers.24.attention.wv.bias": 0, "llma.layers.24.attention.wo.weight": 1,
                       "llma.layers.25.attention.wq.weight": 0, "llma.layers.25.attention.wq.bias": 0,
                       "llma.layers.25.attention.wk.weight": 0, "llma.layers.25.attention.wk.bias": 0,
                       "llma.layers.25.attention.wv.weight": 0, "llma.layers.25.attention.wv.bias": 0,
                       "llma.layers.25.attention.wo.weight": 1, "llma.layers.26.attention.wq.weight": 0,
                       "llma.layers.26.attention.wq.bias": 0, "llma.layers.26.attention.wk.weight": 0,
                       "llma.layers.26.attention.wk.bias": 0, "llma.layers.26.attention.wv.weight": 0,
                       "llma.layers.26.attention.wv.bias": 0, "llma.layers.26.attention.wo.weight": 1,
                       "llma.layers.27.attention.wq.weight": 0, "llma.layers.27.attention.wq.bias": 0,
                       "llma.layers.27.attention.wk.weight": 0, "llma.layers.27.attention.wk.bias": 0,
                       "llma.layers.27.attention.wv.weight": 0, "llma.layers.27.attention.wv.bias": 0,
                       "llma.layers.27.attention.wo.weight": 1, "llma.layers.28.attention.wq.weight": 0,
                       "llma.layers.28.attention.wq.bias": 0, "llma.layers.28.attention.wk.weight": 0,
                       "llma.layers.28.attention.wk.bias": 0, "llma.layers.28.attention.wv.weight": 0,
                       "llma.layers.28.attention.wv.bias": 0, "llma.layers.28.attention.wo.weight": 1,
                       "llma.layers.29.attention.wq.weight": 0, "llma.layers.29.attention.wq.bias": 0,
                       "llma.layers.29.attention.wk.weight": 0, "llma.layers.29.attention.wk.bias": 0,
                       "llma.layers.29.attention.wv.weight": 0, "llma.layers.29.attention.wv.bias": 0,
                       "llma.layers.29.attention.wo.weight": 1, "llma.layers.30.attention.wq.weight": 0,
                       "llma.layers.30.attention.wq.bias": 0, "llma.layers.30.attention.wk.weight": 0,
                       "llma.layers.30.attention.wk.bias": 0, "llma.layers.30.attention.wv.weight": 0,
                       "llma.layers.30.attention.wv.bias": 0, "llma.layers.30.attention.wo.weight": 1,
                       "llma.layers.31.attention.wq.weight": 0, "llma.layers.31.attention.wq.bias": 0,
                       "llma.layers.31.attention.wk.weight": 0, "llma.layers.31.attention.wk.bias": 0,
                       "llma.layers.31.attention.wv.weight": 0, "llma.layers.31.attention.wv.bias": 0,
                       "llma.layers.31.attention.wo.weight": 1, "llma.output.weight": 0, "llma.output.bias": 0}

import torch
from pathlib import Path

Path("./converted").mkdir(exist_ok=True)

# Load the single consolidated checkpoint and prefix every key with
# "llma." to match the parameter names expected by the target model.
ori = torch.load("consolidated.00.pth", map_location="cpu")
ori = {"llma." + key: val for key, val in ori.items()}


NUM_SHARDS = 8  # number of tensor-parallel shards to produce


def split_and_save(rank=0):
    """Build and save the checkpoint shard for one tensor-parallel rank."""
    split_ckpt = {}
    for key, ori_param in ori.items():
        if key in weight_parallel_dim:
            # Tensor-parallel parameter: keep only this rank's chunk along
            # the configured split dimension.
            split_ckpt[key] = torch.chunk(
                ori_param, NUM_SHARDS, weight_parallel_dim[key]
            )[rank % NUM_SHARDS].clone()
            if rank == 0:
                print(f"chunk {key}")
        elif ("experts." in key
              and int(key.split("experts.")[1].split(".")[0]) != rank):
            # Expert-parallel parameter owned by a different rank: skip it.
            continue
        else:
            # Replicated parameter (norms, this rank's own expert weights,
            # ...): copy it into the shard unchanged.
            split_ckpt[key] = ori_param
            if rank == 0:
                print(f"inherit {key}")
    torch.save({"model": split_ckpt},
               f"converted/consolidated.{rank:02d}-of-{NUM_SHARDS:02d}.model.pth")


for r in range(NUM_SHARDS):
    split_and_save(r)
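
# Optional round-trip check (a minimal sketch, not part of the original
# conversion flow): reload the shards written above and confirm that
# concatenating each rank's chunk along its split dimension reproduces the
# source tensor. Replicated and expert parameters are skipped here.
def verify_shards():
    shards = [torch.load(f"converted/consolidated.{r:02d}-of-{NUM_SHARDS:02d}.model.pth",
                         map_location="cpu")["model"] for r in range(NUM_SHARDS)]
    for key, ori_param in ori.items():
        if key not in weight_parallel_dim:
            continue
        merged = torch.cat([shard[key] for shard in shards],
                           dim=weight_parallel_dim[key])
        assert torch.equal(merged, ori_param), f"round-trip mismatch for {key}"
    print("all sharded tensors verified")

# verify_shards()  # uncomment to run the check after the conversion above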