matlok commited on
Commit
50228f4
1 Parent(s): e09bf10

add readme update for showing how this model was created, what it looks like in the header, and that it takes ~80s to merge 5 models

Files changed (1)
  1. README.md +689 -0
README.md CHANGED
@@ -1,3 +1,692 @@
1
  ---
2
  license: unknown
3
  ---
4
+
5
+ ## Merging models like Lego blocks using ddare and TIES
6
+
7
+ If you want to fine-tune the merged model, here's an example Unsloth fine-tuning guide:
8
+ [Alpaca + TinyLlama + RoPE Scaling full example.ipynb](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing#scrollTo=LjY75GoYUCB8)
9
+
10
+ ## How do I generate my own model merges?
11
+
12
+ The code below merges the following HuggingFace TinyLlama models:
13
+
14
+ - TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
15
+ - Doctor-Shotgun/TinyLlama-1.1B-32k-Instruct
16
+ - Doctor-Shotgun/TinyLlama-1.1B-32k
17
+ - Tensoic/TinyLlama-1.1B-3T-openhermes
18
+ - Josephgflowers/TinyLlama-3T-Cinder-v1.3
19
+
20
+ ```python3
21
+ import transformers
22
+ import torch
23
+ import logging
24
+ from ddare.merge import merge_tensors
25
+ from ddare.tensor import dare_ties_sparsification, relative_norm, divide_tensor_into_sets
26
+ from ddare.util import get_device
27
+ import re
28
+ from typing import Dict, Tuple, List
29
+
30
+ logging.basicConfig(level=logging.INFO)
31
+ log = logging.getLogger(__name__)
32
+
33
+
34
+ def get_models(
35
+ models: List[str],
36
+ trust_remote_code: bool,
37
+ ):
38
+ config = {
39
+ 'torch_dtype': torch.float16,
40
+ 'low_cpu_mem_usage': False,
41
+ 'trust_remote_code': trust_remote_code,
42
+ }
43
+ loaded_models = []
44
+ num_models = len(models)
45
+ for midx, model_path in enumerate(models):
46
+ log.info(
47
+ f"loading model={midx}/{num_models} "
48
+ f"model={model_path} "
49
+ )
50
+ loaded_models.append(
51
+ transformers.AutoModelForCausalLM.from_pretrained(
52
+ model_path,
53
+ **config
54
+ )
55
+ )
56
+ return loaded_models
57
+
58
+
59
+ def pm(
60
+ model,
61
+ ):
62
+ keys = model.state_dict().keys()
63
+ log.info(f"model keys={len(keys)}")
64
+ for i, k in enumerate(keys):
65
+ tensor = model.state_dict()[k]
66
+ log.info(
67
+ f"{i:3d} {k} shape={tensor.shape} "
68
+ f"type={tensor.dtype} dev={tensor.device} "
69
+ f"contig={tensor.is_contiguous()}")
70
+
71
+
72
+ def run_text_test(
73
+ model,
74
+ model_path,
75
+ device: str,
76
+ question: str,
77
+ ):
78
+ base_model = model.to(device)
79
+ log.info(
80
+ f"loading model={model_path}"
81
+ )
82
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
83
+ model_path,
84
+ torch_dtype=torch.float16)
85
+
86
+ inputs = tokenizer(
87
+ question,
88
+ return_tensors="pt"
89
+ ).to("cuda")
90
+ with torch.backends.cuda.sdp_kernel(
91
+ enable_flash=True,
92
+ enable_math=False,
93
+ enable_mem_efficient=False
94
+ ):
95
+ outputs = base_model.generate(**inputs)
96
+ log.info(tokenizer.decode(outputs[0], skip_special_tokens=True))
97
+ base_model = base_model.to("cpu")
98
+
99
+
100
+ def get_layer_type(
101
+ key: str
102
+ ) -> Tuple[int, str]:
103
+ matcher = re.compile(r"model.layers.(\d+).(.+)")
104
+ m = matcher.match(key)
105
+ if m is None:
106
+ if "model.norm.weight" == key:
107
+ return -1, "norm"
108
+ if "model.embed_tokens.weight" == key:
109
+ return -1, "embed"
110
+ if "lm_head.weight" == key:
111
+ return -1, "head"
112
+ log.info(f"Unknown key {key}")
113
+ return -1, "unknown"
114
+ return int(m.group(1)), m.group(2)
115
+
116
+
117
+ def merge_model_with_ties(
118
+ models: List[str],
119
+ model_dst: str,
120
+ trust_remote_code: bool = True
121
+ ):
122
+ models = get_models(
123
+ models=models,
124
+ trust_remote_code=trust_remote_code,
125
+ )
126
+ config = {}
127
+ result_dict: Dict[str, torch.Tensor] = {}
128
+ device = get_device()
129
+ keys = models[0].state_dict().keys()
130
+ num_keys = len(keys)
131
+ for k in keys:
132
+ block, layer_type = get_layer_type(k)
133
+ m0: torch.Tensor = models[0].state_dict()[k]
134
+ result = m0.clone()
135
+ sets = divide_tensor_into_sets(tensor=m0, n_sets=4)
136
+
137
+ # get the src layers to merge
138
+ m = [
139
+ models[1].state_dict()[k],
140
+ models[2].state_dict()[k],
141
+ models[3].state_dict()[k],
142
+ ]
143
+
144
+ # build a ratio
145
+ ratio = {
146
+ 'to_q': 0.0,
147
+ 'to_k': 0.0,
148
+ 'to_v': 0.0,
149
+ }.get(layer_type, .5)
150
+
151
+ norm_ratio = 0.68
152
+ log.info(
153
+ f"model={k} {num_keys} shape={m0.shape} "
154
+ f"dtype={m0.dtype} {m0.device} "
155
+ f"raio={ratio} "
156
+ f"contig={m0.is_contiguous()} "
157
+ f"norm={norm_ratio}")
158
+
159
+ # for all tensors
160
+ for i, tensor in enumerate(m):
161
+ if layer_type == "to_k":
162
+ # Get to_q key
163
+ q_base = models[0].state_dict()[k.replace("to_k", "to_q")]
164
+ q_merge = models[i].state_dict()[k.replace("to_k", "to_q")]
165
+ scale = relative_norm(q_merge, q_base)
166
+ tensor = tensor.to(device) / scale
167
+ del scale
168
+ elif layer_type == "to_q":
169
+ scale = relative_norm(tensor, m0)
170
+ tensor = tensor.to(device) * scale
171
+ del scale
172
+ slice_mask = (
173
+ sets == i
174
+ ).bool()
175
+ new_tensor = dare_ties_sparsification(
176
+ model_a_param=m0,
177
+ model_b_param=tensor,
178
+ drop_rate=norm_ratio,
179
+ ties="sum",
180
+ rescale="off",
181
+ device=device,
182
+ **config)
183
+ new_tensor = merge_tensors("slerp", m0, tensor, ratio)
184
+ result = torch.where(slice_mask, new_tensor, result)
185
+ del new_tensor, slice_mask
186
+
187
+ result_dict[k] = result
188
+ # end of merge
189
+
190
+ log.info(
191
+ f"{config} - done merge saving to file: {model_dst}"
192
+ )
193
+ out_model = (
194
+ transformers.AutoModelForCausalLM.from_pretrained(
195
+ model_dst,
196
+ **config
197
+ )
198
+ )
199
+ out_model.state_dict = lambda: result_dict
200
+ out_model.save_pretrained(model_dst)
201
+
202
+
203
+ def run():
204
+ log.info("start")
205
+ model_src = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T"
206
+ model_dst = "matlok/tinyllama-cinder-openhermes-32k"
207
+ config = {
208
+ 'torch_dtype': torch.float16,
209
+ 'low_cpu_mem_usage': False,
210
+ 'trust_remote_code': True,
211
+ }
212
+ models = [
213
+ model_src,
214
+ "Doctor-Shotgun/TinyLlama-1.1B-32k-Instruct",
215
+ "Doctor-Shotgun/TinyLlama-1.1B-32k",
216
+ "Tensoic/TinyLlama-1.1B-3T-openhermes",
217
+ "Josephgflowers/TinyLlama-3T-Cinder-v1.3",
218
+ ]
219
+ merge_model_with_ties(
220
+ models=models,
221
+ model_dst=model_dst
222
+ )
223
+ log.info(f"loading newly-created file: {model_dst}")
224
+ model = transformers.AutoModelForCausalLM.from_pretrained(
225
+ model_dst,
226
+ **config
227
+ )
228
+ pm(model=model)
229
+ log.info(f"done loading new model: {model} file: {model_dst}")
230
+
231
+
232
+ if __name__ == "__main__":
233
+ run()
234
+ ```
235
+
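+ For context on what the `drop_rate` and `rescale` arguments above control: DARE sparsifies the delta (the "task vector") between a fine-tuned tensor and its base tensor by randomly dropping a fraction of the entries and optionally rescaling the survivors, and TIES then resolves sign conflicts before the deltas are combined. Below is a rough, illustrative sketch of the DARE step in plain PyTorch - it is **not** the ddare implementation used above, and the function name is made up for illustration:
+
+ ```python3
+ import torch
+
+
+ def dare_sparsify_sketch(
+     base: torch.Tensor,
+     tuned: torch.Tensor,
+     drop_rate: float = 0.68,
+     rescale: bool = False,
+ ) -> torch.Tensor:
+     # delta ("task vector") between the fine-tuned weights and the base weights
+     delta = tuned - base
+     # DARE: randomly drop `drop_rate` of the delta entries
+     keep_mask = (torch.rand_like(delta.float()) >= drop_rate).to(delta.dtype)
+     if rescale:
+         # optionally rescale the surviving entries so the expected
+         # magnitude of the delta is preserved
+         keep_mask = keep_mask / (1.0 - drop_rate)
+     return base + delta * keep_mask
+ ```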
236
+
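+ Once the merge finishes, you can sanity-check the saved checkpoint with a quick generation pass. This is a minimal sketch using the standard transformers API; the prompt is just a placeholder:
+
+ ```python3
+ import torch
+ import transformers
+
+ model_path = "matlok/tinyllama-cinder-openhermes-32k"
+ tokenizer = transformers.AutoTokenizer.from_pretrained(model_path)
+ model = transformers.AutoModelForCausalLM.from_pretrained(
+     model_path,
+     torch_dtype=torch.float16,
+ )
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model = model.to(device)
+
+ # example prompt - swap in whatever you want to test
+ inputs = tokenizer("Why is the sky blue?", return_tensors="pt").to(device)
+ outputs = model.generate(**inputs, max_new_tokens=64)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```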
237
+ ### Logs
238
+
239
+ Here are the logs from the merge run (merging the 5 models took ~80 seconds):
240
+
241
+ ```
242
+ Total VRAM 12282 MB, total RAM 85434 MB
243
+ Set vram state to: NORMAL_VRAM
244
+ Device: cuda:0 NVIDIA GeForce RTX 4070 Ti : native
245
+ VAE dtype: torch.bfloat16
246
+ INFO:__main__:start
247
+ INFO:__main__:loading model=0/5 model=TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
248
+ INFO:__main__:loading model=1/5 model=Doctor-Shotgun/TinyLlama-1.1B-32k-Instruct
249
+ INFO:__main__:loading model=2/5 model=Doctor-Shotgun/TinyLlama-1.1B-32k
250
+ INFO:__main__:loading model=3/5 model=Tensoic/TinyLlama-1.1B-3T-openhermes
251
+ INFO:__main__:loading model=4/5 model=Josephgflowers/TinyLlama-3T-Cinder-v1.3
252
+ INFO:__main__:model=model.embed_tokens.weight 201 shape=torch.Size([32000, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
253
+ INFO:__main__:model=model.layers.0.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
254
+ INFO:__main__:model=model.layers.0.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
255
+ INFO:__main__:model=model.layers.0.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
256
+ INFO:__main__:model=model.layers.0.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
257
+ INFO:__main__:model=model.layers.0.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
258
+ INFO:__main__:model=model.layers.0.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
259
+ INFO:__main__:model=model.layers.0.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
260
+ INFO:__main__:model=model.layers.0.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
261
+ INFO:__main__:model=model.layers.0.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
262
+ INFO:__main__:model=model.layers.1.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
263
+ INFO:__main__:model=model.layers.1.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
264
+ INFO:__main__:model=model.layers.1.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
265
+ INFO:__main__:model=model.layers.1.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
266
+ INFO:__main__:model=model.layers.1.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
267
+ INFO:__main__:model=model.layers.1.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
268
+ INFO:__main__:model=model.layers.1.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
269
+ INFO:__main__:model=model.layers.1.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
270
+ INFO:__main__:model=model.layers.1.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
271
+ INFO:__main__:model=model.layers.2.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
272
+ INFO:__main__:model=model.layers.2.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
273
+ INFO:__main__:model=model.layers.2.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
274
+ INFO:__main__:model=model.layers.2.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
275
+ INFO:__main__:model=model.layers.2.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
276
+ INFO:__main__:model=model.layers.2.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
277
+ INFO:__main__:model=model.layers.2.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
278
+ INFO:__main__:model=model.layers.2.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
279
+ INFO:__main__:model=model.layers.2.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
280
+ INFO:__main__:model=model.layers.3.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
281
+ INFO:__main__:model=model.layers.3.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
282
+ INFO:__main__:model=model.layers.3.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
283
+ INFO:__main__:model=model.layers.3.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
284
+ INFO:__main__:model=model.layers.3.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
285
+ INFO:__main__:model=model.layers.3.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
286
+ INFO:__main__:model=model.layers.3.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
287
+ INFO:__main__:model=model.layers.3.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
288
+ INFO:__main__:model=model.layers.3.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
289
+ INFO:__main__:model=model.layers.4.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
290
+ INFO:__main__:model=model.layers.4.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
291
+ INFO:__main__:model=model.layers.4.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
292
+ INFO:__main__:model=model.layers.4.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
293
+ INFO:__main__:model=model.layers.4.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
294
+ INFO:__main__:model=model.layers.4.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
295
+ INFO:__main__:model=model.layers.4.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
296
+ INFO:__main__:model=model.layers.4.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
297
+ INFO:__main__:model=model.layers.4.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
298
+ INFO:__main__:model=model.layers.5.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
299
+ INFO:__main__:model=model.layers.5.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
300
+ INFO:__main__:model=model.layers.5.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
301
+ INFO:__main__:model=model.layers.5.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
302
+ INFO:__main__:model=model.layers.5.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
303
+ INFO:__main__:model=model.layers.5.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
304
+ INFO:__main__:model=model.layers.5.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
305
+ INFO:__main__:model=model.layers.5.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
306
+ INFO:__main__:model=model.layers.5.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
307
+ INFO:__main__:model=model.layers.6.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
308
+ INFO:__main__:model=model.layers.6.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
309
+ INFO:__main__:model=model.layers.6.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
310
+ INFO:__main__:model=model.layers.6.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
311
+ INFO:__main__:model=model.layers.6.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
312
+ INFO:__main__:model=model.layers.6.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
313
+ INFO:__main__:model=model.layers.6.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
314
+ INFO:__main__:model=model.layers.6.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
315
+ INFO:__main__:model=model.layers.6.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
316
+ INFO:__main__:model=model.layers.7.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
317
+ INFO:__main__:model=model.layers.7.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
318
+ INFO:__main__:model=model.layers.7.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
319
+ INFO:__main__:model=model.layers.7.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
320
+ INFO:__main__:model=model.layers.7.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
321
+ INFO:__main__:model=model.layers.7.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
322
+ INFO:__main__:model=model.layers.7.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
323
+ INFO:__main__:model=model.layers.7.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
324
+ INFO:__main__:model=model.layers.7.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
325
+ INFO:__main__:model=model.layers.8.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
326
+ INFO:__main__:model=model.layers.8.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
327
+ INFO:__main__:model=model.layers.8.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
328
+ INFO:__main__:model=model.layers.8.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
329
+ INFO:__main__:model=model.layers.8.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
330
+ INFO:__main__:model=model.layers.8.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
331
+ INFO:__main__:model=model.layers.8.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
332
+ INFO:__main__:model=model.layers.8.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
333
+ INFO:__main__:model=model.layers.8.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
334
+ INFO:__main__:model=model.layers.9.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
335
+ INFO:__main__:model=model.layers.9.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
336
+ INFO:__main__:model=model.layers.9.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
337
+ INFO:__main__:model=model.layers.9.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
338
+ INFO:__main__:model=model.layers.9.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
339
+ INFO:__main__:model=model.layers.9.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
340
+ INFO:__main__:model=model.layers.9.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
341
+ INFO:__main__:model=model.layers.9.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
342
+ INFO:__main__:model=model.layers.9.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
343
+ INFO:__main__:model=model.layers.10.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
344
+ INFO:__main__:model=model.layers.10.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
345
+ INFO:__main__:model=model.layers.10.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
346
+ INFO:__main__:model=model.layers.10.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
347
+ INFO:__main__:model=model.layers.10.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
348
+ INFO:__main__:model=model.layers.10.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
349
+ INFO:__main__:model=model.layers.10.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
350
+ INFO:__main__:model=model.layers.10.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
351
+ INFO:__main__:model=model.layers.10.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
352
+ INFO:__main__:model=model.layers.11.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
353
+ INFO:__main__:model=model.layers.11.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
354
+ INFO:__main__:model=model.layers.11.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
355
+ INFO:__main__:model=model.layers.11.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
356
+ INFO:__main__:model=model.layers.11.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
357
+ INFO:__main__:model=model.layers.11.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
358
+ INFO:__main__:model=model.layers.11.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
359
+ INFO:__main__:model=model.layers.11.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
360
+ INFO:__main__:model=model.layers.11.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
361
+ INFO:__main__:model=model.layers.12.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
362
+ INFO:__main__:model=model.layers.12.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
363
+ INFO:__main__:model=model.layers.12.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
364
+ INFO:__main__:model=model.layers.12.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
365
+ INFO:__main__:model=model.layers.12.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
366
+ INFO:__main__:model=model.layers.12.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
367
+ INFO:__main__:model=model.layers.12.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
368
+ INFO:__main__:model=model.layers.12.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
369
+ INFO:__main__:model=model.layers.12.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
370
+ INFO:__main__:model=model.layers.13.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
371
+ INFO:__main__:model=model.layers.13.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
372
+ INFO:__main__:model=model.layers.13.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
373
+ INFO:__main__:model=model.layers.13.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
374
+ INFO:__main__:model=model.layers.13.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
375
+ INFO:__main__:model=model.layers.13.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
376
+ INFO:__main__:model=model.layers.13.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
377
+ INFO:__main__:model=model.layers.13.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
378
+ INFO:__main__:model=model.layers.13.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
379
+ INFO:__main__:model=model.layers.14.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
380
+ INFO:__main__:model=model.layers.14.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
381
+ INFO:__main__:model=model.layers.14.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
382
+ INFO:__main__:model=model.layers.14.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
383
+ INFO:__main__:model=model.layers.14.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
384
+ INFO:__main__:model=model.layers.14.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
385
+ INFO:__main__:model=model.layers.14.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
386
+ INFO:__main__:model=model.layers.14.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
387
+ INFO:__main__:model=model.layers.14.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
388
+ INFO:__main__:model=model.layers.15.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
389
+ INFO:__main__:model=model.layers.15.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
390
+ INFO:__main__:model=model.layers.15.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
391
+ INFO:__main__:model=model.layers.15.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
392
+ INFO:__main__:model=model.layers.15.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
393
+ INFO:__main__:model=model.layers.15.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
394
+ INFO:__main__:model=model.layers.15.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
395
+ INFO:__main__:model=model.layers.15.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
396
+ INFO:__main__:model=model.layers.15.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
397
+ INFO:__main__:model=model.layers.16.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
398
+ INFO:__main__:model=model.layers.16.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
399
+ INFO:__main__:model=model.layers.16.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
400
+ INFO:__main__:model=model.layers.16.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
401
+ INFO:__main__:model=model.layers.16.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
402
+ INFO:__main__:model=model.layers.16.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
403
+ INFO:__main__:model=model.layers.16.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
404
+ INFO:__main__:model=model.layers.16.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
405
+ INFO:__main__:model=model.layers.16.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
406
+ INFO:__main__:model=model.layers.17.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
407
+ INFO:__main__:model=model.layers.17.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
408
+ INFO:__main__:model=model.layers.17.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
409
+ INFO:__main__:model=model.layers.17.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
410
+ INFO:__main__:model=model.layers.17.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
411
+ INFO:__main__:model=model.layers.17.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
412
+ INFO:__main__:model=model.layers.17.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
413
+ INFO:__main__:model=model.layers.17.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
414
+ INFO:__main__:model=model.layers.17.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
415
+ INFO:__main__:model=model.layers.18.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
416
+ INFO:__main__:model=model.layers.18.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
417
+ INFO:__main__:model=model.layers.18.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
418
+ INFO:__main__:model=model.layers.18.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
419
+ INFO:__main__:model=model.layers.18.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
420
+ INFO:__main__:model=model.layers.18.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
421
+ INFO:__main__:model=model.layers.18.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
422
+ INFO:__main__:model=model.layers.18.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
423
+ INFO:__main__:model=model.layers.18.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
424
+ INFO:__main__:model=model.layers.19.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
425
+ INFO:__main__:model=model.layers.19.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
426
+ INFO:__main__:model=model.layers.19.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
427
+ INFO:__main__:model=model.layers.19.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
428
+ INFO:__main__:model=model.layers.19.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
429
+ INFO:__main__:model=model.layers.19.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
430
+ INFO:__main__:model=model.layers.19.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
431
+ INFO:__main__:model=model.layers.19.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
432
+ INFO:__main__:model=model.layers.19.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
433
+ INFO:__main__:model=model.layers.20.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
434
+ INFO:__main__:model=model.layers.20.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
435
+ INFO:__main__:model=model.layers.20.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
436
+ INFO:__main__:model=model.layers.20.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
437
+ INFO:__main__:model=model.layers.20.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
438
+ INFO:__main__:model=model.layers.20.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
439
+ INFO:__main__:model=model.layers.20.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
440
+ INFO:__main__:model=model.layers.20.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
441
+ INFO:__main__:model=model.layers.20.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
442
+ INFO:__main__:model=model.layers.21.self_attn.q_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
443
+ INFO:__main__:model=model.layers.21.self_attn.k_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
444
+ INFO:__main__:model=model.layers.21.self_attn.v_proj.weight 201 shape=torch.Size([256, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
445
+ INFO:__main__:model=model.layers.21.self_attn.o_proj.weight 201 shape=torch.Size([2048, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
446
+ INFO:__main__:model=model.layers.21.mlp.gate_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
447
+ INFO:__main__:model=model.layers.21.mlp.up_proj.weight 201 shape=torch.Size([5632, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
448
+ INFO:__main__:model=model.layers.21.mlp.down_proj.weight 201 shape=torch.Size([2048, 5632]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
449
+ INFO:__main__:model=model.layers.21.input_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
450
+ INFO:__main__:model=model.layers.21.post_attention_layernorm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
451
+ INFO:__main__:model=model.norm.weight 201 shape=torch.Size([2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
452
+ INFO:__main__:model=lm_head.weight 201 shape=torch.Size([32000, 2048]) dtype=torch.float16 cpu raio=0.5 contig=True norm=0.68
453
+ INFO:__main__:{} - done merge saving to file: matlok/tinyllama-cinder-openhermes-32k
454
+ config.json: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 724/724 [00:00<00:00, 6.15MB/s]
455
+ model.safetensors: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2.20G/2.20G [00:57<00:00, 38.0MB/s]
456
+ generation_config.json: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 133/133 [00:00<00:00, 1.82MB/s]
457
+ INFO:__main__:loading newly-created file: matlok/tinyllama-cinder-openhermes-32k
458
+ INFO:__main__:model keys=201
459
+ INFO:__main__: 0 model.embed_tokens.weight shape=torch.Size([32000, 2048]) type=torch.float16 dev=cpu contig=True
460
+ INFO:__main__: 1 model.layers.0.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
461
+ INFO:__main__: 2 model.layers.0.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
462
+ INFO:__main__: 3 model.layers.0.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
463
+ INFO:__main__: 4 model.layers.0.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
464
+ INFO:__main__: 5 model.layers.0.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
465
+ INFO:__main__: 6 model.layers.0.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
466
+ INFO:__main__: 7 model.layers.0.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
467
+ INFO:__main__: 8 model.layers.0.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
468
+ INFO:__main__: 9 model.layers.0.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
469
+ INFO:__main__: 10 model.layers.1.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
470
+ INFO:__main__: 11 model.layers.1.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
471
+ INFO:__main__: 12 model.layers.1.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
472
+ INFO:__main__: 13 model.layers.1.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
473
+ INFO:__main__: 14 model.layers.1.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
474
+ INFO:__main__: 15 model.layers.1.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
475
+ INFO:__main__: 16 model.layers.1.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
476
+ INFO:__main__: 17 model.layers.1.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
477
+ INFO:__main__: 18 model.layers.1.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
478
+ INFO:__main__: 19 model.layers.2.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
479
+ INFO:__main__: 20 model.layers.2.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
480
+ INFO:__main__: 21 model.layers.2.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
481
+ INFO:__main__: 22 model.layers.2.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
482
+ INFO:__main__: 23 model.layers.2.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
483
+ INFO:__main__: 24 model.layers.2.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
484
+ INFO:__main__: 25 model.layers.2.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
485
+ INFO:__main__: 26 model.layers.2.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
486
+ INFO:__main__: 27 model.layers.2.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
487
+ INFO:__main__: 28 model.layers.3.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
488
+ INFO:__main__: 29 model.layers.3.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
489
+ INFO:__main__: 30 model.layers.3.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
490
+ INFO:__main__: 31 model.layers.3.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
491
+ INFO:__main__: 32 model.layers.3.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
492
+ INFO:__main__: 33 model.layers.3.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
493
+ INFO:__main__: 34 model.layers.3.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
494
+ INFO:__main__: 35 model.layers.3.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
495
+ INFO:__main__: 36 model.layers.3.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
496
+ INFO:__main__: 37 model.layers.4.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
497
+ INFO:__main__: 38 model.layers.4.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
498
+ INFO:__main__: 39 model.layers.4.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
499
+ INFO:__main__: 40 model.layers.4.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
500
+ INFO:__main__: 41 model.layers.4.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
501
+ INFO:__main__: 42 model.layers.4.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
502
+ INFO:__main__: 43 model.layers.4.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
503
+ INFO:__main__: 44 model.layers.4.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
504
+ INFO:__main__: 45 model.layers.4.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
505
+ INFO:__main__: 46 model.layers.5.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
506
+ INFO:__main__: 47 model.layers.5.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
507
+ INFO:__main__: 48 model.layers.5.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
508
+ INFO:__main__: 49 model.layers.5.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
509
+ INFO:__main__: 50 model.layers.5.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
510
+ INFO:__main__: 51 model.layers.5.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
511
+ INFO:__main__: 52 model.layers.5.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
512
+ INFO:__main__: 53 model.layers.5.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
513
+ INFO:__main__: 54 model.layers.5.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
514
+ INFO:__main__: 55 model.layers.6.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
515
+ INFO:__main__: 56 model.layers.6.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
516
+ INFO:__main__: 57 model.layers.6.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
517
+ INFO:__main__: 58 model.layers.6.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
518
+ INFO:__main__: 59 model.layers.6.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
519
+ INFO:__main__: 60 model.layers.6.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
520
+ INFO:__main__: 61 model.layers.6.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
521
+ INFO:__main__: 62 model.layers.6.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
522
+ INFO:__main__: 63 model.layers.6.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
523
+ INFO:__main__: 64 model.layers.7.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
524
+ INFO:__main__: 65 model.layers.7.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
525
+ INFO:__main__: 66 model.layers.7.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
526
+ INFO:__main__: 67 model.layers.7.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
527
+ INFO:__main__: 68 model.layers.7.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
528
+ INFO:__main__: 69 model.layers.7.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
529
+ INFO:__main__: 70 model.layers.7.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
530
+ INFO:__main__: 71 model.layers.7.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
531
+ INFO:__main__: 72 model.layers.7.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
532
+ INFO:__main__: 73 model.layers.8.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
533
+ INFO:__main__: 74 model.layers.8.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
534
+ INFO:__main__: 75 model.layers.8.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
535
+ INFO:__main__: 76 model.layers.8.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
536
+ INFO:__main__: 77 model.layers.8.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
537
+ INFO:__main__: 78 model.layers.8.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
538
+ INFO:__main__: 79 model.layers.8.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
539
+ INFO:__main__: 80 model.layers.8.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
540
+ INFO:__main__: 81 model.layers.8.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
541
+ INFO:__main__: 82 model.layers.9.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
542
+ INFO:__main__: 83 model.layers.9.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
543
+ INFO:__main__: 84 model.layers.9.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
544
+ INFO:__main__: 85 model.layers.9.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
545
+ INFO:__main__: 86 model.layers.9.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
546
+ INFO:__main__: 87 model.layers.9.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
547
+ INFO:__main__: 88 model.layers.9.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
548
+ INFO:__main__: 89 model.layers.9.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
549
+ INFO:__main__: 90 model.layers.9.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
550
+ INFO:__main__: 91 model.layers.10.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
551
+ INFO:__main__: 92 model.layers.10.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
552
+ INFO:__main__: 93 model.layers.10.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
553
+ INFO:__main__: 94 model.layers.10.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
554
+ INFO:__main__: 95 model.layers.10.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
555
+ INFO:__main__: 96 model.layers.10.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
556
+ INFO:__main__: 97 model.layers.10.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
557
+ INFO:__main__: 98 model.layers.10.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__: 99 model.layers.10.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:100 model.layers.11.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:101 model.layers.11.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:102 model.layers.11.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:103 model.layers.11.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:104 model.layers.11.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:105 model.layers.11.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:106 model.layers.11.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:107 model.layers.11.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:108 model.layers.11.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:109 model.layers.12.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:110 model.layers.12.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:111 model.layers.12.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:112 model.layers.12.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:113 model.layers.12.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:114 model.layers.12.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:115 model.layers.12.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:116 model.layers.12.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:117 model.layers.12.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:118 model.layers.13.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:119 model.layers.13.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:120 model.layers.13.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:121 model.layers.13.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:122 model.layers.13.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:123 model.layers.13.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:124 model.layers.13.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:125 model.layers.13.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:126 model.layers.13.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:127 model.layers.14.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:128 model.layers.14.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:129 model.layers.14.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:130 model.layers.14.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:131 model.layers.14.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:132 model.layers.14.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:133 model.layers.14.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:134 model.layers.14.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:135 model.layers.14.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:136 model.layers.15.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:137 model.layers.15.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:138 model.layers.15.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:139 model.layers.15.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:140 model.layers.15.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:141 model.layers.15.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:142 model.layers.15.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:143 model.layers.15.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:144 model.layers.15.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:145 model.layers.16.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:146 model.layers.16.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:147 model.layers.16.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:148 model.layers.16.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:149 model.layers.16.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:150 model.layers.16.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:151 model.layers.16.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:152 model.layers.16.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:153 model.layers.16.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:154 model.layers.17.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:155 model.layers.17.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:156 model.layers.17.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:157 model.layers.17.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:158 model.layers.17.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:159 model.layers.17.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:160 model.layers.17.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:161 model.layers.17.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:162 model.layers.17.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:163 model.layers.18.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:164 model.layers.18.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:165 model.layers.18.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:166 model.layers.18.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:167 model.layers.18.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:168 model.layers.18.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:169 model.layers.18.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:170 model.layers.18.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:171 model.layers.18.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:172 model.layers.19.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:173 model.layers.19.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:174 model.layers.19.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:175 model.layers.19.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:176 model.layers.19.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:177 model.layers.19.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:178 model.layers.19.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:179 model.layers.19.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:180 model.layers.19.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:181 model.layers.20.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:182 model.layers.20.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:183 model.layers.20.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:184 model.layers.20.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:185 model.layers.20.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:186 model.layers.20.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:187 model.layers.20.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:188 model.layers.20.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:189 model.layers.20.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:190 model.layers.21.self_attn.q_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:191 model.layers.21.self_attn.k_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:192 model.layers.21.self_attn.v_proj.weight shape=torch.Size([256, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:193 model.layers.21.self_attn.o_proj.weight shape=torch.Size([2048, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:194 model.layers.21.mlp.gate_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:195 model.layers.21.mlp.up_proj.weight shape=torch.Size([5632, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:196 model.layers.21.mlp.down_proj.weight shape=torch.Size([2048, 5632]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:197 model.layers.21.input_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:198 model.layers.21.post_attention_layernorm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:199 model.norm.weight shape=torch.Size([2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:200 lm_head.weight shape=torch.Size([32000, 2048]) type=torch.float16 dev=cpu contig=True
+ INFO:__main__:done loading new model: LlamaForCausalLM(
+   (model): LlamaModel(
+     (embed_tokens): Embedding(32000, 2048)
+     (layers): ModuleList(
+       (0-21): 22 x LlamaDecoderLayer(
+         (self_attn): LlamaSdpaAttention(
+           (q_proj): Linear(in_features=2048, out_features=2048, bias=False)
+           (k_proj): Linear(in_features=2048, out_features=256, bias=False)
+           (v_proj): Linear(in_features=2048, out_features=256, bias=False)
+           (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
+           (rotary_emb): LlamaRotaryEmbedding()
+         )
+         (mlp): LlamaMLP(
+           (gate_proj): Linear(in_features=2048, out_features=5632, bias=False)
+           (up_proj): Linear(in_features=2048, out_features=5632, bias=False)
+           (down_proj): Linear(in_features=5632, out_features=2048, bias=False)
+           (act_fn): SiLU()
+         )
+         (input_layernorm): LlamaRMSNorm()
+         (post_attention_layernorm): LlamaRMSNorm()
+       )
+     )
+     (norm): LlamaRMSNorm()
+   )
+   (lm_head): Linear(in_features=2048, out_features=32000, bias=False)
+ ) file: matlok/tinyllama-cinder-openhermes-32k
+ 
+ real 1m18.070s
+ user 2m10.228s
+ sys 0m14.040s
+ ```
+
+ Note: the code sample above was adapted from [this very helpful GitHub gist](https://gist.github.com/maldevide/08829eada04ad9bd78e46c1a3787d42b).
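+ 
+ ## How do I use the merged model?
+ 
+ The merged weights load like any other HuggingFace causal language model. Below is a minimal sketch, assuming the merged model (plus a copy of the TinyLlama tokenizer) was saved or pushed to `matlok/tinyllama-cinder-openhermes-32k`; the prompt format and generation settings are illustrative only and not part of the merge itself.
+ 
+ ```python3
+ import torch
+ import transformers
+ 
+ # assumption: the merged weights and tokenizer live at this local path or HF repo id
+ model_path = "matlok/tinyllama-cinder-openhermes-32k"
+ 
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ tokenizer = transformers.AutoTokenizer.from_pretrained(model_path)
+ model = transformers.AutoModelForCausalLM.from_pretrained(
+     model_path,
+     # fp16 on GPU, fp32 on CPU to avoid half-precision issues during generation
+     torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+ ).to(device)
+ 
+ prompt = "### Instruction:\nExplain what a model merge is.\n\n### Response:\n"
+ inputs = tokenizer(prompt, return_tensors="pt").to(device)
+ 
+ # sample a short completion from the merged model
+ outputs = model.generate(
+     **inputs,
+     max_new_tokens=128,
+     do_sample=True,
+     temperature=0.7,
+ )
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```
+ 
+ For reference when reading the log above: the `k_proj`/`v_proj` out_features of 256 come from TinyLlama's grouped-query attention (4 key/value heads × 64-dim heads), while `q_proj`/`o_proj` keep the full 2048 hidden size.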