/home/floriadmin/miniforge3/envs/mlc/bin/python -m mlc_llm gen_config ../dist/models/Qwen1.5-4B --quantization q8f32_1 --conv-template chatml --output /tmp/tmpvomo8uva
[2024-03-18 19:32:31] INFO auto_config.py:115: Found model configuration: ../dist/models/Qwen1.5-4B/config.json
[2024-03-18 19:32:31] INFO auto_config.py:153: Found model type: qwen2. Use `--model-type` to override.
[2024-03-18 19:32:31] INFO qwen2_model.py:46: context_window_size not found in config.json. Falling back to max_position_embeddings (32768)
[2024-03-18 19:32:31] INFO qwen2_model.py:60: prefill_chunk_size defaults to context_window_size (32768)
[2024-03-18 19:32:31] WARNING config.py:99: Warning: Cannot override max_batch_size, because QWen2Config does not have this field
[2024-03-18 19:32:31] INFO gen_config.py:133: [generation_config.json] Setting bos_token_id: 151643
[2024-03-18 19:32:31] INFO gen_config.py:133: [generation_config.json] Setting eos_token_id: 151643
[2024-03-18 19:32:31] INFO gen_config.py:147: Not found tokenizer config: ../dist/models/Qwen1.5-4B/tokenizer.model
[2024-03-18 19:32:31] INFO gen_config.py:145: Found tokenizer config: ../dist/models/Qwen1.5-4B/tokenizer.json. Copying to /tmp/tmpvomo8uva/tokenizer.json
[2024-03-18 19:32:31] INFO gen_config.py:145: Found tokenizer config: ../dist/models/Qwen1.5-4B/vocab.json. Copying to /tmp/tmpvomo8uva/vocab.json
[2024-03-18 19:32:31] INFO gen_config.py:145: Found tokenizer config: ../dist/models/Qwen1.5-4B/merges.txt. Copying to /tmp/tmpvomo8uva/merges.txt
[2024-03-18 19:32:31] INFO gen_config.py:147: Not found tokenizer config: ../dist/models/Qwen1.5-4B/added_tokens.json
[2024-03-18 19:32:31] INFO gen_config.py:145: Found tokenizer config: ../dist/models/Qwen1.5-4B/tokenizer_config.json. Copying to /tmp/tmpvomo8uva/tokenizer_config.json
[2024-03-18 19:32:31] INFO gen_config.py:75: [System default] Setting pad_token_id: 0
[2024-03-18 19:32:31] INFO gen_config.py:75: [System default] Setting temperature: 0.7
[2024-03-18 19:32:31] INFO gen_config.py:75: [System default] Setting presence_penalty: 0.0
[2024-03-18 19:32:31] INFO gen_config.py:75: [System default] Setting frequency_penalty: 0.0
[2024-03-18 19:32:31] INFO gen_config.py:75: [System default] Setting repetition_penalty: 1.0
[2024-03-18 19:32:31] INFO gen_config.py:75: [System default] Setting top_p: 0.95
[2024-03-18 19:32:31] INFO gen_config.py:75: [System default] Setting mean_gen_len: 128
[2024-03-18 19:32:31] INFO gen_config.py:75: [System default] Setting max_gen_len: 512
[2024-03-18 19:32:31] INFO gen_config.py:75: [System default] Setting shift_fill_factor: 0.3
[2024-03-18 19:32:31] INFO gen_config.py:198: Dumping configuration file to: /tmp/tmpvomo8uva/mlc-chat-config.json
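For reference, the mlc-chat-config.json dumped above should carry exactly the values the log reports setting. A minimal sketch of inspecting it in Python (field names are inferred from the log messages rather than taken from the actual file schema, so treat them as assumptions):

    import json

    # Hypothetical inspection of the config written by `gen_config`; every
    # value checked below is one the log above reports setting.
    with open("/tmp/tmpvomo8uva/mlc-chat-config.json") as f:
        cfg = json.load(f)

    assert cfg["temperature"] == 0.7         # [System default]
    assert cfg["top_p"] == 0.95              # [System default]
    assert cfg["repetition_penalty"] == 1.0  # [System default]
    assert cfg["bos_token_id"] == 151643     # from generation_config.json
    assert cfg["eos_token_id"] == 151643     # from generation_config.json
    print(cfg["conv_template"])              # expected: chatml (--conv-template)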
/home/floriadmin/miniforge3/envs/mlc/bin/python -m mlc_llm convert_weight ../dist/models/Qwen1.5-4B --quantization q8f32_1 --source-format auto --output /tmp/tmpvomo8uva
[2024-03-18 19:32:32] INFO auto_config.py:115: Found model configuration: ../dist/models/Qwen1.5-4B/config.json
[2024-03-18 19:32:33] INFO auto_device.py:76: Found device: cuda:0
[2024-03-18 19:32:33] INFO auto_device.py:76: Found device: cuda:1
[2024-03-18 19:32:33] INFO auto_device.py:76: Found device: cuda:2
[2024-03-18 19:32:33] INFO auto_device.py:76: Found device: cuda:3
[2024-03-18 19:32:33] INFO auto_device.py:76: Found device: cuda:4
[2024-03-18 19:32:33] INFO auto_device.py:76: Found device: cuda:5
[2024-03-18 19:32:33] INFO auto_device.py:76: Found device: cuda:6
[2024-03-18 19:32:33] INFO auto_device.py:76: Found device: cuda:7
[2024-03-18 19:32:33] INFO auto_device.py:76: Found device: cuda:8
[2024-03-18 19:32:33] INFO auto_device.py:76: Found device: cuda:9
[2024-03-18 19:32:34] INFO auto_device.py:85: Not found device: rocm:0
[2024-03-18 19:32:35] INFO auto_device.py:85: Not found device: metal:0
[2024-03-18 19:32:39] INFO auto_device.py:76: Found device: vulkan:0
[2024-03-18 19:32:39] INFO auto_device.py:76: Found device: vulkan:1
[2024-03-18 19:32:39] INFO auto_device.py:76: Found device: vulkan:2
[2024-03-18 19:32:39] INFO auto_device.py:76: Found device: vulkan:3
[2024-03-18 19:32:39] INFO auto_device.py:76: Found device: vulkan:4
[2024-03-18 19:32:39] INFO auto_device.py:76: Found device: vulkan:5
[2024-03-18 19:32:39] INFO auto_device.py:76: Found device: vulkan:6
[2024-03-18 19:32:39] INFO auto_device.py:76: Found device: vulkan:7
[2024-03-18 19:32:39] INFO auto_device.py:76: Found device: vulkan:8
[2024-03-18 19:32:39] INFO auto_device.py:76: Found device: vulkan:9
[2024-03-18 19:32:39] INFO auto_device.py:76: Found device: vulkan:10
[2024-03-18 19:32:40] INFO auto_device.py:85: Not found device: opencl:0
[2024-03-18 19:32:40] INFO auto_device.py:33: Using device: cuda:0
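The scan above probes backends in a fixed order (CUDA, ROCm, Metal, Vulkan, OpenCL) and settles on the first usable device. A rough equivalent using TVM's device API (a sketch assuming a local tvm build, not the actual auto_device.py logic):

    import tvm

    # Probe each backend in the order the log shows; Device.exist is True only
    # when the runtime was built with that backend and device 0 responds.
    for make_dev in (tvm.cuda, tvm.rocm, tvm.metal, tvm.vulkan, tvm.opencl):
        dev = make_dev(0)
        if dev.exist:
            print("Using device:", dev)
            break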
[2024-03-18 19:32:40] INFO auto_weight.py:70: Finding weights in: ../dist/models/Qwen1.5-4B
[2024-03-18 19:32:40] INFO auto_weight.py:136: Not found Huggingface PyTorch
[2024-03-18 19:32:40] INFO auto_weight.py:143: Found source weight format: huggingface-safetensor. Source configuration: ../dist/models/Qwen1.5-4B/model.safetensors.index.json
[2024-03-18 19:32:40] INFO auto_weight.py:106: Using source weight configuration: ../dist/models/Qwen1.5-4B/model.safetensors.index.json. Use `--source` to override.
[2024-03-18 19:32:40] INFO auto_weight.py:110: Using source weight format: huggingface-safetensor. Use `--source-format` to override.
[2024-03-18 19:32:40] INFO auto_config.py:153: Found model type: qwen2. Use `--model-type` to override.
[2024-03-18 19:32:40] INFO qwen2_model.py:46: context_window_size not found in config.json. Falling back to max_position_embeddings (32768)
[2024-03-18 19:32:40] INFO qwen2_model.py:60: prefill_chunk_size defaults to context_window_size (32768)
Weight conversion with arguments:
  --config          ../dist/models/Qwen1.5-4B/config.json
  --quantization    GroupQuantize(name='q8f32_1', kind='group-quant', group_size=32, quantize_dtype='int8', storage_dtype='uint32', model_dtype='float32', linear_weight_layout='NK', quantize_embedding=True, quantize_final_fc=True, num_elem_per_storage=4, num_storage_per_group=8, max_int_value=127)
  --model-type      qwen2
  --device          cuda:0
  --source          ../dist/models/Qwen1.5-4B/model.safetensors.index.json
  --source-format   huggingface-safetensor
  --output          /tmp/tmpvomo8uva
Start storing to cache /tmp/tmpvomo8uva
[2024-03-18 19:32:43] INFO huggingface_loader.py:182: Loading HF parameters from: ../dist/models/Qwen1.5-4B/model-00002-of-00002.safetensors
[2024-03-18 19:32:51] INFO group_quantization.py:232: Compiling quantize function for key: ((151936, 2560), float32, cuda, axis=1, output_transpose=False)
[2024-03-18 19:32:52] INFO huggingface_loader.py:164: [Quantized] Parameter: "lm_head.q_weight", shape: (151936, 640), dtype: uint32
[2024-03-18 19:32:54] INFO huggingface_loader.py:164: [Quantized] Parameter: "lm_head.q_scale", shape: (151936, 80), dtype: float32
/home/floriadmin/miniforge3/envs/mlc/lib/python3.11/site-packages/numpy/core/getlimits.py:549: UserWarning: The value of the smallest subnormal for <class 'numpy.float32'> type is zero.
  setattr(self, word, getattr(machar, word).flat[0])
/home/floriadmin/miniforge3/envs/mlc/lib/python3.11/site-packages/numpy/core/getlimits.py:89: UserWarning: The value of the smallest subnormal for <class 'numpy.float32'> type is zero.
  return self._float_to_str(self.smallest_subnormal)
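The GroupQuantize arguments above fix the shapes that follow: quantizing along axis=1 with group_size=32 and four int8 values packed per uint32 word turns the (151936, 2560) float32 lm_head into a (151936, 640) uint32 q_weight plus a (151936, 80) float32 q_scale, since 2560/4 = 640 and 2560/32 = 80. A numpy sketch of that packing arithmetic (an illustration of the scheme only; MLC's compiled GPU kernel may round and encode values differently):

    import numpy as np

    # Illustrative q8f32_1-style group quantization along the last axis.
    def group_quantize(w, group_size=32, elems_per_word=4, max_int=127):
        n, k = w.shape
        g = w.reshape(n, k // group_size, group_size)
        # One float32 scale per group of 32 elements (guard all-zero groups).
        scale = np.maximum(np.abs(g).max(axis=-1) / max_int, 1e-8)
        q = np.clip(np.rint(g / scale[..., None]), -max_int, max_int).astype(np.int64)
        # Pack four signed 8-bit values into each uint32 storage word.
        b = (q & 0xFF).astype(np.uint32).reshape(n, k // elems_per_word, elems_per_word)
        shifts = np.arange(elems_per_word, dtype=np.uint32) * 8
        q_weight = (b << shifts).sum(axis=-1, dtype=np.uint32)
        return q_weight, scale.astype(np.float32)

    # Same arithmetic as the logged lm_head rows, on a small stand-in:
    q_weight, q_scale = group_quantize(np.random.randn(4, 2560).astype(np.float32))
    print(q_weight.shape, q_scale.shape)  # (4, 640) (4, 80)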
[2024-03-18 19:32:54] INFO huggingface_loader.py:172: [Not quantized] Parameter: "model.layers.20.input_layernorm.weight", shape: (2560,), dtype: float32
[2024-03-18 19:32:55] INFO group_quantization.py:232: Compiling quantize function for key: ((2560, 6912), float32, cuda, axis=1, output_transpose=False)
[2024-03-18 19:32:55] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.20.mlp.down_proj.q_weight", shape: (2560, 1728), dtype: uint32
[2024-03-18 19:32:55] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.20.mlp.down_proj.q_scale", shape: (2560, 216), dtype: float32
[2024-03-18 19:32:56] INFO group_quantization.py:232: Compiling quantize function for key: ((13824, 2560), float32, cuda, axis=1, output_transpose=False)
[2024-03-18 19:32:56] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.20.mlp.gate_up_proj.q_weight", shape: (13824, 640), dtype: uint32
[2024-03-18 19:32:56] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.20.mlp.gate_up_proj.q_scale", shape: (13824, 80), dtype: float32
[2024-03-18 19:32:56] INFO huggingface_loader.py:172: [Not quantized] Parameter: "model.layers.20.post_attention_layernorm.weight", shape: (2560,), dtype: float32
[2024-03-18 19:32:56] INFO group_quantization.py:232: Compiling quantize function for key: ((2560, 2560), float32, cuda, axis=1, output_transpose=False)
[2024-03-18 19:32:57] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.20.self_attn.o_proj.q_weight", shape: (2560, 640), dtype: uint32
[2024-03-18 19:32:57] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.20.self_attn.o_proj.q_scale", shape: (2560, 80), dtype: float32
[2024-03-18 19:32:57] INFO huggingface_loader.py:172: [Not quantized] Parameter: "model.layers.21.input_layernorm.weight", shape: (2560,), dtype: float32
[2024-03-18 19:32:57] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.21.mlp.down_proj.q_weight", shape: (2560, 1728), dtype: uint32
[2024-03-18 19:32:57] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.21.mlp.down_proj.q_scale", shape: (2560, 216), dtype: float32
[2024-03-18 19:32:58] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.21.mlp.gate_up_proj.q_weight", shape: (13824, 640), dtype: uint32
[2024-03-18 19:32:58] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.21.mlp.gate_up_proj.q_scale", shape: (13824, 80), dtype: float32
[2024-03-18 19:32:58] INFO huggingface_loader.py:172: [Not quantized] Parameter: "model.layers.21.post_attention_layernorm.weight", shape: (2560,), dtype: float32
[2024-03-18 19:32:58] INFO huggingface_loader.py:172: [Not quantized] Parameter: "model.layers.21.self_attn.c_attn.bias", shape: (7680,), dtype: float32
[2024-03-18 19:32:58] INFO group_quantization.py:232: Compiling quantize function for key: ((7680, 2560), float32, cuda, axis=1, output_transpose=False)
[2024-03-18 19:32:59] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.21.self_attn.c_attn.q_weight", shape: (7680, 640), dtype: uint32
[2024-03-18 19:32:59] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.21.self_attn.c_attn.q_scale", shape: (7680, 80), dtype: float32
[2024-03-18 19:32:59] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.21.self_attn.o_proj.q_weight", shape: (2560, 640), dtype: uint32
[2024-03-18 19:32:59] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.21.self_attn.o_proj.q_scale", shape: (2560, 80), dtype: float32
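Note that "Compiling quantize function" appears only the first time a new key (shape, dtype, device, axis, output_transpose) is seen; later layers with the same weight shapes reuse the already-compiled kernel, which is why conversion speeds up after the first layer. The caching is just shape-keyed memoization; a sketch of the pattern (illustrative, not MLC's group_quantization.py):

    from functools import lru_cache

    # Hypothetical stand-in for compiling a GPU quantize kernel for one key.
    def _compile_quantize(shape, dtype, device, axis, output_transpose):
        print(f"Compiling quantize function for key: ({shape}, {dtype}, "
              f"{device}, axis={axis}, output_transpose={output_transpose})")
        return lambda w: w  # pretend this is the compiled kernel

    @lru_cache(maxsize=None)
    def get_quantize_fn(shape, dtype="float32", device="cuda", axis=1,
                        output_transpose=False):
        return _compile_quantize(shape, dtype, device, axis, output_transpose)

    get_quantize_fn((13824, 2560))  # layer 20: compiles
    get_quantize_fn((13824, 2560))  # layer 21 onward: cache hit, no compile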
"[1mmodel.layers.21.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 3%|βββ | 9/283 [00:15<03:57, 1.15it/s] [2024-03-18 19:32:58] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.21.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 3%|βββ | 9/283 [00:15<03:57, 1.15it/s] [2024-03-18 19:32:58] INFO group_quantization.py:232: Compiling quantize function for key: ((7680, 2560), float32, cuda, axis=1, output_transpose=False) 3%|βββ | 9/283 [00:15<03:57, 1.15it/s] [2024-03-18 19:32:59] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.21.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 3%|βββ | 9/283 [00:16<03:57, 1.15it/s] [2024-03-18 19:32:59] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.21.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 3%|βββ | 9/283 [00:16<03:57, 1.15it/s] 4%|ββββ | 12/283 [00:16<02:34, 1.75it/s] [2024-03-18 19:32:59] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.21.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 4%|ββββ | 12/283 [00:16<02:34, 1.75it/s] [2024-03-18 19:32:59] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.21.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 4%|ββββ | 12/283 [00:16<02:34, 1.75it/s] 5%|βββββ | 13/283 [00:16<02:12, 2.04it/s] [2024-03-18 19:32:59] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.22.input_layernorm.weight[0m", shape: (2560,), dtype: float32 5%|βββββ | 13/283 [00:16<02:12, 2.04it/s] [2024-03-18 19:32:59] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.22.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 5%|βββββ | 13/283 [00:16<02:12, 2.04it/s] [2024-03-18 19:32:59] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.22.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 5%|βββββ | 13/283 [00:16<02:12, 2.04it/s] 5%|βββββ | 15/283 [00:16<01:37, 2.74it/s] [2024-03-18 19:33:00] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.22.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 5%|βββββ | 15/283 [00:17<01:37, 2.74it/s] [2024-03-18 19:33:00] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.22.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 5%|βββββ | 15/283 [00:17<01:37, 2.74it/s] 6%|ββββββ | 16/283 [00:17<01:52, 2.36it/s] [2024-03-18 19:33:00] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.22.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 6%|ββββββ | 16/283 [00:17<01:52, 2.36it/s] [2024-03-18 19:33:00] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.22.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 6%|ββββββ | 16/283 [00:17<01:52, 2.36it/s] [2024-03-18 19:33:00] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.22.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 6%|ββββββ | 16/283 [00:17<01:52, 2.36it/s] [2024-03-18 19:33:00] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.22.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 6%|ββββββ | 16/283 [00:17<01:52, 2.36it/s] 7%|ββββββ | 19/283 [00:17<01:13, 3.58it/s] [2024-03-18 19:33:00] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.22.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 7%|ββββββ | 19/283 
[00:17<01:13, 3.58it/s] [2024-03-18 19:33:00] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.22.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 7%|ββββββ | 19/283 [00:17<01:13, 3.58it/s] 7%|βββββββ | 20/283 [00:17<01:07, 3.89it/s] [2024-03-18 19:33:00] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.23.input_layernorm.weight[0m", shape: (2560,), dtype: float32 7%|βββββββ | 20/283 [00:17<01:07, 3.89it/s] [2024-03-18 19:33:01] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.23.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 7%|βββββββ | 20/283 [00:17<01:07, 3.89it/s] [2024-03-18 19:33:01] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.23.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 7%|βββββββ | 20/283 [00:18<01:07, 3.89it/s] 8%|βββββββ | 22/283 [00:18<00:56, 4.62it/s] [2024-03-18 19:33:01] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.23.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 8%|βββββββ | 22/283 [00:18<00:56, 4.62it/s] [2024-03-18 19:33:01] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.23.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 8%|βββββββ | 22/283 [00:18<00:56, 4.62it/s] 8%|ββββββββ | 23/283 [00:18<01:16, 3.39it/s] [2024-03-18 19:33:01] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.23.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 8%|ββββββββ | 23/283 [00:18<01:16, 3.39it/s] [2024-03-18 19:33:01] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.23.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 8%|ββββββββ | 23/283 [00:18<01:16, 3.39it/s] [2024-03-18 19:33:02] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.23.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 8%|ββββββββ | 23/283 [00:18<01:16, 3.39it/s] [2024-03-18 19:33:02] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.23.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 8%|ββββββββ | 23/283 [00:18<01:16, 3.39it/s] 9%|βββββββββ | 26/283 [00:18<00:54, 4.73it/s] [2024-03-18 19:33:02] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.23.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 9%|βββββββββ | 26/283 [00:19<00:54, 4.73it/s] [2024-03-18 19:33:02] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.23.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 9%|βββββββββ | 26/283 [00:19<00:54, 4.73it/s] 10%|βββββββββ | 27/283 [00:19<00:51, 4.98it/s] [2024-03-18 19:33:02] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.24.input_layernorm.weight[0m", shape: (2560,), dtype: float32 10%|βββββββββ | 27/283 [00:19<00:51, 4.98it/s] [2024-03-18 19:33:02] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.24.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 10%|βββββββββ | 27/283 [00:19<00:51, 4.98it/s] [2024-03-18 19:33:02] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.24.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 10%|βββββββββ | 27/283 [00:19<00:51, 4.98it/s] 10%|ββββββββββ | 29/283 [00:19<00:45, 5.62it/s] [2024-03-18 19:33:03] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.24.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 
10%|ββββββββββ | 29/283 [00:19<00:45, 5.62it/s] [2024-03-18 19:33:03] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.24.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 10%|ββββββββββ | 29/283 [00:20<00:45, 5.62it/s] 11%|ββββββββββ | 30/283 [00:20<01:06, 3.80it/s] [2024-03-18 19:33:03] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.24.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 11%|ββββββββββ | 30/283 [00:20<01:06, 3.80it/s] [2024-03-18 19:33:03] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.24.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 11%|ββββββββββ | 30/283 [00:20<01:06, 3.80it/s] [2024-03-18 19:33:03] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.24.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 11%|ββββββββββ | 30/283 [00:20<01:06, 3.80it/s] [2024-03-18 19:33:03] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.24.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 11%|ββββββββββ | 30/283 [00:20<01:06, 3.80it/s] 12%|βββββββββββ | 33/283 [00:20<00:48, 5.13it/s] [2024-03-18 19:33:03] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.24.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 12%|βββββββββββ | 33/283 [00:20<00:48, 5.13it/s] [2024-03-18 19:33:03] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.24.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 12%|βββββββββββ | 33/283 [00:20<00:48, 5.13it/s] 12%|βββββββββββ | 34/283 [00:20<00:46, 5.34it/s] [2024-03-18 19:33:03] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.25.input_layernorm.weight[0m", shape: (2560,), dtype: float32 12%|βββββββββββ | 34/283 [00:20<00:46, 5.34it/s] [2024-03-18 19:33:03] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.25.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 12%|βββββββββββ | 34/283 [00:20<00:46, 5.34it/s] [2024-03-18 19:33:03] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.25.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 12%|βββββββββββ | 34/283 [00:20<00:46, 5.34it/s] 13%|ββββββββββββ | 36/283 [00:20<00:41, 5.94it/s] [2024-03-18 19:33:04] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.25.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 13%|ββββββββββββ | 36/283 [00:21<00:41, 5.94it/s] [2024-03-18 19:33:04] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.25.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 13%|ββββββββββββ | 36/283 [00:21<00:41, 5.94it/s] 13%|ββββββββββββ | 37/283 [00:21<01:03, 3.86it/s] [2024-03-18 19:33:04] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.25.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 13%|ββββββββββββ | 37/283 [00:21<01:03, 3.86it/s] [2024-03-18 19:33:04] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.25.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 13%|ββββββββββββ | 37/283 [00:21<01:03, 3.86it/s] [2024-03-18 19:33:04] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.25.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 13%|ββββββββββββ | 37/283 [00:21<01:03, 3.86it/s] [2024-03-18 19:33:04] INFO huggingface_loader.py:164: [Quantized] Parameter: 
"[1mmodel.layers.25.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 13%|ββββββββββββ | 37/283 [00:21<01:03, 3.86it/s] 14%|βββββββββββββ | 40/283 [00:21<00:46, 5.18it/s] [2024-03-18 19:33:05] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.25.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 14%|βββββββββββββ | 40/283 [00:21<00:46, 5.18it/s] [2024-03-18 19:33:05] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.25.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 14%|βββββββββββββ | 40/283 [00:21<00:46, 5.18it/s] 14%|ββββββββββββββ | 41/283 [00:21<00:44, 5.40it/s] [2024-03-18 19:33:05] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.26.input_layernorm.weight[0m", shape: (2560,), dtype: float32 14%|ββββββββββββββ | 41/283 [00:21<00:44, 5.40it/s] [2024-03-18 19:33:05] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.26.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 14%|ββββββββββββββ | 41/283 [00:22<00:44, 5.40it/s] [2024-03-18 19:33:05] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.26.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 14%|ββββββββββββββ | 41/283 [00:22<00:44, 5.40it/s] 15%|ββββββββββββββ | 43/283 [00:22<00:39, 6.01it/s] [2024-03-18 19:33:05] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.26.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 15%|ββββββββββββββ | 43/283 [00:22<00:39, 6.01it/s] [2024-03-18 19:33:05] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.26.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 15%|ββββββββββββββ | 43/283 [00:22<00:39, 6.01it/s] 16%|βββββββββββββββ | 44/283 [00:22<01:00, 3.93it/s] [2024-03-18 19:33:05] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.26.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 16%|βββββββββββββββ | 44/283 [00:22<01:00, 3.93it/s] [2024-03-18 19:33:05] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.26.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 16%|βββββββββββββββ | 44/283 [00:22<01:00, 3.93it/s] [2024-03-18 19:33:06] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.26.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 16%|βββββββββββββββ | 44/283 [00:23<01:00, 3.93it/s] [2024-03-18 19:33:06] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.26.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 16%|βββββββββββββββ | 44/283 [00:23<01:00, 3.93it/s] 17%|βββββββββββββββ | 47/283 [00:23<00:44, 5.25it/s] [2024-03-18 19:33:06] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.26.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 17%|βββββββββββββββ | 47/283 [00:23<00:44, 5.25it/s] [2024-03-18 19:33:06] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.26.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 17%|βββββββββββββββ | 47/283 [00:23<00:44, 5.25it/s] 17%|ββββββββββββββββ | 48/283 [00:23<00:43, 5.43it/s] [2024-03-18 19:33:06] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.27.input_layernorm.weight[0m", shape: (2560,), dtype: float32 17%|ββββββββββββββββ | 48/283 [00:23<00:43, 5.43it/s] [2024-03-18 19:33:06] INFO huggingface_loader.py:164: [Quantized] Parameter: 
"[1mmodel.layers.27.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 17%|ββββββββββββββββ | 48/283 [00:23<00:43, 5.43it/s] [2024-03-18 19:33:06] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.27.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 17%|ββββββββββββββββ | 48/283 [00:23<00:43, 5.43it/s] 18%|ββββββββββββββββ | 50/283 [00:23<00:38, 6.01it/s] [2024-03-18 19:33:07] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.27.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 18%|ββββββββββββββββ | 50/283 [00:23<00:38, 6.01it/s] [2024-03-18 19:33:07] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.27.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 18%|ββββββββββββββββ | 50/283 [00:24<00:38, 6.01it/s] 18%|βββββββββββββββββ | 51/283 [00:24<00:58, 3.95it/s] [2024-03-18 19:33:07] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.27.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 18%|βββββββββββββββββ | 51/283 [00:24<00:58, 3.95it/s] [2024-03-18 19:33:07] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.27.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 18%|βββββββββββββββββ | 51/283 [00:24<00:58, 3.95it/s] [2024-03-18 19:33:07] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.27.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 18%|βββββββββββββββββ | 51/283 [00:24<00:58, 3.95it/s] [2024-03-18 19:33:07] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.27.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 18%|βββββββββββββββββ | 51/283 [00:24<00:58, 3.95it/s] 19%|ββββββββββββββββββ | 54/283 [00:24<00:43, 5.29it/s] [2024-03-18 19:33:07] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.27.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 19%|ββββββββββββββββββ | 54/283 [00:24<00:43, 5.29it/s] [2024-03-18 19:33:07] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.27.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 19%|ββββββββββββββββββ | 54/283 [00:24<00:43, 5.29it/s] 19%|ββββββββββββββββββ | 55/283 [00:24<00:41, 5.49it/s] [2024-03-18 19:33:07] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.28.input_layernorm.weight[0m", shape: (2560,), dtype: float32 19%|ββββββββββββββββββ | 55/283 [00:24<00:41, 5.49it/s] [2024-03-18 19:33:08] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.28.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 19%|ββββββββββββββββββ | 55/283 [00:24<00:41, 5.49it/s] [2024-03-18 19:33:08] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.28.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 19%|ββββββββββββββββββ | 55/283 [00:24<00:41, 5.49it/s] 20%|βββββββββββββββββββ | 57/283 [00:24<00:37, 6.05it/s] [2024-03-18 19:33:08] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.28.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 20%|βββββββββββββββββββ | 57/283 [00:25<00:37, 6.05it/s] [2024-03-18 19:33:08] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.28.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 20%|βββββββββββββββββββ | 57/283 [00:25<00:37, 6.05it/s] 20%|βββββββββββββββββββ | 58/283 [00:25<00:56, 3.96it/s] [2024-03-18 19:33:08] INFO 
huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.28.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 20%|βββββββββββββββββββ | 58/283 [00:25<00:56, 3.96it/s] [2024-03-18 19:33:08] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.28.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 20%|βββββββββββββββββββ | 58/283 [00:25<00:56, 3.96it/s] [2024-03-18 19:33:08] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.28.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 20%|βββββββββββββββββββ | 58/283 [00:25<00:56, 3.96it/s] [2024-03-18 19:33:09] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.28.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 20%|βββββββββββββββββββ | 58/283 [00:25<00:56, 3.96it/s] 22%|ββββββββββββββββββββ | 61/283 [00:25<00:41, 5.30it/s] [2024-03-18 19:33:09] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.28.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 22%|ββββββββββββββββββββ | 61/283 [00:25<00:41, 5.30it/s] [2024-03-18 19:33:09] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.28.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 22%|ββββββββββββββββββββ | 61/283 [00:25<00:41, 5.30it/s] 22%|ββββββββββββββββββββ | 62/283 [00:25<00:40, 5.51it/s] [2024-03-18 19:33:09] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.29.input_layernorm.weight[0m", shape: (2560,), dtype: float32 22%|ββββββββββββββββββββ | 62/283 [00:25<00:40, 5.51it/s] [2024-03-18 19:33:09] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.29.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 22%|ββββββββββββββββββββ | 62/283 [00:26<00:40, 5.51it/s] [2024-03-18 19:33:09] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.29.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 22%|ββββββββββββββββββββ | 62/283 [00:26<00:40, 5.51it/s] 23%|βββββββββββββββββββββ | 64/283 [00:26<00:36, 6.05it/s] [2024-03-18 19:33:09] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.29.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 23%|βββββββββββββββββββββ | 64/283 [00:26<00:36, 6.05it/s] [2024-03-18 19:33:10] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.29.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 23%|βββββββββββββββββββββ | 64/283 [00:26<00:36, 6.05it/s] 23%|βββββββββββββββββββββ | 65/283 [00:26<00:55, 3.95it/s] [2024-03-18 19:33:10] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.29.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 23%|βββββββββββββββββββββ | 65/283 [00:26<00:55, 3.95it/s] [2024-03-18 19:33:10] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.29.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 23%|βββββββββββββββββββββ | 65/283 [00:26<00:55, 3.95it/s] [2024-03-18 19:33:10] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.29.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 23%|βββββββββββββββββββββ | 65/283 [00:27<00:55, 3.95it/s] [2024-03-18 19:33:10] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.29.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 23%|βββββββββββββββββββββ | 65/283 [00:27<00:55, 3.95it/s] 24%|ββββββββββββββββββββββ | 68/283 
[00:27<00:40, 5.29it/s] [2024-03-18 19:33:10] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.29.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 24%|ββββββββββββββββββββββ | 68/283 [00:27<00:40, 5.29it/s] [2024-03-18 19:33:10] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.29.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 24%|ββββββββββββββββββββββ | 68/283 [00:27<00:40, 5.29it/s] 24%|βββββββββββββββββββββββ | 69/283 [00:27<00:38, 5.49it/s] [2024-03-18 19:33:10] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.30.input_layernorm.weight[0m", shape: (2560,), dtype: float32 24%|βββββββββββββββββββββββ | 69/283 [00:27<00:38, 5.49it/s] [2024-03-18 19:33:10] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.30.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 24%|βββββββββββββββββββββββ | 69/283 [00:27<00:38, 5.49it/s] [2024-03-18 19:33:10] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.30.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 24%|βββββββββββββββββββββββ | 69/283 [00:27<00:38, 5.49it/s] 25%|βββββββββββββββββββββββ | 71/283 [00:27<00:35, 6.06it/s] [2024-03-18 19:33:11] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.30.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 25%|βββββββββββββββββββββββ | 71/283 [00:28<00:35, 6.06it/s] [2024-03-18 19:33:11] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.30.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 25%|βββββββββββββββββββββββ | 71/283 [00:28<00:35, 6.06it/s] 25%|ββββββββββββββββββββββββ | 72/283 [00:28<00:53, 3.93it/s] [2024-03-18 19:33:11] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.30.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 25%|ββββββββββββββββββββββββ | 72/283 [00:28<00:53, 3.93it/s] [2024-03-18 19:33:11] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.30.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 25%|ββββββββββββββββββββββββ | 72/283 [00:28<00:53, 3.93it/s] [2024-03-18 19:33:11] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.30.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 25%|ββββββββββββββββββββββββ | 72/283 [00:28<00:53, 3.93it/s] [2024-03-18 19:33:11] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.30.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 25%|ββββββββββββββββββββββββ | 72/283 [00:28<00:53, 3.93it/s] 27%|ββββββββββββββββββββββββ | 75/283 [00:28<00:39, 5.26it/s] [2024-03-18 19:33:11] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.30.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 27%|ββββββββββββββββββββββββ | 75/283 [00:28<00:39, 5.26it/s] [2024-03-18 19:33:11] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.30.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 27%|ββββββββββββββββββββββββ | 75/283 [00:28<00:39, 5.26it/s] 27%|βββββββββββββββββββββββββ | 76/283 [00:28<00:37, 5.45it/s] [2024-03-18 19:33:11] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.31.input_layernorm.weight[0m", shape: (2560,), dtype: float32 27%|βββββββββββββββββββββββββ | 76/283 [00:28<00:37, 5.45it/s] [2024-03-18 19:33:12] INFO huggingface_loader.py:164: [Quantized] Parameter: 
"[1mmodel.layers.31.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 27%|βββββββββββββββββββββββββ | 76/283 [00:28<00:37, 5.45it/s] [2024-03-18 19:33:12] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.31.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 27%|βββββββββββββββββββββββββ | 76/283 [00:29<00:37, 5.45it/s] 28%|βββββββββββββββββββββββββ | 78/283 [00:29<00:34, 6.01it/s] [2024-03-18 19:33:12] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.31.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 28%|βββββββββββββββββββββββββ | 78/283 [00:29<00:34, 6.01it/s] [2024-03-18 19:33:12] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.31.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 28%|βββββββββββββββββββββββββ | 78/283 [00:29<00:34, 6.01it/s] 28%|ββββββββββββββββββββββββββ | 79/283 [00:29<00:52, 3.92it/s] [2024-03-18 19:33:12] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.31.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 28%|ββββββββββββββββββββββββββ | 79/283 [00:29<00:52, 3.92it/s] [2024-03-18 19:33:12] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.31.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 28%|ββββββββββββββββββββββββββ | 79/283 [00:29<00:52, 3.92it/s] [2024-03-18 19:33:13] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.31.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 28%|ββββββββββββββββββββββββββ | 79/283 [00:29<00:52, 3.92it/s] [2024-03-18 19:33:13] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.31.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 28%|ββββββββββββββββββββββββββ | 79/283 [00:29<00:52, 3.92it/s] 29%|βββββββββββββββββββββββββββ | 82/283 [00:29<00:38, 5.26it/s] [2024-03-18 19:33:13] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.31.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 29%|βββββββββββββββββββββββββββ | 82/283 [00:30<00:38, 5.26it/s] [2024-03-18 19:33:13] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.31.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 29%|βββββββββββββββββββββββββββ | 82/283 [00:30<00:38, 5.26it/s] 29%|βββββββββββββββββββββββββββ | 83/283 [00:30<00:36, 5.44it/s] [2024-03-18 19:33:13] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.32.input_layernorm.weight[0m", shape: (2560,), dtype: float32 29%|βββββββββββββββββββββββββββ | 83/283 [00:30<00:36, 5.44it/s] [2024-03-18 19:33:13] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.32.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 29%|βββββββββββββββββββββββββββ | 83/283 [00:30<00:36, 5.44it/s] [2024-03-18 19:33:13] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.32.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 29%|βββββββββββββββββββββββββββ | 83/283 [00:30<00:36, 5.44it/s] 30%|ββββββββββββββββββββββββββββ | 85/283 [00:30<00:33, 5.99it/s] [2024-03-18 19:33:14] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.32.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 30%|ββββββββββββββββββββββββββββ | 85/283 [00:30<00:33, 5.99it/s] [2024-03-18 19:33:14] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.32.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), 
dtype: float32 30%|ββββββββββββββββββββββββββββ | 85/283 [00:31<00:33, 5.99it/s] 30%|ββββββββββββββββββββββββββββ | 86/283 [00:31<00:51, 3.86it/s] [2024-03-18 19:33:14] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.32.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 30%|ββββββββββββββββββββββββββββ | 86/283 [00:31<00:51, 3.86it/s] [2024-03-18 19:33:14] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.32.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 30%|ββββββββββββββββββββββββββββ | 86/283 [00:31<00:51, 3.86it/s] [2024-03-18 19:33:14] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.32.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 30%|ββββββββββββββββββββββββββββ | 86/283 [00:31<00:51, 3.86it/s] [2024-03-18 19:33:14] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.32.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 30%|ββββββββββββββββββββββββββββ | 86/283 [00:31<00:51, 3.86it/s] 31%|βββββββββββββββββββββββββββββ | 89/283 [00:31<00:37, 5.20it/s] [2024-03-18 19:33:14] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.32.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 31%|βββββββββββββββββββββββββββββ | 89/283 [00:31<00:37, 5.20it/s] [2024-03-18 19:33:14] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.32.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 31%|βββββββββββββββββββββββββββββ | 89/283 [00:31<00:37, 5.20it/s] 32%|βββββββββββββββββββββββββββββ | 90/283 [00:31<00:36, 5.35it/s] [2024-03-18 19:33:14] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.33.input_layernorm.weight[0m", shape: (2560,), dtype: float32 32%|βββββββββββββββββββββββββββββ | 90/283 [00:31<00:36, 5.35it/s] [2024-03-18 19:33:14] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.33.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 32%|βββββββββββββββββββββββββββββ | 90/283 [00:31<00:36, 5.35it/s] [2024-03-18 19:33:14] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.33.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 32%|βββββββββββββββββββββββββββββ | 90/283 [00:31<00:36, 5.35it/s] 33%|ββββββββββββββββββββββββββββββ | 92/283 [00:31<00:32, 5.96it/s] [2024-03-18 19:33:15] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.33.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 33%|ββββββββββββββββββββββββββββββ | 92/283 [00:32<00:32, 5.96it/s] [2024-03-18 19:33:15] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.33.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 33%|ββββββββββββββββββββββββββββββ | 92/283 [00:32<00:32, 5.96it/s] 33%|ββββββββββββββββββββββββββββββ | 93/283 [00:32<00:50, 3.78it/s] [2024-03-18 19:33:15] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.33.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 33%|ββββββββββββββββββββββββββββββ | 93/283 [00:32<00:50, 3.78it/s] [2024-03-18 19:33:15] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.33.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 33%|ββββββββββββββββββββββββββββββ | 93/283 [00:32<00:50, 3.78it/s] [2024-03-18 19:33:15] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.33.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: 
uint32 33%|ββββββββββββββββββββββββββββββ | 93/283 [00:32<00:50, 3.78it/s] [2024-03-18 19:33:15] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.33.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 33%|ββββββββββββββββββββββββββββββ | 93/283 [00:32<00:50, 3.78it/s] 34%|βββββββββββββββββββββββββββββββ | 96/283 [00:32<00:36, 5.11it/s] [2024-03-18 19:33:16] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.33.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 34%|βββββββββββββββββββββββββββββββ | 96/283 [00:32<00:36, 5.11it/s] [2024-03-18 19:33:16] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.33.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 34%|βββββββββββββββββββββββββββββββ | 96/283 [00:32<00:36, 5.11it/s] 34%|ββββββββββββββββββββββββββββββββ | 97/283 [00:32<00:34, 5.34it/s] [2024-03-18 19:33:16] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.34.input_layernorm.weight[0m", shape: (2560,), dtype: float32 34%|ββββββββββββββββββββββββββββββββ | 97/283 [00:32<00:34, 5.34it/s] [2024-03-18 19:33:16] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.34.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 34%|ββββββββββββββββββββββββββββββββ | 97/283 [00:33<00:34, 5.34it/s] [2024-03-18 19:33:16] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.34.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 34%|ββββββββββββββββββββββββββββββββ | 97/283 [00:33<00:34, 5.34it/s] 35%|ββββββββββββββββββββββββββββββββ | 99/283 [00:33<00:31, 5.86it/s] [2024-03-18 19:33:16] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.34.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 35%|ββββββββββββββββββββββββββββββββ | 99/283 [00:33<00:31, 5.86it/s] [2024-03-18 19:33:17] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.34.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 35%|ββββββββββββββββββββββββββββββββ | 99/283 [00:33<00:31, 5.86it/s] 35%|ββββββββββββββββββββββββββββββββ | 100/283 [00:33<00:47, 3.85it/s] [2024-03-18 19:33:17] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.34.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 35%|ββββββββββββββββββββββββββββββββ | 100/283 [00:33<00:47, 3.85it/s] [2024-03-18 19:33:17] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.34.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 35%|ββββββββββββββββββββββββββββββββ | 100/283 [00:33<00:47, 3.85it/s] [2024-03-18 19:33:17] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.34.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 35%|ββββββββββββββββββββββββββββββββ | 100/283 [00:34<00:47, 3.85it/s] [2024-03-18 19:33:17] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.34.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 35%|ββββββββββββββββββββββββββββββββ | 100/283 [00:34<00:47, 3.85it/s] 36%|βββββββββββββββββββββββββββββββββ | 103/283 [00:34<00:34, 5.17it/s] [2024-03-18 19:33:17] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.34.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 36%|βββββββββββββββββββββββββββββββββ | 103/283 [00:34<00:34, 5.17it/s] [2024-03-18 19:33:17] INFO huggingface_loader.py:164: [Quantized] Parameter: 
"[1mmodel.layers.34.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 36%|βββββββββββββββββββββββββββββββββ | 103/283 [00:34<00:34, 5.17it/s] 37%|βββββββββββββββββββββββββββββββββ | 104/283 [00:34<00:33, 5.29it/s] [2024-03-18 19:33:17] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.35.input_layernorm.weight[0m", shape: (2560,), dtype: float32 37%|βββββββββββββββββββββββββββββββββ | 104/283 [00:34<00:33, 5.29it/s] [2024-03-18 19:33:17] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.35.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 37%|βββββββββββββββββββββββββββββββββ | 104/283 [00:34<00:33, 5.29it/s] [2024-03-18 19:33:17] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.35.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 37%|βββββββββββββββββββββββββββββββββ | 104/283 [00:34<00:33, 5.29it/s] 37%|ββββββββββββββββββββββββββββββββββ | 106/283 [00:34<00:29, 5.91it/s] [2024-03-18 19:33:18] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.35.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 37%|ββββββββββββββββββββββββββββββββββ | 106/283 [00:35<00:29, 5.91it/s] [2024-03-18 19:33:18] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.35.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 37%|ββββββββββββββββββββββββββββββββββ | 106/283 [00:35<00:29, 5.91it/s] 38%|ββββββββββββββββββββββββββββββββββ | 107/283 [00:35<00:45, 3.86it/s] [2024-03-18 19:33:18] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.35.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 38%|ββββββββββββββββββββββββββββββββββ | 107/283 [00:35<00:45, 3.86it/s] [2024-03-18 19:33:18] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.35.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 38%|ββββββββββββββββββββββββββββββββββ | 107/283 [00:35<00:45, 3.86it/s] [2024-03-18 19:33:18] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.35.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 38%|ββββββββββββββββββββββββββββββββββ | 107/283 [00:35<00:45, 3.86it/s] [2024-03-18 19:33:18] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.35.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 38%|ββββββββββββββββββββββββββββββββββ | 107/283 [00:35<00:45, 3.86it/s] 39%|βββββββββββββββββββββββββββββββββββ | 110/283 [00:35<00:33, 5.17it/s] [2024-03-18 19:33:18] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.35.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 39%|βββββββββββββββββββββββββββββββββββ | 110/283 [00:35<00:33, 5.17it/s] [2024-03-18 19:33:18] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.35.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 39%|βββββββββββββββββββββββββββββββββββ | 110/283 [00:35<00:33, 5.17it/s] 39%|ββββββββββββββββββββββββββββββββββββ | 111/283 [00:35<00:32, 5.33it/s] [2024-03-18 19:33:18] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.36.input_layernorm.weight[0m", shape: (2560,), dtype: float32 39%|ββββββββββββββββββββββββββββββββββββ | 111/283 [00:35<00:32, 5.33it/s] [2024-03-18 19:33:19] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.36.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 39%|ββββββββββββββββββββββββββββββββββββ | 111/283 
[00:35<00:32, 5.33it/s] [2024-03-18 19:33:19] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.36.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 39%|ββββββββββββββββββββββββββββββββββββ | 111/283 [00:36<00:32, 5.33it/s] 40%|ββββββββββββββββββββββββββββββββββββ | 113/283 [00:36<00:28, 5.94it/s] [2024-03-18 19:33:19] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.36.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 40%|ββββββββββββββββββββββββββββββββββββ | 113/283 [00:36<00:28, 5.94it/s] [2024-03-18 19:33:20] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.36.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 40%|ββββββββββββββββββββββββββββββββββββ | 113/283 [00:36<00:28, 5.94it/s] 40%|βββββββββββββββββββββββββββββββββββββ | 114/283 [00:37<00:56, 2.99it/s] [2024-03-18 19:33:20] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.36.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 40%|βββββββββββββββββββββββββββββββββββββ | 114/283 [00:37<00:56, 2.99it/s] [2024-03-18 19:33:20] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.36.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 40%|βββββββββββββββββββββββββββββββββββββ | 114/283 [00:37<00:56, 2.99it/s] [2024-03-18 19:33:20] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.36.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 40%|βββββββββββββββββββββββββββββββββββββ | 114/283 [00:37<00:56, 2.99it/s] [2024-03-18 19:33:20] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.36.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 40%|βββββββββββββββββββββββββββββββββββββ | 114/283 [00:37<00:56, 2.99it/s] 41%|ββββββββββββββββββββββββββββββββββββββ | 117/283 [00:37<00:39, 4.23it/s] [2024-03-18 19:33:20] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.36.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 41%|ββββββββββββββββββββββββββββββββββββββ | 117/283 [00:37<00:39, 4.23it/s] [2024-03-18 19:33:20] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.36.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 41%|ββββββββββββββββββββββββββββββββββββββ | 117/283 [00:37<00:39, 4.23it/s] 42%|ββββββββββββββββββββββββββββββββββββββ | 118/283 [00:37<00:36, 4.51it/s] [2024-03-18 19:33:20] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.37.input_layernorm.weight[0m", shape: (2560,), dtype: float32 42%|ββββββββββββββββββββββββββββββββββββββ | 118/283 [00:37<00:36, 4.51it/s] [2024-03-18 19:33:20] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.37.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 42%|ββββββββββββββββββββββββββββββββββββββ | 118/283 [00:37<00:36, 4.51it/s] [2024-03-18 19:33:21] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.37.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 42%|ββββββββββββββββββββββββββββββββββββββ | 118/283 [00:37<00:36, 4.51it/s] 42%|βββββββββββββββββββββββββββββββββββββββ | 120/283 [00:37<00:31, 5.22it/s] [2024-03-18 19:33:21] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.37.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 42%|βββββββββββββββββββββββββββββββββββββββ | 120/283 [00:38<00:31, 5.22it/s] [2024-03-18 19:33:21] INFO huggingface_loader.py:164: 
[Quantized] Parameter: "[1mmodel.layers.37.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 42%|βββββββββββββββββββββββββββββββββββββββ | 120/283 [00:38<00:31, 5.22it/s] 43%|βββββββββββββββββββββββββββββββββββββββ | 121/283 [00:38<00:44, 3.61it/s] [2024-03-18 19:33:21] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.37.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 43%|βββββββββββββββββββββββββββββββββββββββ | 121/283 [00:38<00:44, 3.61it/s] [2024-03-18 19:33:21] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.37.self_attn.c_attn.bias[0m", shape: (7680,), dtype: float32 43%|βββββββββββββββββββββββββββββββββββββββ | 121/283 [00:38<00:44, 3.61it/s] [2024-03-18 19:33:21] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.37.self_attn.c_attn.q_weight[0m", shape: (7680, 640), dtype: uint32 43%|βββββββββββββββββββββββββββββββββββββββ | 121/283 [00:38<00:44, 3.61it/s] [2024-03-18 19:33:21] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.37.self_attn.c_attn.q_scale[0m", shape: (7680, 80), dtype: float32 43%|βββββββββββββββββββββββββββββββββββββββ | 121/283 [00:38<00:44, 3.61it/s] 44%|ββββββββββββββββββββββββββββββββββββββββ | 124/283 [00:38<00:32, 4.94it/s] [2024-03-18 19:33:22] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.37.self_attn.o_proj.q_weight[0m", shape: (2560, 640), dtype: uint32 44%|ββββββββββββββββββββββββββββββββββββββββ | 124/283 [00:38<00:32, 4.94it/s] [2024-03-18 19:33:22] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.37.self_attn.o_proj.q_scale[0m", shape: (2560, 80), dtype: float32 44%|ββββββββββββββββββββββββββββββββββββββββ | 124/283 [00:38<00:32, 4.94it/s] 44%|ββββββββββββββββββββββββββββββββββββββββ | 125/283 [00:38<00:30, 5.18it/s] [2024-03-18 19:33:22] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.38.input_layernorm.weight[0m", shape: (2560,), dtype: float32 44%|ββββββββββββββββββββββββββββββββββββββββ | 125/283 [00:38<00:30, 5.18it/s] [2024-03-18 19:33:22] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.38.mlp.down_proj.q_weight[0m", shape: (2560, 1728), dtype: uint32 44%|ββββββββββββββββββββββββββββββββββββββββ | 125/283 [00:39<00:30, 5.18it/s] [2024-03-18 19:33:22] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.38.mlp.down_proj.q_scale[0m", shape: (2560, 216), dtype: float32 44%|ββββββββββββββββββββββββββββββββββββββββ | 125/283 [00:39<00:30, 5.18it/s] 45%|βββββββββββββββββββββββββββββββββββββββββ | 127/283 [00:39<00:26, 5.82it/s] [2024-03-18 19:33:23] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.38.mlp.gate_up_proj.q_weight[0m", shape: (13824, 640), dtype: uint32 45%|βββββββββββββββββββββββββββββββββββββββββ | 127/283 [00:39<00:26, 5.82it/s] [2024-03-18 19:33:23] INFO huggingface_loader.py:164: [Quantized] Parameter: "[1mmodel.layers.38.mlp.gate_up_proj.q_scale[0m", shape: (13824, 80), dtype: float32 45%|βββββββββββββββββββββββββββββββββββββββββ | 127/283 [00:40<00:26, 5.82it/s] 45%|βββββββββββββββββββββββββββββββββββββββββ | 128/283 [00:40<00:48, 3.22it/s] [2024-03-18 19:33:23] INFO huggingface_loader.py:172: [Not quantized] Parameter: "[1mmodel.layers.38.post_attention_layernorm.weight[0m", shape: (2560,), dtype: float32 45%|βββββββββββββββββββββββββββββββββββββββββ | 128/283 [00:40<00:48, 3.22it/s] [2024-03-18 19:33:23] INFO huggingface_loader.py:172: [Not quantized] 
[2024-03-18 19:33:23] INFO huggingface_loader.py:172: [Not quantized] Parameter: "model.layers.38.self_attn.c_attn.bias", shape: (7680,), dtype: float32
[2024-03-18 19:33:23] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.38.self_attn.c_attn.q_weight", shape: (7680, 640), dtype: uint32
[2024-03-18 19:33:23] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.38.self_attn.c_attn.q_scale", shape: (7680, 80), dtype: float32
[2024-03-18 19:33:23] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.38.self_attn.o_proj.q_weight", shape: (2560, 640), dtype: uint32
[2024-03-18 19:33:23] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.38.self_attn.o_proj.q_scale", shape: (2560, 80), dtype: float32
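Note: the MLP is fused the same way. gate_up_proj's 13824 rows are 2 x 6912 (the intermediate size), and down_proj maps the 6912-wide intermediate back to 2560 (its q_weight has 6912/4 = 1728 uint32 columns). A minimal numpy sketch of the resulting SwiGLU-style block follows, written against dense stand-ins for the quantized q_weight/q_scale pairs; the gate-first ordering inside the fused matrix and the SiLU activation are assumptions, not read from the log.

    import numpy as np

    HIDDEN, INTERMEDIATE = 2560, 6912   # 13824 = 2 * 6912, per the log shapes

    def silu(x):
        return x / (1.0 + np.exp(-x))

    def mlp(x, gate_up_w, down_w):
        """SwiGLU-style MLP with a fused gate_up projection.

        gate_up_w: (13824, 2560), down_w: (2560, 6912), dense stand-ins
        for the quantized tensors logged above.
        """
        gate_up = x @ gate_up_w.T                  # (..., 13824)
        gate, up = np.split(gate_up, 2, axis=-1)   # two (..., 6912) halves
        return (silu(gate) * up) @ down_w.T        # back to (..., 2560)

    x = np.zeros((1, HIDDEN), dtype=np.float32)
    out = mlp(x,
              np.zeros((2 * INTERMEDIATE, HIDDEN), dtype=np.float32),
              np.zeros((HIDDEN, INTERMEDIATE), dtype=np.float32))
    assert out.shape == (1, HIDDEN)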
[2024-03-18 19:33:23] INFO huggingface_loader.py:172: [Not quantized] Parameter: "model.layers.39.input_layernorm.weight", shape: (2560,), dtype: float32
[2024-03-18 19:33:23] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.39.mlp.down_proj.q_weight", shape: (2560, 1728), dtype: uint32
[2024-03-18 19:33:24] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.39.mlp.down_proj.q_scale", shape: (2560, 216), dtype: float32
[2024-03-18 19:33:24] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.39.mlp.gate_up_proj.q_weight", shape: (13824, 640), dtype: uint32
[2024-03-18 19:33:25] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.39.mlp.gate_up_proj.q_scale", shape: (13824, 80), dtype: float32
[2024-03-18 19:33:25] INFO huggingface_loader.py:172: [Not quantized] Parameter: "model.layers.39.post_attention_layernorm.weight", shape: (2560,), dtype: float32
[2024-03-18 19:33:25] INFO huggingface_loader.py:172: [Not quantized] Parameter: "model.layers.39.self_attn.c_attn.bias", shape: (7680,), dtype: float32
[2024-03-18 19:33:25] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.39.self_attn.c_attn.q_weight", shape: (7680, 640), dtype: uint32
[2024-03-18 19:33:25] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.39.self_attn.c_attn.q_scale", shape: (7680, 80), dtype: float32
[2024-03-18 19:33:25] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.39.self_attn.o_proj.q_weight", shape: (2560, 640), dtype: uint32
[2024-03-18 19:33:25] INFO huggingface_loader.py:164: [Quantized] Parameter: "model.layers.39.self_attn.o_proj.q_scale", shape: (2560, 80), dtype: float32
[2024-03-18 19:33:25] INFO huggingface_loader.py:172: [Not quantized] Parameter: "model.norm.weight", shape: (2560,), dtype: float32
[2024-03-18 19:33:25] INFO huggingface_loader.py:194: Unloading HF weight file: ../dist/models/Qwen1.5-4B/model-00002-of-00002.safetensors
[2024-03-18 19:33:26] INFO huggingface_loader.py:182: Loading HF parameters from: ../dist/models/Qwen1.5-4B/model-00001-of-00002.safetensors
[19:33:33] /workspace/tvm/src/runtime/memory/pooled_allocator.h:65: Warning: PooledAllocator got InternalError during allocation: InternalError: Check failed: (e == cudaSuccess || e == cudaErrorCudartUnloading) is false: CUDA: out of memory
[19:33:33] /workspace/tvm/src/runtime/memory/pooled_allocator.h:66: Warning: Trying to release all unused memory and reallocate...
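Note: the run finished the second safetensors shard, started loading the first, and then hit a CUDA out-of-memory error during quantization. The two warnings describe TVM's PooledAllocator fallback: on OOM it releases its cached free blocks and retries the allocation once. The sketch below is a generic illustration of that release-and-retry pattern, not TVM's actual C++ implementation (which lives at the pooled_allocator.h path shown above).

    class PoolOOM(RuntimeError):
        """Stand-in for the backend's out-of-memory error."""

    def alloc_with_retry(pool, raw_alloc, nbytes):
        """Allocate from a free-list pool; on OOM, drop cached blocks and retry.

        pool: dict mapping size -> list of cached (unused) buffers.
        raw_alloc: callable(nbytes) that may raise PoolOOM, like cudaMalloc.
        """
        if pool.get(nbytes):          # reuse a cached block when one fits
            return pool[nbytes].pop()
        try:
            return raw_alloc(nbytes)
        except PoolOOM:
            # Mirrors the log: "Trying to release all unused memory and
            # reallocate...", free every cached block, then try once more.
            pool.clear()
            return raw_alloc(nbytes)  # a second failure propagates upward

In this log the second attempt evidently fails as well, and the error escapes the Relax VM uncaught, which produces the terminate and stack trace below.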
terminate called after throwing an instance of 'tvm::runtime::InternalError'
  what():  [19:33:33] /workspace/tvm/include/tvm/runtime/packed_func.h:1346: unknown type = 0
Stack trace:
  0: _ZN3tvm7runtime6deta
  1: _ZN3tvm7runtime6memory13MemoryM
  2: _ZN3tvm7runtime18SimpleObjAllocator7HandlerINS0_
  3: tvm::runtime::relax_vm::VMAllocStorage(void*, tvm::runtime::ShapeTuple, long, DLDataType, tvm::runtime::String) [clone .cold]
  4: tvm::runtime::TypedPackedFunc<tvm::runtime::memory::Storage (void*, tvm::runtime::ShapeTuple, long, DLDataType, tvm::runtime::String)>::AssignTypedLambda<tvm::runtime::memory::Storage (*)(void*, tvm::runtime::ShapeTuple, long, DLDataType, tvm::runtime::String)>(tvm::runtime::memory::Storage (*)(void*, tvm::runtime::ShapeTuple, long, DLDataType, tvm::runtime::String), std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >)::{lambda(tvm::runtime::TVMArgs const&, tvm::runtime::TVMRetValue*)#1}::operator()(tvm::runtime::TVMArgs const&, tvm::runtime::TVMRetValue*) const
  5: _ZN3tvm7runtime13PackedFun
  6: tvm::runtime::relax_vm::VirtualMachineImpl::RunInstrCall(tvm::runtime::relax_vm::VMFrame*, tvm::runtime::relax_vm::Instruction)
  7: tvm::runtime::relax_vm::VirtualMachineImpl::RunLoop()
  8: tvm::runtime::relax_vm::VirtualMachineImpl::InvokeBytecode(long, std::vector<tvm::runtime::TVMRetValue, std::allocator<tvm::runtime::TVMRetValue> > const&)
  9: tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<tvm::runtime::relax_vm::VirtualMachineImpl::GetClosureInternal(tvm::runtime::String const&, bool)::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#1}> >::Call(tvm::runtime::PackedFuncObj const*, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)
  10: tvm::runtime::relax_vm::VirtualMachineImpl::InvokeClosurePacked(tvm::runtime::ObjectRef const&, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)
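Reading the frames bottom-up: the Relax VM's RunLoop invoked VMAllocStorage (frame 3), the allocation failed again even after the pool flush, and the InternalError escaped uncaught. One hedged workaround on a multi-GPU machine, purely a suggestion and not something this log confirms, is to pin the process to whichever GPU currently has the most free memory via CUDA_VISIBLE_DEVICES before re-running the same convert_weight command. The snippet below sketches that using PyTorch's torch.cuda.mem_get_info; the torch dependency and the assumption that another device has enough headroom are both mine.

    import os

    import torch

    def freest_gpu() -> int:
        """Return the index of the CUDA device with the most free memory."""
        free_bytes = [torch.cuda.mem_get_info(i)[0]   # (free, total) per device
                      for i in range(torch.cuda.device_count())]
        return max(range(len(free_bytes)), key=free_bytes.__getitem__)

    if __name__ == "__main__":
        dev = freest_gpu()
        env = dict(os.environ, CUDA_VISIBLE_DEVICES=str(dev))
        # Re-run the exact convert_weight command from this log under `env`,
        # e.g. via subprocess.run([...], env=env, check=True).
        print(f"retry with CUDA_VISIBLE_DEVICES={dev}")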