Mat1 and Mat2 Shapes Cannot be Multiplied (2x1024 and 768x8192)

#42
by Torcello - opened

Error occurred when executing KSampler:

mat1 and mat2 shapes cannot be multiplied (2x1024 and 768x8192)

File "B:\ComfyUI\execution.py", line 152, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
File "B:\ComfyUI\execution.py", line 82, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
File "B:\ComfyUI\execution.py", line 75, in map_node_over_list
results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
File "B:\ComfyUI\nodes.py", line 1368, in sample
return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)
File "B:\ComfyUI\nodes.py", line 1338, in common_ksampler
samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
File "B:\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\sample_error_enhancer.py", line 22, in informative_sample
raise e
File "B:\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\sample_error_enhancer.py", line 9, in informative_sample
return original_sample(*args, **kwargs) # This code helps interpret error messages that occur within exceptions but does not have any impact on other operations.
File "B:\ComfyUI\comfy\sample.py", line 100, in sample
samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "B:\ComfyUI\comfy\samplers.py", line 706, in sample
return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "B:\ComfyUI\comfy\samplers.py", line 611, in sample
samples = sampler.sample(model_wrap, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar)
File "B:\ComfyUI\comfy\samplers.py", line 550, in sample
samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options)
File "C:\Users\john_\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "B:\ComfyUI\comfy\k_diffusion\sampling.py", line 613, in sample_dpmpp_2m_sde
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "C:\Users\john_\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\john_\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "B:\ComfyUI\comfy\samplers.py", line 282, in forward
out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, model_options=model_options, seed=seed)
File "C:\Users\john_\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\john_\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "B:\ComfyUI\comfy\samplers.py", line 272, in forward
return self.apply_model(*args, **kwargs)
File "B:\ComfyUI\comfy\samplers.py", line 269, in apply_model
out = sampling_function(self.inner_model, x, timestep, uncond, cond, cond_scale, model_options=model_options, seed=seed)
File "B:\ComfyUI\comfy\samplers.py", line 249, in sampling_function
cond_pred, uncond_pred = calc_cond_uncond_batch(model, cond, uncond, x, timestep, model_options)
File "B:\ComfyUI\comfy\samplers.py", line 223, in calc_cond_uncond_batch
output = model.apply_model(input_x, timestep, **c).chunk(batch_chunks)
File "B:\ComfyUI\comfy\model_base.py", line 91, in apply_model
model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float()
File "C:\Users\john_\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\john_\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "B:\ComfyUI\comfy\ldm\cascade\stage_c.py", line 257, in forward
clip = self.gen_c_embeddings(clip_text, clip_text_pooled, clip_img)
File "B:\ComfyUI\comfy\ldm\cascade\stage_c.py", line 181, in gen_c_embeddings
clip_img = self.clip_img_mapper(clip_img).view(clip_img.size(0), clip_img.size(1) * self.c_clip_seq, -1)
File "C:\Users\john_\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\john_\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "B:\ComfyUI\comfy\ops.py", line 43, in forward
return self.forward_comfy_cast_weights(*args, **kwargs)
File "B:\ComfyUI\comfy\ops.py", line 39, in forward_comfy_cast_weights
return torch.nn.functional.linear(input, weight, bias)
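
For context, the failing call is the final torch.nn.functional.linear in the traceback: Stage C's clip_img_mapper is a 768-to-8192 Linear layer, but it is being handed a 1024-feature embedding (batch of 2). Here is a minimal sketch, outside ComfyUI, that reproduces the exact message; the 768/8192/1024 sizes come from the error above, and the variable names are only illustrative:

```python
import torch
import torch.nn as nn

# Stand-in for Stage C's clip_img_mapper: its weight is (8192, 768), so
# F.linear multiplies the input by the 768x8192 transpose.
clip_img_mapper = nn.Linear(768, 8192)

# Embedding from the mismatched CLIP model: 1024 features instead of 768.
wrong_clip_img = torch.randn(2, 1024)

try:
    clip_img_mapper(wrong_clip_img)
except RuntimeError as e:
    print(e)  # mat1 and mat2 shapes cannot be multiplied (2x1024 and 768x8192)

# A 768-dim embedding, which the layer actually expects, goes through fine:
print(clip_img_mapper(torch.randn(2, 768)).shape)  # torch.Size([2, 8192])
```

In other words, the sampler itself is fine; the conditioning tensor reaching Stage C simply has the wrong width, which points at the CLIP model feeding it.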

Are you using CLIPVision? I've had this problem when using CLIPVision to condition Stage C. Conditioning Stage B works, but, as you'd expect, it has almost no effect.

This happens when you use the wrong CLIP model. I made the same mistake. Just rename the Stable Cascade "model.safetensors" to something else before you drag it into the models/clip folder, so it doesn't get mixed up with another file of the same name.
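
If you're not sure which file actually ended up in models/clip, a quick way to tell a text encoder apart from a CLIP vision checkpoint is to list the tensor names and shapes inside the .safetensors file. A small sketch (the path is just an example, point it at your own file):

```python
from safetensors import safe_open

# Example path, change it to the file you want to inspect.
path = r"B:\ComfyUI\models\clip\model.safetensors"

with safe_open(path, framework="pt", device="cpu") as f:
    # The first few entries are usually enough to identify the model
    # without loading any full tensors into memory.
    for name in sorted(f.keys())[:20]:
        print(name, tuple(f.get_slice(name).get_shape()))
```

Keys like text_model.* versus vision_model.*, or the size of the embedding/projection tensors, usually make it obvious whether you grabbed the text encoder or a vision model.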
