Error occurred when executing KSampler: Tensor on device cuda:0 is not on the expected device meta!

#15 by Managerfirst

File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\execution.py", line 151, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\execution.py", line 81, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\execution.py", line 74, in map_node_over_list
results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\nodes.py", line 1206, in sample
return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\nodes.py", line 1176, in common_ksampler
samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\sample.py", line 93, in sample
samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\samplers.py", line 733, in sample
samples = getattr(k_diffusion_sampling, "sample_{}".format(self.sampler))(self.model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\k_diffusion\sampling.py", line 154, in sample_euler_ancestral
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\samplers.py", line 323, in forward
out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, cond_concat=cond_concat, model_options=model_options, seed=seed)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in call_impl
return forward_call(*args, **kwargs)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\k_diffusion\external.py", line 125, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\k_diffusion\external.py", line 151, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\samplers.py", line 311, in apply_model
out = sampling_function(self.inner_model.apply_model, x, timestep, uncond, cond, cond_scale, cond_concat, model_options=model_options, seed=seed)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\samplers.py", line 289, in sampling_function
cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, cond_concat, model_options)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\samplers.py", line 241, in calc_cond_uncond_batch
c['control'] = control.get_control(input_x, timestep, c, len(cond_or_uncond))
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\sd.py", line 809, in get_control
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\cldm\cldm.py", line 307, in forward
return outs
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 43, in forward
x = layer(x, context, transformer_options)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\attention.py", line 696, in forward
x = block(x, context=context[i], transformer_options=transformer_options)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\attention.py", line 528, in forward
return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\diffusionmodules\util.py", line 123, in checkpoint
return func(*inputs)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\attention.py", line 628, in _forward
n = self.attn2(n, context=context_attn2, value=value_attn2)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\attention.py", line 421, in forward
q = self.to_q(x)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\ComfyUI\comfy\sd.py", line 862, in forward
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch_prims_common\wrappers.py", line 220, in fn
result = fn(*args, **kwargs)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch_prims_common\wrappers.py", line 130, in fn
result = fn(**bound.arguments)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch_refs_init
.py", line 975, in add
return prims.add(a, b)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch_ops.py", line 287, in call
return self.op(*args, **kwargs or {})
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch_prims_init
.py", line 346, in elementwise_meta
utils.check_same_device(*args, allow_cpu_scalar_tensors=True)
File "D:\ComfyUI_windows_portable_nvidia_cu118_or_cpu\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch_prims_common_init
.py", line 596, in check_same_device
raise RuntimeError(msg)
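
For context, the exception at the bottom comes from PyTorch's check_same_device(): one operand of the add is still on the meta device (a placeholder with no real storage behind it), while the other is on cuda:0. A minimal stand-alone illustration of that same class of mismatch, assuming a CUDA build of PyTorch 2.x (this is not the actual ComfyUI code path):

```python
import torch

weight = torch.empty(4, 4, device="meta")   # placeholder weight with no real storage
patch = torch.randn(4, 4, device="cuda")    # e.g. a weight delta already on the GPU

try:
    _ = weight + patch  # mixing meta with a real device fails PyTorch's device check
except RuntimeError as err:
    print(err)          # a message similar to the "expected device meta" error above
```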


It only seems to happen once I have already made one canny or depth image with the new control LoRA; after that I cannot make any more. I have to go to another workflow, reload the already-"loaded" tensors, and then go back and load the model a third time just to get a single new image to complete...
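
(In case it helps anyone debug this: a rough, hypothetical way to check whether some of the control model's weights are still sitting on the meta device after the first run. This is just an illustrative PyTorch snippet, not part of ComfyUI itself:)

```python
import torch

def report_meta_tensors(module: torch.nn.Module) -> None:
    """List parameters/buffers that are still on the meta device (never materialized)."""
    stuck = [n for n, p in module.named_parameters() if p.device.type == "meta"]
    stuck += [n for n, b in module.named_buffers() if b.device.type == "meta"]
    if stuck:
        print(f"{len(stuck)} tensors still on meta, e.g. {stuck[:5]}")
    else:
        print("all parameters and buffers are materialized")
```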

Anyone know how to fix this on the Windows portable install? I installed the nodes through ComfyUI Manager.

Thanks!

I've updated to the latest version of ComfyUI, which appears to have fixed this problem, but now I'm getting out-of-memory errors - not the end of the world, except for the fact that I think it's a bug.

Error occurred when executing CLIPTextEncode:

Allocation on device 0 would exceed allowed memory. (out of memory)
Currently allocated : 1.40 GiB
Requested : 25.00 MiB
Device limit : 10.75 GiB
Free (according to CUDA): 25.62 MiB
PyTorch limit (set by user-supplied memory fraction): 17179869184.00 GiB

While I'd love to have a few million terabytes of VRAM, I don't think that's accurate - and I think there's likely something wrong when I'm requesting 25 MB of VRAM with only 1.4 GB allocated, but maybe not?
EDIT: Nevermind, had another process running ;D
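
(For anyone else who hits this: the giveaway is the gap between what PyTorch itself has allocated and what the driver reports as free - another process can be holding the difference. A quick sanity check, assuming a CUDA build of PyTorch; nvidia-smi shows the same thing per process:)

```python
import torch

free, total = torch.cuda.mem_get_info()      # driver-level view of the whole GPU
allocated = torch.cuda.memory_allocated()    # tensors held by this PyTorch process
reserved = torch.cuda.memory_reserved()      # memory its caching allocator has reserved

gib = 1024 ** 3
print(f"GPU total: {total / gib:.2f} GiB, free: {free / gib:.2f} GiB")
print(f"this process: allocated {allocated / gib:.2f} GiB, reserved {reserved / gib:.2f} GiB")
print(f"held elsewhere (approx.): {(total - free - reserved) / gib:.2f} GiB")
```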

@Kringokrungo lol been there, glad you made it
