Out of memory
I'm using an RTX 4090 OC with 24 GB of VRAM, and my test workflow has only three nodes: load image -> Molmo -> show everything.
Before running the workflow, VRAM usage is only 3%, but once I run the workflow, VRAM usage goes to full and I get the error message below:
Traceback (most recent call last):
File "/home/comfyui/comfyui/ComfyUI/execution.py", line 323, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/comfyui/ComfyUI/execution.py", line 198, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/comfyui/ComfyUI/execution.py", line 169, in _map_node_over_list
process_inputs(input_dict, i)
File "/home/comfyui/comfyui/ComfyUI/execution.py", line 158, in process_inputs
results.append(getattr(obj, func)(**inputs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/comfyui/ComfyUI/custom_nodes/ComfyUI-Molmo/Molmo7BDbnb.py", line 191, in generate_caption
output = self.model.generate_from_batch(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/.local/lib/python3.12/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/.cache/huggingface/modules/transformers_modules/molmo-7B-D-bnb-4bit/modeling_molmo.py", line 2466, in generate_from_batch
out = super().generate(
^^^^^^^^^^^^^^^^^
File "/home/comfyui/.local/lib/python3.12/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/.local/lib/python3.12/site-packages/transformers/generation/utils.py", line 2215, in generate
result = self._sample(
^^^^^^^^^^^^^
File "/home/comfyui/.local/lib/python3.12/site-packages/transformers/generation/utils.py", line 3206, in _sample
outputs = self(**model_inputs, return_dict=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/.local/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/.local/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/.local/lib/python3.12/site-packages/accelerate/hooks.py", line 170, in new_forward
output = module._old_forward(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/.cache/huggingface/modules/transformers_modules/molmo-7B-D-bnb-4bit/modeling_molmo.py", line 2360, in forward
outputs = self.model.forward(
^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/.local/lib/python3.12/site-packages/accelerate/hooks.py", line 170, in new_forward
output = module._old_forward(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/.cache/huggingface/modules/transformers_modules/molmo-7B-D-bnb-4bit/modeling_molmo.py", line 2085, in forward
x = self.transformer.wte(input_ids) if input_embeddings is None else input_embeddings # type: ignore
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/.local/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/.local/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/.local/lib/python3.12/site-packages/accelerate/hooks.py", line 170, in new_forward
output = module._old_forward(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/comfyui/.cache/huggingface/modules/transformers_modules/molmo-7B-D-bnb-4bit/modeling_molmo.py", line 745, in forward
return F.embedding(x, torch.cat([self.embedding, self.new_embedding], dim=0))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
torch.OutOfMemoryError: Allocation on device
Got an OOM, unloading all loaded models.
Prompt executed in 2.91 seconds