Qwen_Qwen3-VL-32B-Instruct-FP8_0.txt
```CODE:
# Use a pipeline as a high-level helper.
# Note: this checkpoint ships FP8-quantized weights, so loading it
# requires a GPU or XPU (see the error trace below).
from transformers import pipeline

pipe = pipeline("image-text-to-text", model="Qwen/Qwen3-VL-32B-Instruct-FP8")

# Chat-style input: one user turn containing an image URL and a text question.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"},
        ],
    },
]
pipe(text=messages)
```
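This call fails on the CPU-only host that produced the trace below: the checkpoint carries a fine-grained FP8 quantization config, and the quantizer's environment check rejects machines without a GPU or XPU. A minimal pre-flight sketch (an assumption, not part of the original run; `torch.xpu` only exists in newer torch builds) that surfaces the problem before the deep load path runs:

```
import torch

def accelerator_available() -> bool:
    # True if a CUDA GPU or Intel XPU is visible to torch.
    if torch.cuda.is_available():
        return True
    xpu = getattr(torch, "xpu", None)  # torch.xpu is absent in older builds
    return bool(xpu is not None and xpu.is_available())

if not accelerator_available():
    raise SystemExit("FP8 checkpoints need a GPU or XPU; use a non-FP8 variant on CPU.")
```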
ERROR:
Traceback (most recent call last):
File "/tmp/Qwen_Qwen3-VL-32B-Instruct-FP8_0RYJfB4.py", line 19, in <module>
pipe = pipeline("image-text-to-text", model="Qwen/Qwen3-VL-32B-Instruct-FP8")
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1027, in pipeline
framework, model = infer_framework_load_model(
~~~~~~~~~~~~~~~~~~~~~~~~~~^
adapter_path if adapter_path is not None else model,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
...<5 lines>...
**model_kwargs,
^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/pipelines/base.py", line 333, in infer_framework_load_model
raise ValueError(
f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
)
ValueError: Could not load model Qwen/Qwen3-VL-32B-Instruct-FP8 with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForImageTextToText'>, <class 'transformers.models.qwen3_vl.modeling_qwen3_vl.Qwen3VLForConditionalGeneration'>). See the original errors:
while loading with AutoModelForImageTextToText, an error is thrown:
Traceback (most recent call last):
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
model = model_class.from_pretrained(model, **kwargs)
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
return model_class.from_pretrained(
~~~~~~~~~~~~~~~~~~~~~~~~~~~^
pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
return func(*args, **kwargs)
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
hf_quantizer, config, dtype, device_map = get_hf_quantizer(
~~~~~~~~~~~~~~~~^
config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
hf_quantizer.validate_environment(
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
dtype=dtype,
^^^^^^^^^^^^
...<3 lines>...
weights_only=weights_only,
^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
model = model_class.from_pretrained(model, **fp32_kwargs)
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
return model_class.from_pretrained(
~~~~~~~~~~~~~~~~~~~~~~~~~~~^
pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
return func(*args, **kwargs)
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
hf_quantizer, config, dtype, device_map = get_hf_quantizer(
~~~~~~~~~~~~~~~~^
config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
hf_quantizer.validate_environment(
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
dtype=dtype,
^^^^^^^^^^^^
...<3 lines>...
weights_only=weights_only,
^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
while loading with Qwen3VLForConditionalGeneration, an error is thrown:
Traceback (most recent call last):
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
model = model_class.from_pretrained(model, **kwargs)
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
return func(*args, **kwargs)
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
hf_quantizer, config, dtype, device_map = get_hf_quantizer(
~~~~~~~~~~~~~~~~^
config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
hf_quantizer.validate_environment(
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
dtype=dtype,
^^^^^^^^^^^^
...<3 lines>...
weights_only=weights_only,
^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
model = model_class.from_pretrained(model, **fp32_kwargs)
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
return func(*args, **kwargs)
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
hf_quantizer, config, dtype, device_map = get_hf_quantizer(
~~~~~~~~~~~~~~~~^
config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
hf_quantizer.validate_environment(
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
dtype=dtype,
^^^^^^^^^^^^
...<3 lines>...
weights_only=weights_only,
^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
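A hedged remediation sketch: on a host with an accelerator, place the FP8 weights there explicitly via `device_map`; on CPU, fall back to an unquantized checkpoint. The fallback repo name `Qwen/Qwen3-VL-32B-Instruct` is an assumption here, chosen as the non-FP8 sibling of the model above.

```
import torch
from transformers import pipeline

if torch.cuda.is_available():
    # FP8 dequantization runs on the accelerator, so map the model there.
    pipe = pipeline(
        "image-text-to-text",
        model="Qwen/Qwen3-VL-32B-Instruct-FP8",
        device_map="auto",
    )
else:
    # Assumed CPU fallback: the unquantized sibling checkpoint.
    pipe = pipeline(
        "image-text-to-text",
        model="Qwen/Qwen3-VL-32B-Instruct",
    )

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"},
        ],
    },
]
print(pipe(text=messages))
```

As a rough sizing check, FP8 stores one byte per parameter, so the 32B checkpoint wants on the order of 32 GB of accelerator memory before activations; a BF16 fallback roughly doubles that, so the CPU branch is only practical on machines with very large RAM.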