                       ~~~~~~~~~~~~~~~~~~~~~~~~~~^
        adapter_path if adapter_path is not None else model,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        ...<5 lines>...
        **model_kwargs,
        ^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/148c4a4b32075dd9/lib/python3.13/site-packages/transformers/pipelines/base.py", line 333, in infer_framework_load_model
    raise ValueError(
        f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
    )
ValueError: Could not load model openbmb/MiniCPM-V-4_5 with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForImageTextToText'>,). See the original errors:

while loading with AutoModelForImageTextToText, an error is thrown:
Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/148c4a4b32075dd9/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
    model = model_class.from_pretrained(model, **kwargs)
  File "/tmp/.cache/uv/environments-v2/148c4a4b32075dd9/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 607, in from_pretrained
    raise ValueError(
        ...<2 lines>...
    )
ValueError: Unrecognized configuration class <class 'transformers_modules.openbmb.MiniCPM-V-4_5.a3bb84d38a6e6f41e1cf3fa59086c124dca082e1.configuration_minicpm.MiniCPMVConfig'> for this kind of AutoModel: AutoModelForImageTextToText.
Model type should be one of AriaConfig, AyaVisionConfig, BlipConfig, Blip2Config, ChameleonConfig, Cohere2VisionConfig, DeepseekVLConfig, DeepseekVLHybridConfig, Emu3Config, EvollaConfig, Florence2Config, FuyuConfig, Gemma3Config, Gemma3nConfig, GitConfig, Glm4vConfig, Glm4vMoeConfig, GotOcr2Config, IdeficsConfig, Idefics2Config, Idefics3Config, InstructBlipConfig, InternVLConfig, JanusConfig, Kosmos2Config, Kosmos2_5Config, Llama4Config, LlavaConfig, LlavaNextConfig, LlavaNextVideoConfig, LlavaOnevisionConfig, Mistral3Config, MllamaConfig, Ovis2Config, PaliGemmaConfig, PerceptionLMConfig, Pix2StructConfig, PixtralVisionConfig, Qwen2_5_VLConfig, Qwen2VLConfig, ShieldGemma2Config, SmolVLMConfig, UdopConfig, VipLlavaConfig, VisionEncoderDecoderConfig.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/148c4a4b32075dd9/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
    model = model_class.from_pretrained(model, **fp32_kwargs)
  File "/tmp/.cache/uv/environments-v2/148c4a4b32075dd9/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 607, in from_pretrained
    raise ValueError(
        ...<2 lines>...
    )
ValueError: Unrecognized configuration class <class 'transformers_modules.openbmb.MiniCPM-V-4_5.a3bb84d38a6e6f41e1cf3fa59086c124dca082e1.configuration_minicpm.MiniCPMVConfig'> for this kind of AutoModel: AutoModelForImageTextToText.
Model type should be one of AriaConfig, AyaVisionConfig, BlipConfig, Blip2Config, ChameleonConfig, Cohere2VisionConfig, DeepseekVLConfig, DeepseekVLHybridConfig, Emu3Config, EvollaConfig, Florence2Config, FuyuConfig, Gemma3Config, Gemma3nConfig, GitConfig, Glm4vConfig, Glm4vMoeConfig, GotOcr2Config, IdeficsConfig, Idefics2Config, Idefics3Config, InstructBlipConfig, InternVLConfig, JanusConfig, Kosmos2Config, Kosmos2_5Config, Llama4Config, LlavaConfig, LlavaNextConfig, LlavaNextVideoConfig, LlavaOnevisionConfig, Mistral3Config, MllamaConfig, Ovis2Config, PaliGemmaConfig, PerceptionLMConfig, Pix2StructConfig, PixtralVisionConfig, Qwen2_5_VLConfig, Qwen2VLConfig, ShieldGemma2Config, SmolVLMConfig, UdopConfig, VipLlavaConfig, VisionEncoderDecoderConfig.
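
Both "Unrecognized configuration class" errors above stem from the same mapping: the image-text-to-text pipeline only builds models through AutoModelForImageTextToText, whose registry is limited to the built-in configs listed, while a remote-code config such as MiniCPMVConfig is reachable only through the Auto classes named in the repo's own auto_map. A small diagnostic sketch (assumptions: Hub access, and that the repo's configuration module imports cleanly) to inspect that mapping:

    from transformers import AutoConfig

    cfg = AutoConfig.from_pretrained("openbmb/MiniCPM-V-4_5", trust_remote_code=True)
    # auto_map lists which Auto* entry points this repo's custom code supports.
    print(getattr(cfg, "auto_map", None))
    # If "AutoModelForImageTextToText" is missing from that dict, the pipeline
    # has no class it can instantiate, which is exactly the ValueError above.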

Traceback (most recent call last):
  File "/tmp/openbmb_MiniCPM-V-4_5_17QiTJm.py", line 12, in <module>
    model = AutoModel.from_pretrained("openbmb/MiniCPM-V-4_5", trust_remote_code=True, torch_dtype="auto")
  File "/tmp/.cache/uv/environments-v2/dcb16eeefd2766a7/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 586, in from_pretrained
    model_class = get_class_from_dynamic_module(
        class_ref, pretrained_model_name_or_path, code_revision=code_revision, **hub_kwargs, **kwargs
    )
  File "/tmp/.cache/uv/environments-v2/dcb16eeefd2766a7/lib/python3.13/site-packages/transformers/dynamic_module_utils.py", line 569, in get_class_from_dynamic_module
    final_module = get_cached_module_file(
        repo_id,
        ...<8 lines>...
        repo_type=repo_type,
    )
  File "/tmp/.cache/uv/environments-v2/dcb16eeefd2766a7/lib/python3.13/site-packages/transformers/dynamic_module_utils.py", line 392, in get_cached_module_file
    modules_needed = check_imports(resolved_module_file)
  File "/tmp/.cache/uv/environments-v2/dcb16eeefd2766a7/lib/python3.13/site-packages/transformers/dynamic_module_utils.py", line 224, in check_imports
    raise ImportError(
        ...<2 lines>...
    )
ImportError: This modeling file requires the following packages that were not found in your environment: PIL, torchvision. Run `pip install PIL torchvision`
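
Note that the suggested command will not work as written: the PyPI distribution that provides the PIL module is Pillow, not PIL. A combined fix sketch for the two MiniCPM-V-4_5 failures above (hypothetical, untested against this exact revision):

    # pip install Pillow torchvision
    from transformers import AutoModel, AutoTokenizer

    # The repo ships custom code, so load through the generic AutoModel entry
    # point with trust_remote_code=True, as the failing script already did.
    model = AutoModel.from_pretrained(
        "openbmb/MiniCPM-V-4_5",
        trust_remote_code=True,
        torch_dtype="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained("openbmb/MiniCPM-V-4_5", trust_remote_code=True)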

Everything was good in openbmb_MiniCPM4.1-8B_0.txt
Everything was good in openbmb_MiniCPM4.1-8B_1.txt
No suitable GPU found for IntervitensInc/pangu-pro-moe-model | 174.32 GB VRAM requirement
No suitable GPU found for IntervitensInc/pangu-pro-moe-model | 174.32 GB VRAM requirement
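
For context, a VRAM figure like the 174.32 GB above can be approximated from a checkpoint's safetensors metadata without downloading the weights. The sketch below counts raw parameter bytes only (assumptions: the repo publishes safetensors metadata, and the log's exact accounting, which may include extra overhead, is unknown):

    from huggingface_hub import get_safetensors_metadata

    def estimate_vram_gb(repo_id: str) -> float:
        # parameter_count maps a dtype name ("F32", "BF16", ...) to element count
        meta = get_safetensors_metadata(repo_id)
        bytes_per = {"F64": 8, "F32": 4, "F16": 2, "BF16": 2, "I8": 1, "U8": 1}
        total = sum(n * bytes_per.get(dtype, 2) for dtype, n in meta.parameter_count.items())
        return total / 1024**3

    print(f"{estimate_vram_gb('IntervitensInc/pangu-pro-moe-model'):.2f} GB")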

Everything was good in phi-3-M3-coder_0
Everything was good in phi-3-M3-coder_1
Everything was good in rednote-hilab_dots.ocr_0.txt
No suitable GPU found for rednote-hilab/dots.vlm1.inst | 3256.18 GB VRAM requirement

Traceback (most recent call last):
  File "/tmp/sentence-transformers_all-MiniLM-L6-v2_0ymyt2o.py", line 16, in <module>
    from sentence_transformers import SentenceTransformer
ModuleNotFoundError: No module named 'sentence_transformers'
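
The fix here is just a missing dependency: sentence-transformers is a separate PyPI package, not part of transformers. A minimal sketch of the intended usage once it is installed (assuming the standard public model id):

    # pip install sentence-transformers
    from sentence_transformers import SentenceTransformer

    model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
    embeddings = model.encode(["This is a test sentence."])
    print(embeddings.shape)  # (1, 384): this model produces 384-dim embeddings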

No suitable GPU found for skt/A.X-3.1 | 83.95 GB VRAM requirement
No suitable GPU found for skt/A.X-3.1 | 83.95 GB VRAM requirement

Traceback (most recent call last):
  File "/tmp/skt_A.X-4.0-VL-Light_0IkvuFO.py", line 13, in <module>
    pipe = pipeline("image-text-to-text", model="skt/A.X-4.0-VL-Light", trust_remote_code=True)
  File "/tmp/.cache/uv/environments-v2/f7387fa1610e269a/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1008, in pipeline
    framework, model = infer_framework_load_model(
                       ~~~~~~~~~~~~~~~~~~~~~~~~~~^
        adapter_path if adapter_path is not None else model,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        ...<5 lines>...
        **model_kwargs,
        ^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/f7387fa1610e269a/lib/python3.13/site-packages/transformers/pipelines/base.py", line 332, in infer_framework_load_model
    raise ValueError(
        f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
    )
ValueError: Could not load model skt/A.X-4.0-VL-Light with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForImageTextToText'>,). See the original errors:

while loading with AutoModelForImageTextToText, an error is thrown:
Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/f7387fa1610e269a/lib/python3.13/site-packages/transformers/pipelines/base.py", line 292, in infer_framework_load_model
    model = model_class.from_pretrained(model, **kwargs)
  File "/tmp/.cache/uv/environments-v2/f7387fa1610e269a/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 603, in from_pretrained
    raise ValueError(
        ...<2 lines>...
    )
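
The entry is cut off here, but its shape matches the MiniCPM-V-4_5 pipeline failure at the top of this log: the image-text-to-text task is pinned to AutoModelForImageTextToText, which presumably rejects this repo's remote-code config as well. A generic fallback sketch (hypothetical; whether skt/A.X-4.0-VL-Light actually loads through AutoModel is untested):

    from transformers import AutoModel, pipeline

    model_id = "skt/A.X-4.0-VL-Light"
    try:
        pipe = pipeline("image-text-to-text", model=model_id, trust_remote_code=True)
    except ValueError:
        # The task's Auto class rejected the remote config; fall back to the
        # generic AutoModel entry point exposed through the repo's auto_map.
        model = AutoModel.from_pretrained(model_id, trust_remote_code=True, torch_dtype="auto")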