ariG23498 (HF Staff) committed
Commit 54b5004 · verified · 1 Parent(s): 9f3c0e4

Upload allenai_olmOCR-2-7B-1025-FP8_0.txt with huggingface_hub

Files changed (1)
  1. allenai_olmOCR-2-7B-1025-FP8_0.txt +64 -0
allenai_olmOCR-2-7B-1025-FP8_0.txt ADDED
@@ -0,0 +1,64 @@
+ ```CODE:
+ # Use a pipeline as a high-level helper
+ from transformers import pipeline
+
+ pipe = pipeline("image-to-text", model="allenai/olmOCR-2-7B-1025-FP8")
+ ```
+
+ ERROR:
+ Traceback (most recent call last):
+   File "/tmp/allenai_olmOCR-2-7B-1025-FP8_0fHpnlf.py", line 17, in <module>
+     pipe = pipeline("image-to-text", model="allenai/olmOCR-2-7B-1025-FP8")
+   File "/tmp/.cache/uv/environments-v2/d4fbfc53dd8322e0/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1027, in pipeline
+     framework, model = infer_framework_load_model(
+                        ~~~~~~~~~~~~~~~~~~~~~~~~~~^
+         adapter_path if adapter_path is not None else model,
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+     ...<5 lines>...
+         **model_kwargs,
+         ^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/d4fbfc53dd8322e0/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
+     model = model_class.from_pretrained(model, **kwargs)
+   File "/tmp/.cache/uv/environments-v2/d4fbfc53dd8322e0/lib/python3.13/site-packages/transformers/models/auto/modeling_auto.py", line 2289, in from_pretrained
+     return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
+            ~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   File "/tmp/.cache/uv/environments-v2/d4fbfc53dd8322e0/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
+     return model_class.from_pretrained(
+            ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+         pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/d4fbfc53dd8322e0/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
+     return func(*args, **kwargs)
+   File "/tmp/.cache/uv/environments-v2/d4fbfc53dd8322e0/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
+     hf_quantizer, config, dtype, device_map = get_hf_quantizer(
+                                               ~~~~~~~~~~~~~~~~^
+         config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/d4fbfc53dd8322e0/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 305, in get_hf_quantizer
+     config.quantization_config = AutoHfQuantizer.merge_quantization_configs(
+                                  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+         config.quantization_config, quantization_config
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/d4fbfc53dd8322e0/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 214, in merge_quantization_configs
+     quantization_config = AutoQuantizationConfig.from_dict(quantization_config)
+   File "/tmp/.cache/uv/environments-v2/d4fbfc53dd8322e0/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 140, in from_dict
+     return target_cls.from_dict(quantization_config_dict)
+            ~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^
+   File "/tmp/.cache/uv/environments-v2/d4fbfc53dd8322e0/lib/python3.13/site-packages/transformers/utils/quantization_config.py", line 1398, in from_dict
+     return super().from_dict(config_dict, return_unused_kwargs=return_unused_kwargs, **kwargs)
+            ~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   File "/tmp/.cache/uv/environments-v2/d4fbfc53dd8322e0/lib/python3.13/site-packages/transformers/utils/quantization_config.py", line 122, in from_dict
+     config = cls(**config_dict)
+   File "/tmp/.cache/uv/environments-v2/d4fbfc53dd8322e0/lib/python3.13/site-packages/transformers/utils/quantization_config.py", line 1328, in __init__
+     raise ImportError(
+         "compressed_tensors is not installed and is required for compressed-tensors quantization. Please install it with `pip install compressed-tensors`."
+     )
+ ImportError: compressed_tensors is not installed and is required for compressed-tensors quantization. Please install it with `pip install compressed-tensors`.