ariG23498 (HF Staff) committed
Commit fa6f779 · verified · 1 Parent(s): d5685b9

Upload allenai_olmOCR-2-7B-1025-FP8_1.txt with huggingface_hub

Files changed (1):
  1. allenai_olmOCR-2-7B-1025-FP8_1.txt  +53 -0
allenai_olmOCR-2-7B-1025-FP8_1.txt ADDED
@@ -0,0 +1,53 @@
+ ```CODE:
+ # Load model directly
+ from transformers import AutoProcessor, AutoModelForVision2Seq
+
+ processor = AutoProcessor.from_pretrained("allenai/olmOCR-2-7B-1025-FP8")
+ model = AutoModelForVision2Seq.from_pretrained("allenai/olmOCR-2-7B-1025-FP8")
+ ```
+
+ ERROR:
+ Traceback (most recent call last):
+ File "/tmp/allenai_olmOCR-2-7B-1025-FP8_1vZkyGK.py", line 18, in <module>
+ model = AutoModelForVision2Seq.from_pretrained("allenai/olmOCR-2-7B-1025-FP8")
+ File "/tmp/.cache/uv/environments-v2/8408d5cc3d3abea9/lib/python3.13/site-packages/transformers/models/auto/modeling_auto.py", line 2289, in from_pretrained
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
+ ~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/tmp/.cache/uv/environments-v2/8408d5cc3d3abea9/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
+ return model_class.from_pretrained(
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+ pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ )
+ ^
+ File "/tmp/.cache/uv/environments-v2/8408d5cc3d3abea9/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
+ return func(*args, **kwargs)
+ File "/tmp/.cache/uv/environments-v2/8408d5cc3d3abea9/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
+ hf_quantizer, config, dtype, device_map = get_hf_quantizer(
+ ~~~~~~~~~~~~~~~~^
+ config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ )
+ ^
+ File "/tmp/.cache/uv/environments-v2/8408d5cc3d3abea9/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 305, in get_hf_quantizer
+ config.quantization_config = AutoHfQuantizer.merge_quantization_configs(
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+ config.quantization_config, quantization_config
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ )
+ ^
+ File "/tmp/.cache/uv/environments-v2/8408d5cc3d3abea9/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 214, in merge_quantization_configs
+ quantization_config = AutoQuantizationConfig.from_dict(quantization_config)
+ File "/tmp/.cache/uv/environments-v2/8408d5cc3d3abea9/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 140, in from_dict
+ return target_cls.from_dict(quantization_config_dict)
+ ~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/tmp/.cache/uv/environments-v2/8408d5cc3d3abea9/lib/python3.13/site-packages/transformers/utils/quantization_config.py", line 1398, in from_dict
+ return super().from_dict(config_dict, return_unused_kwargs=return_unused_kwargs, **kwargs)
+ ~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/tmp/.cache/uv/environments-v2/8408d5cc3d3abea9/lib/python3.13/site-packages/transformers/utils/quantization_config.py", line 122, in from_dict
+ config = cls(**config_dict)
+ File "/tmp/.cache/uv/environments-v2/8408d5cc3d3abea9/lib/python3.13/site-packages/transformers/utils/quantization_config.py", line 1328, in __init__
+ raise ImportError(
+ "compressed_tensors is not installed and is required for compressed-tensors quantization. Please install it with `pip install compressed-tensors`."
+ )
+ ImportError: compressed_tensors is not installed and is required for compressed-tensors quantization. Please install it with `pip install compressed-tensors`.
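Note: the traceback bottoms out in transformers' quantization-config parsing, and the error message itself names the fix it expects (`pip install compressed-tensors`). The sketch below is a minimal, unverified illustration of retrying the load from the uploaded snippet after installing that package; it assumes the install is the only missing piece for this FP8 checkpoint and introduces no APIs beyond those already shown in the log.

```python
# Minimal sketch (assumption: installing compressed-tensors resolves the
# ImportError raised while parsing the checkpoint's quantization_config):
#
#   pip install compressed-tensors
#
from transformers import AutoProcessor, AutoModelForVision2Seq

model_id = "allenai/olmOCR-2-7B-1025-FP8"

# Processor loading already succeeded in the log; repeated here for completeness.
processor = AutoProcessor.from_pretrained(model_id)

# With compressed-tensors available, the quantization config should be parsed
# instead of raising ImportError, and model loading can proceed.
model = AutoModelForVision2Seq.from_pretrained(model_id)
```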