ariG23498 HF Staff commited on
Commit
e02e700
·
verified ·
1 Parent(s): c131da0

Upload 0xSero_GLM-4.7-REAP-50-W4A16_0.txt with huggingface_hub

Browse files
Files changed (1) hide show
  1. 0xSero_GLM-4.7-REAP-50-W4A16_0.txt +53 -0
0xSero_GLM-4.7-REAP-50-W4A16_0.txt ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ```CODE:
2
# Use a pipeline as a high-level helper to run text generation with the
# GPTQ-quantized checkpoint "0xSero/GLM-4.7-REAP-50-W4A16".
#
# NOTE(review): this checkpoint is GPTQ-quantized, so transformers refuses to
# load it unless the `optimum` package is installed (the traceback recorded
# below ends in exactly that ImportError). Fail fast with an actionable
# message instead of letting the load crash deep inside transformers.
import importlib.util

from transformers import pipeline

if importlib.util.find_spec("optimum") is None:
    raise SystemExit(
        "This GPTQ-quantized model requires the `optimum` package. "
        "Install it with `pip install optimum` and re-run."
    )

pipe = pipeline("text-generation", model="0xSero/GLM-4.7-REAP-50-W4A16")
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)
10
+ ```
11
+
12
+ ERROR:
13
+ Traceback (most recent call last):
14
+ File "/tmp/0xSero_GLM-4.7-REAP-50-W4A16_0K0sWmy.py", line 26, in <module>
15
+ pipe = pipeline("text-generation", model="0xSero/GLM-4.7-REAP-50-W4A16")
16
+ File "/tmp/.cache/uv/environments-v2/57fea2d03e820408/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1027, in pipeline
17
+ framework, model = infer_framework_load_model(
18
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~^
19
+ adapter_path if adapter_path is not None else model,
20
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
21
+ ...<5 lines>...
22
+ **model_kwargs,
23
+ ^^^^^^^^^^^^^^^
24
+ )
25
+ ^
26
+ File "/tmp/.cache/uv/environments-v2/57fea2d03e820408/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
27
+ model = model_class.from_pretrained(model, **kwargs)
28
+ File "/tmp/.cache/uv/environments-v2/57fea2d03e820408/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
29
+ return model_class.from_pretrained(
30
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
31
+ pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
32
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
33
+ )
34
+ ^
35
+ File "/tmp/.cache/uv/environments-v2/57fea2d03e820408/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
36
+ return func(*args, **kwargs)
37
+ File "/tmp/.cache/uv/environments-v2/57fea2d03e820408/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
38
+ hf_quantizer, config, dtype, device_map = get_hf_quantizer(
39
+ ~~~~~~~~~~~~~~~~^
40
+ config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
41
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
42
+ )
43
+ ^
44
+ File "/tmp/.cache/uv/environments-v2/57fea2d03e820408/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 311, in get_hf_quantizer
45
+ hf_quantizer = AutoHfQuantizer.from_config(
46
+ config.quantization_config,
47
+ pre_quantized=pre_quantized,
48
+ )
49
+ File "/tmp/.cache/uv/environments-v2/57fea2d03e820408/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 185, in from_config
50
+ return target_cls(quantization_config, **kwargs)
51
+ File "/tmp/.cache/uv/environments-v2/57fea2d03e820408/lib/python3.13/site-packages/transformers/quantizers/quantizer_gptq.py", line 49, in __init__
52
+ raise ImportError("Loading a GPTQ quantized model requires optimum (`pip install optimum`)")
53
+ ImportError: Loading a GPTQ quantized model requires optimum (`pip install optimum`)