ariG23498 (HF Staff) committed
Commit c55c030 · verified · 1 parent: 84d4651

Upload 0xSero_GLM-4.7-REAP-50-W4A16_1.txt with huggingface_hub

Files changed (1):
  1. 0xSero_GLM-4.7-REAP-50-W4A16_1.txt (+51 -0)
0xSero_GLM-4.7-REAP-50-W4A16_1.txt ADDED

CODE:
```
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("0xSero/GLM-4.7-REAP-50-W4A16")
model = AutoModelForCausalLM.from_pretrained("0xSero/GLM-4.7-REAP-50-W4A16")
messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```

ERROR:
Traceback (most recent call last):
  File "/tmp/0xSero_GLM-4.7-REAP-50-W4A16_1FWF30y.py", line 27, in <module>
    model = AutoModelForCausalLM.from_pretrained("0xSero/GLM-4.7-REAP-50-W4A16")
  File "/tmp/.cache/uv/environments-v2/c8049131ec121543/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
    return model_class.from_pretrained(
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/c8049131ec121543/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/c8049131ec121543/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
    hf_quantizer, config, dtype, device_map = get_hf_quantizer(
                                              ~~~~~~~~~~~~~~~~^
        config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/c8049131ec121543/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 311, in get_hf_quantizer
    hf_quantizer = AutoHfQuantizer.from_config(
        config.quantization_config,
        pre_quantized=pre_quantized,
    )
  File "/tmp/.cache/uv/environments-v2/c8049131ec121543/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 185, in from_config
    return target_cls(quantization_config, **kwargs)
  File "/tmp/.cache/uv/environments-v2/c8049131ec121543/lib/python3.13/site-packages/transformers/quantizers/quantizer_gptq.py", line 49, in __init__
    raise ImportError("Loading a GPTQ quantized model requires optimum (`pip install optimum`)")
ImportError: Loading a GPTQ quantized model requires optimum (`pip install optimum`)
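
The failure is a missing dependency, not a broken checkpoint: transformers' GPTQ quantizer raises at its import check before any weights are read. Below is a minimal retry sketch, assuming `pip install optimum` has been run as the error message asks; the `gptqmodel` kernel backend and `accelerate` (for `device_map="auto"`) are common companions for GPTQ checkpoints and are assumptions of this sketch, not something this log confirms.

```
# Sketch of a retry after installing the missing GPTQ dependencies.
# Assumes `pip install optimum` (from the error message) has been run;
# `gptqmodel` and `accelerate` are further assumptions of this sketch.
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "0xSero/GLM-4.7-REAP-50-W4A16"

tokenizer = AutoTokenizer.from_pretrained(model_id)
# device_map="auto" (via accelerate) places the 4-bit weights on available devices.
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

messages = [{"role": "user", "content": "Who are you?"}]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```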