ariG23498 HF Staff committed on
Commit
e078b79
·
verified ·
1 Parent(s): 10f9292

Upload ai21labs_AI21-Jamba2-3B_0.txt with huggingface_hub

Browse files
Files changed (1) hide show
  1. ai21labs_AI21-Jamba2-3B_0.txt +94 -0
ai21labs_AI21-Jamba2-3B_0.txt ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ```CODE:
2
+ # Use a pipeline as a high-level helper
3
+ from transformers import pipeline
4
+
5
+ pipe = pipeline("text-generation", model="ai21labs/AI21-Jamba2-3B")
6
+ messages = [
7
+ {"role": "user", "content": "Who are you?"},
8
+ ]
9
+ pipe(messages)
10
+ ```
11
+
12
+ ERROR:
13
+ Traceback (most recent call last):
14
+ File "/tmp/ai21labs_AI21-Jamba2-3B_0crA10m.py", line 30, in <module>
15
+ pipe(messages)
16
+ ~~~~^^^^^^^^^^
17
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/transformers/pipelines/text_generation.py", line 325, in __call__
18
+ return super().__call__(Chat(text_inputs), **kwargs)
19
+ ~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
20
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/transformers/pipelines/base.py", line 1467, in __call__
21
+ return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)
22
+ ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
23
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/transformers/pipelines/base.py", line 1474, in run_single
24
+ model_outputs = self.forward(model_inputs, **forward_params)
25
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/transformers/pipelines/base.py", line 1374, in forward
26
+ model_outputs = self._forward(model_inputs, **forward_params)
27
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/transformers/pipelines/text_generation.py", line 432, in _forward
28
+ output = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
29
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/torch/utils/_contextlib.py", line 120, in decorate_context
30
+ return func(*args, **kwargs)
31
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/transformers/generation/utils.py", line 2564, in generate
32
+ result = decoding_method(
33
+ self,
34
+ ...<5 lines>...
35
+ **model_kwargs,
36
+ )
37
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/transformers/generation/utils.py", line 2784, in _sample
38
+ outputs = self(**model_inputs, return_dict=True)
39
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1775, in _wrapped_call_impl
40
+ return self._call_impl(*args, **kwargs)
41
+ ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
42
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1786, in _call_impl
43
+ return forward_call(*args, **kwargs)
44
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/transformers/utils/generic.py", line 918, in wrapper
45
+ output = func(self, *args, **kwargs)
46
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/transformers/models/jamba/modeling_jamba.py", line 1350, in forward
47
+ outputs: MoeModelOutputWithPast = self.model(
48
+ ~~~~~~~~~~^
49
+ input_ids=input_ids,
50
+ ^^^^^^^^^^^^^^^^^^^^
51
+ ...<8 lines>...
52
+ cache_position=cache_position,
53
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
54
+ )
55
+ ^
56
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1775, in _wrapped_call_impl
57
+ return self._call_impl(*args, **kwargs)
58
+ ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
59
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1786, in _call_impl
60
+ return forward_call(*args, **kwargs)
61
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/transformers/utils/generic.py", line 918, in wrapper
62
+ output = func(self, *args, **kwargs)
63
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/transformers/models/jamba/modeling_jamba.py", line 1195, in forward
64
+ layer_outputs = decoder_layer(
65
+ hidden_states,
66
+ ...<6 lines>...
67
+ cache_position=cache_position,
68
+ )
69
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/transformers/modeling_layers.py", line 94, in __call__
70
+ return super().__call__(*args, **kwargs)
71
+ ~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
72
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1775, in _wrapped_call_impl
73
+ return self._call_impl(*args, **kwargs)
74
+ ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
75
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1786, in _call_impl
76
+ return forward_call(*args, **kwargs)
77
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/transformers/utils/deprecation.py", line 172, in wrapped_func
78
+ return func(*args, **kwargs)
79
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/transformers/models/jamba/modeling_jamba.py", line 1035, in forward
80
+ hidden_states = self.mamba(
81
+ hidden_states=hidden_states,
82
+ cache_params=past_key_values,
83
+ attention_mask=attention_mask,
84
+ )
85
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1775, in _wrapped_call_impl
86
+ return self._call_impl(*args, **kwargs)
87
+ ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
88
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1786, in _call_impl
89
+ return forward_call(*args, **kwargs)
90
+ File "/tmp/.cache/uv/environments-v2/1be6ad9d97bb6ec1/lib/python3.13/site-packages/transformers/models/jamba/modeling_jamba.py", line 818, in forward
91
+ raise ValueError(
92
+ "Fast Mamba kernels are not available. Make sure to they are installed and that the mamba module is on a CUDA device"
93
+ )
94
+ ValueError: Fast Mamba kernels are not available. Make sure to they are installed and that the mamba module is on a CUDA device