Update modeling_orion.py
Browse files
change to is_flash_attn_2_available
- modeling_orion.py +2 -2
modeling_orion.py
CHANGED
@@ -25,12 +25,12 @@ from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
|
|
25 |
from transformers.utils import (
|
26 |
add_start_docstrings,
|
27 |
add_start_docstrings_to_model_forward,
|
28 |
-
|
29 |
logging,
|
30 |
replace_return_docstrings,
|
31 |
)
|
32 |
|
33 |
- if
|
34 |
from flash_attn import flash_attn_func, flash_attn_varlen_func
|
35 |
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
|
36 |
|
|
|
25 |
from transformers.utils import (
|
26 |
add_start_docstrings,
|
27 |
add_start_docstrings_to_model_forward,
|
28 |
+
is_flash_attn_2_available,
|
29 |
logging,
|
30 |
replace_return_docstrings,
|
31 |
)
|
32 |
|
33 |
+
if is_flash_attn_2_available():
|
34 |
from flash_attn import flash_attn_func, flash_attn_varlen_func
|
35 |
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
|
36 |
|