Upload modeling_llama.py with huggingface_hub
modeling_llama.py CHANGED (+7 -7)
@@ -29,18 +29,18 @@ import torch.utils.checkpoint
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
 
-from ...activations import ACT2FN
-from ...cache_utils import Cache, DynamicCache, StaticCache
-from ...modeling_attn_mask_utils import AttentionMaskConverter
-from ...modeling_outputs import (
+from transformers.activations import ACT2FN
+from transformers.cache_utils import Cache, DynamicCache, StaticCache
+from transformers.modeling_attn_mask_utils import AttentionMaskConverter
+from transformers.modeling_outputs import (
     BaseModelOutputWithPast,
     CausalLMOutputWithPast,
     QuestionAnsweringModelOutput,
     SequenceClassifierOutputWithPast,
 )
-from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import ALL_LAYERNORM_LAYERS
-from ...utils import (
+from transformers.modeling_utils import PreTrainedModel
+from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
+from transformers.utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
     is_flash_attn_2_available,
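The commit message above is the default one that huggingface_hub generates, which suggests the file was pushed with HfApi.upload_file (or upload_folder). A minimal sketch of such an upload, assuming a hypothetical target repository (the repo_id below is a placeholder, not the actual repo this commit belongs to):

    from huggingface_hub import HfApi

    api = HfApi()

    # Push the standalone modeling file to the Hub. Left unset, the commit
    # message defaults to "Upload modeling_llama.py with huggingface_hub".
    # repo_id is a hypothetical example, not the repo from this commit.
    api.upload_file(
        path_or_fileobj="modeling_llama.py",
        path_in_repo="modeling_llama.py",
        repo_id="your-username/your-model",
    )

The import change itself is what makes the uploaded file usable standalone: outside the transformers source tree, relative imports such as "from ...activations import ACT2FN" have no parent package to resolve against, so custom code loaded from the Hub (e.g. via AutoModelForCausalLM.from_pretrained(..., trust_remote_code=True)) must import transformers by its installed, absolute name.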