wuyongyu committed on
Commit 662e434
Parent: a8b60cf

Upload modeling_llama.py with huggingface_hub
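
The commit message says the file was pushed with the huggingface_hub client. A minimal sketch of such an upload, assuming a placeholder repo id and that the caller is already authenticated (e.g. via `huggingface-cli login`); the actual target repo is not shown in this commit:

from huggingface_hub import upload_file

# Push the local modeling file to the root of the model repo.
# "your-username/your-llama-repo" is a hypothetical repo id, not the real target.
upload_file(
    path_or_fileobj="modeling_llama.py",
    path_in_repo="modeling_llama.py",
    repo_id="your-username/your-llama-repo",
    repo_type="model",
    commit_message="Upload modeling_llama.py with huggingface_hub",
)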

Files changed (1)
  modeling_llama.py  +7 -7
modeling_llama.py CHANGED
@@ -29,18 +29,18 @@ import torch.utils.checkpoint
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
 
-from ...activations import ACT2FN
-from ...cache_utils import Cache, DynamicCache, StaticCache
-from ...modeling_attn_mask_utils import AttentionMaskConverter
-from ...modeling_outputs import (
+from transformers.activations import ACT2FN
+from transformers.cache_utils import Cache, DynamicCache, StaticCache
+from transformers.modeling_attn_mask_utils import AttentionMaskConverter
+from transformers.modeling_outputs import (
     BaseModelOutputWithPast,
     CausalLMOutputWithPast,
     QuestionAnsweringModelOutput,
     SequenceClassifierOutputWithPast,
 )
-from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import ALL_LAYERNORM_LAYERS
-from ...utils import (
+from transformers.modeling_utils import PreTrainedModel
+from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
+from transformers.utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
     is_flash_attn_2_available,
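
The change swaps the relative imports used inside the transformers source tree for absolute `transformers.*` imports, which is what allows the file to run as standalone custom code in a Hub repo rather than as a module inside the library. A minimal sketch of loading a checkpoint that ships such a file, assuming a hypothetical repo id whose config maps the architecture to this custom class via `auto_map`:

from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-username/your-llama-repo"  # placeholder repo id

# trust_remote_code=True tells transformers to import modeling_llama.py
# from the repo instead of using the built-in LLaMA implementation.
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)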