Update modeling_qwen.py
modeling_qwen.py (+4 -4)
@@ -25,8 +25,8 @@ from transformers.modeling_outputs import (
     BaseModelOutputWithPast,
     CausalLMOutputWithPast,
 )
-from
-from
+from transformers.modeling_utils import PreTrainedModel
+from transformers.utils import logging
 
 try:
     from einops import rearrange
@@ -40,8 +40,8 @@ SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7
 SUPPORT_TORCH2 = hasattr(torch, '__version__') and int(torch.__version__.split(".")[0]) >= 2
 
 
-from configuration_qwen import QWenConfig
-from qwen_generation_utils import (
+from .configuration_qwen import QWenConfig
+from .qwen_generation_utils import (
     HistoryType,
     make_context,
     decode_tokens,
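
The second hunk switches configuration_qwen and qwen_generation_utils to package-relative imports (from .configuration_qwen import QWenConfig). That is the style transformers expects when the modeling file ships inside a model repo and is pulled in as custom code. A minimal loading sketch, assuming a hypothetical repo id "Qwen/Qwen-7B" that places this file next to configuration_qwen.py and qwen_generation_utils.py:

from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical repo id; any repo that ships modeling_qwen.py together with
# configuration_qwen.py and qwen_generation_utils.py is assumed to behave the same.
repo_id = "Qwen/Qwen-7B"

# With trust_remote_code=True, transformers imports the repo's modeling files as a
# dynamically created package, so the relative imports (.configuration_qwen,
# .qwen_generation_utils) resolve within that package rather than against the
# caller's working directory.
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)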