Update modeling_mplug_owl2.py
modeling_mplug_owl2.py CHANGED: +5 -3
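The switch to package-relative imports matches how transformers loads custom code shipped with a Hub checkpoint: with trust_remote_code=True the repo's Python files are imported as a package, so sibling modules resolve through relative imports rather than sys.path manipulation. A minimal loading sketch follows; the checkpoint id is an illustrative assumption, not part of this commit.

# Hypothetical loading sketch; the repo id below is an assumption for illustration.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "MAGAer13/mplug-owl2-llama2-7b"  # assumed checkpoint carrying this file
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    trust_remote_code=True,  # executes modeling_mplug_owl2.py shipped with the repo
)
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)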
@@ -32,9 +32,11 @@ sys.path.insert(0, dir_path)
 from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, CLIPImageProcessor, LlamaConfig, LlamaModel, LlamaForCausalLM
 from transformers.modeling_outputs import CausalLMOutputWithPast
 
-from configuration_mplug_owl2 import MPLUGOwl2Config, MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig
-from visual_encoder import MplugOwlVisionModel, MplugOwlVisualAbstractorModel
-from modeling_llama2 import replace_llama_modality_adaptive
+from .configuration_mplug_owl2 import MPLUGOwl2Config, MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig
+from .visual_encoder import MplugOwlVisionModel, MplugOwlVisualAbstractorModel
+from .modeling_llama2 import replace_llama_modality_adaptive
+from .modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
+
 IGNORE_INDEX = -100
 IMAGE_TOKEN_INDEX = -200
 DEFAULT_IMAGE_TOKEN = "<|image|>"
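For context, a minimal usage sketch of the newly imported _prepare_4d_causal_attention_mask helper, assuming the repo's local modeling_attn_mask_utils mirrors the helper of the same name in recent transformers releases (>= 4.35); the tensors and shapes below are illustrative and not taken from this commit.

import torch
# Assumption: the repo's .modeling_attn_mask_utils mirrors the transformers
# helper of the same name; imported from the library here so the sketch is
# self-contained.
from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask

batch_size, seq_len, hidden_size, past_len = 2, 8, 32, 0
inputs_embeds = torch.randn(batch_size, seq_len, hidden_size)
attention_mask = torch.ones(batch_size, seq_len, dtype=torch.long)  # 1 = attend, 0 = padding

# Expands the 2D padding mask into the 4D additive causal mask expected by the
# decoder layers: shape (batch_size, 1, seq_len, past_len + seq_len), with 0.0
# at allowed positions and the dtype minimum at masked ones.
causal_mask = _prepare_4d_causal_attention_mask(
    attention_mask, (batch_size, seq_len), inputs_embeds, past_len
)
print(causal_mask.shape)  # torch.Size([2, 1, 8, 8])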