update ZiyaBlip2ForCausalLM
- README.md +2 -2
- __pycache__/modeling_ziya_blip2.cpython-310.pyc +0 -0
- modeling_ziya_blip2.py +1 -1
- test.py +2 -2
README.md
CHANGED
@@ -106,7 +106,7 @@ First load the Ziya-Visual model: it should be noted that the model repository o
 
 ```python
 from transformers import LlamaForCausalLM, LlamaTokenizer, BlipImageProcessor
-from modeling_ziya_blip2 import
+from modeling_ziya_blip2 import ZiyaBlip2ForCausalLM
 from PIL import Image
 
 # model path of IDEA-CCNL/Ziya-LLaMA-13B-v1
@@ -118,7 +118,7 @@ tokenizer = LlamaTokenizer.from_pretrained(LM_MODEL_PATH)
 OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
 OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]
 # demo.py is in the project path, so we can use local path ".". Otherwise you should use "IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1"
-model =
+model = ZiyaBlip2ForCausalLM.from_pretrained(".", language_model=lm_model)
 image_size = model.config.vision_config.image_size
 image_processor = BlipImageProcessor(
     size={"height": image_size, "width": image_size},
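For context on the two README hunks above, here is a minimal end-to-end sketch of the loading sequence they describe. It assumes `LM_MODEL_PATH` points at an already-merged Ziya-LLaMA-13B-v1 checkpoint and that the script runs from this repository's root (so `"."` resolves to Ziya-BLIP2-14B-Visual-v1); the explicit `image_mean`/`image_std` arguments are an assumption beyond the lines visible in the hunk.

```python
from transformers import LlamaForCausalLM, LlamaTokenizer, BlipImageProcessor
from modeling_ziya_blip2 import ZiyaBlip2ForCausalLM

# Assumed local path of a merged IDEA-CCNL/Ziya-LLaMA-13B-v1 checkpoint.
LM_MODEL_PATH = "path/to/Ziya-LLaMA-13B-v1"
lm_model = LlamaForCausalLM.from_pretrained(LM_MODEL_PATH)
tokenizer = LlamaTokenizer.from_pretrained(LM_MODEL_PATH)

OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]

# Attach the LLaMA language model to the repo's BLIP-2 style visual wrapper.
model = ZiyaBlip2ForCausalLM.from_pretrained(".", language_model=lm_model)

# Build an image processor matching the vision tower's expected resolution;
# passing the CLIP mean/std here is an assumption (the hunk truncates this call).
image_size = model.config.vision_config.image_size
image_processor = BlipImageProcessor(
    size={"height": image_size, "width": image_size},
    image_mean=OPENAI_CLIP_MEAN,
    image_std=OPENAI_CLIP_STD,
)
```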
__pycache__/modeling_ziya_blip2.cpython-310.pyc
CHANGED
Binary files a/__pycache__/modeling_ziya_blip2.cpython-310.pyc and b/__pycache__/modeling_ziya_blip2.cpython-310.pyc differ
modeling_ziya_blip2.py
CHANGED
@@ -20,7 +20,7 @@ from transformers import (
 logger = logging.get_logger(__name__)
 
 
-class
+class ZiyaBlip2ForCausalLM(Blip2PreTrainedModel):
     config_class = Blip2Config
     main_input_name = "pixel_values"
     _keys_to_ignore_on_load_missing = [
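For orientation, a hedged skeleton of how such a subclass is declared against the `transformers` BLIP-2 base classes. Only the attributes visible in the hunk (`config_class`, `main_input_name`, `_keys_to_ignore_on_load_missing`) come from the file; the constructor and the `language_model` hookup are illustrative assumptions, not the repository's actual implementation.

```python
from transformers import Blip2Config, Blip2PreTrainedModel


class ZiyaBlip2ForCausalLM(Blip2PreTrainedModel):
    # Attributes shown in the hunk above.
    config_class = Blip2Config
    main_input_name = "pixel_values"
    _keys_to_ignore_on_load_missing = []  # actual key patterns are truncated in this view

    def __init__(self, config: Blip2Config, language_model=None):
        super().__init__(config)
        # Hypothetical wiring: the hunk only shows the class header, so how the
        # vision tower, Q-Former, and language model are attached is assumed here.
        self.language_model = language_model
```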
test.py
CHANGED
@@ -1,5 +1,5 @@
 from transformers import LlamaForCausalLM, LlamaTokenizer, BlipImageProcessor
-from modeling_ziya_blip2 import
+from modeling_ziya_blip2 import ZiyaBlip2ForCausalLM
 from PIL import Image
 
 # Please note that https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1 currently contains delta weights (i.e., difference weights)
@@ -14,7 +14,7 @@ tokenizer = LlamaTokenizer.from_pretrained(LM_MODEL_PATH)
 OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
 OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]
 # demo.py is in the project path, so we can use local path ".". Otherwise you should use "IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1"
-model =
+model = ZiyaBlip2ForCausalLM.from_pretrained(".", language_model=lm_model)
 image_size = model.config.vision_config.image_size
 image_processor = BlipImageProcessor(
     size={"height": image_size, "width": image_size},
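As a usage note for the test.py hunks above: once `image_processor` is built, it is typically applied to a PIL image to produce the `pixel_values` that the model's `main_input_name` refers to. The continuation below is a sketch that reuses the objects from the snippet above; the file name and the `return_tensors` call are illustrative assumptions, and the actual inference call in test.py is not part of this diff.

```python
from PIL import Image

# Hypothetical input image; continues from the test.py snippet above,
# reusing the `image_processor` constructed there.
image = Image.open("sample.jpg").convert("RGB")

# Resize/normalize to the vision tower's expected resolution and return torch tensors.
pixel_values = image_processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: [1, 3, image_size, image_size]
```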