# Source: Hugging Face model repo page — commit 503ee33,
# "update ZiyaBlip2ForCausalLM" by wuxiaojun.
# (Web-page residue from the original paste, kept as a comment so the file is valid Python.)
import torch
from PIL import Image
from transformers import BlipImageProcessor, LlamaForCausalLM, LlamaTokenizer

from modeling_ziya_blip2 import ZiyaBlip2ForCausalLM
# NOTE: https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1 currently hosts
# delta weights (i.e. diff weights), while LM_MODEL_PATH must point at the FULL weights.
# Follow the conversion steps in the Ziya-LLaMA-13B-v1 README.md to obtain the full
# weights first. The path below is a locally converted full checkpoint, used directly.
# Hard-coded local path to the merged (full) Ziya-LLaMA-13B-v1 weights;
# adjust to wherever your converted checkpoint lives.
LM_MODEL_PATH = "/cognitive_comp/wuxiaojun/pretrained/pytorch/huggingface/Ziya-LLaMA-13B-v1"
lm_model = LlamaForCausalLM.from_pretrained(LM_MODEL_PATH)
tokenizer = LlamaTokenizer.from_pretrained(LM_MODEL_PATH)

# Visual model: normalize with the OpenAI CLIP statistics the vision tower
# was pre-trained with.
OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]
# demo.py is in the project path, so we can use local path ".". Otherwise you should use "IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1"
model = ZiyaBlip2ForCausalLM.from_pretrained(".", language_model=lm_model)
image_size = model.config.vision_config.image_size
image_processor = BlipImageProcessor(
    size={"height": image_size, "width": image_size},
    image_mean=OPENAI_CLIP_MEAN,
    image_std=OPENAI_CLIP_STD,
)

# Fix: the original called model.cuda() unconditionally (with a "comment this
# line for CPU" note); guarding on availability lets the same script run on
# CPU-only machines without editing it.
if torch.cuda.is_available():
    model.cuda()

generate_config = {
    "max_new_tokens": 128,
    "top_p": 0.1,
    "temperature": 0.7,
}

# Fix: convert to RGB so paletted / RGBA / grayscale inputs don't break the
# 3-channel CLIP normalization in the processor.
image = Image.open("wzry.jpg").convert("RGB")
pixel_values = image_processor(image, return_tensors="pt").pixel_values.to(model.device)

# Single-turn chat: no previous turns, so both history lists are empty.
output = model.chat(
    tokenizer=tokenizer,
    pixel_values=pixel_values,
    query="这是什么游戏",
    previous_querys=[],
    previous_outputs=[],
    **generate_config,
)
print(output)
# Expected (sample) output:
# 这是一款名为《王者荣耀》的多人在线竞技游戏。在游戏中,玩家扮演不同的角色,并与其他玩家进行战斗。游戏中的人物和环境都是虚拟的,但它们看起来非常逼真。玩家需要使用各种技能和策略来击败对手,并获得胜利。