# MiniGPT-v2: minigpt4/configs/models/minigpt4_vicuna0.yaml
model:
  arch: minigpt4

  # vit encoder
  image_size: 224
  drop_path_rate: 0
  use_grad_checkpoint: False
  vit_precision: "fp16"
  freeze_vit: True
  freeze_qformer: True

  # Q-Former
  num_query_token: 32

  # generation configs
  prompt: ""

  llama_model: "please set this value to the path of vicuna model"
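
  # Note: llama_model is a placeholder and must point at a local Vicuna checkpoint
  # before running. A hypothetical example (the path is illustrative only, not part
  # of the original config):
  # llama_model: "/path/to/vicuna-7b-v0"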
preprocess:
  vis_processor:
    train:
      name: "blip2_image_train"
      image_size: 224
    eval:
      name: "blip2_image_eval"
      image_size: 224
  text_processor:
    train:
      name: "blip_caption"
    eval:
      name: "blip_caption"