# operaex2/OPERA/minigpt4/configs/models/llava-1.5_vicuna7b.yaml
model:
  arch: llava-1.5
  version: 'v1.5'

  # vit encoder
  cache_dir: None
  vit_model: "openai/clip-vit-large-patch14"
  freeze_vit: True

  # finetune config
  freeze_backbone: False
  tune_mm_mlp_adapter: False
  freeze_mm_mlp_adapter: False

  # model config
  mm_vision_select_layer: -2
  model_max_length: 2048

  # data process config
  image_token_len: 576
  mm_use_im_start_end: True

  # training config
  bf16: False
  fp16: True
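For orientation, the sketch below shows how the vit encoder and data process fields above typically fit together in a LLaVA-style pipeline. It uses Hugging Face transformers' generic CLIP classes rather than OPERA's own wrappers (an assumption), and it uses the 336px checkpoint that the preprocessors below reference, since image_token_len: 576 = (336/14)^2 only matches that resolution. mm_vision_select_layer: -2 selects the penultimate ViT layer, and dropping the CLS token leaves the 576 patch features.

```python
import torch
from PIL import Image
from transformers import CLIPImageProcessor, CLIPVisionModel

# Assumed checkpoint: the 336px variant named by proc_type below.
ckpt = "openai/clip-vit-large-patch14-336"
proc = CLIPImageProcessor.from_pretrained(ckpt)
vit = CLIPVisionModel.from_pretrained(ckpt)
vit.requires_grad_(False)  # freeze_vit: True

# Dummy 336x336 image -> pixel_values of shape (1, 3, 336, 336).
image = Image.new("RGB", (336, 336))
pixel_values = proc(images=image, return_tensors="pt").pixel_values

with torch.no_grad():
    out = vit(pixel_values, output_hidden_states=True)

# mm_vision_select_layer: -2 -> take the penultimate transformer layer.
feats = out.hidden_states[-2]   # (1, 577, 1024): CLS token + 24*24 patches
patch_feats = feats[:, 1:]      # drop CLS -> (1, 576, 1024)
assert patch_feats.shape[1] == (336 // 14) ** 2  # image_token_len: 576
```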
preprocess:
  vis_processor:
    train:
      name: "clip_image_train_336"
      proc_type: "openai/clip-vit-large-patch14-336"
    eval:
      name: "clip_image_eval_336"
      proc_type: "openai/clip-vit-large-patch14-336"
  text_processor:
    train:
      name: "blip_caption"
    eval:
      name: "blip_caption"
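Finally, a minimal sketch of consuming this file. MiniGPT-4-style codebases load such configs with OmegaConf; assuming OPERA follows that convention (its actual entry point may wrap this differently), access looks like:

```python
from omegaconf import OmegaConf

# Hypothetical local path to this file.
cfg = OmegaConf.load("minigpt4/configs/models/llava-1.5_vicuna7b.yaml")

print(cfg.model.arch)                           # llava-1.5
print(cfg.model.mm_vision_select_layer)         # -2
print(cfg.preprocess.vis_processor.train.name)  # clip_image_train_336

# Caveat: YAML parses `None` as the string "None", not a null value,
# so code reading cfg.model.cache_dir must handle that case itself.
```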