# FAPM/lavis/configs/models/blip_vqa_okvqa.yaml
# Copyright (c) 2022, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
model:
  arch: blip_vqa
  load_finetuned: True

  finetuned: "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP/blip_okvqa.pth"
  pretrained: "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"

  # vit encoder
  vit_type: "base"
  vit_grad_ckpt: False
  vit_ckpt_layer: 0
  vit_drop_path_rate: 0.1

  image_size: 480

  # bert config
  med_config_path: "configs/models/med_config.json"

preprocess:
  vis_processor:
    train:
      name: "blip_image_train"
      image_size: 480
    eval:
      name: "blip_image_eval"
      image_size: 480
  text_processor:
    train:
      name: "blip_question"
    eval:
      name: "blip_question"