model:
  arch: video_llama

  # vit encoder
  image_size: 224
  drop_path_rate: 0
  use_grad_checkpoint: False
  vit_precision: "fp16"
  freeze_vit: True
  freeze_qformer: True

  # Q-Former
  num_query_token: 32

  # Vicuna
  llama_model: "ckpt/vicuna-7b/"

  # generation configs
  prompt: ""

preprocess:
  vis_processor:
    train:
      name: "alpro_video_train"
      image_size: 224
      n_frms: 8
    eval:
      name: "alpro_video_eval"
      image_size: 224
      n_frms: 8
  text_processor:
    train:
      name: "blip_caption"
    eval:
      name: "blip_caption"
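
# --- Usage sketch (not part of the original config) ---
# Assuming the surrounding Video-LLaMA codebase reads YAML configs with
# OmegaConf (as MiniGPT-4/LAVIS-style projects commonly do), a file in this
# format could be loaded and queried like this; the path is a placeholder:
#
#   from omegaconf import OmegaConf
#   cfg = OmegaConf.load("path/to/this_config.yaml")
#   print(cfg.model.llama_model)                        # "ckpt/vicuna-7b/"
#   print(cfg.preprocess.vis_processor["eval"].n_frms)  # 8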