model:
  arch: video_llama

  # vit encoder
  image_size: 224
  drop_path_rate: 0
  use_grad_checkpoint: False
  vit_precision: "fp16"        # run the ViT in half precision
  freeze_vit: True             # keep the pretrained visual encoder frozen
  freeze_qformer: True         # keep the pretrained Q-Former frozen

  # Q-Former
  num_query_token: 32          # number of learnable Q-Former query tokens

  # Vicuna
  llama_model: "/projectnb/ivc-ml/samarth/projects/misc/minigpt-4-chat-models/Llama-2-7b-chat-hf"

  # generation configs
  prompt: ""

preprocess:
  vis_processor:
    train:
      name: "alpro_video_train"
      image_size: 224
      n_frms: 8                # frames sampled per video clip
    eval:
      name: "alpro_video_eval"
      image_size: 224
      n_frms: 8
  text_processor:
    train:
      name: "blip_caption"
    eval:
      name: "blip_caption"
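
# ---------------------------------------------------------------------------
# Usage note (a minimal sketch, not part of the config itself): configs in
# this MiniGPT-4/LAVIS-derived format are typically parsed with OmegaConf,
# e.g. in Python:
#
#   from omegaconf import OmegaConf
#   cfg = OmegaConf.load("video_llama_eval.yaml")  # filename is illustrative
#   n_frms = cfg.preprocess.vis_processor.eval.n_frms  # -> 8
#
# The exact entry point depends on the surrounding codebase; Video-LLaMA-style
# repos usually route this through their own Config/registry classes rather
# than loading the file directly.
# ---------------------------------------------------------------------------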