model:
  arch: mini_gpt4
  model_type: pretrain_vicuna
  freeze_vit: True
  freeze_qformer: True
  max_txt_len: 160
  end_sym: "###"
  prompt_path: "prompts/alignment.txt"
  prompt_template: '###Human: {} ###Assistant: '
  ckpt: '/path/to/stage1/checkpoint/'

datasets:
  cc_sbu_align:
    vis_processor:
      train:
        name: "blip2_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"

run:
  task: image_text_pretrain
  # optimizer
  lr_sched: "linear_warmup_cosine_lr"
  init_lr: 3e-5
  min_lr: 1e-5
  warmup_lr: 1e-6

  weight_decay: 0.05
  max_epoch: 5
  iters_per_epoch: 200
  batch_size_train: 12
  batch_size_eval: 12
  num_workers: 4
  warmup_steps: 200

  seed: 42
  output_dir: "output/minigpt4_stage2_finetune"

  amp: True
  resume_ckpt_path: null

  evaluate: False
  train_splits: ["train"]

  device: "cuda"
  world_size: 1
  dist_url: "env://"
  distributed: True
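
# Usage sketch (kept as comments so the file stays valid YAML). Assumptions: this config is
# saved as train_configs/minigpt4_stage2_finetune.yaml inside the MiniGPT-4 repo and train.py
# sits at the repo root; adjust paths and GPU count to your setup.
#
#   torchrun --nproc-per-node 1 train.py --cfg-path train_configs/minigpt4_stage2_finetune.yaml
#
# With the values above, a "linear_warmup_cosine_lr" schedule would ramp the learning rate
# linearly from warmup_lr (1e-6) to init_lr (3e-5) over the first warmup_steps (200)
# iterations, then decay it along a cosine curve toward min_lr (1e-5) across the
# max_epoch * iters_per_epoch = 5 * 200 = 1000 total training steps (schedule semantics
# assumed from the scheduler name, not verified against this codebase).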