---
# Training-run configuration (two-stage VLM pipeline: "align" then "finetune").
# NOTE(review): key set matches a prismatic-vlms / LLaVA-style config dump —
# confirm against the consuming trainer's schema.

dataset:
  # Stage-1 (alignment) data: annotation JSON plus its image root.
  align_stage_components:
    - download/llava-laion-cc-sbu-558k/chat.json
    - download/llava-laion-cc-sbu-558k
  dataset_id: llava-v15
  dataset_root_dir: data
  # Stage-2 (instruction finetuning) data: mixture JSON plus its image root.
  finetune_stage_components:
    - download/llava-v1.5-instruct/llava_v1_5_mix665k.json
    - download/llava-v1.5-instruct
  type: llava-v15

# Path to a file holding the HuggingFace token (not the token itself).
hf_token: .hf_token
max_length: 4096

model:
  # --- Stage 1: align (projector training) ---
  align_epochs: 1
  align_global_batch_size: 256
  align_learning_rate: 0.001
  align_lr_scheduler_type: linear-warmup+cosine-decay
  align_max_grad_norm: 1.0
  align_max_steps: null
  align_per_device_batch_size: 16
  align_train_strategy: fsdp-shard-grad-op
  align_warmup_ratio: 0.03
  align_weight_decay: 0.0
  # Vision-language connector variant.
  arch_specifier: qformer2_128
  enable_gradient_checkpointing: true
  enable_mixed_precision_training: true
  # --- Stage 2: finetune (full instruction tuning) ---
  finetune_epochs: 1
  finetune_global_batch_size: 128
  finetune_learning_rate: 2.0e-05
  finetune_lr_scheduler_type: linear-warmup+cosine-decay
  finetune_max_grad_norm: 1.0
  finetune_max_steps: null
  finetune_per_device_batch_size: 16
  finetune_train_strategy: fsdp-full-shard
  finetune_warmup_ratio: 0.03
  finetune_weight_decay: 0.1
  image_resize_strategy: letterbox
  llm_backbone_id: vicuna-v15-7b
  llm_max_length: 2048
  model_id: one-stage+7b_qformer2_128
  reduce_in_full_precision: false
  type: one-stage+7b
  vision_backbone_id: clip-vit-l-336px

# Stage-1 checkpoint to resume from when running the finetune stage.
pretrained_checkpoint: /home/lilei/prismatic-vlms/runs/llava-qformer/stage1_qformer2_128/checkpoints/latest-checkpoint.pt
run_id: s1_s2_qformer2_128
run_root_dir: runs
seed: 7
stage: finetune
trackers:
  - jsonl
  - wandb
wandb_entity: lilei_stones
wandb_project: hf-vlms