model:
  arch: mini_gpt4
  model_type: pretrain_vicuna
  freeze_vit: True
  freeze_qformer: True
  # low_resource: True
  max_txt_len: 384

datasets:
  laion:
    vis_processor:
      train:
        name: "blip2_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
    sample_ratio: 115
  cc_sbu:
    vis_processor:
      train:
        name: "blip2_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
    sample_ratio: 14

run:
  task: image_text_pretrain
  # optimizer
  lr_sched: "linear_warmup_cosine_lr"
  init_lr: 1e-4
  min_lr: 8e-5
  warmup_lr: 1e-6

  weight_decay: 0.05
  max_epoch: 3
  batch_size_train: 1
  batch_size_eval: 1
  num_workers: 12
  warmup_steps: 5000

  seed: 42
  output_dir: "output/Meta-Llama-3-8B-Instruct-hf/"
  # output_dir: "output/Llama-2-7b-chat-hf/"

  amp: True
  resume_ckpt_path: null

  evaluate: False
  train_splits: ["train"]

  device: "cuda"
  world_size: 1
  dist_url: "env://"
  distributed: True
  stage: 1
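
# Usage sketch (an assumption, not confirmed by this file: a standard
# MiniGPT-4 / LAVIS-style entry point that takes this config via --cfg-path;
# the script name and config path below are hypothetical, adjust to your repo):
#
#   torchrun --nproc-per-node=1 train.py --cfg-path train_configs/stage1_pretrain.yaml
#
# Note on the datasets section: sample_ratio sets the relative sampling weight
# of each dataset in the mixed pretraining loader (here laion : cc_sbu is
# roughly 115 : 14), not an epoch or iteration count.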