#!/bin/bash

# Projector pretraining: --tune_mm_mlp_adapter True trains only the multimodal
# MLP projector to align the CLIP vision tower with the Phi-2 language model.
python llava_phi/train/convert_model2base_llava_phi.py \
    --model_name_or_path susnato/phi-2 \
    --version plain \
    --data_path pretrain_data/blip_sample.json \
    --image_folder pretrain_data/blip_images \
    --vision_tower ./clip-vit-large-patch14-336 \
    --mm_projector_type mlp2x_gelu \
    --tune_mm_mlp_adapter True \
    --mm_vision_select_layer -2 \
    --mm_use_im_start_end False \
    --mm_use_im_patch_token False \
    --bf16 True \
    --output_dir ./base_checkpoints_llava_phi \
    --num_train_epochs 1 \
    --per_device_train_batch_size 4 \
    --per_device_eval_batch_size 4 \
    --gradient_accumulation_steps 2 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_steps 24000 \
    --save_total_limit 1 \
    --learning_rate 1e-3 \
    --weight_decay 0.1 \
    --warmup_ratio 0. \
    --lr_scheduler_type "cosine" \
    --logging_steps 10 \
    --tf32 False \
    --model_max_length 2048 \
    --gradient_checkpointing True \
    --dataloader_num_workers 4 \
    --lazy_preprocess True \
    --report_to tensorboard