#!/bin/bash
# Multi-stage training launcher for the 1B native-pretrain experiment.
#
# Usage: ./<script> [1|2|3|both]
#   1    - stage 1: connector (VE->LLM projector) pretraining
#   2    - stage 2: OCR-heavy pretraining with unfrozen vision encoder
#   3    - stage 3: final SFT from the stage-2 checkpoint
#   both - run stage 1 then stage 3 (stage 2 currently skipped; default)
set -euo pipefail

TRAIN_ID=${1:-"both"}  # which stage(s) to run; defaults to "both"

# Shared configuration for all stages.
readonly PROMPT_VERSION=qwen
readonly LLM_MODEL_PATH="checkpoints/Qwen2.5-0.5B-Instruct"
readonly VE_MODEL_PATH="checkpoints/aimv2-1b-patch14-448"
readonly EXP_NAME="1b-native-pretrain"
readonly PROJ_TYPE=pixelshuffle_2x


# 1. using original train stage 1, stage 2 30%
# 2. edit 448 to native, retrain stage 2 with whole model.
#######################################
# Stage 1: pretrain only the VE->LLM connector (projector warm-up).
# Globals (read): LLM_MODEL_PATH, VE_MODEL_PATH, PROMPT_VERSION,
#                 PROJ_TYPE, EXP_NAME
# Outputs: checkpoint under ./checkpoints/namo-$EXP_NAME-conn-ve-$PROJ_TYPE
#######################################
train_1() {
    echo "Running Training 1..."
    # All expansions quoted (SC2086) so paths with spaces cannot split.
    accelerate launch train.py \
        --deepspeed ./trains/zero2.json \
        --llm_model_path "$LLM_MODEL_PATH" \
        --ve_model_path "$VE_MODEL_PATH" \
        --version "$PROMPT_VERSION" \
        --data_path ./data/sft/share-captioner_coco_lcs_sam_1246k_1107.json \
        ./data/sft/wanjuan_ocr_b3.json \
        ./data/sft/llava_recap_558k.json \
        --image_folder ./data/images_all \
        --conn_ve_llm_type "$PROJ_TYPE" \
        --tune_conn_ve_llm True \
        --dynamic_size True \
        --native_size False \
        --max_img_size 700 \
        --mm_vision_select_layer -2 \
        --mm_use_im_start_end False \
        --mm_use_im_patch_token False \
        --output_dir "./checkpoints/namo-$EXP_NAME-conn-ve-$PROJ_TYPE" \
        --num_train_epochs 1 \
        --per_device_train_batch_size 3 \
        --auto_find_batch_size True \
        --per_device_eval_batch_size 4 \
        --gradient_accumulation_steps 19 \
        --eval_strategy "no" \
        --save_strategy "steps" \
        --save_steps 500 \
        --save_total_limit 1 \
        --learning_rate 4e-5 \
        --weight_decay 0. \
        --warmup_ratio 0.03 \
        --lr_scheduler_type "cosine" \
        --logging_steps 1 \
        --tf32 False \
        --fp16 False \
        --bf16 True \
        --model_max_length 1900 \
        --gradient_checkpointing True \
        --dataloader_num_workers 4 \
        --lazy_preprocess True
}

#######################################
# Stage 2: OCR-heavy pretraining with the vision encoder unfrozen,
# starting from the stage-1 connector checkpoint.
# Globals (read): LLM_MODEL_PATH, VE_MODEL_PATH, PROMPT_VERSION,
#                 PROJ_TYPE, EXP_NAME
# Outputs: checkpoint under ./checkpoints/namo-$EXP_NAME-$PROJ_TYPE
#######################################
train_2() {
    echo "Running Training 2..."
    # All expansions quoted (SC2086) so paths with spaces cannot split.
    # NOTE(review): this stage uses zero2_npu.json while stages 1/3 use
    # zero2.json — presumably intentional (NPU run); confirm.
    accelerate launch train.py \
        --deepspeed ./trains/zero2_npu.json \
        --ve_lr 0.2e-5 \
        --conn_ve_llm_lr 2e-5 \
        --llm_model_path "$LLM_MODEL_PATH" \
        --ve_model_path "$VE_MODEL_PATH" \
        --version "$PROMPT_VERSION" \
        --data_path ./data/sft/share-captioner_coco_lcs_sam_1246k_1107.json \
        ./data/sft/llava_recap_558k.json \
        ./data/sft/textmonkey_pretrain.json \
        ./data/sft/wanjuan_ocr_b3.json \
        ./data/sft/inhouse_crop_b1.json \
        ./data/sft/mtwi_ocr_20k_ocr.json \
        ./data/sft/icdar2019_lsvt_50k_ocr.json \
        ./data/sft/mmc.json \
        ./data/sft/icdar_mlt_ocr.json \
        ./data/sft/autoposter_76k_ocr.json \
        ./data/sft/c7s-synthdog-500k-modified.json \
        ./data/sft/hme-74k.json \
        ./data/sft/ovis_cc12m.json \
        ./data/sft/wukong_5of5_ocr.json \
        ./data/sft/wukong_4of5_ocr.json \
        ./data/sft/wukong_3of5_ocr.json \
        ./data/sft/ocr_rects.json \
        ./data/sft/my_ja_ocr_b1.json \
        ./data/sft/laion2b_ja_ocr.json \
        ./data/sft/mmc_chart_nonarxiv_caption.json \
        ./data/sft/latex_ocr.json \
        ./data/sft/ZhEn_latex_ocr.json \
        ./data/sft/invoices_ocr.json \
        --image_folder ./data/images_all \
        --dynamic_size True \
        --native_size True \
        --max_img_size 700 \
        --unfreeze_ve True \
        --conn_ve_llm_type "$PROJ_TYPE" \
        --pretrain_conn_ve_llm_path "./checkpoints/namo-$EXP_NAME-conn-ve-$PROJ_TYPE/" \
        --mm_vision_select_layer -2 \
        --mm_use_im_start_end False \
        --mm_use_im_patch_token False \
        --image_aspect_ratio pad \
        --group_by_modality_length True \
        --tf32 False \
        --fp16 False \
        --bf16 True \
        --output_dir "./checkpoints/namo-$EXP_NAME-$PROJ_TYPE" \
        --num_train_epochs 1 \
        --auto_find_batch_size False \
        --per_device_train_batch_size 4 \
        --per_device_eval_batch_size 4 \
        --gradient_accumulation_steps 10 \
        --eval_strategy "no" \
        --save_strategy "steps" \
        --save_steps 500 \
        --save_total_limit 1 \
        --learning_rate 4e-5 \
        --weight_decay 0. \
        --warmup_steps 360 \
        --lr_scheduler_type "cosine" \
        --logging_steps 1 \
        --model_max_length 2048 \
        --gradient_checkpointing True \
        --dataloader_num_workers 4 \
        --lazy_preprocess True
}

        # --lora_enable True --lora_r 512 --lora_alpha 256 --lora_dropout 0.05 \


#######################################
# Stage 3: final supervised fine-tuning of the whole model, resuming
# from the stage-2 checkpoint.
# Globals (read): PROMPT_VERSION, PROJ_TYPE, EXP_NAME
# Outputs: checkpoint under
#   ./checkpoints/namo-$EXP_NAME-$PROJ_TYPE-sft-final-whole
#######################################
train_3() {

    # Alternative data mixes / resume paths kept for reference:
    # --data_path ./data/sft/share-captioner_coco_lcs_sam_1246k_1107.json \
    #     ./data/sft/llava_recap_558k.json \
    #     ./data/sft/textmonkey_pretrain.json \

    # --pretrain_model_path ./checkpoints/namo-$EXP_NAME-$PROJ_TYPE \
    echo "Running Training 3..."
    # All expansions quoted (SC2086) so paths with spaces cannot split.
    accelerate launch train.py \
        --deepspeed ./trains/zero2.json \
        --ve_lr 0.1e-5 \
        --conn_ve_llm_lr 1e-5 \
        --pretrain_model_path "./checkpoints/namo-$EXP_NAME-$PROJ_TYPE" \
        --version "$PROMPT_VERSION" \
        --data_path ./data/sft/minigemini_instruction.json \
        ./data/sft/share-captioner_coco_lcs_sam_1246k_1107.json \
        ./data/sft/bunny_695k.json \
        ./data/sft/qa_cmmmu.json \
        ./data/sft/qa_mmbench.json \
        ./data/sft/multi_spot_diff.json \
        ./data/sft/multi_lrv_multi.json \
        ./data/sft/doc_reasoning.json \
        ./data/sft/autoposter_76k_chat.json \
        ./data/sft/icdar2019_lsvt_chat.json \
        --image_folder ./data/images_all \
        --dynamic_size True \
        --native_size True \
        --max_img_size 700 \
        --unfreeze_ve True \
        --conn_ve_llm_type "$PROJ_TYPE" \
        --mm_vision_select_layer -2 \
        --mm_use_im_start_end False \
        --mm_use_im_patch_token False \
        --image_aspect_ratio pad \
        --group_by_modality_length True \
        --tf32 False \
        --fp16 False \
        --bf16 True \
        --output_dir "./checkpoints/namo-$EXP_NAME-$PROJ_TYPE-sft-final-whole" \
        --num_train_epochs 1 \
        --per_device_train_batch_size 2 \
        --auto_find_batch_size False \
        --per_device_eval_batch_size 4 \
        --gradient_accumulation_steps 12 \
        --eval_strategy "no" \
        --save_strategy "steps" \
        --save_steps 500 \
        --save_total_limit 1 \
        --learning_rate 1e-5 \
        --weight_decay 0. \
        --warmup_steps 360 \
        --lr_scheduler_type "cosine" \
        --logging_steps 1 \
        --model_max_length 2048 \
        --gradient_checkpointing True \
        --dataloader_num_workers 4 \
        --lazy_preprocess True
}

# Dispatch on the requested stage. Note the original error message omitted
# option 3 and the explicit "both" keyword; fixed below. Diagnostics go to
# stderr, and the selector is quoted against word-splitting.
case "$TRAIN_ID" in
    1)
        train_1
        ;;
    2)
        train_2
        ;;
    3)
        train_3
        ;;
    both)
        # Combined run currently skips stage 2 on purpose (see comments above).
        train_1
        # train_2
        train_3
        ;;
    *)
        echo "Invalid argument '$TRAIN_ID'. Use 1, 2, 3, or 'both' (default when omitted)." >&2
        exit 1
        ;;
esac
