#!/bin/bash
# Multi-stage training launcher for the namo VLM experiment.
# Usage: ./run_train.sh [1|2|3|both]   (default: "both" = stage 1 then stage 3)

# Fail fast: abort on command failure, unset variables, and failed pipeline
# stages, so a crashed training stage does not silently fall through to the
# next one when running "both".
set -euo pipefail

# Which training stage(s) to run.
TRAIN_ID=${1:-"both"}

PROMPT_VERSION=qwen
LLM_MODEL_PATH="checkpoints/Qwen3-4B"
VE_MODEL_PATH="checkpoints/qwen2.5-vl-7b-instruct-ve"
PROJ_TYPE=glu
EXP_NAME="4b-hydra-ve3-test2"

# Qwen2.5-VL-style pixel budget: the vision encoder works on 28x28 patches,
# so these bound each image to between 1 and 600 patches.
IMAGE_MIN_PIXELS=$((28 * 28))
IMAGE_MAX_PIXELS=$((600 * 28 * 28))

#######################################
# Stage 1: pretrain only the vision-encoder -> LLM connector (projector)
# on the caption/OCR mixture.
# Globals (read): LLM_MODEL_PATH, VE_MODEL_PATH, PROMPT_VERSION, PROJ_TYPE,
#                 EXP_NAME, IMAGE_MIN_PIXELS, IMAGE_MAX_PIXELS
# Outputs: connector weights under ./output/namo-$EXP_NAME-conn-ve-$PROJ_TYPE
#######################################
train_1() {
    echo "Running Training 1..."

    GLOBAL_BATCH_SIZE=256
    BATCH_PER_DEVICE=2
    NUM_DEVICES=8
    # 256 / (2 * 8) = 16 accumulation steps. GLOBAL_BATCH_SIZE must stay
    # divisible by BATCH_PER_DEVICE * NUM_DEVICES, otherwise the integer
    # division silently shrinks the effective global batch.
    GRAD_ACCUM_STEPS=$((GLOBAL_BATCH_SIZE / (BATCH_PER_DEVICE * NUM_DEVICES)))

    accelerate launch train.py \
        --deepspeed ./scripts/zero2.json \
        --llm_model_path "$LLM_MODEL_PATH" \
        --ve_model_path "$VE_MODEL_PATH" \
        --version "$PROMPT_VERSION" \
        --data_path ./data/sft/share-captioner_coco_lcs_sam_1246k_1107.json \
        ./data/sft/wanjuan_ocr_b3.json \
        ./data/sft/llava_recap_558k.json \
        --image_folder ./data/images_all \
        --conn_ve_llm_type "$PROJ_TYPE" \
        --tune_conn_ve_llm True \
        --image_min_pixels "$IMAGE_MIN_PIXELS" \
        --image_max_pixels "$IMAGE_MAX_PIXELS" \
        --enable_thinking False \
        --group_by_modality_length True \
        --learning_rate 8e-4 \
        --mm_vision_select_layer -2 \
        --mm_use_im_start_end False \
        --mm_use_im_patch_token False \
        --output_dir "./output/namo-$EXP_NAME-conn-ve-$PROJ_TYPE" \
        --num_train_epochs 1 \
        --per_device_train_batch_size "$BATCH_PER_DEVICE" \
        --auto_find_batch_size False \
        --use_liger_kernel True \
        --gradient_accumulation_steps "$GRAD_ACCUM_STEPS" \
        --eval_strategy "no" \
        --save_strategy "steps" \
        --save_steps 500 \
        --save_total_limit 1 \
        --weight_decay 0. \
        --warmup_steps 400 \
        --lr_scheduler_type "cosine" \
        --logging_steps 1 \
        --tf32 False \
        --fp16 False \
        --bf16 True \
        --model_max_length 2000 \
        --gradient_checkpointing True \
        --dataloader_num_workers 4 \
        --lazy_preprocess True
}

#######################################
# Stage 2: continue pretraining on the large caption/OCR mixture, loading
# the stage-1 connector and training it (VE frozen) at small learning rates.
# Globals (read): LLM_MODEL_PATH, VE_MODEL_PATH, PROMPT_VERSION, PROJ_TYPE,
#                 EXP_NAME
# Globals (written): IMAGE_MIN_PIXELS, IMAGE_MAX_PIXELS (stage-2 override)
# Outputs: checkpoints under ./output/namo-$EXP_NAME-$PROJ_TYPE
#######################################
train_2() {
    echo "Running Training 2..."

    GLOBAL_BATCH_SIZE=128
    BATCH_PER_DEVICE=3
    NUM_DEVICES=8
    GRAD_ACCUM_STEPS=$((GLOBAL_BATCH_SIZE / (BATCH_PER_DEVICE * NUM_DEVICES)))
    # 128 / (3 * 8) truncates to 5, so the effective global batch is
    # 5 * 3 * 8 = 120, not 128. Surface the mismatch instead of hiding it.
    if (( GLOBAL_BATCH_SIZE % (BATCH_PER_DEVICE * NUM_DEVICES) != 0 )); then
        echo "WARNING: effective global batch is $((GRAD_ACCUM_STEPS * BATCH_PER_DEVICE * NUM_DEVICES)), not $GLOBAL_BATCH_SIZE" >&2
    fi

    # Stage 2 allows slightly larger images than stage 1 (660 vs 600 patches).
    IMAGE_MIN_PIXELS=$((28 * 28))
    IMAGE_MAX_PIXELS=$((660 * 28 * 28))

    # NOTE(review): original comment was cut off — "when long document comes
    # in, model_max_length should in[crease?]" — confirm the intended limit.
    # Datasets from earlier mixtures, kept for reference (currently disabled):
    #     ./data/sft/pdf_en_30w.json
    #     ./data/sft/multimath-300k-en-cap.json
    #     ./data/sft/multimath-300k-zh-cap.json
    #     ./data/sft/pdf_cn_30w.json
    #     ./data/sft/wukong_5of5_ocr.json
    #     ./data/sft/wukong_4of5_ocr.json
    #     ./data/sft/wukong_3of5_ocr.json
    #     ./data/sft/wanjuan_ocr_b3.json
    #     ./data/sft/inhouse_crop_b1.json
    #     ./data/sft/allava-caption-laion-4v-469k.json
    #     ./data/sft/allava-caption-vflan-4v-195k.json

    accelerate launch train.py \
        --deepspeed ./scripts/zero2.json \
        --ve_lr 0.1e-5 \
        --ve_merger_lr 0.1e-5 \
        --conn_ve_llm_lr 1e-5 \
        --llm_model_path "$LLM_MODEL_PATH" \
        --ve_model_path "$VE_MODEL_PATH" \
        --version "$PROMPT_VERSION" \
        --data_path ./data/sft/mammoth_si_10M_sub_251w_notext.json \
        ./data/sft/allava-caption-laion-4v-469k.json \
        ./data/sft/allava-caption-vflan-4v-195k.json \
        ./data/sft/cc12m-description-387k.json \
        ./data/sft/textmonkey_pretrain.json \
        ./data/sft/latex_ocr.json \
        ./data/sft/ZhEn_latex_ocr.json \
        ./data/sft/invoices_ocr.json \
        ./data/sft/humancap-hq-311k.json \
        ./data/sft/multimath-300k-en-cap.json \
        ./data/sft/multimath-300k-zh-cap.json \
        ./data/sft/mmc_chart_nonarxiv_caption.json \
        ./data/sft/VLAA-Thinking-SFT-126K_qwen3_format.json \
        ./data/sft/synthdog_zh.json \
        ./data/sft/synthdog_en.json \
        --image_folder ./data/images/ \
        --image_min_pixels "$IMAGE_MIN_PIXELS" \
        --image_max_pixels "$IMAGE_MAX_PIXELS" \
        --enable_thinking False \
        --unfreeze_ve False \
        --conn_ve_llm_type "$PROJ_TYPE" \
        --pretrain_conn_ve_llm_path "./output/namo-4b-hydra-ve3-conn-ve-$PROJ_TYPE/" \
        --mm_vision_select_layer -2 \
        --mm_use_im_start_end False \
        --mm_use_im_patch_token False \
        --image_aspect_ratio pad \
        --group_by_modality_length False \
        --tf32 False \
        --fp16 False \
        --bf16 True \
        --output_dir "./output/namo-$EXP_NAME-$PROJ_TYPE" \
        --num_train_epochs 1 \
        --auto_find_batch_size True \
        --use_liger_kernel True \
        --per_device_train_batch_size "$BATCH_PER_DEVICE" \
        --gradient_accumulation_steps "$GRAD_ACCUM_STEPS" \
        --eval_strategy "no" \
        --save_strategy "steps" \
        --save_steps 400 \
        --save_total_limit 1 \
        --learning_rate 2e-5 \
        --weight_decay 0. \
        --warmup_steps 400 \
        --lr_scheduler_type "cosine" \
        --logging_steps 1 \
        --model_max_length 2499 \
        --gradient_checkpointing True \
        --dataloader_num_workers 4 \
        --lazy_preprocess True
}

# --lora_enable True --lora_r 512 --lora_alpha 256 --lora_dropout 0.05 \

#######################################
# Stage 3: full SFT from the stage-2 checkpoint with the vision encoder
# unfrozen, on the instruction-following mixture.
# Globals (read): PROMPT_VERSION, PROJ_TYPE, EXP_NAME
# Outputs: checkpoints under ./output/namo-$EXP_NAME-$PROJ_TYPE-sft-final-whole
# NOTE(review): resumes from a hard-coded checkpoint-88500 — confirm it exists
# before launching.
#######################################
train_3() {

    # Alternative pretraining data, kept for reference (currently disabled):
    #     ./data/sft/share-captioner_coco_lcs_sam_1246k_1107.json
    #     ./data/sft/llava_recap_558k.json
    #     ./data/sft/textmonkey_pretrain.json
    # Alternative base model:
    #     --pretrain_model_path ./checkpoints/namo-$EXP_NAME-$PROJ_TYPE
    echo "Running Training 3..."
    accelerate launch train.py \
        --deepspeed ./scripts/zero2_adamw.json \
        --ve_lr 0.1e-5 \
        --conn_ve_llm_lr 1e-5 \
        --pretrain_model_path "./output/namo-$EXP_NAME-$PROJ_TYPE/checkpoint-88500" \
        --version "$PROMPT_VERSION" \
        --data_path ./data/sft/minigemini_instruction.json \
        ./data/sft/sharegpt4v_instruct_gpt4-vision_cap100k.json \
        ./data/sft/ZhEn_latex_ocr.json \
        ./data/sft/latex_ocr.json \
        ./data/sft/bunny_695k.json \
        ./data/sft/qa_cmmmu.json \
        ./data/sft/qa_mmbench.json \
        ./data/sft/multi_spot_diff.json \
        ./data/sft/multi_lrv_multi.json \
        ./data/sft/doc_reasoning.json \
        ./data/sft/autoposter_76k_chat.json \
        ./data/sft/icdar2019_lsvt_chat.json \
        ./data/sft/symbolic_tabmw_32k.json \
        ./data/sft/mathv360k_cot.json \
        ./data/sft/lrv-instruct-and-chart-343k.json \
        ./data/sft/doclie-real-100k.json \
        ./data/sft/plotqa-157k.json \
        ./data/sft/llavar_finetune.json \
        ./data/sft/my_ja_ocr_b1.json \
        ./data/sft/pdf_cn_30w_subset.json \
        --image_folder ./data/images_all \
        --dynamic_size True \
        --native_size_batched True \
        --unfreeze_ve True \
        --enable_thinking False \
        --conn_ve_llm_type "$PROJ_TYPE" \
        --mm_vision_select_layer -2 \
        --mm_use_im_start_end False \
        --mm_use_im_patch_token False \
        --image_aspect_ratio pad \
        --group_by_modality_length False \
        --tf32 False \
        --fp16 False \
        --bf16 True \
        --output_dir "./output/namo-$EXP_NAME-$PROJ_TYPE-sft-final-whole" \
        --num_train_epochs 1 \
        --per_device_train_batch_size 3 \
        --auto_find_batch_size False \
        --per_device_eval_batch_size 4 \
        --gradient_accumulation_steps 14 \
        --eval_strategy "no" \
        --save_strategy "steps" \
        --save_steps 500 \
        --save_total_limit 1 \
        --learning_rate 1e-5 \
        --weight_decay 0. \
        --warmup_steps 360 \
        --lr_scheduler_type "cosine" \
        --logging_steps 1 \
        --model_max_length 2048 \
        --gradient_checkpointing True \
        --dataloader_num_workers 4 \
        --lazy_preprocess True
}

# Dispatch on the requested stage. "both" runs stage 1 then stage 3
# (stage 2 is intentionally commented out, matching previous behavior).
case "$TRAIN_ID" in
1)
    train_1
    ;;
2)
    train_2
    ;;
3)
    train_3
    ;;
both)
    train_1
    # train_2
    train_3
    ;;
*)
    # Diagnostics to stderr; message lists every valid choice (1/2/3/both).
    echo "Invalid argument '$TRAIN_ID'. Use 1, 2, 3, or leave empty for \"both\" (stages 1 and 3)." >&2
    exit 1
    ;;
esac
