#!/bin/bash
#
# Launch GRPO training of Qwen2.5-VL-7B-Instruct on the MMRL30k dataset (verl).
# Required env (provide via the caller's environment, NOT hardcoded here):
#   HUGGINGFACE_TOKEN  - only needed for gated/private HF repos
#   WANDB_API_KEY      - only needed if wandb logging is enabled

set -euo pipefail
set -x  # trace commands; note: anything in argv/env may appear in the trace

export PYTHONUNBUFFERED=1
export VLMEvalKit=/home/tione/notebook/xingy/longcot/VLMEvalKit

# All caches live under one root; derive each path from it to avoid drift.
CACHE_ROOT=/home/tione/notebook/datasets/pretrained/cache
export HF_HOME="${CACHE_ROOT}/huggingface"
export HF_HUB_CACHE="${CACHE_ROOT}/huggingface/hub"
export HF_DATASETS_CACHE="${CACHE_ROOT}/datasets"
export TORCH_HOME="${CACHE_ROOT}/torch"
export LMUData="${CACHE_ROOT}/LMUData"

# SECURITY: the original script committed live credentials (those tokens must
# be considered leaked and should be revoked/rotated). Read secrets from the
# environment instead; warn rather than abort, since public-repo downloads
# work without a token.
if [[ -z "${HUGGINGFACE_TOKEN:-}" ]]; then
  echo "WARNING: HUGGINGFACE_TOKEN is not set; gated HF repos will be inaccessible" >&2
fi
if [[ -z "${WANDB_API_KEY:-}" ]]; then
  echo "WARNING: WANDB_API_KEY is not set; wandb logging may fail" >&2
fi

# Pre-fetch model weights and the training dataset into the HF cache.
# Fail fast if either download breaks: launching training against a missing
# model/dataset would otherwise waste the whole job allocation.
huggingface-cli download Qwen/Qwen2.5-VL-7B-Instruct \
  || { echo "ERROR: model download failed" >&2; exit 1; }
huggingface-cli download XenoZLH/MMRL30k --repo-type dataset \
  || { echo "ERROR: dataset download failed" >&2; exit 1; }

MODEL_PATH=Qwen/Qwen2.5-VL-7B-Instruct  # replace it with your local file path

# Launch GRPO training via verl. config= points at a repo-relative path, so
# this script must be run from the verl project root. Overrides:
#   data.*            - HF dataset id with @split suffix
#   worker.*          - rollout TP=1, actor initialized from $MODEL_PATH
#   trainer.*         - 8 GPUs on one node; checkpoint every 75 steps,
#                       keeping at most 11 checkpoints
python3 -m verl.trainer.main \
    config=examples/config.yaml \
    data.train_files=XenoZLH/MMRL30k@train \
    data.val_files=XenoZLH/MMRL30k@geo3k_test \
    worker.rollout.tensor_parallel_size=1 \
    worker.actor.model.model_path="${MODEL_PATH}" \
    trainer.experiment_name=qwen2_5_vl_7b_mmrl30k_grpo \
    trainer.n_gpus_per_node=8 \
    trainer.save_freq=75 \
    trainer.save_limit=11 \
    trainer.save_checkpoint_path="./checkpoints/qwen2_5_vl_7b_mmrl30k_grpo"
