#!/usr/bin/env bash
# Provenance (Hugging Face page residue, kept as comments so the script parses;
# the bare apostrophe in "iMihayo's" otherwise opens an unterminated quote):
#   iMihayo — Add files using upload-large-folder tool
#   e188f3d verified | raw | history | blame | 3.27 kB
#========== settings ==========#
# Shared cluster root; every model/data/project path below hangs off this
# prefix (previously duplicated verbatim on four lines).
base_dir=/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/public-project/chengdongzhou-240108390137
PROJECT_PATH=fastvla_multi_scale_query
#========== !NOTE! ==========#
# Ablation switches for this run; all of them are baked into MODE below so
# the run directory name records the exact configuration.
RUN_MODE=simvla_PP
use_predict_future_prop=True
batch_size=16
use_action_ts_head=True
use_one_embed=True
use_multi_scaling=False
mlp_type=ffn
decoder_num_blocks=4
robot_platform=libero
# Human-readable run tag encoding every ablation flag above.
MODE=${RUN_MODE}_use_pp_${use_predict_future_prop}_use_ts_${use_action_ts_head}_use_one_${use_one_embed}_use_ms_${use_multi_scaling}_mlp_${mlp_type}_decoder_num_blocks_${decoder_num_blocks}
#========== !NOTE! ==========#
use_l1_regression=True
num_images_in_input=1
wandb_entity=chenghaha
wandb_project=fastvla
wandb_log_freq=1
use_proprio=True
use_diffusion=False
use_film=False
num_steps_before_decay=20000
save_freq=5000
max_steps=50000
# Base OpenVLA-7B checkpoint, RLDS dataset root, and output directory.
vla_path=${base_dir}/ai_models/openvla/openvla-7b
data_root_dir=${base_dir}/datasets/openvla/modified_libero_rlds
dataset_name=libero_4_task_suites_no_noops
run_root_dir=${base_dir}/vla_projects/${PROJECT_PATH}/results/${RUN_MODE}
#========== get run_id ==========#
# Build run_id_note as the MODE tag plus a compact steps/freq/decay suffix,
# joined with a literal "--" separator.
note_parts=("${MODE}")
# if [ "$use_l1_regression" = "True" ]; then
# note_parts+=("L1_regression")
# fi
# if [ "$num_images_in_input" == 1 ]; then
# note_parts+=("3rd_person_img")
# else
# note_parts+=("3rd_person_img_and_wrist")
# fi
# if [ "$use_l1_regression" = "True" ]; then
# note_parts+=("proprio_state")
# fi
# if [ "$use_film" = "True" ]; then
# note_parts+=("Film")
# fi
note_parts+=("M$max_steps-F$save_freq-D$num_steps_before_decay")
# FIX: the old `$(IFS='--'; echo "${note_parts[*]}")` joined with only the
# FIRST character of IFS, so the intended "--" separator silently collapsed
# to "-". Join explicitly with "--" and strip the trailing separator.
run_id_note_value=$(printf '%s--' "${note_parts[@]}")
run_id_note_value=${run_id_note_value%--}
#========== enter environment ==========#
# conda activate openvla-oft
# Fail fast if the project checkout is missing instead of letting torchrun
# run from whatever directory we happen to be in (ShellCheck SC2164).
cd "/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/public-project/chengdongzhou-240108390137/vla_projects/$PROJECT_PATH" || exit 1
export PYTHONPATH="/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/public-project/chengdongzhou-240108390137/vla_projects/$PROJECT_PATH"
#========== run ==========#
# Single-node, 4-GPU launch of the finetuning entry point. W&B stays fully
# offline (console capture off), so nothing is pushed from the cluster.
finetune_args=(
  --vla_path "$vla_path"
  --data_root_dir "$data_root_dir"
  --dataset_name "$dataset_name"
  --run_root_dir "$run_root_dir"
  --use_l1_regression "$use_l1_regression"
  --use_diffusion "$use_diffusion"
  --use_film "$use_film"
  --num_images_in_input "$num_images_in_input"
  --use_proprio "$use_proprio"
  --batch_size "$batch_size"
  --learning_rate 5e-4
  --num_steps_before_decay "$num_steps_before_decay"
  --max_steps "$max_steps"
  --save_freq "$save_freq"
  --save_latest_checkpoint_only False
  --image_aug True
  --lora_rank 32
  --wandb_entity "$wandb_entity"
  --wandb_project "$wandb_project"
  --wandb_log_freq "$wandb_log_freq"
  --run_id_note "$run_id_note_value"
  --use_predict_future_prop "$use_predict_future_prop"
  --use_action_ts_head "$use_action_ts_head"
  --use_one_embed "$use_one_embed"
  --use_multi_scaling "$use_multi_scaling"
  --mlp_type "$mlp_type"
  --decoder_num_blocks "$decoder_num_blocks"
  --robot_platform "$robot_platform"
)
WANDB_CONSOLE=off WANDB_MODE=offline torchrun \
  --standalone --nnodes 1 --nproc-per-node 4 \
  vla-scripts/finetune.py "${finetune_args[@]}"