#!/usr/bin/env bash
# Activate the VLN-CE conda environment and pin the GPU before launching
# episode generation.
set -euo pipefail

# shellcheck disable=SC1091 — conda's activate script is resolved at runtime.
source "$HOME/autodl-tmp/miniconda3/bin/activate" vlnce

# Restrict all CUDA work in this script to GPU 0.
export CUDA_VISIBLE_DEVICES=0

# CM2-GT refers to using the ground-truth egocentric map as input,
# effectively only performing path prediction.
#
# Arguments are kept in an array so optional flags (e.g. --scenes_list) can
# be toggled by (un)commenting a single line — the original layout placed the
# commented --scenes_list flag after the last (un-escaped) continuation line,
# so uncommenting it would silently NOT have joined the command.
cm2_gt_args=(
  --root_path /root/autodl-tmp/lcy/ETPNav/habitat-lab-0.1.7/
  --scenes_dir data/scene_datasets/
  --episodes_save_dir ./datasets/cm2-gt-episodes/
  --gpu_capacity 4
  # --scenes_list 1pXnuDYAj8r
)
python store_episodes_vln.py "${cm2_gt_args[@]}"


# CM2 refers to our full pipeline that predicts both the egocentric map and path from RGB-D inputs.
# python store_episodes_vln_no_map.py \
#     --root_path /root/autodl-tmp/lcy/ETPNav/habitat-lab-0.1.7/ \
#     --scenes_dir data/scene_datasets/ \
#     --episodes_save_dir ./datasets/cm2-episodes/ \
#     --gpu_capacity 1 \
#     --img_segm_model_dir weights/img_segm_model.pt \
#     # --scenes_list 1pXnuDYAj8r
