
@REM @echo off

rem Inclusive range of pipeline steps to execute on this run.
set "StartStepNum=7"
set "EndStepNum=7"

rem Root of the COLMAP 3.8 CUDA distribution (keep the trailing backslash:
rem it is concatenated directly with file names below).
set "ColmapPath=E:\DevelopProj\Yuji\3DGaussian\depandency\COLMAP-3.8-windows-cuda\COLMAP-3.8-windows-cuda\"

rem Step 0. (Optional) Launch the interactive COLMAP GUI.
if %StartStepNum% leq 0 (
    if %EndStepNum% geq 0 (
        rem Quoted so the call survives spaces in the install path.
        call "%ColmapPath%COLMAP.bat"
    )
)


rem --- Scene configuration ----------------------------------------------------
rem Root folder that holds all monocular capture scenes.
set "DATA_PATH=D:\DevelopProj\Yuji\Hair\implicit-hair-data\data\monocular\"
rem Name of the scene (case) to process.
set "CASE_NAME=man1"
rem Absolute path of the current scene folder.
set "DATA_FULL_PATH=%DATA_PATH%%CASE_NAME%"

rem Command-line COLMAP executable (as opposed to the GUI launcher above).
set "ColmapBin=%ColmapPath%bin\colmap.exe"

rem Step 1. (Optional) Run COLMAP SfM to obtain cameras.
rem Produces colmap\sparse (binary model), colmap\sparse_txt (TXT model),
rem then parses them into the project format via colmap_parsing.py.
if %StartStepNum% leq 1 (
    if %EndStepNum% geq 1 (

        if not exist "%DATA_FULL_PATH%\colmap" (
            mkdir "%DATA_FULL_PATH%\colmap"
        )

        rem Feature extraction + matching + reconstruction in one command.
        rem All paths quoted so the pipeline survives spaces in DATA_PATH.
        "%ColmapBin%" automatic_reconstructor --workspace_path "%DATA_FULL_PATH%/colmap" --image_path "%DATA_FULL_PATH%/video_frames"

        if not exist "%DATA_FULL_PATH%\colmap\sparse_txt" (
            mkdir "%DATA_FULL_PATH%\colmap\sparse_txt"
        )
        rem Convert the first sparse reconstruction (sparse/0) to readable TXT files.
        "%ColmapBin%" model_converter --input_path "%DATA_FULL_PATH%/colmap/sparse/0" --output_path "%DATA_FULL_PATH%/colmap/sparse_txt" --output_type TXT

        python colmap_parsing.py --path_to_scene "%DATA_FULL_PATH%" --save_path "%DATA_FULL_PATH%/colmap"
    )
)

rem Step 2. (Optional) Manual step — define the region of interest in the point cloud.
rem The colmap/point_cloud.ply from Step 1 is very noisy: crop the region of
rem interest in MeshLab and save it into the scene folder as point_cloud_cropped.ply.
rem Nothing is executed automatically here.
if %StartStepNum% leq 2 if %EndStepNum% geq 2 rem manual step, nothing to run

rem Step 3. Transform cropped scene to lie in a unit sphere volume.
if %StartStepNum% leq 3 (
    if %EndStepNum% geq 3 (
        rem NOTE(review): --case/--scene_type are passed empty here — presumably the
        rem script only needs --path_to_data in this setup; confirm against the script.
        python ../preprocess_custom_data/scale_scene_into_sphere.py --case "" --scene_type "" --path_to_data "%DATA_FULL_PATH%"
    )
)


rem Step 4. (Optional) Manual step — crop input images and postprocess cameras.
rem NeuralHaircut currently supports only square images, so crop accordingly.
rem Nothing is executed automatically here.
if %StartStepNum% leq 4 if %EndStepNum% geq 4 rem manual step, nothing to run


rem Step 5. Obtain hair/silhouette masks plus orientation and confidence maps.
if %StartStepNum% leq 5 (
    if %EndStepNum% geq 5 (
        @echo on

        rem BUGFIX: cmd expands %var% at parse time for the whole parenthesized
        rem block, so variables assigned inside the block and read with %...%
        rem below would have been empty/stale. Enable delayed expansion and read
        rem them with !...! instead.
        setlocal enabledelayedexpansion
        set "path_to_modnet=D:\DevelopProj\Yuji\Hair\models\MODNet-pretrained\PretrainedModels\modnet_photographic_portrait_matting.ckpt"
        set "path_to_cdgnet=D:\DevelopProj\Yuji\Hair\models\CDGNet-pretrained\CE2P\LIP_epoch_149.pth"
        echo !path_to_modnet!
        echo !path_to_cdgnet!

        python ../preprocess_custom_data/calc_masks.py --scene_path "%DATA_FULL_PATH%" --MODNET_ckpt "!path_to_modnet!" --CDGNET_ckpt "!path_to_cdgnet!"
        python ../preprocess_custom_data/calc_orientation_maps.py --img_path "%DATA_FULL_PATH%/image/" --orient_dir "%DATA_FULL_PATH%/orientation_maps" --conf_dir "%DATA_FULL_PATH%/confidence_maps"
        endlocal

        @echo off
    )
)

rem Step 6. (Optional) Manual step — choose the views to train on and save the
rem selection into a views.pickle file. Nothing is executed automatically here.
if %StartStepNum% leq 6 if %EndStepNum% geq 6 rem manual step, nothing to run

rem Step 7. Fit a FLAME head model from the multiview images and cameras.
rem Prerequisite: in the PIXIE project, run face fitting and save the
rem initialization_pixie dict.
if %StartStepNum% leq 7 (
    if %EndStepNum% geq 7 (
        python ../src/multiview_optimization/fit.py --conf ../src/multiview_optimization/confs/train_person_1.conf --batch_size 1 --train_rotation True --save_path ./experiments/fit_person_1_bs_1

        rem Later stages resume from the previous checkpoint with larger batches:
        @REM python fit.py --conf confs/train_person_1.conf --batch_size 5 --train_rotation True --save_path  ./experiments/fit_person_1_bs_5 --checkpoint_path ./experiments/fit_person_1_bs_1/opt_params

        @REM python fit.py --conf confs/train_person_1_.conf --batch_size 20 --train_rotation True --train_shape True --save_path  ./experiments/fit_person_1_bs_20_train_rot_shape  --checkpoint_path ./experiments/fit_person_1_bs_5/opt_params
    )
)

rem Step 8. Cut eyes of FLAME head, needed for scalp regularizaton.
rem NOTE(review): CASE and SCENE_TYPE here are literal placeholder strings, and
rem the data path is a relative ./implicit-hair-data/data/ rather than the
rem %DATA_FULL_PATH% used by the other steps — presumably these must be
rem substituted (e.g. with %CASE_NAME%) before this step can run; confirm.
if %StartStepNum% leq 8 (
    if %EndStepNum% geq 8 (
        python  ./preprocess_custom_data/cut_eyes.py --case CASE --scene_type SCENE_TYPE --path_to_data ./implicit-hair-data/data/
    )
)








