#!/usr/bin/env bash
# Fine-tune a HuBERT model via fairseq's hydra_train entry point.
# Edit data/save/model.w2v_path below for the target experiment, then run
# from the fairseq repository root (hydra_train.py is referenced relatively).
set -euo pipefail

# Earlier data roots, kept for reference:
#data=/home/work_nfs4_ssd/hfxue/data/ml_superb/kaldi/fairseq
#data=/home/work_nfs6/hfxue/workspace/fairseq/data1114
data=/home/work_nfs4_ssd/hfxue/workspace/wenet1218/examples/wenetspeech_stutter/s0/data

# Earlier checkpoint dir, kept for reference:
#save=/home/41_data/hfxue/checkpoint/hubert1124/3dialectsn_3
save=/home/work_nfs7/hfxue/checkpoint/stutter/hubert_2
# rm -rf -- "$save"   # uncomment to discard previous checkpoints and restart

config=examples/hubert/config/finetune
configname=base_960h.yaml

# Seven visible GPUs; must agree with
# distributed_training.distributed_world_size=7 below.
DEVICES=0,1,2,3,4,5,6

mkdir -p -- "$save"
# phonesMatches_reduced

CUDA_VISIBLE_DEVICES="$DEVICES" python fairseq_cli/hydra_train.py \
    task.data="$data" \
    common.seed=1337 \
    common.log_file="$save/train.json" \
    common.tensorboard_logdir="$save/tensorboard" \
    checkpoint.save_dir="$save" \
    distributed_training.distributed_world_size=7 \
    optimization.update_freq='[2]' \
    model.w2v_path="/home/work_nfs6/hfxue_data/hubert_large_iter3_128gpu.pt" \
    --config-dir "$config" \
    --config-name "$configname"

# Candidate w2v checkpoint used previously:
# /home/41_data/hfxue/checkpoint/hubert/large_3dialects/hubert_large_iter3_64gpu_1/checkpoint_best.pt