#!/usr/bin/env bash
# HuBERT iter3 pipeline:
#   stage 11: dump HuBERT (iter2) hidden features, sharded over GPUs
#   stage 12: learn a k-means model on the train-set features
#   stage 13: apply k-means to produce frame-level pseudo labels
#   stage 14: build the fairseq label dictionary
#   stage 15: pretrain iter3 with hydra_train
#   stage 16/17: finetune

. ./path.sh || exit 1;
. ./cmd.sh || exit 1;

export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5"
#export CUDA_VISIBLE_DEVICES="4,7"
# general configuration
stage=15
stop_stage=15
base_dir=$(pwd -P)
conf_dir=${base_dir}/config/pretrain
exp_dir=/home/work_nfs8/xlgeng/workspace/checkpoint/hubert
mkdir -p "${exp_dir}"
# training config
simu_gpu=32    # GPUs being simulated via gradient accumulation (update_freq)
num_node=1
# count visible GPUs: number of commas in CUDA_VISIBLE_DEVICES plus one
num_gpu=$(echo "${CUDA_VISIBLE_DEVICES}" | tr -cd , | wc -c | awk '{print $1 + 1}')
# data
train_set=train_with_sample
# NOTE(review): "samlpe" looks like a typo for "sample", but it must match the
# on-disk data name — confirm against the data directory before renaming.
dev_set=dev_with_samlpe
test_set=test_with_sample
all_data=( "train_with_sample" "dev_with_samlpe" "test_with_sample" )
#data_dir=/home/work_nfs6/hfxue/workspace/fairseq/data1114
#data_dir=/home/work_nfs9/lhli/dialect/data
data_dir=${base_dir}/data_list
#model_dir=/home/work_nfs6/hfxue/model/hubert
#model_dir=${base_dir}/model/hubert
# . utils/parse_options.sh || exit 1;

# Set bash to 'debug' mode, it will exit on :
# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands',
set -e
set -u
set -o pipefail

# world_size * update_freq == simu_gpu, so effective batch matches simu_gpu GPUs
world_size=$((num_node*num_gpu))
update_freq=$((simu_gpu/world_size))

# for hubert-large model (iter3)
exp_name=hubert_large_iter3_little_train_by_gxl
extract_layer=9            # which transformer layer to extract hidden features from
feat=${exp_name}_extract_layer_${extract_layer}
feat_dir=${exp_dir}/feature/${feat}
echo "我是耿雪龙"
nj=78                      # number of parallel shards for feature/label dumping
if [ ${stage} -le 11 ] && [ ${stop_stage} -ge 11 ]; then
    echo "stage 11: Compute Hubert (iter2) features started @ $(date)"
    for dset in "${all_data[@]}"; do
        echo "drop feat: $dset"
        # Fan out nj shard jobs, round-robin over the 6 visible GPUs.
        # Collect the PIDs and wait on each one individually: a bare `wait`
        # always returns 0, so a crashed shard would otherwise be silently
        # ignored even under `set -e`.
        pids=()
        for ((i = 0; i < nj; ++i)); do
            CUDA_VISIBLE_DEVICES=$((i % 6)) \
            python ${base_dir}/simple_kmeans/dump_hubert_feature.py   \
                ${data_dir} ${dset}                        \
                /home/work_nfs7/bsmu/SALMONN/resource/chinese_hubert_large.pt    \
                ${extract_layer} ${nj} $i ${feat_dir}/${dset}  \
                --max_chunk 160000000 &
            pids+=($!)
        done
#        # alternative: GNU parallel instead of hand-rolled fan-out
#        seq 0 $((nj-1)) | parallel -j $nj 'CUDA_VISIBLE_DEVICES={} python ${base_dir}/simple_kmeans/dump_hubert_feature.py ${data_dir} ${set} /home/work_nfs7/bsmu/SALMONN/resource/chinese_hubert_large.pt ${extract_layer} ${nj} {} ${feat_dir}/${set} 160000000'
        # wait for all shards; propagate any non-zero exit status
        for pid in "${pids[@]}"; do
            wait "$pid"
        done
    done
    ### 5 audios to long to open, delete it from the data/train/*.tsv files manually:
    ### X0000000105_79574524_S00076.wav
    ### X0000000122_86957292_S00090.wav  843200
    ### Y0000006938_HatORX3ygTw_S01283.wav  1334880
    ### Y0000013374_Ye_zDOzH6Zs_S00073.wav
    ### Y0000014924_bwvdKMGSG0s_S01827.wav
    ### here, we generate a new data/train_iter2 fold
    echo "stage 11: Done @ $(date)"
fi

# --- stage 12 setup: k-means clustering on the dumped features ---
n_cluster=500
#exp=/home/work_nfs9/xlgeng/checkpoint/hubert0107/unsupervised
#exp=/home/work_nfs9/xlgeng/new_workspace/checkpoint/hubert0107/unsupervised
km_dir=${exp_dir}/k-means/${feat}
mkdir -p "${exp_dir}/k-means" "${km_dir}"
if [ "${stage}" -le 12 ] && [ "${stop_stage}" -ge 12 ]; then
    echo "wo shi gengxuelong "
    echo "stage 12: K-means clustering on Hubert (iter2) featues started @ $(date)"
    # Fit an n_cluster-way k-means model on 99% of the train-set features
    # and store it at ${km_dir}/model.mdl.
    # ${train_cmd} ${km_dir}/log/k-means.log                          \
    python "${base_dir}/simple_kmeans/learn_kmeans.py" \
        "${feat_dir}/${train_set}" "${train_set}" "${nj}" \
        "${km_dir}/model.mdl" "${n_cluster}" \
        --percent 0.99 --batch_size 10000
    echo "stage 12: Done @ $(date)"
fi

if [ ${stage} -le 13 ] && [ ${stop_stage} -ge 13 ]; then
    echo "stage 13: K-means application started @ $(date)"
    for dset in "${all_data[@]}"; do
        echo "drop label: $dset"
        # Fan out nj labelling shards, round-robin over the 6 visible GPUs.
        # Track PIDs so that a failed shard fails the stage: a bare `wait`
        # returns 0 regardless of job status and would hide the error.
        pids=()
        for ((i = 0; i < nj; ++i)); do
            CUDA_VISIBLE_DEVICES=$((i % 6)) \
            python ${base_dir}/simple_kmeans/dump_km_label.py         \
                ${feat_dir}/${dset} ${dset} ${km_dir}/model.mdl               \
                ${nj} $i ${km_dir}/${dset}_label &
            pids+=($!)
        done
        for pid in "${pids[@]}"; do
            wait "$pid"
        done
        # merge labels for different shards
        for rank in $(seq 0 $((nj - 1))); do
            cat ${km_dir}/${dset}_label/${dset}_${rank}_${nj}.km
        done > ${km_dir}/${dset}.km
    done
    echo "stage 13: Done @ $(date)"
fi

if [ "${stage}" -le 14 ] && [ "${stop_stage}" -ge 14 ]; then
    echo "stage 14: Generate dict started @ $(date)"
    # Build the fairseq dictionary (dict.km.txt) from the k-means label files.
    # fairseq-preprocess                              \
    python ../../fairseq_cli/preprocess.py \
        --only-source \
        --source-lang km \
        --dict-only \
        --trainpref "${km_dir}/${train_set}" \
        --validpref "${km_dir}/${dev_set}" \
        --destdir "${km_dir}" \
        --workers 10
    echo "stage 14: Done @ $(date)"
fi

# hydra expects a list literal like [km]; quote it so bash cannot treat the
# bare [km] as a glob pattern (a file named "k" or "m" in cwd would match).
label_name='["km"]'
label_rate=50
# model_size=large
conf_name=hubert_large_iter3.yaml
# exp_name=hubert_${model_size}_iter3_${simu_gpu}gpu_1
exp_name=exp_output
output_dir=${exp_dir}/${exp_name}
mkdir -p "${output_dir}"
# km_dir=${exp_dir}/k-means/hubert_base_iter2_32gpu_l9
if [ ${stage} -le 15 ] && [ ${stop_stage} -ge 15 ]; then
    echo "stage 15: Train the second iteration started @ $(date)"
    # Warm-start from the chinese_hubert_large checkpoint but reset the
    # optimizer/dataloader/meters, i.e. use it for weights only.
    python ../../fairseq_cli/hydra_train.py --config-dir ${conf_dir} \
        --config-name ${conf_name} \
        hydra.run.dir=${output_dir}                                 \
        task.data=${data_dir}                                        \
        "task.labels=${label_name}"                                 \
        task.label_dir=${km_dir}                                    \
        model.label_rate=${label_rate}                              \
        common.tensorboard_logdir=${output_dir}/tblog               \
        checkpoint.save_dir=${output_dir}                           \
        checkpoint.restore_file=/home/work_nfs7/bsmu/SALMONN/resource/chinese_hubert_large.pt   \
        checkpoint.reset_optimizer=true \
        checkpoint.reset_dataloader=true \
        checkpoint.reset_meters=true \
        distributed_training.distributed_world_size=${world_size}   \
        +optimization.update_freq="[${update_freq}]"
    echo "stage 15: Done @ $(date)"
fi

# letter-level labels for finetuning; quoted so [ltr] is never glob-expanded
label_name='["ltr"]'
label_rate=50
model_size=large
conf_dir=${base_dir}/config/finetune
conf_name=large_1000h.yaml
exp_name=hubert_${model_size}_iter3_${simu_gpu}gpu_aishell_1_chihubert
finetune_dir=${exp_dir}/${exp_name}
if [ ${stage} -le 16 ] && [ ${stop_stage} -ge 16 ]; then
    echo "stage 16: Finetune the third iteration started @ $(date)"
        # fairseq-hydra-train                                             \
        python ../../fairseq_cli/hydra_train.py \
            hydra.run.dir=${finetune_dir}                                 \
            task.data=${data_dir}                                      \
            "task.labels=${label_name}"                                 \
            model.w2v_path=/home/work_nfs6/hfxue/model/hubert/hubert_large_iter3_128gpu.pt              \
            common.tensorboard_logdir=${finetune_dir}/tblog               \
            checkpoint.save_dir=${finetune_dir}                           \
            distributed_training.distributed_world_size=${world_size}   \
            +optimization.update_freq="[${update_freq}]"                \
            --config-dir ${conf_dir}                                    \
            --config-name ${conf_name}
    # was "stage 15: Done" — copy-paste bug; this is stage 16
    echo "stage 16: Done @ $(date)"
fi

if [ ${stage} -le 17 ] && [ ${stop_stage} -ge 17 ]; then
    echo "stage 17: Finetune the third iteration started @ $(date)"
    # NOTE(review): reuses ${finetune_dir} defined for stage 16 — both stages
    # write checkpoints/tensorboard logs to the same directory; confirm that
    # is intended before running both.
    python ../../fairseq_cli/hydra_train.py \
        --config-dir config/finetune \
        --config-name large_1000h.yaml \
        task.data=${data_dir} task.label_dir=${data_dir} \
        model.w2v_path=/home/work_nfs6/hfxue/model/hubert/hubert_large_iter3_128gpu.pt \
        common.tensorboard_logdir=${finetune_dir}/tblog               \
        checkpoint.save_dir=${finetune_dir}                           \
        distributed_training.distributed_world_size=${world_size}   \
        +optimization.update_freq="[${update_freq}]"
    # completion marker, matching every other stage in this script
    echo "stage 17: Done @ $(date)"
fi
