#!/bin/bash

# Kaldi GMM-HMM training recipe: mono -> tri1 (deltas) -> tri2b (LDA+MLLT)
# -> tri3b/tri4b/tri5b (SAT) -> tri6b (quick), then nnet3 augmentation/DNN.
# Adapted from the librispeech recipe; some comments below are leftovers.

# Set this to somewhere where you want to put your data, or where
# someone else has already put it.  You'll want to change this
# if you're not on the CLSP grid.
# Xinglong Gao, 2018-2019
#data=/home/gaoxinglong/env/xlg_share/k12_total/product_center_data_20200425_8csv
# base url for downloads.
stage=8 # resume point; 8 means from train_mono if we have computed features
train=true          # NOTE(review): not referenced below -- confirm before relying on it
decode=false        # NOTE(review): not referenced below
final_decode=false  # NOTE(review): not referenced below
suffix="kid1234h"   # appended to every exp/ and data-subset directory name
lang_suffix="_48"   # NOTE(review): not referenced below
only_clean=true     # if true, stop right after the cleanup stage (currently dead code)
nj=56               # number of parallel jobs for the alignment stages
mfccdir=fbank       # feature directory (fbank features, despite the "mfcc" name)

lang_nosp=data/lang_nosp_open4k48v1     # lang dir without pronunciation probs
dict_nosp=data/local/dict_open4k48v1    # dict dir without pronunciation probs
lang_nosp_updated=${lang_nosp}_updated  # lang dir rebuilt with pron/sil probs (stage 13)
trainset=from1020w_lt70                 # training-set name under data/



. ./cmd.sh
. ./path.sh
# Must come after the defaults above so command-line options (--stage, --nj,
# ...) can override them; parse_options.sh is found on PATH via path.sh.
. parse_options.sh

# you might not want to do this for interactive shells.
set -e

# Data preparation (stages <= 4) is not handled by this script; refuse to run.
# Exit non-zero so wrapper scripts can detect the refusal (it used to exit 0,
# which silently looked like success).
# NOTE(review): the message says "at least 7" but the guard allows stages 5
# and 6 (feature extraction) -- confirm the intended minimum stage.
if [ "$stage" -le 4 ]; then
  echo "You should start at least 7" >&2
  exit 1
fi


echo "start compute features ..."
if [ $stage -le 6 ]; then
  # Extract fbank features and compute CMVN stats for the training set.
  # Use the configurable $nj (this was hard-coded to 48, which silently
  # ignored the nj default above and any --nj passed on the command line).
  for part in $trainset; do
    steps/make_fbank.sh --cmd "$train_cmd" --nj "$nj" data/$part exp/make_fbank/$part $mfccdir
    steps/compute_cmvn_stats.sh data/$part exp/make_fbank/$part $mfccdir
  done
fi

# Utterance count of the training set; `wc -l < file` prints the bare number,
# so the awk post-processing is no longer needed.
num_total=$(wc -l < data/$trainset/text)
# Half of the training set.  NOTE(review): not referenced below -- confirm.
# ($(( )) replaces the deprecated $[ ] arithmetic syntax.)
num_semi=$((num_total / 2))
echo "start train ..."
if [ $stage -le 7 ]; then
  # Make some small data subsets for early system-build stages.  For the
  # monophone stage we select the shortest utterances, which should make it
  # easier to align the data from a flat start.
  # (Directory names say 2k/5k/10k but the subsets hold 20k/50k/100k utts.)
  utils/subset_data_dir.sh --shortest data/$trainset 20000 data/train_2kshort$suffix
  utils/subset_data_dir.sh data/$trainset 50000 data/train_5k$suffix
  utils/subset_data_dir.sh data/$trainset 100000 data/train_10k$suffix
  echo "ending split"
fi

if [ $stage -le 8 ]; then
  # Train a monophone system (flat start) on the 20k shortest utterances.
  echo 'stage mono 1'
  steps/train_mono_online.sh --boost-silence 1.25 --nj $nj --cmd "$train_cmd" \
                      data/train_2kshort$suffix $lang_nosp exp/mono$suffix

fi

if [ $stage -le 9 ]; then
  # Align the 50k-utterance subset (train_5k) with the monophone model.
  steps/align_si_online.sh --boost-silence 1.25 --nj $nj  --cmd "$train_cmd" \
                    data/train_5k$suffix $lang_nosp exp/mono$suffix exp/mono_ali_5k$suffix

  # Train a first delta + delta-delta triphone system (tri1) on that subset
  # (2000 leaves, 10000 Gaussians).
  echo 'stage delta 2'
  steps/train_deltas_online.sh --boost-silence 1.25 --cmd "$train_cmd" \
                        2000 10000 data/train_5k$suffix $lang_nosp exp/mono_ali_5k$suffix exp/tri1$suffix

fi

if [ $stage -le 10 ]; then
  # Align the full training set with tri1.
  steps/align_si_online.sh --nj $nj --cmd "$train_cmd" \
                    data/$trainset  $lang_nosp exp/tri1$suffix exp/tri1_ali_${trainset}$suffix


  # Train an LDA+MLLT system (tri2b) on the full training set
  # (2500 leaves, 15000 Gaussians, +/-3 frames of splicing).
  echo 'stage lda_mllt 3'
  steps/train_lda_mllt_online.sh --cmd "$train_cmd" \
                          --splice-opts "--left-context=3 --right-context=3" 2500 15000 \
                          data/$trainset $lang_nosp exp/tri1_ali_${trainset}$suffix exp/tri2b$suffix

fi


if [ $stage -le 11 ]; then
  # Align the full training set using the tri2b model, reusing its graphs.
  # NOTE(review): a previous comment said "a 10k utts subset", but the
  # command aligns all of data/$trainset.
  steps/align_si_online.sh  --nj $nj --cmd "$train_cmd" --use-graphs true data/$trainset  $lang_nosp exp/tri2b$suffix exp/tri2b_ali_${trainset}$suffix
  #steps/align_si_online.sh  --nj $nj --cmd "$train_cmd" --use-graphs true data/$trainset  $lang_nosp exp/tri2b85 exp/tri2b_ali_${trainset}$suffix

  # Train tri3b, an LDA+MLLT+SAT system (2500 leaves, 15000 Gaussians).
  echo 'stage sat 4'
  steps/train_sat_online.sh --cmd "$train_cmd" 2500 15000 data/${trainset}\
	  $lang_nosp exp/tri2b_ali_${trainset}$suffix exp/tri3b$suffix

fi

if [ $stage -le 12 ]; then
  # fMLLR-align the entire training set using the tri3b model.
  steps/align_fmllr_online.sh --nj $nj --cmd "$train_cmd" data/$trainset $lang_nosp exp/tri3b$suffix exp/tri3b_ali_${trainset}$suffix
  #steps/align_fmllr_online.sh --nj $nj --cmd "$train_cmd" data/$trainset $lang_nosp exp/tri3b85 exp/tri3b_ali_${trainset}$suffix

  # Train a larger LDA+MLLT+SAT system, tri4b (4200 leaves, 40000 Gaussians).
  echo 'stage sat 4.1'
  steps/train_sat_online.sh  --cmd "$train_cmd" 4200 40000 \
                      data/$trainset  $lang_nosp \
                      exp/tri3b_ali_${trainset}$suffix exp/tri4b$suffix

fi

if [ $stage -le 13 ]; then
  # Now we compute the pronunciation and silence probabilities from training data
  # (tri4b alignments), and re-create the lang directory with them.
  steps/get_prons.sh --cmd "$train_cmd" \
                     data/$trainset $lang_nosp exp/tri4b$suffix

  # Rebuild data/local/dict from $dict_nosp with the estimated pronunciation
  # and silence probabilities; remove any stale copy first.
	[ -d data/local/dict ] && rm -rf data/local/dict
  utils/dict_dir_add_pronprobs.sh --max-normalize true \
                                  $dict_nosp \
                                  exp/tri4b$suffix/pron_counts.txt exp/tri4b$suffix/sil_counts_nowb.txt \
                                  exp/tri4b$suffix/pron_bigram_counts_nowb.txt data/local/dict

  # Re-create the lang directory as $lang_nosp_updated.  The temp dir is
  # suffixed with the PID ($$) so concurrent runs do not collide.
  utils/prepare_lang.sh data/local/dict \
                        "<UNK>" data/local/lang_tmp.$$ $lang_nosp_updated

  #local/format_lms.sh --src-dir $lang_common data/local/lm

  # utils/build_const_arpa_lm.sh \
    # data/local/lm/lm_tglarge.arpa.gz data/lang data/lang_test_tglarge
  # utils/build_const_arpa_lm.sh \
    # data/local/lm/lm_fglarge.arpa.gz data/lang data/lang_test_fglarge
fi



if [ $stage -le 15 ]; then
  # fMLLR-align the training set with tri4b, now against the updated lang dir
  # (the one with pronunciation/silence probabilities from stage 13).
  steps/align_fmllr_online.sh --nj $nj --cmd "$train_cmd" \
                       data/$trainset  $lang_nosp_updated exp/tri4b$suffix exp/tri4b_ali_${trainset}$suffix

  # Train a larger SAT model, tri5b (5000 leaves, 100000 Gaussians).
  steps/train_sat_online.sh  --cmd "$train_cmd" 5000 100000 \
                      data/$trainset $lang_nosp_updated exp/tri4b_ali_${trainset}$suffix exp/tri5b$suffix

  echo "Trained $trainset  is finished in exp/tri5b$suffix"
fi


# The following (commented-out) command trained an older nnet2 model; it is
# deprecated now.
## train a NN model on the 460 hour set
#local/nnet2/run_6a_clean_460.sh

if [ $stage -le 16 ]; then
  # fMLLR-align the training set with tri5b.
  steps/align_fmllr_online.sh --nj $nj --cmd "$train_cmd" \
                       data/$trainset $lang_nosp_updated exp/tri5b$suffix exp/tri5b_ali_${trainset}$suffix

  # Train the final GMM system, tri6b (7000 leaves, 150000 Gaussians), with
  # train_quick_online.sh as it is faster (it reuses the earlier model).
  steps/train_quick_online.sh --cmd "$train_cmd" \
                       7000 150000 data/$trainset $lang_nosp_updated exp/tri5b_ali_${trainset}$suffix exp/tri6b$suffix

fi

# Run nnet3 data augmentation and DNN training, then stop.  The exit below is
# the script's current end point.
local/nnet3/run_data_augment_only.sh
local/nnet3/run_nnet3_dnn.sh
exit 0;


# NOTE(review): everything from here to the end of the file is unreachable
# because of the `exit 0` above.  Remove/move that exit to re-enable the
# cleanup stage and the $only_clean early-exit below.
if [ $stage -le 17 ]; then
  # this does some data-cleaning. The cleaned data should be useful when we add
  # the neural net and chain systems.  (although actually it was pretty clean already.)
  #local/nnet3/run_data_augment_only.sh
  local/run_cleanup_segmentation.sh
fi




# Stop after the cleanup stage when only_clean=true (the default).
if $only_clean;then
exit 0;
fi


# (Unreachable: the script exits in the nnet3 section above.)

# steps/cleanup/debug_lexicon.sh --remove-stress true  --nj 200 --cmd "$train_cmd" data/train_clean_100 \
#    data/lang exp/tri6b data/local/dict/lexicon.txt exp/debug_lexicon_100h


if [ $stage -le 19 ]; then
  # train and test nnet3 tdnn models on the entire data with data-cleaning.
  local/chain/run_tdnn_simple2.sh # set "--stage 11" if you have already run local/nnet3/run_tdnn.sh
fi

# The nnet3 TDNN recipe:
# local/nnet3/run_tdnn.sh # set "--stage 11" if you have already run local/chain/run_tdnn.sh

# # train models on cleaned-up data
# # we've found that this isn't helpful-- see the comments in local/run_data_cleaning.sh
# local/run_data_cleaning.sh

# # The following is the current online-nnet2 recipe, with "multi-splice".
# local/online/run_nnet2_ms.sh

# # The following is the discriminative-training continuation of the above.
# local/online/run_nnet2_ms_disc.sh

# ## The following is an older version of the online-nnet2 recipe, without "multi-splice".  It's faster
# ## to train but slightly worse.
# # local/online/run_nnet2.sh

# Wait for decodings in the background
# NOTE(review): nothing in this script launches background jobs, so this
# `wait` is currently a no-op (leftover from the librispeech recipe).
wait