#!/bin/bash

# Apache 2.0
# This is the script that trains DNN system over the filterbank features. It
# is to  be  run after run.sh. Before running this, you should already build
# the initial GMM model. This script requires a GPU card, and also the "pdnn"
# toolkit to train the DNN. The input filterbank features are with mean  and
# variance normalization. 

# For more information regarding the recipes and results, visit our website
# http://www.cs.cmu.edu/~ymiao/kaldipdnn

# ---------------------------------------------------------------------
# Configuration. Any of these can be overridden from the command line
# through parse_options.sh (e.g. --layer-dim 2048).
# ---------------------------------------------------------------------
layer_dim=1024   # width of each DNN hidden layer (used in --nnet-spec below)
ivecGauss=128    # number of UBM Gaussians; only used to name the i-vector dirs
ivecDim=25       # i-vector dimensionality, passed to run_DNN_Str.py via --ivec-dim
hivecDim=100     # passed via --hivec-dim; presumably the hidden i-vector layer size — TODO confirm against run_DNN_Str.py
#working_dir=exp_pdnn/warm_str_dnn_fbank_new
working_dir=exp_pdnn/warm6_str_dnn_fbank   # all outputs of this script land here
base_dnn_dir=exp_pdnn/dnn6_fbank_new       # supplies data/dev and data/test for decoding
ivec_baseline=exp_pdnn/warm_ivec_dnn_fbank_new  # supplies train/valid pfiles, feature_transform, splice_opts
gmmdir=exp/tri3   # initial GMM system: provides final.mdl, graph/, and ${gmmdir}_ali


# I-vectors for the training and decoding speakers. There should be an ivector.scp
# file in each of both directories.
# NOTE(review): train_ivec is only referenced by the commented-out check below.
train_ivec=exp/ivectors_train_${ivecGauss}_${ivecDim}
test_ivec=exp/ivectors_test_${ivecGauss}_${ivecDim}
dev_ivec=exp/ivectors_dev_${ivecGauss}_${ivecDim}


# Specify the gpu device to be used
gpu=gpu   # Theano device string for THEANO_FLAGS, e.g. "gpu", "gpu0", or "cpu"

cmd=run.pl   # how training jobs are launched (run.pl = run locally)
. cmd.sh                       # defines $decode_cmd used by the decoding stages
[ -f path.sh ] && . ./path.sh  # puts Kaldi binaries (gmm-info, ...) on PATH
. parse_options.sh || exit 1;  # apply --name value command-line overrides

# At this point you may want to make sure the directory $working_dir is
# somewhere with a lot of space, preferably on the local GPU-containing machine.

# Fetch the PDNN toolkit and its Kaldi wrapper scripts if not already present.
# Abort immediately when a checkout fails, instead of failing later with a
# confusing "No such file" from the training commands.
if [ ! -d pdnn ]; then
  echo "Checking out PDNN code."
  svn co svn://svn.code.sf.net/p/kaldipdnn/code-0/pdnn pdnn || exit 1
fi

if [ ! -d steps_pdnn ]; then
  echo "Checking out steps_pdnn scripts."
  svn co svn://svn.code.sf.net/p/kaldipdnn/code-0/trunk/steps_pdnn steps_pdnn || exit 1
fi

# Running nvidia-smi both tests for a usable GPU and logs the device state.
# This is a warning only; training can fall back to the CPU.
if ! nvidia-smi; then
  echo "The command nvidia-smi was not found: this probably means you don't have a GPU."
  echo "(Note: this script might still work, it would just be slower.)"
fi

# The hope here is that Theano has been installed either to python or to
# python2.6; pick whichever interpreter can import it.
pythonCMD=python
if ! python -c 'import theano;'; then
  if ! python2.6 -c 'import theano;'; then
    # BUGFIX: previously this also printed "(Note: this script might still
    # work, it would just be slower.)" — a line copied from the GPU check —
    # right before exiting, which was contradictory. Theano is mandatory.
    echo "Theano does not seem to be installed on your machine.  Not continuing."
    exit 1;
  else
    pythonCMD=python2.6
  fi
fi

mkdir -p $working_dir/log

# Check whether i-vectors have been generated
#for f in $train_ivec/ivector.scp $test_ivec/ivector.scp $dev_ivec/ivector.scp; do
#  [ ! -f $f ] && echo "Error i-vectors for $f have NOT been extracted. " && exit 1;
#done

# Sanity-check the GMM model before doing any heavy work.
! gmm-info $gmmdir/final.mdl >&/dev/null && \
   echo "Error getting GMM info from $gmmdir/final.mdl" && exit 1;

# The DNN output layer has one unit per GMM pdf.
num_pdfs=$(gmm-info $gmmdir/final.mdl | grep pdfs | awk '{print $NF}') || exit 1;

# Reuse the feature transform and splice options of the i-vector baseline so
# features are preprocessed identically; abort if either file is missing
# (previously a failed cp went unnoticed).
cp $ivec_baseline/feature_transform $working_dir/ || exit 1;
cp $ivec_baseline/splice_opts $working_dir/ || exit 1;

echo "number of pdfs $num_pdfs "


echo ---------------------------------------------------------------------
echo "Starting DNN training"
echo ---------------------------------------------------------------------
# Read the input dimensionality from the pfile's "num_features" header field.
# head avoids streaming the whole (potentially huge) pfile through grep.
feat_dim=$(head $ivec_baseline/train.pfile | grep num_features | awk '{print $2}') || exit 1;
echo "dimensions of features is $feat_dim " 


if [ ! -f $working_dir/dnn1.fine.done ]; then
  echo "Fine-tuning DNN"
  # Stage 1 ("warm") fine-tuning: initialize the first 6 layers from
  # $working_dir/dnn.ptr and train with --warm True.
  # BUGFIX: PYTHONPATH previously pointed at `pwd`/ptdnn/, a directory this
  # script never creates; the toolkit is checked out into `pwd`/pdnn/ (see
  # the svn checkout above), which is also where run_DNN_Str.py lives.
  $cmd $working_dir/log/dnn.fine.log \
    export PYTHONPATH=$PYTHONPATH:`pwd`/pdnn/ \; \
    export THEANO_FLAGS=mode=FAST_RUN,device=$gpu,floatX=float32 \; \
    $pythonCMD pdnn/run_DNN_Str.py --train-data "$ivec_baseline/train.pfile,partition=1000m,random=true,stream=true" \
                          --valid-data "$ivec_baseline/valid.pfile,partition=400m,random=true,stream=true" \
                          --nnet-spec "$feat_dim:$layer_dim:$layer_dim:$layer_dim:$layer_dim:$layer_dim:$layer_dim:$num_pdfs" \
                          --ptr-file $working_dir/dnn.ptr --ptr-layer-number 6 --ivec-dim $ivecDim --hivec-dim $hivecDim \
                          --output-format kaldi --lrate "D:0.08:0.5:0.2,0.2:8" --warm True \
                          --wdir $working_dir --output-file $working_dir/dnn.nnet || exit 1;
  touch $working_dir/dnn1.fine.done
fi

echo ---------------------------------------------------------------------
echo "Decode the intermediate system"
echo ---------------------------------------------------------------------
if [ ! -f  $working_dir/decode1.done ]; then
  # The scoring stage of the decoder needs final.mdl in the working dir.
  cp $gmmdir/final.mdl $working_dir || exit 1;
  graph_dir=$gmmdir/graph
  # Decode both evaluation sets with the stage-1 network; the i-vector
  # directory for each set is resolved by indirection ($dev_ivec/$test_ivec).
  for dset in dev test; do
    ivec_ref=${dset}_ivec
    steps_pdnn/decode_dnn_ivec_new.sh --nj 8 --acwt 0.2 \
      --scoring-opts "--min-lmwt 1 --max-lmwt 15" --cmd "$decode_cmd" --is-spk-mode true \
      $graph_dir $base_dnn_dir/data/$dset ${gmmdir}_ali ${!ivec_ref} $working_dir/decode_${dset}1 || exit 1;
  done

  touch $working_dir/decode1.done
fi

echo ---------------------------------------------------------------------
echo "Retrain all the weights"
echo ---------------------------------------------------------------------


# Archive the stage-1 outputs under "*1" names so stage 2 can reuse the
# plain names. The structured-layer weights (str.nnet.tmp) become the
# pre-training file for stage 2.
# BUGFIX: each mv is now checked — previously a failed rename still touched
# move.done, permanently skipping this stage on reruns with a broken state.
if [ ! -f $working_dir/move.done ]; then
  mv $working_dir/dnn.nnet $working_dir/dnn1.nnet || exit 1;
  mv $working_dir/dnn.ptr $working_dir/dnn1.ptr || exit 1;
  mv $working_dir/nnet.finetune.tmp $working_dir/nnet1.finetune.tmp || exit 1;
  mv $working_dir/log $working_dir/log1 || exit 1;
  mv $working_dir/str.nnet.tmp $working_dir/dnn.ptr || exit 1;
  touch $working_dir/move.done
fi



if [ ! -f $working_dir/dnn.fine.done ]; then
  echo "Fine-tuning DNN"
  # Stage 2: retrain all weights, warm-started from the structured-layer
  # weights moved to $working_dir/dnn.ptr above (no --warm flag here).
  # BUGFIX: PYTHONPATH previously pointed at `pwd`/ptdnn/, a directory this
  # script never creates; the toolkit is checked out into `pwd`/pdnn/ (see
  # the svn checkout above), which is also where run_DNN_Str.py lives.
  $cmd $working_dir/log/dnn.fine.log \
    export PYTHONPATH=$PYTHONPATH:`pwd`/pdnn/ \; \
    export THEANO_FLAGS=mode=FAST_RUN,device=$gpu,floatX=float32 \; \
    $pythonCMD pdnn/run_DNN_Str.py --train-data "$ivec_baseline/train.pfile,partition=1000m,random=true,stream=true" \
                          --valid-data "$ivec_baseline/valid.pfile,partition=400m,random=true,stream=true" \
                          --nnet-spec "$feat_dim:$layer_dim:$layer_dim:$layer_dim:$layer_dim:$layer_dim:$layer_dim:$num_pdfs" \
                          --ptr-file $working_dir/dnn.ptr --ptr-layer-number 6 --ivec-dim $ivecDim --hivec-dim $hivecDim \
                          --output-format kaldi --lrate "D:0.08:0.5:0.2,0.2:8" \
                          --wdir $working_dir --output-file $working_dir/dnn.nnet || exit 1;
  touch $working_dir/dnn.fine.done
fi

echo ---------------------------------------------------------------------
echo "Decode the final system"
echo ---------------------------------------------------------------------
if [ ! -f  $working_dir/decode.done ]; then
  # The scoring stage of the decoder needs final.mdl in the working dir.
  cp $gmmdir/final.mdl $working_dir || exit 1;
  graph_dir=$gmmdir/graph
  # Decode both evaluation sets with the fully retrained network; the
  # i-vector directory per set is resolved by indirection ($dev_ivec/$test_ivec).
  for dset in dev test; do
    ivec_ref=${dset}_ivec
    steps_pdnn/decode_dnn_ivec_new.sh --nj 8 --acwt 0.2 \
      --scoring-opts "--min-lmwt 1 --max-lmwt 15" --cmd "$decode_cmd" --is-spk-mode true \
      $graph_dir $base_dnn_dir/data/$dset ${gmmdir}_ali ${!ivec_ref} $working_dir/decode_$dset || exit 1;
  done

  touch $working_dir/decode.done
fi

echo "Finish !!"