#!/bin/bash

# Copyright 2013-2014 MERL (author: Felix Weninger and Shinji Watanabe)

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.

# This is a shell script, but it's recommended that you run the commands one by
# one by copying and pasting into the shell.
# Caution: some of the graph creation steps use quite a bit of memory, so you
# should run this on a machine that has sufficient memory.

# Requirements: matlab and tcsh


# Import parallelization and path settings.
# cmd.sh is expected to define $train_cmd / $decode_cmd (used below);
# path.sh puts the Kaldi binaries on PATH.
. ./cmd.sh
. ./path.sh

# Default starting stage; override on the command line with --stage N
# (parse_options.sh consumes --name value pairs for variables defined above it).
stage=7
. utils/parse_options.sh
# Set bash to 'debug' mode: print each command ('-x') and exit on
# '-e' error, '-u' undefined variable, '-o pipefail' error in a pipeline.
set -euxo pipefail

# Please make sure to set the paths of the REVERB and WSJ0 data.
# Root of the local REVERB corpus installation.
REVERB_home=~/REVERB

# Clean wsjcam0 corpus (used for clean-condition training).
export wsjcam0=$REVERB_home/wsjcam0

# LDC WSJ0 directory (LDC93S6A or LDC93S6B), used to obtain LMs.
# The REVERB data directory only provides the bi-gram (bcb05cnp), but this
# recipe also uses the 3-gram (tcb05cnp.z).
# It is assumed that there is a 'wsj0' subdirectory within this
# top-level corpus directory.
export wsj0=$REVERB_home/WSJ0_LangMod_REVERB

# Simulated dev/eval sets and the "real" MC-WSJ-AV dev/eval sets.
export reverb_dt=$REVERB_home/REVERB_WSJCAM0_dt
export reverb_et=$REVERB_home/REVERB_WSJCAM0_et
export reverb_real_dt=$REVERB_home/MC_WSJ_AV_Dev
export reverb_real_et=$REVERB_home/MC_WSJ_AV_Eval

# Directory where the multi-condition training data is generated.
reverb_tr=~/REVERB_cut

# LDA context size, frames to the left/right (4 is the default).
context_size=4

# Language model with which to decode (tg_5k or bg_5k).
lm=tg_5k

# Number of parallel jobs for feature extraction and model training.
nj_train=10

# Number of parallel jobs for decoding.
nj_decode=6

# Run the feature/CMVN preparation helper before the staged pipeline.
# NOTE(review): this script is invoked again in stage 5 — confirm whether
# both calls are required, or whether one of them can be dropped.
./cmvnfeat.sh
if [ $stage -le 1 ]; then
  # Prepare wsjcam0 clean data and wsj0 language model.
  # Paths are quoted so corpus locations containing spaces do not break the calls.
  local/wsjcam0_data_prep.sh "$wsjcam0" "$wsj0"

  # Prepare merged BEEP/CMU dictionary.
  local/wsj_prepare_beep_dict.sh

  # Prepare wordlists, etc.
  utils/prepare_lang.sh data/local/dict "<SPOKEN_NOISE>" data/local/lang_tmp data/lang

  echo "Prepare directory structure for clean data. Apply some language model fixes."
  local/wsjcam0_format_data.sh

  # Now it's getting more interesting.
  # Prepare the multi-condition training data and the REVERB dt/et sets.
  # This also extracts MFCC features (!!!)
  # This creates the data sets called REVERB_tr_cut, REVERB_dt and REVERB_et.
  # If you have processed waveforms, this is a good starting point to integrate them.
  # For example, you could have something like
  #   local/REVERB_wsjcam0_data_prep.sh /path/to/processed/REVERB_WSJCAM0_dt processed_REVERB_dt dt
  # The first argument is supposed to point to a folder that has the same
  # structure as the REVERB corpus.
  local/REVERB_wsjcam0_data_prep.sh "$reverb_tr" REVERB_tr_cut tr
  local/REVERB_wsjcam0_data_prep.sh "$reverb_dt" REVERB_dt dt
  local/REVERB_wsjcam0_data_prep.sh "$reverb_et" REVERB_et et

  # Prepare the REVERB "real" dt/et sets from the MCWSJAV corpus.
  # This corpus is *never* used for training.
  # This creates the data set called REVERB_Real_dt and its subfolders.
  local/REVERB_mcwsjav_data_prep.sh "$reverb_real_dt" REVERB_Real_dt dt
  # The MLF file exists only once in the corpus, namely in the real_dt
  # directory, so we pass it as 4th argument.
  local/REVERB_mcwsjav_data_prep.sh "$reverb_real_et" REVERB_Real_et et "$reverb_real_dt/mlf/WSJ.mlf"
fi

if [ $stage -le 2 ]; then
  # Extract MFCC features and CMVN stats for the clean and multi-condition
  # training sets plus the evaluation sets.
  # For the other non-clean data sets, this is outsourced to the data
  # preparation scripts.
  mfccdir=mfcc

  # si_dt is skipped: it seems that the number of transcriptions of si_dt
  # is not correct. These set names are relative to data/.
  for x in si_tr REVERB_tr_cut/SimData_tr_for_1ch_A; do
    steps/make_mfcc.sh --cmd "$train_cmd" --nj $nj_train \
      data/$x exp/make_mfcc/$x $mfccdir
    steps/compute_cmvn_stats.sh data/$x exp/make_mfcc/$x $mfccdir
  done

  # The evaluation sets are globbed with the data/ prefix included, so the
  # exp/make_mfcc log dirs carry a data/ component — kept as-is for backward
  # compatibility with existing experiment directories.
  for x in data/REVERB_et/* data/REVERB_Real_et/*; do
    steps/make_mfcc.sh --cmd "$train_cmd" --nj $nj_train \
      "$x" exp/make_mfcc/"$x" $mfccdir
    steps/compute_cmvn_stats.sh "$x" exp/make_mfcc/"$x" $mfccdir
  done
fi


if [ $stage -le 3 ]; then
  # Fixed: the banner previously said "stage 4" inside the stage-3 block.
  echo -e "\033[47;30m stage 3 running \033[0m"

  # Train monophone model on clean data (si_tr).
  echo "### TRAINING mono0a ###"
  steps/train_mono.sh --boost-silence 1.25 --nj $nj_train --cmd "$train_cmd" \
    data/si_tr data/lang exp/mono0a

  # Align monophones with clean data.
  echo "### ALIGNING mono0a_ali ###"
  steps/align_si.sh --boost-silence 1.25 --nj $nj_train --cmd "$train_cmd" \
    data/si_tr data/lang exp/mono0a exp/mono0a_ali

  # Create first triphone recognizer.
  echo "### TRAINING tri1 ###"
  steps/train_deltas.sh --boost-silence 1.25 --cmd "$train_cmd" \
    2000 10000 data/si_tr data/lang exp/mono0a_ali exp/tri1

  # Re-align triphones.
  echo "### ALIGNING tri1_ali ###"
  steps/align_si.sh --nj $nj_train --cmd "$train_cmd" \
    data/si_tr data/lang exp/tri1 exp/tri1_ali
fi


if [ $stage -le 4 ]; then
  # Train tri2b recognizer (LDA-MLLT), using the default parameters from the
  # WSJ recipe. The stray `wait` calls after these foreground commands were
  # no-ops and have been removed.
  echo "### TRAINING tri2b ###"
  steps/train_lda_mllt.sh --cmd "$train_cmd" \
    --splice-opts "--left-context=$context_size --right-context=$context_size" \
    2500 15000 data/si_tr data/lang exp/tri1_ali exp/tri2b

  # Re-align with the tri2b model.
  echo "### ALIGNING tri2b_ali ###"
  steps/align_si.sh --nj $nj_train --cmd "$train_cmd" \
    data/si_tr data/lang exp/tri2b exp/tri2b_ali
fi
#exit 0;
######   DNN train sMBR
if [ $stage -le 5 ]; then
  ./cmvnfeat.sh
  # Remove any previous DNN experiment directories before retraining.
  rm -rf exp/tri2b_nnet*

  # Train a tanh DNN on the multi-condition data on top of the tri2b
  # alignments. run.pl is used here (instead of $train_cmd) with a small
  # fixed job count, matching the original setup.
  steps/nnet2/train_tanh.sh --cmd "run.pl" --num-jobs-nnet 4 --num-threads 4 \
    data/REVERB_tr_cut/SimData_tr_for_1ch_A data/lang exp/tri2b_ali exp/tri2b_nnet

  base_recog=tri2b_nnet
  denlats_dir=${base_recog}_denlats
  # Shell arithmetic instead of `echo ... | bc` (no external processes).
  subsplit=$((nj_train * 2))

  # Align the multi-condition data with the trained DNN.
  steps/nnet2/align.sh --nj $nj_train --cmd "$train_cmd" \
    data/REVERB_tr_cut/SimData_tr_for_1ch_A data/lang exp/tri2b_nnet exp/tri2b_nnet_ali

  # Generate denominator lattices for discriminative (sMBR) training.
  steps/nnet2/make_denlats.sh --sub-split $subsplit --nj $nj_train --cmd "$decode_cmd" \
    data/REVERB_tr_cut/SimData_tr_for_1ch_A data/lang exp/$base_recog exp/$denlats_dir

  # Discriminative (sMBR) training of the DNN.
  steps/nnet2/train_discriminative.sh --cmd "run.pl" \
    data/REVERB_tr_cut/SimData_tr_for_1ch_A \
    data/lang \
    exp/tri2b_nnet_ali \
    exp/$denlats_dir \
    exp/$base_recog/final.mdl \
    exp/tri2b_nnet_mbr
fi


if [ $stage -le 6 ]; then
  # Build the decoding graph for each recognizer, in parallel.
  # The banner is derived from $recog (it previously hard-coded the wrong
  # name "tri2b_mbr").
  for recog in tri2b_nnet_mbr; do
    echo "### MAKING GRAPH $recog/graph_$lm ###"
    utils/mkgraph.sh data/lang_test_$lm exp/$recog exp/$recog/graph_$lm &
  done
  wait
fi
if [ $stage -le 7 ]; then
  # Decode all evaluation sets with the sMBR-trained DNN.
  recog=tri2b_nnet_mbr
  graph=exp/$recog/graph_$lm
  # The banner previously read "DECODING with , noadapt" — the recognizer
  # name was missing.
  echo "### DECODING with $recog, noadapt, $lm ###"
  for dataset in data/REVERB*_et/*; do   # use data/REVERB*_dt/* for the dev sets
    # Map data/<set>/<cond> to <lm>_<set>_<cond> with parameter expansion
    # (replaces the previous backtick echo|awk pipeline).
    rel=${dataset#data/}
    decode_suff=${lm}_${rel//\//_}
    # The original backgrounded each decode and immediately waited, which is
    # sequential execution anyway; run in the foreground for clarity.
    steps/nnet2/decode.sh --nj $nj_decode --cmd "$decode_cmd" \
      "$graph" "$dataset" exp/$recog/decode_mbr_$decode_suff
  done
fi

# Get all WERs with lmw=13 (the comment previously said 15, but the code
# passes --lmw 13), tri-gram 5k LM, sMBR-trained model.
if [ $stage -le 8 ]; then
  local/calc_wer.sh --am tri2b_nnet_mbr --lmw 13 --lm tg_5k --decode mbr
fi
