\documentclass[journal]{IEEEtran}

\ifCLASSINFOpdf
   \usepackage[pdftex]{graphicx}
\else
\fi

\usepackage{float}
%\restylefloat{table}

% correct bad hyphenation here
\hyphenation{op-tical net-works semi-conduc-tor}


\begin{document}
\title{Learning Partitioned Hidden Layers for Unsupervised Speaker Adaptation of DNN based Acoustic Models}

\author{Lahiru~Samarakoon,
        Sim~Khe~Chai,~\IEEEmembership{Member,~IEEE}}% <-this % stops a space



% The paper headers
\markboth{Journal of \LaTeX\ Class Files,~Vol.~11, No.~4, December~2012}%
{Shell \MakeLowercase{\textit{et al.}}: Bare Demo of IEEEtran.cls for Journals}


% make the title area
\maketitle

% As a general rule, do not put math, special symbols or citations
% in the abstract or keywords.
\begin{abstract}

This paper proposes a modified feed-forward DNN structure for acoustic modeling which is trained progressively in a speaker-aware fashion using i-vectors. The concept of Partitioned Hidden Layers is introduced, which has a separate partition of nodes to transform the speaker representation. This learned speaker representation hierarchy allows us to perform speaker adaptation on a subset of parameters. Therefore, our method benefits from the implicit speaker normalization due to the incorporation of i-vectors as well as from the explicit unsupervised adaptation.

Experiments on the TIMIT phone recognition task show that, when compared with the baseline, the Partitioned DNN approach showed 4.2\% and 2.9\% relative improvement when input features to the model are speaker independent and speaker adapted respectively. Unsupervised adaptation improved the performance further with relative improvements of 7.8\% and 6.9\% respectively compared with the baseline.

\end{abstract}

% Note that keywords are not normally used for peerreview papers.
\begin{IEEEkeywords}
Automated speech recognition, deep neural networks, unsupervised speaker adaptation.
\end{IEEEkeywords}

\IEEEpeerreviewmaketitle



\section{Introduction}


\IEEEPARstart{R}{ecently}, Deep Neural Network (DNN) based acoustic modeling has achieved the best performances in Automated Speech Recognition (ASR) systems  in comparison to the conventional Gaussian Mixture Model (GMM) based systems \cite{Hintonatel}. The increased computational power and the utilization of Graphical Processing Units (GPUs) in computations have made the training of these complex models affordable. Moreover, the advances in machine learning approaches in DNN training have also contributed to the increased performance. DNN-HMM systems surpass conventional GMM-HMM systems by using the superior representation learning power of DNNs to model senone log-likelihood, combined with the sequential modeling capability of Hidden Markov Models (HMMs) to model speech signals. 


DNNs, like all other machine learning techniques, suffer due to the differences in training and testing conditions. Adaptation techniques change the model to match the testing condition or change the inputs to match the model. In ASR, speaker adaptation techniques are used to optimize the performance by minimizing the training-testing mismatch introduced by the speaker variability. The two most successful ways of adapting a GMM-HMM model are to use Maximum a posteriori (MAP) \cite{MAP} or Maximum Likelihood Linear Regression (MLLR) \cite{MLLR} techniques. In MAP, instead of using Maximum Likelihood for the estimation of parameters, model parameters are reestimated by maximizing the posterior probability. In MLLR, a linear transformation of the model parameters is estimated to construct the adapted model. It is possible to take advantage of GMM-HMM adaptation techniques with tandem systems \cite{Tandem,Tandem2} in which a DNN is trained to extract bottle-neck features for a GMM-HMM system.

The adaptation techniques developed for generative GMMs cannot be directly utilized for discriminative DNNs. In addition, due to the large number of parameters in DNN-HMM systems, techniques developed for ANN-HMM hybrid systems \cite{ANN} are prone to overfitting when a small amount of adaptation data is available. However, DNN adaptation is important as it reduces the error rates significantly \cite{KLDNN,IVECT,IVECT1,SPEAKECODE1}. A good adaptation technique should prevent overfitting to the adaptation data. This is achieved by finding a compact representation of the model parameters or using a regularization based method to perform the adaptation conservatively. Moreover, compact representation based methods can perform the adaptation efficiently and also minimize the storage costs due to the smaller footprint per speaker. Furthermore, it is desirable to perform adaptation in an unsupervised fashion with pseudo transcriptions of the data, which is more realistic.

In this paper, we propose a modified DNN structure which is speaker adaptively trained on acoustic features concatenated with i-vectors \cite{IVECT2}\cite{IVECT3}.  I-vectors can be considered as low dimensional representations of speaker characteristics. DNN learning is capable of using this extra information about speakers to perform speaker normalization implicitly. Then, the resulting model is adapted using unsupervised speaker adaptation on a subset of model parameters, which is selected employing the new structure to further improve the recognition performance.

The rest of the paper is organized as follows. In Section 2, a brief review of DNN adaptation techniques is given. Section 3 describes our approach based on partitioned hidden layers. Experimental results are reported under Section 4 and we conclude our work in Section 5.

% You must have at least 2 lines in the paragraph with the drop letter
% (should never be an issue)

%\hfill mds
 
%\hfill December 27, 2012
%\pagebreak


\section{DNN Adaptation}

\begin{figure*}[!t]
	\centering
	\includegraphics[height=3.5in, width=7in]{prgressLearning.png}
	%\includegraphics[height=4in, width=2.2in]{prgressLearning.png}
	\caption{Learning Procedure for the Proposed Method.}
	\label{fig:ProgressiveLearning}
\end{figure*}

DNN adaptation techniques can be categorized into three classes: linear transforms; regularization methods; and subspace methods.

Linear Transformation based methods augment the original DNN model with a linear layer. Usually, the linear layer is initialized with an identity matrix and zero biases and is updated with the back-propagation (BP) algorithm using the adaptation data while keeping the weights of the original DNN fixed. In linear input network (LIN) \cite{LIN1}\cite{LIBO} and feature discriminative linear regression (fDLR) \cite{FDLR} a linear layer is inserted between the input layer and the first hidden layer. The intuition is similar to fMLLR \cite{MLLR} where speaker dependent (SD) features are linearly transformed to match the speaker independent (SI) model. When the linear transformation is applied to the softmax layer the adaptation technique is known as linear output network (LON) \cite{LIBO}. The intuition is to transform the last hidden layer's SD feature representation to match the average speaker. Depending on the number of output neurons it is possible to apply the transformation after or before the softmax layer weights. When the linear transformation is applied to the hidden layers it is known as the linear hidden network (LHN) \cite{LHN}.

The adaptation of all the parameters is more powerful and hence more effective than linear transformations. However, this may lead to overfitting since the amount of adaptation data is limited. Conservative training methods address this issue by adding a regularization term to the adaptation criterion. In \cite{KLDNN}, a KL divergence based method is used to force the distribution of the adapted model to be closer to that of the SI model. The estimated distribution is a linear interpolation between the target distribution (derived using alignments) and the distribution of the SI model. Another popular approach is $L_2$ regularization \cite{L2} which aims to keep the parameters of the adapted model closer to those of the SI model. However, speaker personalization of all the parameters increases storage costs which necessitates the employment of techniques that reduce per-speaker footprint \cite{FOOTPRINT}.
 
Updating all the DNN parameters during adaptation is computationally expensive and results in high deployment costs due to storage requirements. Therefore, some approaches perform the adaptation on a subset of parameters, including the last hidden layer \cite{RLL}, output layer biases \cite{OUTBIASES} or more active hidden units of the network \cite{ACTIVE}.


It is also possible to find a speaker subspace and perform the adaptation as a point in the subspace. In \cite{PCA}, principal component analysis (PCA) is performed on a set of adaptation matrices to get eigenvectors. Then transformations for test speakers can be estimated as a linear combination of these eigenvectors. Coefficients for each speaker are estimated using the BP procedure. This method can also be used with LIN, LHN and LON techniques \cite{LIBO}.

Another popular subspace method is to feed the features for speaker variability with acoustic features. The idea behind it is to let the DNN algorithm learn speaker dependent transforms to perform speaker normalization implicitly. The estimation of this speaker information can be independent from DNN training. Popular methods use i-vectors \cite{IVECT}\cite{IVECT1} or bottle-neck features extracted  from a separate DNN trained for speaker classification \cite{MLP_FACTORS}.

Another way is to estimate the speaker information during training. These methods learn a very compact feature vector representing speaker-dependent information called a speaker code for each speaker. These speaker codes act as speaker dependent biases. Due to the highly compact representation of the speaker code, these methods are used successfully in supervised adaptation with a small number of utterances. However, speaker code methods are sensitive to the errors in transcription and do not perform well in unsupervised adaptation.

The proposed method takes advantage of the implicit speaker normalization of speaker-aware training by feeding i-vectors with acoustic features. Moreover, the modified structure of the DNN facilitates a selection of a subset of parameters to improve the performance further by performing unsupervised speaker adaptation explicitly.


\section{Partitioned Hidden Layer Training}


A DNN can be viewed as a model which learns a feature representation as well as a classifier. Each hidden layer learns a non-linear transformation ($h^l$) from the lower layer's representation ($h^{l-1}$), which can be shown as:

\begin{equation}
\label{standard}
h^l = \sigma (W^l h^{l-1}+ b^l )
\end{equation} where $\sigma$ is the sigmoid activation function. $W^l$ and $b^l$ are the weight matrix and the bias vector for layer $l$, respectively. 

The proposed approach introduces partitioned hidden layers (PHL), which have two partitions of hidden units. The first partition is known as the standard partition and it learns acoustic feature representations ($h^l$), and the second (speaker) partition  is to learn speaker representations ($s^l$). The standard partition of the hidden layer receives signals from both partitions of the lower layer, whereas the speaker partition of the hidden layer receives the input signal from the speaker partition of the lower layer.

\begin{equation}
\label{partitioned_standard}
h^l = \sigma (W_1^l h^{l-1}+ (W_2^l s^{l-1} + b_{h}^l ))
\end{equation}
\begin{equation}
\label{partitioned_speaker}
s^l = \sigma (W_3^l s^{l-1}+ b_{s}^l )
\end{equation} where $W_1^l$,$W_2^l$,$W_3^l$ and $b_{h}^l$,$b_{s}^l$ are weight matrices and bias vectors for the PHL $l$, respectively.


\subsection{Training and Adaptation}

The training starts from an initial DNN model which only contains standard hidden layers (SHLs). The input features are augmented by appending the speaker i-vector for each feature frame. Then, the first SHL is replaced with a PHL and the modified DNN model is trained using the BP procedure. In the next step, the second SHL is replaced using a PHL and trained. Similarly, this procedure is continued until all the hidden layers of the DNN are PHLs. The idea behind this progressive replacement of SHLs with PHLs from the bottom is to find the best combination of PHLs and SHLs for the given task. This training procedure is given in Fig.~\ref{fig:ProgressiveLearning}. During the adaptation, the test speaker's i-vector is adapted in an unsupervised manner until convergence.

The proposed method has several advantages. First, the acoustic model is trained in a speaker-aware fashion using i-vectors, which performs implicit speaker normalization. Second, the number of adaptation parameters is equal to the dimensionality of the i-vector, which is a very small number. This makes the unsupervised adaptation fast, effective and more robust to overfitting.

Although there are some similarities to the speaker code approach \cite{SPEAKECODE1}, our method differs in three aspects. First, in \cite{SPEAKECODE1}, one speaker code is used to normalize all the hidden layers. In contrast, we learn a speaker representation hierarchy based on i-vectors. We believe it is more intuitive to learn a feature hierarchy since a DNN encodes knowledge hierarchically. Second, their method works well for supervised adaptation whereas we report gains for unsupervised adaptation, which is more challenging. Third, during training, our method does not require speaker labels for input frames while they are needed by the speaker code based adaptation.
 

\section{Experiments}

\subsection{Experimental Setup}

In this paper, all experiments are performed on the TIMIT corpus. The standard training set of 462 speakers is used with all the SA sentences removed. A development set of 50 speakers is used in meta parameter tuning. We report the results on the standard core test set of 24 speakers.

Two baseline DNN models are built on two types of features with 6 hidden layers and 1024 units per layer, with 2042 senones as outputs. The first baseline is trained on top of 40 filterbank coefficients plus energy with first and second derivatives. The second baseline is trained on 40 dimensional fMLLR features. In both cases, 11 neighboring frames are used and cepstral mean and variance normalization is performed globally. All the DNNs are trained to optimize the cross-entropy criterion with a mini-batch size of 256. The Kaldi toolkit \cite{KALDI} is used for GMM-HMM system building and in training of initial DNNs. Progressive learning of partitioned DNNs is done using the Kaldi+PDNN framework \cite{PDNN}.

The extraction of i-vectors is different for filterbank and fMLLR models. For the filterbank models, the i-vector extractor is trained on MFCC features whereas for fMLLR models the extractor is trained on fMLLR features. In both cases, UBMs have 128 Gaussians and the dimensionality of i-vectors is 25. In our experiments, Kaldi \cite{KALDI} i-vector tools are used.

\subsection{Results}

First, we investigated how many PHLs are necessary for both filterbank and fMLLR models (Table~\ref{tbl:num_layers}). For the filterbank DNN, the best performance was recorded when five PHLs are used. Furthermore, the phone error rate (PER) of the filterbank DNN decreased gradually with the inclusion of PHLs until the number of PHLs reaches 5. Therefore, we use a DNN with 5 PHLs and one SHL in our filterbank experiments. For the fMLLR DNN, the PER improved only when the DNN contains 3 PHLs. Furthermore, for the best performance only 4 PHLs were sufficient. Therefore, in our fMLLR experiments we are using a DNN with 4 PHLs and 2 SHLs.


\begin{table}[H]
	\renewcommand{\arraystretch}{1.3}
	\caption{PER (\%) on the development set as a function of the number of partitioned hidden layers. All models are adapted.}
	\label{tbl:num_layers}
	\centering
		\begin{tabular}{|c||c|c|c|c|c|c|c|}	
			\hline
			Number of SHLs & 6 & 5 & 4 & 3 & 2 & 1 & 0  \\
			\hline
			Number of PHLs & 0 & 1 & 2 & 3 & 4 & 5 & 6 \\
			\hline
			filterbank DNN  & 16.3 & 16.0 & 15.8 & 15.7 & 15.6 & 15.5 & 15.6 \\
			\hline
			fMLLR DNN & 15.2 & 15.2 & 15.2 & 15.1 & 15.0 & 15.0 & 15.0 \\
			\hline
		\end{tabular}		
\end{table}

Table~\ref{tbl:dimensions} shows how PER (\%) changes with the size of the speaker representation in PHLs. The following results are for the best combinations of hidden layers from Table~\ref{tbl:num_layers}. For the values of 100 and 150 the models performed equally. As it can be clearly seen, when the dimension is 50 the performance is a little worse than for the other values. In consequence, in all experiments, 100 is used as the dimensionality of the speaker representation in PHLs. Therefore, PHLs in this paper have a total of 1124 hidden units with 1024 units for the standard partition and 100 units for the speaker partition.

\begin{table}[H]
	\caption{PER (\%) on the development set as a function of the speaker representation size of PHLs}
	\label{tbl:dimensions}
	\begin{center}
		\begin{tabular}{|c||c|c|c|}	
			\hline
			Units & 50 & 100 & 150  \\
			\hline
			Partitioned-DNN(Fbank) & 15.6 & 15.5 & 15.5 \\
			\hline
			Partitioned-DNN(fMLLR) & 15.1 & 15.0 & 15.0 \\
			\hline
		\end{tabular}		
	\end{center}	
\end{table}



Table~\ref{tbl:results} presents results for the best configuration for both filterbank and fMLLR models before the adaptation. For the comparison, we also included results for models as in \cite{IVECT}, with only SHLs (i-vector DNN); these models use the same features as Partitioned DNNs where i-vectors are concatenated to acoustic feature frames. With filterbank features, the Partitioned DNN achieves the PER of 18.4\% which is a 4.2\% relative improvement over the baseline model. The Partitioned DNN trained on fMLLR features reported a smaller gain (2.9\%) than that of the filterbank Partitioned DNN. This is simply because fMLLR features are already transformed to reduce the mismatch due to speaker variability. It is also worthwhile to mention that the Partitioned-DNN performance was considerably better than that of the corresponding i-vector-DNN model for both filterbank and fMLLR features.

\begin{table}[H]
	\caption{PER (\%) of various DNN models on test set (before adaptation) for filterbank and fMLLR features respectively. Relative improvement over the baseline is given in brackets.}
	\label{tbl:results}
	\begin{center}
		\begin{tabular}{|c||c|c|}	
			\hline
			Model & Fbank & fMLLR  \\
			\hline
			Initial DNN & 19.2 & 17.3 \\
			\hline
			i-vector DNN & 18.8 (2.1) & 17.2 (0.6) \\
			\hline
			Partitioned DNN & 18.4 (4.2) & 16.8 (2.9) \\
			\hline
		\end{tabular}		
	\end{center}	
\end{table}


Finally, we report the results for unsupervised adaptation for Partitioned-DNNs in Table~\ref{tbl:adaptresults}. On both features, the Partitioned-DNN model performed significantly better than the corresponding baseline model. For instance, the Partitioned-DNN trained on filterbank features achieves 7.8\% relative improvement while the fMLLR Partitioned-DNN records a relative improvement of 6.9\%. For a fair comparison, we also performed the adaptation on i-vector DNN models. As it can be clearly seen in Table~\ref{tbl:adaptresults}, the Partitioned-DNN adaptations displayed better recognition performances than those of the respective i-vector DNNs.

\begin{table}[H]
	\caption{Adaptation Results}
	\label{tbl:adaptresults}
	\begin{center}
		\begin{tabular}{|c||c|c|}	
			\hline
			Model & Fbank & fMLLR  \\
			\hline
			Initial DNN & 19.2 & 17.3 \\
			\hline
			i-vector DNN & 18.2 (5.2) & 16.4 (5.2) \\
			\hline
			Partitioned-DNN & 17.7 (7.8) & 16.1 (6.9) \\
			\hline
		\end{tabular}		
	\end{center}	
\end{table}


\section{Conclusions}

We have presented a technique to minimize the training-testing mismatch due to the speaker variability of DNN based acoustic models. Our technique  benefits from both implicit and explicit speaker normalization.  Results on TIMIT phone recognition task show relative improvements of 7.8\% and 6.9\%  with speaker independent filterbank features and fMLLR features respectively. 



\ifCLASSOPTIONcaptionsoff
  \newpage
\fi	


%\pagebreak
\begin{thebibliography}{1}

\bibitem{Hintonatel}
G.~Hinton, L.~Deng, D.~Yu, G.~Dahl, A.~Mohamed, N.~Jaitly, A.~Senior,
V.~Vanhoucke, P.~Nguyen, T.~Sainath, and B.~Kingsbury.
\newblock Deep neural networks for acoustic modeling in speech recognition: The
shared views of four research groups.
\newblock {\em Signal Processing Magazine, IEEE}, 29(6):82--97, Nov 2012.

\bibitem{MAP}
J.~Gauvain and C.-H. Lee.
\newblock Maximum a posteriori estimation for multivariate gaussian mixture
observations of markov chains.
\newblock {\em Speech and Audio Processing, IEEE Transactions on},
2(2):291--298, Apr 1994.

\bibitem{MLLR}
C.~Leggetter and P.~Woodland.
\newblock Maximum likelihood linear regression for speaker adaptation of
continuous density hidden markov models.
\newblock {\em Computer Speech and language}, 9(2):171 -- 185, 1995.

\bibitem{Tandem}
Hermansky, H.; Ellis, D.P.W.; Sharma, S. 
Tandem connectionist feature extraction for conventional HMM systems Acoustics, Speech, and Signal Processing, 2000. ICASSP '00. Proceedings. 2000 IEEE International Conference on , vol.3, no., pp.1635,1638 vol.3, 2000

\bibitem{Tandem2}
Bell, P.; Swietojanski, P.; Renals, S., "Multi-level adaptive networks in tandem and hybrid ASR systems," Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE International Conference on , vol., no., pp.6975,6979, 26-31 May 2013

\bibitem{ANN}
Morgan, N.; Bourlard, H., "Continuous speech recognition using multilayer perceptrons with hidden Markov models," Acoustics, Speech, and Signal Processing, 1990. ICASSP-90., 1990 International Conference on , vol., no., pp.413,416 vol.1, 3-6 Apr 1990

\bibitem{KLDNN}
D.~Yu, K.~Yao, H.~Su, G.~Li, and F.~Seide.
\newblock Kl-divergence regularized deep neural network adaptation for improved
large vocabulary speech recognition.
\newblock In {\em Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE
	International Conference on}, pages 7893--7897, May 2013.

\bibitem{IVECT}
V.~Gupta, P.~Kenny, P.~Ouellet, and T.~Stafylakis.
\newblock I-vector-based speaker adaptation of deep neural networks for french
broadcast audio transcription.
\newblock In {\em Acoustics, Speech and Signal Processing (ICASSP), 2014 IEEE
	International Conference on}, pages 6334--6338, May 2014

\bibitem{IVECT1}
G.~Saon, H.~Soltau, D.~Nahamoo, and M.~Picheny.
\newblock Speaker adaptation of neural network acoustic models using i-vectors.
\newblock In {\em Automatic Speech Recognition and Understanding (ASRU), 2013
	IEEE Workshop on}, pages 55--59, Dec 2013.

\bibitem{SPEAKECODE1}
O.~Abdel-Hamid and H.~Jiang.
\newblock Fast speaker adaptation of hybrid nn/hmm model for speech recognition
  based on discriminative learning of speaker code.
\newblock In {\em Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE
  International Conference on}, pages 7942--7946, May 2013.

\bibitem{IVECT2}
Dehak, N.; Kenny, P.; Dehak, R.; Dumouchel, P.; Ouellet, P., "Front-End Factor Analysis for Speaker Verification," Audio, Speech, and Language Processing, IEEE Transactions on , vol.19, no.4, pp.788,798, May 2011

\bibitem{IVECT3}
Glembek, O.; Burget, L.; Matejka, P.; Karafiat, M.; Kenny, P., "Simplification and optimization of i-vector extraction," Acoustics, Speech and Signal Processing (ICASSP), 2011 IEEE International Conference on , vol., no., pp.4516,4519, 22-27 May 2011

\bibitem{ASR_BOOK}
Automatic Speech Recognition A Deep Learning Approach


\bibitem{LIN1}
V.~Abrash, H.~Franco, A.~Sankar, and M.~Cohen.
\newblock Connectionist speaker normalization and adaptation.
\newblock In {\em in Eurospeech}, pages 2183--2186, 1995.

\bibitem{LIBO}
Comparison of discriminative input and output transformation for speaker adaptation in the hybrid NN/HMM systems

\bibitem{FDLR}
Feature Engineering in Context Dependent Deep Neural Networks for conversational speech transcription


\bibitem{LHN}
R.~Gemello, F.~Mana, S.~Scanzio, P.~Laface, and R.~D. Mori.
\newblock Linear hidden transformations for adaptation of hybrid ann/hmm
models.
\newblock {\em Speech Communication}, 49(10–11):827 -- 835, 2007.

\bibitem{L2}
Xiao Li; Bilmes, J., "Regularized Adaptation of Discriminative Classifiers," Acoustics, Speech and Signal Processing, 2006. ICASSP 2006 Proceedings. 2006 IEEE International Conference on , vol.1, no., pp.I,I, 14-19 May 2006

\bibitem{FOOTPRINT}
Singular value decomposition based low-footprint speaker adaptation and personalization for deep neural network

\bibitem{RLL}
J.~Stadermann and G.~Rigoll.
\newblock Two-stage speaker adaptation of hybrid tied-posterior acoustic
models.
\newblock In {\em Acoustics, Speech, and Signal Processing, 2005. Proceedings.
	(ICASSP '05). IEEE International Conference on}, volume~1, pages 977--980,
March 2005.

\bibitem{OUTBIASES}
Adaptation of context-dependent deep neural networks for automatic speech recognition


\bibitem{ACTIVE}
Stadermann, J. and Rigoll, G. (2005). Two-stage speaker adaptation of hybrid tied-posterior acoustic models. In Acoustics, Speech, and Signal Processing, 2005. Proceedings. (ICASSP ’05). IEEE International Conference on, vol. 1, 977–980. 28, 29


\bibitem{PCA}
Fast Speaker Adaptation of artificial neural networks for automatic speech recognition ICASSP 2000


\bibitem{MLP_FACTORS}
M.~Ferras and H.~Bourlard.
\newblock Mlp-based factor analysis for tandem speech recognition.
\newblock In {\em Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE
	International Conference on}, pages 6719--6723, May 2013.



\bibitem{KALDI}
D.~Povey, A.~Ghoshal, G.~Boulianne, L.~Burget, O.~Glembek, N.~Goel,
M.~Hannemann, P.~Motlicek, Y.~Qian, P.~Schwarz, J.~Silovsky, G.~Stemmer, and
K.~Vesely.
\newblock The kaldi speech recognition toolkit.
\newblock In {\em IEEE 2011 Workshop on Automatic Speech Recognition and
	Understanding}. IEEE Signal Processing Society, Dec. 2011.
\newblock IEEE Catalog No.: CFP11SRW-USB.


\bibitem{PDNN}
Yaiie
\newblock PDNN






\bibitem{RLL1}
J.~Baxter.
\newblock Learning internal representations.
\newblock In {\em In Proceedings of the Eighth International Conference on
  Computational Learning Theory}, pages 311--320. ACM Press, 1995.

\bibitem{RTN1}
R.~Collobert and S.~Bengio.
\newblock Links between perceptrons, mlps and svms.
\newblock In {\em Proceedings of the Twenty-first International Conference on
  Machine Learning}, ICML '04, pages 23--, New York, NY, USA, 2004. ACM.

\bibitem{RTN}
A.~L. H. M. M. C. N. L. R. S. R.~T. Neto, J.
\newblock Speaker-adaptation for hybrid hmm-ann continuous speech recognition
  system.
\newblock In {\em Proceedings of the EUROSPEECH}, pages 2171--2174, 1995.

\bibitem{NORMDNN}
Y.~Tang, A.~Mohan, R.~C. Rose, and C.~Ma.
\newblock Deep neural network trained with speaker representation for speaker
  normalization.
\newblock In {\em Acoustics, Speech and Signal Processing (ICASSP), 2014 IEEE
  International Conference on}, pages 6329--6333, May 2014.

\bibitem{SPEAKECODE2}
S.~Xue, O.~Abdel-Hamid, H.~Jiang, and L.~Dai.
\newblock Direct adaptation of hybrid dnn/hmm model for fast speaker adaptation
  in lvcsr based on speaker code.
\newblock In {\em Acoustics, Speech and Signal Processing (ICASSP), 2014 IEEE
  International Conference on}, pages 6339--6343, May 2014.




\end{thebibliography}
\end{document}


