\documentclass[compress,mathserif]{beamer}
\usetheme{CambridgeUS}
\usecolortheme{lily}
\usepackage{epsfig}
\usepackage{graphics}
\usepackage{multirow}

% For handout
%\usepackage{handoutWithNotes}
%\pgfpagesuselayout{2 on 1 with notes landscape}[a4paper,border shrink=5mm]

\setbeameroption{hide notes}

%\mode<notes>
\mode<presentation>

%\usetheme{Boadilla}
%\usetheme{AnnArbor}



\setbeamerfont{largeFont}{series=\bfseries,size=\large}
\setbeamerfont{smallFont}{size=\small}
\title[Discriminative Training]{Discriminative Training of Acoustic Models for Automatic Speech Recognition}

\author[Wang Guangsen]{Wang Guangsen (HT080165A) }

\date{\today}

\institute[NUS SoC]{School of Computing \\ National University of Singapore}


\begin{document}

% the title page
\begin{frame}
	\titlepage
\end{frame}

\note[itemize]
{
\item Good morning, everyone! 
\item Today, I'm going to talk about discriminative training techniques for the automatic speech recognition system.
}

% the outline page


\begin{frame}
	\frametitle{Outline}
	\tableofcontents
\end{frame}
\note[itemize]
{
\item The agenda for this presentation is that: (show the outline page)
\item first I will briefly explain the research problem we are working on;
\item then we will review several dominant models and techniques proposed in the literature;
\item finally, we will show some preliminary experimental results, then propose some initial future plans for my research.
}

\section{Introduction}

\subsection{ASR Problem}
\begin{frame}{Automatic Speech Recognition (ASR)}
\begin{block}{Goal}
Accurately and efficiently convert a speech utterance to a text transcription of the spoken words, independent of speakers, devices, and environment.
\end{block}
\begin{block}{Standard Architecture}
\begin{figure}
\centerline{\epsfig{figure=figures/arcASR.eps,width=10cm}}
\end{figure}
\end{block}
\end{frame}

\note[itemize]
{
\item OK. now let's give a big picture of the ASR problem.
%\item To realise the speech based human-machine communication, automatic speech recognition is one of the most important technique should be conquered firstly.
\item The goal of speech recognition is to accurately and efficiently convert speech signals to text transcriptions, independent of speakers, devices, and environment.
\item The standard ASR system consists of 6 modules illustrated in the figure. Details about each part could be found in the report.
\item Here, I would like to emphasise the acoustic model module, which converts the features extracted from the audio signals to basic speech units, such as phonemes or syllables; it is the engine of the speech recognition system.
}

\subsection{Challenges}
\begin{frame}{Challenges}
%\begin{block}{ASR Research}
%\begin{itemize}
%\item ASR research emerged at 1950's.
%\item Although significant process has been made, ASR systems still fall far short of human speech perception in all but the simplest, most constrained tasks.
%\item ASR task usually has the properties of multi-speaker, continuous, large vocabulary and natural speech.
%\end{itemize}
%\end{block}
\begin{block}{Variabilities in ASR}
\begin{itemize}
	\item Speaker variability, i.e. inter-speaker variability, intra-speaker variability;
	\item Environmental noise;
	\item Different recording devices;
	\item Channel variabilities;
	\item ...
\end{itemize}
\end{block}
\end{frame}
\begin{frame}{Challenges}
\begin{block}{Challenges in ASR}
\begin{itemize}
\item The variabilities in the speech signal are handled by a \alert{\textbf{statistical pattern recognition approach}} in ASR research. 
 % 
\item Compared to many other pattern recognition problems, ASR is a very large problem (hundreds/thousands of hours of acoustic training data and billions of words of language model data) with the desire to have a real time performance.
\end{itemize}
\end{block}
\end{frame}
 
\note[itemize]
{
\item Speech recognition research has lasted for a long time. The first speech recogniser appeared in the early 1950s. 
\item Since then many systems have been built in the lab,
\item however, till now, there are only some limited commercial applications available for restricted tasks, like dictation;
\item in other words, we are still far from creating a machine that talks with us naturally.
\item The problem is most of the systems are not robust enough for realistic applications, because there exist many variabilities in ASR.
}

\subsection{Generative Training VS. Discriminative Training}
\begin{frame}{Generative Training}
%
\begin{block}{Description}
\begin{itemize} 
	\item Aims to estimate probability distribution of data for each class using density estimation methods;
	\item The estimated models are used for classification based on the Bayes decision rule (a.k.a.\ maximum \emph{a posteriori} decision rule);
	\item Leads to the optimal classifier as long as the presumed probability models indeed represent the true distribution of data.
\end{itemize}
\end{block}
\end{frame}

\note[itemize]
{
\item To solve a large pattern recognition problem like ASR, we need to build a robust system which can be learnt effectively and efficiently.
\item There exist two distinct categories of learning algorithms in machine learning: generative training and discriminative training.
\item Let's first look at the generative training approach.
}

\begin{frame}{Generative Training}
\begin{block}{Advantages}
\begin{itemize} 
	\item The inherent dependency or various relationship of data can be easily exploited by imposing all kinds of structure constraints, e.g. graphical models.
	\item Many efficient learning algorithms, such as the Expectation Maximization (EM) algorithm are available for estimating a variety of generative models, even for many rather complicated ones.
\end{itemize}
\end{block}
\begin{block}{Limitations}
\begin{itemize}
\item The genuine model assumption it imposes does not hold in ASR;%, i.e. HMM is not the correct model for speech production.
\item The training data is limited.
\end{itemize}
%
\end{block}
%
\end{frame}

\note[itemize]
{
\item Generative training has several advantages which makes it a very popular data modelling approach for classification and regression in many practical applications.
\item In ASR, the generative approach has been extensively explored for estimating various types of Hidden Markov Models (HMMs).
\item advantages....
\item however, despite these advantages, the generative approach does suffer from several limitations which affect its performance for large scale problems.
\item limitations. firstly...., secondly...
\item Because of these limitations, maximizing the likelihood does not necessarily improve the recognition performance. Therefore, \usebeamerfont{largeFont}\null\alert{discriminative training} is proposed as an alternative approach.
}


\begin{frame}{Discriminative Training}
%
\begin{block}{Description}
\begin{itemize} 
	\item Makes no explicit attempt to model the underlying distribution of data;
	\item Directly optimizes a mapping function from the input data to the desired output labels;
	\item Only the decision boundary is adjusted without forming a data generator in the entire feature space;
	\item The mapping function can be estimated using some criteria directly relevant to the ultimate classification and regression purpose.
\end{itemize}
\end{block}
%\begin{block}{Standard Discriminative Training Criteria for HMM}
%Many discriminative training criteria exist in ASR: 
%\begin{itemize}
%\item Maximum Mutual Information Estimation (MMIE);
%\item Minimum Classification Error (MCE);
%\item Minimum Phone Error/Minimum Word Error (MPE/MWE);
%\item Large Margin Estimation (LME). 
%\end{itemize}
%These criteria directly reflect the recognition error, optimization of mapping functions according to these criteria will guarantee an improved recognition performance.
%\end{block}
\end{frame}
\note[itemize]
{
\item Discriminative training (DT) has recently gained tremendous popularity in machine learning because it enjoys several advantages and outperforms generative approach in many ASR tasks.
\item descriptions......
\item  In this paper, we will explore DT on some ASR task to see the strengths and also address some issues concerning DT.
}



%\begin{frame}{Discriminative Training}
%\begin{block}{Limitations}
%Although discriminative training approach can outperform the generative training scheme in many cases, it also suffers from some limitations.
%\begin{itemize} 
%	\item It is not straightforward to deal with latent variables and exploit the underlying structure of data in discriminative models.
%	\item Computational complexity is considerably higher since it requires simultaneous consideration of data from all classes.
%	\item Above mentioned criteria normally lead to complex non-convex objective functions involving over millions of free parameters. Optimization becomes challenging since it can easily trapped in a local optimum.
%\end{itemize}
%\end{block}
%In spite of these limitations, DT is reported to .
%\end{frame}

\begin{frame}{Research Problem}
\begin{block}{\textbf{Problem}}
\textbf{Instead of standard Maximum Likelihood training approach, we want to train the acoustic models of  ASR systems based on some discriminative training criteria for a better recognition performance.
}
\end{block}
\end{frame}  
\note[itemize]
{
\item At the end of this section, I would like to reformulate our research problem in this paper.
}

%Before discussing the adaptation techniques let's have a brief review of the acoustic modelling in ASR systems. Two widely adopted acoustic models for speech recognition are Hidden Markov Model and Hybrid NN/HMM.

\section{ASR Systems Overview}

\begin{frame}
	\frametitle{Outline}
	\tableofcontents[currentsection]
\end{frame}

\note[itemize]
{
\item Before we begin discussing the discriminative training techniques, 
\item let's have a brief review of two widely adopted acoustic modelling approaches for speech recognition, 
\item the Hidden Markov Model based and the Hybrid Neural Network/Hidden Markov Model based methods.
}

% 
\subsection{HMM System}
\begin{frame}{Hidden Markov Model (HMM)}
\begin{block}{Definition}
\begin{itemize}
\item $\mathbf{O}=\{o_1,o_2,\ldots,o_M\}$ - An output observation alphabet;
\item $\mathbf{\Omega}=\{1,2,\ldots,N\}$ - A set of states representing the state space;
\item $\mathbf{A}=\{a_{ij}\}$ - A transition probability matrix;
\item $\mathbf{B}=\{b_i(u)\}$ - An output probability matrix;
\item $\mathbf{\pi}=\{\pi_i\}$ - An initial state distribution.
\end{itemize}
\end{block}
\begin{block}{Example}
\begin{figure}
\centerline{\epsfig{figure=figures/3statehmm_continuouse.eps,width=4cm}}
\end{figure}
\end{block}
\end{frame}

\note[itemize]
{
\item HMM is the predominant acoustic modelling approach in various ASR systems.
\item Simply speaking, a HMM is a collection of states connected by transitions and each state is related with a probabilistic emission function. 
\item In speech recognition, commonly used HMM structure is a 3-state left-to-right structure, as illustrated in the figure. Each state is characterised by a Gaussian Mixture distribution.
\item In the example, there are 3 states: 1, 2 and 3; each $a_{ij}$ defines the transition from state $i$ to state $j$ and each $b_i$ is the emission function for state $i$.
}

\begin{frame}{Hidden Markov Model (HMM)}
\begin{block}{Basic Problems}
\begin{enumerate}
\item \textbf{Evaluation Problem} - Given an observation sequence and a model, how to efficiently compute the likelihood of the model given this observation sequence? - \alert{\textbf{The Forward-Backward Algorithm}}
\item \textbf{Decoding Problem} - Given a model and a sequence of observations, how to find the most likely state sequence in the model that produces the observations? - \alert{\textbf{The Viterbi Algorithm}}
\item \textbf{Training Problem} - Given a model assumption and a set of observations, how to adjust the model parameters to maximise the likelihood of the observations given the model? - \alert{\textbf{The Baum-Welch Algorithm}}
\end{enumerate}
\end{block}
\end{frame}

\note[itemize]
{
\item There are 3 basic problems related with Hidden Markov Model;
\item First evaluation problem, how to evaluate the probability of the model generating given observation, which is solved by the forward algorithm;
\item Second decoding problem, how to find the most likely state sequence, the corresponding algorithm is the Viterbi algorithm;
\item The last one is learning problem, how to estimate the model parameters, which could be done efficiently using Baum-Welch algorithm.
}

\begin{frame}{Hidden Markov Model (HMM)}
\begin{block}{Assumptions}
\begin{itemize}
	\item \textbf{Instantaneous first-order transition}: the probability of making a transition to the next state is independent of the historical states, given the current state;
	\item \textbf{Conditional independence assumption}: the probability of observing a feature vector at time t is independent of the historical observations and states, given the current state.
	%\item \textbf{Maximum likelihood estimation}: poor discrimination between different speech units;
	%\item \textbf{Model topology}: the speech data distribution is assumed to be Gaussian Mixture Model (GMM);
	%\item \textbf{Acoustic context}: No acoustic context is used for training.
\end{itemize}
\end{block}
\end{frame}
\note[itemize]
{
\item For tractable inference and learning algorithms, HMM imposes several assumptions.
\item Firstly, secondly 
}


\begin{frame}{Limitations of HMM}
%although HMM works well for speech recognition, it suffers from some limitations because the constraints it imposes.
\begin{block}{Limitations}
\begin{itemize}
	\item \textbf{Maximum likelihood estimation}: Poor discrimination due to the training algorithm, which maximizes likelihoods instead of posterior probabilities (i.e., the models are trained independently of each other).
	\item \textbf{Model topology}: A prior topology of model has to be chosen, the speech data distribution is assumed to be Gaussian Mixture Model (GMM).
	\item \textbf{Acoustic context}: no acoustic context is used, therefore, possible correlations between successive acoustic vectors are overlooked.
\end{itemize}
\end{block}
\end{frame}

\note[itemize]
{
\item Although HMM works well for speech recognition, it suffers from some limitations because of the constraints it imposes. 
\item such as first-order transition, conditional independent assumption%, Gaussian Mixture model assumption and no context information, etc.
}

\subsection{Hybrid NN/HMM System}
\begin{frame}{Neural Network (NN)}
\begin{block}{Multilayer Perceptron (MLP)}
\begin{figure}
\centerline{\epsfig{figure=figures/mlparc.eps,width=4cm}}
\end{figure}
\end{block}

Other NN Architectures:
\begin{itemize}
\item Radial Basis Function
\item Recurrent Neural Network
\item Time Delay Neural Network
\end{itemize}
\end{frame}

\note[itemize]
{
\item Due to those limitations of HMM, different techniques have been investigated, among which the hybrid NN/HMM system is one of the most successful ones.
\item The commonly used NN is the multilayer perceptron, which has an input layer, an output layer and several hidden layers. The figure shows a simple 3-layer MLP.
\item researchers also investigated other kinds of NN into the hybrid system, such as Radial Basis Function, Recurrent Neural Network and Time Delay Neural Network etc.
\item due to the simplicity, MLP is much more popular.
}

\begin{frame}{Hybrid NN/HMM System}
\begin{block}{Estimating HMM Emission Probabilities}
\begin{figure}
\centerline{\epsfig{figure=figures/nnhmm.eps,width=5cm}}
\end{figure}
\end{block}

Other Hybrids such as: emulate HMMs, global optimisation, vector quantisation for discrete HMMs...
\end{frame}

\note[itemize]
{
\item Also different ways of incorporating NN into speech recognition framework were proposed in the literature, the most successful one is using NN to estimate HMM emission probabilities;
\item As shown in the figure, in traditional HMM system, the emission probabilities are generated by Gaussian Mixture Models;
\item while in the hybrid system we use NN to predict the emission probability, which could discriminate between states and make use of the acoustic context to alleviate HMM's limitations.
}

% 
\section{GMMs Based HMM System Discriminative Training Criteria}

\begin{frame}
	\frametitle{Outline}
	\tableofcontents[currentsection]
\end{frame}

\begin{frame}{Discriminative Training Techniques for HMM Systems}
\begin{itemize}
\item Maximum Mutual Information Estimation (MMIE)
\item Minimum Classification Error (MCE)
\item Minimum Phone Error (MPE)
\item Large Margin Estimation (LME) 
\end{itemize}
\end{frame}
\note[itemize]
{
\item Several criteria are employed in the discriminative training of the GMMs based HMM systems, namely ......
\item We will discuss them in details in the following of this section. 
}

%%\begin{frame}{Discriminative Training Techniques for HMM Systems}
%\begin{block}{Notation}
%\begin{itemize}
%\item $\mathbf{X_r}$ - sequence $x_{r,1}, x_{r,2},\ldots, x_{r,T_r}$ acoustic observation vectors;
%\item $\mathbf{W_r}$ - spoken word sequence $w_{r,1}, w_{r,2}, \ldots, w_{r,N_r}$ in training utterance $r$;
%\item $\mathbf{W}$ - any utterance sequence;
%\item $\mathbf{p_{\theta}(X_r|W)}$ - acoustic emission probability/acoustic model;
%\item $\mathbf{\theta}$ - set of all parameters of the acoustic model.
%\item $\mathbf{M_r}$ - set of competing word sequences to be considered.
%\end{itemize}
%\end{block}
%\end{frame}

\begin{frame}{Maximum Mutual Information Estimation (MMIE)}
\begin{block}{Description}
\begin{itemize}
\item Derived from the perspective of information theory; 
\item Attempts to maximize the mutual information between the training data and their corresponding labels.
\end{itemize}
\end{block}
\begin{block}{Formula}
\[
F_{MMI}(\theta)=\sum_{r=1}^R{\log\frac{P_{\theta}(X_r|M_{w_r})P(w_r)}{\sum_{\hat{w}}P_{\theta}(X_r|M_{\hat{w}})P(\hat{w})}}
\]
where $M_{w_r}$ denotes the HMMs corresponding to the transcription of utterance $w_r$, $P(w_r)$ is the probability of sentence $w_r$ which is determined by a language model, and the denominator sums over all possible word sequences $\hat{w}$.
\end{block}
\end{frame}

\note[itemize]
{
\item From this formula, we can see in order to maximize this objective function, we want to increase the probability of the numerator model while decrease the denominator form, i.e. we are training the system so that the correct class (corresponding to the numerator model) will have a higher probability at the same time, the probabilities of its competing classes (corresponding to the denominator form) are suppressed.
\item Besides, the numerator and denominator model can be efficiently encoded as lattice structures which includes the acoustic model score, language model score, and other alignment information.
}

%\begin{frame}{Maximum Mutual Information Estimation (MMIE)}
%\begin{block}{More Discussions}
%\begin{itemize}
%\item Considers competing classes and therefore decision boundaries;
%\item Necessitates set of competing classes on training data;
%\item Optimization for standard modelling (HMMs, mixture distributions): Gradient Descent (GD), Extendend Baum-Welch (EBW).
%\end{itemize}
%\end{block}
%\end{frame}

\begin{frame}{Minimum Classification Error (MCE)}
\begin{block}{Description}
The key idea of MCE is to approximate the empirical classification errors in training data as a smoothed and differentiable objective function.
\end{block}

%For each training data $X_r$, the mis-classification measure is firstly constructed as follows:
\begin{block}{Mis-classification Measure for $X_t$}
\[
d_r(X_t,\Theta)=-\log[p^k(X_t|W_t)p^k(W_t)]+\log[\sum_{w_t\neq W_t}p^k(w_t)p^k(X_t|w_t)],
\]
where $k$ is a smoothing factor, log-sum is introduced as soft-max to determine the most competing hypothesis for $X_t$ from all competing hypotheses. 
With $k$ properly set, we have $d_r(X_t,\Theta)\leq0$ if $X_t$ is correctly recognized and $d_r(X_t,\Theta)\geq 0$ otherwise. 
\end{block}
\end{frame}

\begin{frame}{Minimum Classification Error (MCE)}
\begin{block}{Smoothed Error Count}
The above mentioned mis-classification measure is plugged into a sigmoid function to compute the so-called smoothed error count for $X_t$ as follows:
\[
l_r(d_r(X_t,\Theta))=\frac{1}{1+e^{-d_r(X_t,\Theta)}}.
\]
\end{block}
\begin{block}{MCE Criterion}
Finally, MCE aims to minimize the total smoothed error counts summed over the whole training set. The MCE criterion can be then represented as follows:
\[
F_{MCE}=\arg\min_{\Theta}\sum_{t=1}^{T}l_r(d_r(X_t,\Theta)).
\]
\end{block}
\end{frame}

\note[itemize]
{
\item now let's move to another criterion, minimum classification error.
\item The key idea ....
\item Firstly a mis-classification measure is defined for each training sentence. **formula** Note in this formula, we exclude the correct class in the second term on the RHS. Essentially, this mis-classification measure is the differences between the two joint probabilities. first term is the joint probability of the sentence with the correct model, the second term is the  log sum of joint probability between the sentence and all the competing classes which is a softmax. Therefore, the mis-classification measure can be interpreted as the score differences between the correct model and the most probable competing class.
\item Then this mis-classification measure is embedded into a sigmoid function to form a differentiable function for further optimization.
\item at last, MCE aims....
}

\begin{frame}{Minimum Phone/Word Error (MPE/MWE)}
\begin{block}{Description}
MWE/MPE attempts to minimize model-based expected word/phone error on training data to directly reflect the recognition error. 
\end{block}

\begin{block}{MPE Criterion}
\[
F_{MPE}=\arg\max_{\Theta}\sum_{t=1}^{T}\frac{\sum_{s_t}p^k(s_t)p^k(X_t|s_t)A(S_t,s_t)}{\sum_{s_t}p^k(s_t)p^k(X_t|s_t)},
\]
where $A(S_t,s_t)$ is called raw accuracy count.
\end{block}
\begin{block}{More Details}
\begin{itemize}
\item $A(S_t,s_t)$ is usually approximated, but exact case based on edit (Levenshtein) distance is also possible.
\item Regularization techniques, e.g. I-smoothing is necessary to avoid over-training.
\end{itemize}
\end{block}
\end{frame}

\note[itemize]
{
\item MWE/MPE attempts.....
\item Let's look at the formula of MPE. Maximizing this formula will maximize the expected phone accuracies of all the training sentences, therefore minimize the phone errors.
\item $A(S_t,s_t)$ is usually approximated....
\item Regularization...... I-smoothing is an interpolation technique for introducing maximum likelihood trained components into the discriminative training to solve the possible data sparse problem and also useful for avoiding over-training which can occur easily in DT.
}

\begin{frame}{Large Margin Estimation (LME)}
\begin{block}{Description}
LME aims to estimate HMM parameters based on the principle of maximizing minimum margin of training data towards a better generalization capability and a more robust classifier.
\end{block} 

\begin{block}{Margin Definition}
The separation margin for each training sentence, $X_t$ is defined as follows:
\[
d(X_t|\Theta)=\log[p^k(X_t|W_t)p^k(W_t)]-\log[\sum_{w_t\neq W_t} p^k(w_t)p^k(X_t|w_t)],
\]
Obviously, $d(X_t|\Theta)>0$ if and only if $X_t$ is correctly recognized.
\end{block}

\begin{block}{LME Criterion}
%
\[
F_{LME}=\arg\max_{\Theta}\,\min_{t}\, d(X_t|\Theta).
\]
\end{block}
\end{frame}

\note[itemize]
{
\item The last is Large Margin Estimation (LME) criterion which aims to ....
\item the margin between each training sentence is defined in *** formula***, this formula is similar to the mis-classification measure in MCE training. The first term is ...., the second term is ....
\item The idea of large margin training leads to estimating the HMM model parameters based on the criterion of maximizing the minimum margin of all training data as follows. Maximizing this margin will push all the samples far away from the decision boundary which will result in a more robust classifier.
}

\section{Optimization for GMMs Based Discriminative Training Criteria}

\begin{frame}
	\frametitle{Outline}
	\tableofcontents[currentsection]
\end{frame}

\note[itemize]
{
\item We have given several DT criteria in the last section. We now turn our topic to how to optimize these criteria.
\item Above mentioned criteria normally lead to complex non-convex objective functions involving over millions of free parameters which do not have closed-form solutions. Therefore, gradient based method is usually adopted.
\item Firstly, we talk about the extended Baum-Welch Algorithm, which is an extension of the algorithm widely used in Maximum Likelihood training for HMMs.
}


\begin{frame}{Extended Baum-Welch Algorithm }
The Baum-Welch algorithm widely used in Maximum Likelihood training for HMMs is extended for the optimization problem of MMIE.

\begin{block}{Re-Estimation Formulae}
%The re-estimation formula for the mean and covariance for a state $j$, mixture component $m$, $\mu_{jm}$ and $\sigma_{jm}^2$ is defined as follows:
\begin{align}
{\hat{\mu}}_{jm}=\frac{\{ \theta_{jm}^{num}(O)-\theta_{jm}^{den}(O)\}+D\mu_{jm}}{\{\gamma_{jm}^{num}-\gamma_{jm}^{den}+D\}}, \\
{\hat{\sigma}}_{jm}^2=\frac{\{ \theta_{jm}^{num}(O^2)-\theta_{jm}^{den}(O^2)\}+D(\sigma_{jm}^2+\mu_{jm}^2)}{\{\gamma_{jm}^{num}-\gamma_{jm}^{den}+D\}}-{\hat{\mu}}_{jm}^2,\\
\theta_{jm}(O)=\sum_{r=1}^{R}{\sum_{t=1}^{T_r}O^r(t)\gamma_{jm}^r(t)} ,\\
\theta_{jm}(O^2)=\sum_{r=1}^{R}{\sum_{t=1}^{T_r}{O^r(t)}^2\gamma_{jm}^r(t)},\quad
\gamma_{jm}=\sum_{t=1}^T\gamma_{jm}(t).
\end{align}
\end{block}
\end{frame}

\note[itemize]
{
\item The re-estimation formula for the mean and covariance for a state $j$, mixture component $m$, $\mu_{jm}$ and $\sigma_{jm}^2$ is defined as follows:
\item ******formula
\item ${\theta_{jm}(O)}$ and ${\theta_{jm}(O^2)}$ are sums over time of the observation data and squared data, weighted by their posterior probability of the Gaussian mixture component $m$ of state $j$
\item The sum over time of the Gaussian posterior probability is the Gaussian occupancy, $\gamma_{jm}=\sum_{t=1}^T\gamma_{jm}(t)$.
}


%\begin{frame}{Extended Baum-Welch Algorithm }
%\begin{block}{Implementation Issues}
%\begin{itemize}
%\item $D$ is a smoothing constant which is an important implementation issue in EBW.
%\item If set too large, the training converges very slowly, if set too small, the updates may not increase the objective function on each iteration.
%\item The Gaussian specific $D_{jm}$ is set at the maximum of (i) twice the value necessary to ensure positive variance updates for all dimensions of the Gaussian; or (ii) a global constant $E$ multiplied by the denominator occupancy $\gamma_{jm}^{den}$.
%\end{itemize}
%\end{block}
%\end{frame}


\begin{frame}{Gradient Descent}
%The gradient descent method is a simple and general scheme that can be flexibly applied to any differential objective functions.

\begin{block}{Description}
\begin{itemize}
\item Computes the gradient of the loss function for each training utterance $O_n$;
\item Updates parameters in the opposite direction. 
\item Learning process is controlled by a learning rate $\epsilon_{n}$ which decreases as the token presentation index $n$ increases.
\end{itemize}
\end{block}
\begin{block}{Formula}
%Given any differential objective function $F(\Theta)$, the general form of gradient descent search can be represented into an iterative updating formula along the gradient direction:
\[
\theta^{n+1}=\theta^{n}-\epsilon_{n}\nabla_{\theta}F(\theta^{n}),
\]
where $\theta^{n}$ denotes the set of model parameters in iteration $n$. 
\end{block}
\end{frame}

\note[itemize]
{
\item The gradient descent method is a simple and general scheme that can be flexibly applied to any differentiable objective functions. ***Description*******
\item Given any differentiable objective function $F(\Theta)$, the general form of gradient descent search can be represented into an iterative updating formula along the gradient direction:
}

\begin{frame}{Limitations of Gradient Descent}
%Although its simplicity, gradient descent method suffers from several limitations:
\begin{itemize}
\item Very slow to converge since it only explores the first-order derivative during optimization;
\item A uniform learning rate $\epsilon_{n}$ may not be appropriate for all parameters ;
\item To ensure the convergence for every parameter, an extremely small learning rate has to be used, which in turn leads to very slow convergence overall.
\end{itemize}
%In view of these limitations, \alert{\textbf{second order derivative}} approaches are proposed.
\end{frame}

\note[itemize]
{
\item Despite its simplicity, gradient descent method suffers from several limitations:
\item LIMITATIONS
\item In view of these limitations, \alert{\textbf{second order derivative}} approaches are proposed.
}

\begin{frame}{Second Order Derivative Methods}
\begin{block}{Traditional Newton's Method}
\begin{itemize}
\item If the objective function can be approximated by a quadratic function and its Hessian Matrix is positive definite, the optimum point $\Theta^{opt}$ can be reached from any starting point $\Theta^{0}$ in one single step along the gradient direction;
\item This direction can be calculated from Hessian Matrix:
\[
\Theta^{opt}=\Theta^{0}-H^{-1}\nabla{}F(\Theta)
\]
\end{itemize}
\end{block}
\end{frame}

\begin{frame}{Second Order Derivative Methods}
\begin{block}{Practical Issues for Newton's Method} 
\begin{itemize}
\item No guarantee that Hessian Matrix is positive definite;
\item Hessian Matrix is usually very large in size, i.e., the square of the number of model parameters, therefore, diagonal approximation of the true Hessian Matrix is usually adopted. 
\end{itemize}
\end{block}
\end{frame}
\note[itemize]
{
\item According to traditional Newton's method, blah.....
\item however, there are two problems when applying this method in practice. ********Limitations*****
\item There are two widely adopted second-order derivative optimization methods, namely Quickprop, Rprop
}

%\begin{frame}{Quickprop}
%\begin{block}{Description}
%\begin{itemize}
%\item Quickprop is a batch-oriented second-order optimization method which loosely based on Newton's method;
%\item Diagonal Hessian Matrix is used which can be efficiently updated over iterations. 
%\end{itemize}
%\end{block} 
%\end{frame}

\begin{frame}{Quickprop}
\begin{block}{Formulae}
\begin{itemize}
\item %The $i$-th diagonal element of Hessian at $n$-th iteration can be approximated:
\[
H_{ii}=\nabla_{ii}^2F(\Theta)=\frac{\partial^2F(\Theta^n)}{\partial\theta_i^2}\approx\frac{\frac{\partial F(\Theta^n)}{\partial\theta_i}-\frac{\partial F(\Theta^{n-1})}{\partial\theta_i}}{\Delta\theta_i^{n-1}},
\]
where $\Delta\theta_i^{n-1}$ denotes the update step size of $i$-th parameter, $\theta_i$, at iteration $n-1$. 

\item %Substituting this approximation into Newton's updating formula, we derive the Quickprop updating formula for $i$-th parameter, $\theta_i$:
\[
\theta_i^{n+1}=\theta_i^{n}-\Delta\theta_i^n\nabla F(\theta^n),
\]
where $\Delta\theta_i^n$ is the step size for $\theta_i$; 
\item% $\Delta\theta_i^n$ can then be calculated based on Hessian matrix:
\[
\Delta\theta_i^n=\frac{\Delta\theta_i^{n-1}}{\frac{\partial F(\Theta^n)}{\partial\theta_i}-\frac{\partial F(\Theta^{n-1})}{\partial\theta_i}}.
\]
\end{itemize}
\end{block}
\end{frame}

\note[itemize]
{
\item Quickprop is a batch-oriented second-order optimization method which is loosely based on Newton's method;
\item A diagonal Hessian Matrix is used which can be efficiently updated over iterations. 
\item The $i$-th diagonal element of the Hessian at the $n$-th iteration can be approximated: *****, where $F(\Theta^n)$ is the objective function of interest in iteration $n$.
\item Substituting this approximation into Newton's updating formula, we derive the Quickprop updating formula for $i$-th parameter, $\theta_i$: $\theta_i^{n+1}=\theta_i^{n}-\Delta\theta_i^n\nabla F(\theta^n)$
}

\begin{frame}{Rprop}
\begin{itemize}
\item The sign of the derivative is used to determine the update direction instead of using the magnitude aiming at eliminating possible negative effects of the magnitude of the partial derivative.
\item Each time the partial derivative with respect to a parameter changes its sign, it is considered that the last update was too large and missed a local minimum. Therefore the update value is then reduced by a certain factor.
\item If the sign stays the same, this represents that the parameter is in a shallow region of the error surface, so the update value should be increased by another factor to speed up the convergence.
\end{itemize}
\end{frame}

\note[itemize]
{
\item Another second order optimization method is called Rprop, which also uses different step sizes to update different model parameters.
\item In contrast, the updating step size in Rprop is determined based only on the sign of the derivative, not the magnitude.
\item  Next comes to the discriminative training of the hybrid NN/HMM system. As discussed previously, in the hybrid system we adopted, NN is used to estimate the HMM state emission probabilities. Thus the discriminative ability of this hybrid system relies on the discriminative nature of neural networks.

}

% Next comes to the discriminative training of the hybrid NN/HMM system. As discussed previously, in the hybrid system we adopted, NN is used to estimate the HMM state emission probabilities. Thus the discriminative ability of this hybrid system relies on the discriminative nature of neural networks.

%\section{Discriminative Training in Hybrid NN/HMM System}

%\begin{frame}
%	\frametitle{Outline}
	%\tableofcontents[currentsection]
%\end{frame}

%\begin{frame}{Motivations for Acoustic Modelling using NN}
%NNs have several advantages which make them particularly attractive for ASR:
%\begin{block}{Motivations}
%\begin{itemize}
%\item Naturally accommodate discriminant training;
%\item Can incorporate multiple constraints and automatically find the optimized constraint combination for classification;
%\item Do not have the independent assumptions for features as opposed in HMM;
%\item Can accommodate contextual information and feedback in its architecture to get a better performance.
%\end{itemize}
%\end{block}
%\end{frame}

\note[itemize]
{
\item NNs have several advantages which make them particularly attractive for ASR acoustic modelling: Firstly, secondly,....
\item By using NN as our acoustic modelling approach, we have equipped our hybrid system with the ability to discriminate between classes which is more relevant to the classification task.
\item Now how can we use NN to estimate the emission probability of HMM?
}

%\begin{frame}{Estimating HMM Emission Probabilities with NN}
%\begin{itemize}
%\item Given the basic HMM equations, we would like to estimate the emission probability $p(x_n|s_k)$;%, that is, the probability of the observed data vector $x_n$ given a HMM state $s_k$.
%\item NNs can be trained to produce the posterior probability, $p(s_k|x_n)$, of the HMM state given the acoustic data;
%HMM requires the likelihood of the data, t
%\item %These NN posterior probabilities can be converted back to emission probabilities using Bayes' rule:
%\[
%\frac{p(x_n|s_k)}{p(x_n)} = \frac{p(s_k|x_n)}{p(s_k)};
%\]
%where $p(s_k)$ is the class prior, i.e. the relative frequencies of each class determined from the class labels;
%The scaled likelihood 
%\item $\frac{p(x_n|s_k)}{p(x_n)}$ (a.k.a scaled likelihood) can be used as an emission probability for HMM.
%\end{itemize}
%\end{frame}

\note[itemize]
{
\item Given the basic HMM equations, we would like to estimate the emission probability $p(x_n|s_k)$, that is, the probability of the observed data vector $x_n$ given a HMM state $s_k$.
}


\section{Preliminary Results}

\begin{frame}
	\frametitle{Outline}
	\tableofcontents[currentsection]
\end{frame}

\note[itemize]
{
\item After briefly review major discriminative training techniques for both standard HMM and hybrid system, 
\item we now come to the experiment part. In this section, we will explore how the hybrid NN/HMM system can be applied in real ASR tasks, and what are the strengths and limitations. 
\item Then based on the analysis of experiment results, we then give some proposals in our future work to address the issues of the hybrid system.
\item All our experiments are conducted on the well known Wall Street Journal (WSJ0) dataset. A summary of the WSJ0 dataset is illustrated in Table~\ref{tbl:wsj0sum}.
}

\begin{frame}{Database Description}
\begin{block}{WSJ0 Description}
%All our experiments are conducted on the well known Wall Street Journal (WSJ0) dataset. A summary of the WSJ0 dataset is illustrated in Table~\ref{tbl:wsj0sum}. 
\begin{table}
	\caption{Summary of the WSJ0 training and testing sets.}
	\label{tbl:wsj0sum}
	%\begin{center}
		\begin{tabular}{|c||c|c|c|}	
			\hline
			Data Set & Speaker Number & Utterance Number & Data Length(hours) \\
			\hline
			train & 92 & 9889 & 18.84 \\
			\hline
			test\_A & 20 & 368 & 0.73\\
			test\_B & 20 & 374 & 0.67 \\
			\hline
		\end{tabular}		
	%\end{center}	
\end{table}
\end{block}

%In this session, We will present some preliminary results on discriminative training using Neural Networks under NN/HMM framework for both phone and word recognition.

\end{frame}

\begin{frame}{Experiment Setup}
\begin{block}{Speech Feature}
The phoneme set of WSJ0 contains 40 monophones. We use 12 Mel Cepstral Coefficients, plus the energy term, and their first and second order derivatives as the feature.
\end{block}
\begin{block}{MLP Structure for Monophone System}
A simple three-layer MLP referred to as MLP-mono is used:
\begin{description}
\item[Input layer] A window of 15 frames are augmented ($39\times15$) to form a 585-dimension input feature vector which corresponds to the 585 input units; %By doing this, the neighbouring frame contexts of a specific frame are considered during training which will benefit the recognition performance.
\item[Hidden layer] A hidden layer of 2000 units is used;
\item[Output layer] The 120 output units correspond to the posterior probabilities of the 40 3-state monophone states. 
\end{description}
\end{block}
\end{frame}

\note[itemize]
{
\item Following is the setups of our experiment.
\item features...
\item We adopt a three layer MLP to predict the posteriors of each monophone state.
\item MLP description
}

\begin{frame}{Context Dependent Triphone Modelling}
\begin{block}{Triphone Modelling}
\begin{itemize}
\item Most of the state of the art Large Vocabulary Continuous Speech Recognition (LVCSR) systems adopt triphone acoustic models;
\item Triphone modelling takes both its left and right neighbour monophone as its contexts thus can model the speech more accurately.
\end{itemize}
\end{block}
\begin{block}{Context Dependent Acoustic Modelling Scheme}
\begin{itemize}
%\item 
\item According to some acoustic expert knowledge, a decision tree is built to cluster these individual triphone states into clusters.
\item Based on these clusters, we can build a second NN set to incorporate triphone context information: for each monophone output state in MLP-mono, we build an NN to predict all the cluster elements of this state.
\end{itemize}
\end{block}
\end{frame}

\note[itemize]
{
\item Besides predicting the monophone state posteriors, we also use MLPs for context dependent triphone modelling.
\item Most of the state ....
\item Triphones take ......
\item However, the number of triphones that need modelling is usually in the tens of thousands, which requires a lot of training data for robust acoustic modelling. Therefore certain techniques in the literature are adopted to reduce the triphone set size.
}

%\begin{frame}{Combining Context Independent NNs with Context Dependent NNs}
%An interpolation factor $\alpha$ is introduced to combine the posterior probabilities of these two sets of NNs. 
%\begin{block}{Interpolation}
%The final posterior probability of a cluster is calculated as:
%\[
%P_{cluster}=P_{first\_set\_posterior}*\alpha+P_{second\_set\_posterior}*(1-\alpha).
%\]
%\end{block}
%\end{frame}

\note[itemize]
{
\item For the triphone based system, we have two sets of NNs thus two set of posterior probabilities. We need to combine two sets of posteriors to get the final posterior probability of one specific triphone.
\item An interpolation factor ...
}
\begin{frame}{Phone Recognition on WSJ0}
\begin{block}{Monophone Model Performance: Phone Error Rate (PER)}
\begin{table}
	\caption{PER (\%) for HMM and NN/HMM monophone ASRs on WSJ0.}
	%\begin{center}
		\begin{tabular}{|c||c|}	
			\hline
			System & PER \\
			\hline
			HMM & 43.71 \\
			NN/HMM & 35.97 \\
			MMIE & 39.52 \\
			\hline
		\end{tabular}		
	%\end{center}	
\end{table}
\end{block}

\begin{block}{Triphone Model Performance: Phone Error Rate (PER)}
\begin{table}
	\caption{PER (\%) for HMM and NN/HMM triphone ASRs on WSJ0.}
	\begin{center}
		\begin{tabular}{|c||c|}	
			\hline
			System & PER \\
			\hline
			HMM & 27.47 \\
			NN/HMM & 21.46 \\
			\hline
		\end{tabular}		
	\end{center}	
\end{table}
\end{block}
\end{frame}


\note[itemize]
{
\item After we obtain the posterior probabilities, we convert them to the scaled likelihood as we discussed for the HMM system, then decoding can be done to reveal the phone sequences.
\item In the first table, we report the monophone system performance in terms of phone error rate. We can see our hybrid system has the best performance. 
\item As we expected, the maximum likelihood trained HMM has the worst performance. The MMIE trained HMM system has a better performance than ML trained HMM due to its discriminative nature. 
\item Our hybrid system has an even better performance than MMIE, this shows that our hybrid system has a better frame discriminative power than MMIE.
\item same pattern shows in the triphone based system.
}

\begin{frame}{Word Recognition Results on WSJ0}
\begin{block}{Bigram Language Model Performance}
\begin{table}
	\caption{Triphone System WER (\%) on WSJ0 using Bigram.}
	\label{tbl:wsj0word}
	%\begin{center}
		\begin{tabular}{|c||c|}	
			\hline
			Acoustic Models & WER \\
			\hline
			NN/HMM &13.12 \\
			ML & 10.14\\
			MMIE & 9.39\\
  			MPE & 9.71\\
			\hline
		\end{tabular}		
%	\end{center}	
\end{table}
\end{block}
\end{frame}

\begin{frame}{Word Recognition Results on WSJ0}
\begin{block}{Trigram Language Model Performance}
\begin{table}
	\caption{Triphone System WER (\%) on WSJ0 using Trigram.}
	\label{tbl:hmmword}
%	\begin{center}
		\begin{tabular}{|c||c|}	
			\hline
			Acoustic Models & WER \\
			\hline
			NN/HMM & 11.85 \\
			ML & 7.40 \\
			MMIE & 6.32\\
  			MPE & 6.88\\
			\hline
		\end{tabular}		
%	\end{center}	
\end{table} 
\end{block}
\end{frame}

\note[itemize]
{
\item Our hybrid system has the best performance for the phone recognition task. We then explore how it performs on the word recognition tasks.
\item The first table shows the word error rate using a bigram language model with 4 acoustic model configurations. Surprisingly, our hybrid system has the worst performance for this task; it is even worse than the maximum likelihood trained HMM system.
\item On the other hand, discriminative training criteria for the standard GMM based HMM system, like MMIE and MPE, have the best performance although they have a poorer phone recognition performance.
}

\begin{frame}{Conclusion}
\begin{itemize}
\item NN/HMM hybrid system has the best phone recognition result compared to maximum likelihood training and some other HMM discriminative training criteria;
\item However the frame discriminative trained NN/HMM system has the worst word level performance.
\end{itemize}
\end{frame}

\subsection{Discussions}
\begin{frame}{Standard Training Criteria for NNs}
%There are two commonly used training criteria for a conventional NN, namely \emph{Mean Square Error} (MSE) criterion and relative entropy criterion.
\begin{block}{Minimum Squared Error (MSE)}
\[
E_e=\frac{1}{2}\sum_{n=1}^N \sum_{k=1}^K[d_k(x_n)-g_k(x_n,\Theta)]^2
\]
\end{block}
\begin{block}{Relative Entropy}
\[
E_e=\sum_{n=1}^N \sum_{k=1}^K d_k(x_n) \ln \frac{d_k(x_n)}{g_k(x_n,\Theta)}
\]
\end{block}
where $g(x_n,\Theta)=(g_1(x_n,\Theta),\ldots,g_k(x_n,\Theta),\ldots,g_K(x_n,\Theta))$ represents the actual MLP output vector and $d(x_n)=(d_1(x_n),\ldots,d_k(x_n),\ldots,d_K(x_n))$ represents the desired output vectors, $K$ is the total number of classes, and $N$ is the total number of training samples.
\end{frame}
\begin{frame}{Standard Training Criteria for NNs}
\begin{block}{Discussions}
\begin{itemize}
\item Updating the parameters according to these two criteria can guarantee a frame accuracy increase unless a local maximum is reached, which results in a better phone recognition result since the frames are directly related to the phone states. 
\item For word level recognition, these two criteria cannot reflect word recognition errors directly, thus word level performance does not gain from the increases of frame accuracy.
\end{itemize}
\end{block}
\end{frame}

\note[itemize]
{
\item Why does our hybrid system have the best phone recognition performance while performing worst for word recognition? To address this question, let's first review the two training criteria commonly used for NNs, namely the minimum squared error (MSE) and the cross entropy criterion.
\item MSE attempts to minimize the errors between the output of the NN and its corresponding label, while the cross entropy criterion aims at minimizing the cross entropy between them.
\item updating these two ....
\item For word ....
}


\begin{frame}{Limitations of HMM System Based Discriminative Training Criteria}
%Training criteria like MMIE, MCE, MWE are designed for standard HMM models and work well for speech recognition. However, they also suffer from several problems:
\begin{block}{Limitations}
\begin{itemize}
\item They result in large-scale nonconvex optimization problems, and these problems do not have a closed-form solution for their objective functions;% Therefore, gradient optimization or approximation schemes are usually adopted. However, these schemes can be easily trapped in a shallow local optimum point in the complicated surface of the objective function.
\item They usually have slow convergence speed. %Second order derivative optimization methods, e.g., Quickprop, Rprop, are adopted to explore the Hessian Matrix to speed up the convergence. However, Hessian Matrix is usually too large in size, diagonal approximation of the true Hessian Matrix is usually adopted.
\item They all adopt GMMs as their acoustic models. Feature independence is assumed for a tractable learning problem.
\end{itemize} 
\end{block}
\end{frame}

\note[itemize]
{
\item Although HMM system based discriminative training criteria have a better word level performance than the frame-discriminant trained NN/HMM system, they suffer from several limitations.
\item Firstly,..., secondly, thirdly,....
}

\begin{frame}{Advantages of NN Acoustic Modelling}
Neural Networks are a useful alternative to traditional GMM acoustic modelling in speech recognition in several aspects:
\begin{itemize}
\item They relax the assumptions about the distribution of the input features, allowing a flexible front-end feature extraction; %NNs can automatically exploit the independence of the features, therefore, no independence assumptions are imposed.
\item Evidences from multiple feature streams can be easily combined in a single NN/HMM system thanks to the posterior probabilities estimation;
\item NNs can make good use of long span feature vectors, e.g., they can take several consecutive frames and augment them into a new feature vectors to incorporate the context information among frames.
\end{itemize}
\end{frame}

\note[itemize]
{
\item NNs have shown their strength in the phone recognition task, although they have the worst performance in the word recognition task due to their frame-based discriminative training criteria. NNs themselves enjoy several advantages which make them a very promising acoustic modelling approach.
\item firstly, secondly, thirdly..
\item It would be interesting to combine the advantages of NNs and other HMM discriminative criteria, e.g. replacing the MSE or cross entropy training criteria in NNs with sequential classification criteria like MPE, MMIE. Actually this is the future work we want to carry on.
}

\section{Future Work}

\begin{frame}
	\frametitle{Outline}
	\tableofcontents[currentsection]
\end{frame}



\begin{frame}{Problems}
\begin{itemize}
\item Although frame discriminative trained NN/HMM hybrid system has the best phone recognition result, it performs worst on word level recognition task;
\item Although discriminative training schemes for standard HMM systems like MMIE, MPE suffer from several limitations, they have a better word recognition performance than the frame-based NN/HMM system because they adopt a sequence classification based training criteria which are more closely related to word error rate;
\item We believe that it is possible to replace the frame discriminative training criteria by the sequential classification criteria in NN/HMM system to improve the word recognition performance.
\end{itemize}
%Thus, we would like to extend the current work in two aspects to improve the NN/HMM system performance on word recognition task.
\end{frame}

\note[itemize]
{
\item Next, I'm going to give a brief description of my future plan.
\item From our experiment, we find that Although frame ....
\item Although MMIE MPE suffer from ....
\item Therefore, we believe that it .....
\item we would like to extend the current work in two aspects to improve the NN/HMM system performance on word recognition task.
}

\begin{frame}{NNs for Feature Transformation}
%In our first approach, we treat NN as a way of feature transformation.
\begin{figure}[!htbp]
  \begin{center}
    \leavevmode
      \includegraphics[width=6.3cm]{figures/feature_transform.eps}
    \caption{Illustration of feature transform scheme.}
    \label{dia}
  \end{center}
\end{figure}
\end{frame}

\note[itemize]
{
\item First approach is to use NN as a way of feature transformation. The scheme is illustrated in the digram.
\item We can first train an NN system as before; instead of applying it on the test data, we apply this trained NN to the training data and use the activations as new features for the HMM system.
\item Since these new features are obtained from the NN output, they will have a better discriminative power than the raw features, which may benefit the system performance.
\item Meanwhile, we can still train the HMM system as before; discriminative training criteria like MPE, MMIE can also apply.
}


\begin{frame}{Sequence Classification Based NN Training Criteria}
\begin{block}{Cross Entropy Criterion}
%Consider the cross entropy of NN training:
\[
L_{XENT}(\theta)=\sum_{r=1}^R\sum_{t=1}^T\sum_{i=1}^N\hat{y}_{rt}(i)\log\frac{\hat{y}_{rt}(i)}{y_{rt}(i)},
\]
where $\theta$ denotes the parameters of NNs, $y_{rt}(i)$ is the network output for physical state $i$ at time $t$ in sample $r$ and $\hat{y}_{rt}(i)$ is the hard label for state $i$. %During training, the Error Back-Propagation algorithm adjusts $\theta$ to minimize $L_{XENT}(\theta)$.
\end{block}
\begin{block}{Softmax Output Activation Function for NN}
Softmax function is usually adopted as the output layer nonlinearity, 
\[
y_{rt}(i)=\frac{e^{a_{rt}(i)}}{\sum_{j=1}^{N}e^{a_{rt}(j)}},
\]
where $a_{rt}(i)$ is the input to the softmax for state $i$ at time $t$ for sample $r$.
\end{block}
\end{frame}

\begin{frame}{Sequence Classification Based NN Training Criteria}
During NN training, the Error Back-Propagation algorithm adjusts $\theta$ to minimize $L_{XENT}(\theta)$
\begin{block}{Gradient Descent Training for NN}
Gradient-descent training can be based on a convenient expression for the derivative of the loss with respect to the activations:
\[
\frac{\delta L_{XENT}}{\delta a_{rt}(i)}=y_{rt}(i)-\hat{y}_{rt}(i).
\]
\end{block}
\end{frame}

\note[itemize]
{
\item Next approach is replacing the frame-based training method for NN with the sequence classification criteria adopted in MMIE, MPE.
\item Firsly, the cross entropy criterion is reviewed in the terms of our ASR task.********formula=****** 
\item We use softmax as our output non-linearity for the NN which takes the form of: *******formula*************
\item During NN training .....
\item Gradient descent training is adopted in NN which attempts to change the parameters in the opposite direction of the gradient. The gradient can be obtained by taking the derivative of the cross entropy criterion with respect to the inputs to the output layer:****formula************
}


\begin{frame}{Sequential Classification Criteria for NN}
Let $L_{SEQ}(\theta)$ be any sequence classification criterion, e.g., MMIE, MPE or MWE. 
\begin{itemize}
\item In GMMs based discriminative training criteria, we have:
%The expected occupancies $\gamma_{rt}^{NUM}(i)$ and $\gamma_{rt}^{DEN}(i)$ for each physical state are related to the gradient of the loss with respect to state log-likelihoods:
\[
\frac{\delta L_{SEQ}}{\delta l_{rt}(i)}=k(\gamma_{rt}^{DEN}(i)-\gamma_{rt}^{NUM}(i)),
\]
where $l_{rt}(i)$ is the log-likelihood of physical state $i$ at time $t$ in sample $r$ and $k$ is the acoustic scaling factor. 
\item In the NN/HMM system, $l_{rt}(i)=\log y_{rt}(i)-\log p(i)$, where $p(i)$ is the prior of state $i$, $y_{rt}(i)$ is the softmax output for state $i$ at time $t$ in sample $r$. By the chain rule, 
\[
\frac{\delta L_{SEQ}}{\delta y_{rt}(i)}=k\frac{(\gamma_{rt}^{DEN}(i)-\gamma_{rt}^{NUM}(i))}{ y_{rt}(i)}.
\]
\end{itemize}
\end{frame}

\begin{frame}{Sequential Classification Criteria for NN}
\begin{itemize}
\item Derivatives of sequence classification criterion with respect to the softmax activations can then be derived: 
\[
\frac{\delta L_{SEQ}}{\delta a_{rt}(i)}=k(\gamma_{rt}^{DEN}(i)-\gamma_{rt}^{NUM}(i)).
\]
\end{itemize}
\end{frame}

\begin{frame}{Sequential Classification Criteria for NN}
\begin{itemize}
\item The derivative of the loss with respect to the activations in cross entropy criterion:
\[
\frac{\delta L_{XENT}}{\delta a_{rt}(i)}=y_{rt}(i)-\hat{y}_{rt}(i).
\]
\item The derivative of sequence classification criterion with respect to the softmax activations:
\[
\frac{\delta L_{SEQ}}{\delta a_{rt}(i)}=k(\gamma_{rt}^{DEN}(i)-\gamma_{rt}^{NUM}(i)).
\]
\end{itemize}
\end{frame}
\begin{frame}{Sequential Classification Criteria for NN}
%Based on these two formulea, we have a simple recipe for training NN acoustic models using any of the sequence classification developed for GMMs in the lattice based EBW framework:
\begin{block}{A Simple Recipe}
\begin{itemize}
\item The gradient with respect to the cross entropy criterion is replaced with the gradient with respect to the sequence-classification criterion.
\item Backpropagation can be run as usual.
\end{itemize}
\end{block}
Instead of using a frame based classification criterion, NNs now update their parameters based on a sequence classification criterion which will benefit the word level recognition.
\end{frame}



\section*{Q \& A}

\begin{frame}
\begin{center}
\usebeamerfont{largeFont}
Q \& A
\end{center}
\end{frame}

% This empty frame indicates the end of the presentation.
\begin{frame}
\null
\end{frame}

% Following are several back-off slides


\end{document}

