\input{../slidesComun}

\title[8. Symmetric matrices and quadratic forms]{Chapter 8. Symmetric matrices and quadratic forms}  
\COSS

% ==============================================
\begin{frame}\frametitle{References} 

\begin{figure}
	\includegraphics[scale=0.7]{../lay_linearalgebra.jpg}
\end{figure}
D. Lay. Linear algebra and its applications (3rd ed). Pearson (2006). Chapter 7.

\end{frame}

% ==============================================
\begin{frame}\frametitle{Applications} 
In this example of particle picking in Single Particle Analysis, one of the features we analyze is the autocorrelation function at different subbands. The autocorrelation is a symmetric matrix.
\begin{center}
	\includegraphics[height=5cm]{figParticlePicking.jpg}
\end{center}
\begin{tiny}
V. Abrishami, A. Zaldívar-Peraza, J.M. de la Rosa-Trevín, J. Vargas, J. Otón, R. Marabini, Y. Shkolnisky, J.M. Carazo, C.O.S. Sorzano. \textit{A pattern matching approach to the automatic selection of particles from low-contrast electron micrographs}. Bioinformatics (\textbf{2013})
\end{tiny}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Applications} 
In one of the steps, we construct a basis that spans the set of rotations of the particle template. To do so, we perform a Principal Component Analysis, which diagonalizes the covariance matrix (again, a symmetric matrix).
\begin{center}
	\includegraphics[height=5cm]{figParticlePicking2.jpg}
\end{center}
\begin{tiny}
V. Abrishami, A. Zaldívar-Peraza, J.M. de la Rosa-Trevín, J. Vargas, J. Otón, R. Marabini, Y. Shkolnisky, J.M. Carazo, C.O.S. Sorzano. \textit{A pattern matching approach to the automatic selection of particles from low-contrast electron micrographs}. Bioinformatics (\textbf{2013})
\end{tiny}

\end{frame}

% ==============================================
\setnextsection{8}
\section{Symmetric matrices and quadratic forms} 
\subsection{Diagonalization of symmetric matrices (a)} 
\Outline

\begin{frame}\frametitle{Diagonalization of symmetric matrices} 
\begin{ceudef}[Symmetric matrix]
	$A\in\mathcal{M}_{n\times n}$ is a \textbf{symmetric matrix} iff $A=A^T$.
\end{ceudef}

\begin{exampleblock}{Example}
	The following two matrices are symmetric
	\begin{center}
		\begin{tabular}{cc}
			$\begin{pmatrix}1 & 0 \\ 0 & -3 \end{pmatrix}$ &
			$\begin{pmatrix}0 & -1 & 0 \\ -1 & 5 & 8 \\ 0 & 8 & -7 \end{pmatrix}$
		\end{tabular}
	\end{center}
\end{exampleblock}

\begin{exampleblock}{Example}
	Let's diagonalize the matrix $A=\begin{pmatrix}6 & -2 & -1 \\ -2 & 6 & -1 \\ -1 & -1 & 5 \end{pmatrix}$.\\
	The characteristic equation is
	\begin{center}
		$|A-\lambda I|=0=-\lambda^3+17\lambda^2-90\lambda+144=-(\lambda-8)(\lambda-6)(\lambda-3)$
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization of symmetric matrices} 
\begin{exampleblock}{}
	The associated eigenvectors are
	\begin{center}
		\begin{tabular}{cl}
			$\lambda=8$ & $\mathbf{v}_1=(-1,1,0) \rightarrow \mathbf{u}_1=(-\frac{1}{\sqrt{2}},\frac{1}{\sqrt{2}},0) $ \\
			$\lambda=6$ & $\mathbf{v}_2=(-1,-1,2) \rightarrow \mathbf{u}_2=(-\frac{1}{\sqrt{6}},-\frac{1}{\sqrt{6}},\frac{2}{\sqrt{6}})$ \\
			$\lambda=3$ & $\mathbf{v}_3=(1,1,1) \rightarrow \mathbf{u}_3=(\frac{1}{\sqrt{3}},\frac{1}{\sqrt{3}},\frac{1}{\sqrt{3}})$\\
		\end{tabular}
	\end{center}
	The $\mathbf{v}$ vectors constitute an orthogonal basis of $\mathbb{R}^3$ and, after normalizing them ($\mathbf{u}_i=\frac{\mathbf{v}_i}{\|\mathbf{v}_i\|}$), we have an orthonormal basis.
	Thus, we can factorize $A$ as $A=PDP^{-1}$ with
	\begin{center}
		\begin{tabular}{cc}
			$P=\begin{pmatrix} -\frac{1}{\sqrt{2}} & -\frac{1}{\sqrt{6}} & \frac{1}{\sqrt{3}} \\
			   \frac{1}{\sqrt{2}} & -\frac{1}{\sqrt{6}} & \frac{1}{\sqrt{3}} \\ 0 & \frac{2}{\sqrt{6}} & \frac{1}{\sqrt{3}} \end{pmatrix}$ & 
			$D=\begin{pmatrix} 8 & 0 & 0 \\ 0 & 6 & 0 \\ 0 & 0 & 3\end{pmatrix}$
		\end{tabular}
	\end{center}
	Since $P$ is orthogonal, $P^{-1}=P^T$ and hence $A=PDP^T$.
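	A quick numerical check (note that MATLAB's \texttt{eig} typically returns the eigenvalues in ascending order, so the columns of $P$ may appear permuted and with flipped signs):\\
	MATLAB: {\color{blue}\texttt{[P,D]=eig([6 -2 -1; -2 6 -1; -1 -1 5])}}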
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization of symmetric matrices} 
\begin{ceuthm}
	If $A$ is symmetric, then any two eigenvectors from different eigenspaces are orthogonal.\\
	\underline{\textit{Proof}}\\
	Let $\mathbf{v}_1$ and $\mathbf{v}_2$ be two eigenvectors associated with two different eigenvalues $\lambda_1$ and $\lambda_2$. Let's show that $\mathbf{v}_1\cdot\mathbf{v}_2=0$:
	\begin{center}
		$\begin{array}{rcll}
			\lambda_1(\mathbf{v}_1\cdot\mathbf{v}_2)&=&(\lambda_1\mathbf{v}_1)^T\mathbf{v}_2 & \quad\text{[By definition]} \\
			   &=&(A\mathbf{v}_1)^T\mathbf{v}_2 & \quad \text{[Definition of eigenvector]} \\
			   &=&\mathbf{v}_1^TA^T\mathbf{v}_2 & \quad \text{[Transpose of product]} \\
			   &=&\mathbf{v}_1^T(A\mathbf{v}_2) & \quad \text{[A is symmetric]} \\
			   &=&\mathbf{v}_1^T(\lambda_2\mathbf{v}_2) & \quad \text{[Definition of eigenvector]} \\
			   &=&\lambda_2(\mathbf{v}_1\cdot\mathbf{v}_2) & \quad \text{[By definition]} \\
		\end{array}$
	\end{center}
	Hence $(\lambda_1-\lambda_2)(\mathbf{v}_1\cdot\mathbf{v}_2)=0$ but $\lambda_1-\lambda_2 \neq 0$ because the two eigenvalues are different. Consequently, 
	$\mathbf{v}_1\cdot\mathbf{v}_2=0$ (q.e.d.)
	\label{thm:orthogonalEigenvectors}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization of symmetric matrices} 
\begin{ceudef}[Orthogonal diagonalization]
	$A$ is \textbf{orthogonally diagonalizable} iff $A=PDP^T$, where $P$ is an orthogonal matrix (i.e., $P^{-1}=P^T$) and $D$ is diagonal.
\end{ceudef}
\begin{ceuthm}
	$A$ is orthogonally diagonalizable iff $A$ is symmetric.\\
	\underline{\textit{Proof orthogonally diagonalizable $\Rightarrow$ symmetric}}\\
	Let us assume that $A=PDP^T$, then
	\begin{center}
		$A^T=(PDP^T)^T=(P^T)^TD^TP^T=PD^TP^T=PDP^T=A$
	\end{center}
	\underline{\textit{Proof orthogonally diagonalizable $\Leftarrow$ symmetric}}\\
	We omit this proof since it is more difficult.
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization of symmetric matrices} 
\begin{exampleblock}{Example}
	Let's orthogonally diagonalize $A=\begin{pmatrix}3 & -2 & 4 \\ -2 & 6 & 2 \\ 4 & 2 & 3\end{pmatrix}$.\\
	\underline{\textit{Solution}}\\
	The characteristic equation is
	\begin{center}
		$|A-\lambda I|=0=-\lambda^3+12\lambda^2-21\lambda-98=-(\lambda-7)^2(\lambda+2)$
	\end{center}
	Its associated eigenvectors are
	\begin{center}
		\begin{tabular}{cl}
			$\lambda=7$ & $\mathbf{v}_1=(1,0,1) \rightarrow \mathbf{u}_1=(\frac{1}{\sqrt{2}},0,\frac{1}{\sqrt{2}})$ \\
			            & $\mathbf{v}_2=(-\frac{1}{2},1,0) \rightarrow \mathbf{u}_2=(-\frac{1}{\sqrt{5}},\frac{2}{\sqrt{5}},0)$ \\
			$\lambda=-2$ & $\mathbf{v}_3=(-1,-\frac{1}{2},1) \rightarrow \mathbf{u}_3=(-\frac{2}{3},-\frac{1}{3},\frac{2}{3})$\\
		\end{tabular}
	\end{center}
	$\mathbf{u}_1$ and $\mathbf{u}_2$ are unit vectors and linearly independent, but they are not orthogonal. $\mathbf{u}_3$ is orthogonal to the other
	two vectors because it belongs to a different eigenspace (see Theorem \ref{thm:orthogonalEigenvectors}).
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization of symmetric matrices} 
\begin{exampleblock}{}
	We can orthogonalize $\mathbf{u}_1$ and $\mathbf{u}_2$ following the Gram-Schmidt procedure:
	\begin{center}
		$\begin{array}{l}
			\mathbf{w}_1=\mathbf{u}_1=(\frac{1}{\sqrt{2}},0,\frac{1}{\sqrt{2}}) \\
			\mathbf{w}_2'=\mathbf{u}_2-\left<\mathbf{u}_2,\mathbf{w}_1\right>\mathbf{w}_1=
			   (-\frac{1}{\sqrt{5}},\frac{2}{\sqrt{5}},0)-\left(-\frac{1}{\sqrt{10}}\right)(\frac{1}{\sqrt{2}},0,\frac{1}{\sqrt{2}}) =
				 (-\frac{1}{2\sqrt{5}},\frac{2}{\sqrt{5}},\frac{1}{2\sqrt{5}})\\
			\mathbf{w}_2=\frac{\mathbf{w}_2'}{\|\mathbf{w}_2'\|}=(-\frac{1}{3\sqrt{2}},\frac{2\sqrt{2}}{3},\frac{1}{3\sqrt{2}})\\
			\mathbf{w}_3=\mathbf{u}_3=(-\frac{2}{3},-\frac{1}{3},\frac{2}{3}) \\
		\end{array}$
	\end{center}
	So $A=PDP^T$ with
	\begin{center}
		\begin{tabular}{cc}
			$P=\begin{pmatrix} \frac{1}{\sqrt{2}} & -\frac{1}{3\sqrt{2}} & -\frac{2}{3} \\
			   0 & \frac{2\sqrt{2}}{3} & -\frac{1}{3} \\ 
				 \frac{1}{\sqrt{2}} & \frac{1}{3\sqrt{2}} & \frac{2}{3} \end{pmatrix}$ & 
			$D=\begin{pmatrix} 7 & 0 & 0 \\ 0 & 7 & 0 \\ 0 & 0 & -2\end{pmatrix}$
		\end{tabular}
	\end{center}
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization of symmetric matrices} 
\begin{ceudef}[Spectrum of a matrix]
	The set of eigenvalues of a matrix is called the \textbf{spectrum} of that matrix.
\end{ceudef}
\begin{ceuthm}[Spectral theorem for symmetric matrices]
	An $n\times n$ symmetric matrix has the following properties:
	\begin{enumerate}
		\item $A$ has $n$ real eigenvalues (including multiplicities).
		\item The dimension of each eigenspace is the multiplicity of the corresponding eigenvalue as root of the
		      characteristic equation.
		\item Eigenspaces corresponding to distinct eigenvalues are mutually orthogonal.
		\item $A$ is orthogonally diagonalizable.
	\end{enumerate}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization of symmetric matrices} 
\begin{ceudef}[Spectral decomposition of symmetric matrices]
	Let $A=PDP^T$ with $P=\begin{pmatrix} \mathbf{u}_1 & \mathbf{u}_2 & ... & \mathbf{u}_n\end{pmatrix}$. Then
	\begin{center}
		$\begin{array}{rcl}A&=&\begin{pmatrix} \mathbf{u}_1 & \mathbf{u}_2 & ... & \mathbf{u}_n\end{pmatrix}\begin{pmatrix} \lambda_1 & 0 & ...& 0 \\ 0 & \lambda_2 & ... & 0 \\
		  ... & ... & ... & ... \\ 0 & 0 & ... & \lambda_n\end{pmatrix}
		   \begin{pmatrix} \mathbf{u}_1^T \\ \mathbf{u}_2^T \\ ... \\ \mathbf{u}_n^T\end{pmatrix}\\
			&=&\begin{pmatrix} \lambda_1\mathbf{u}_1 & \lambda_2\mathbf{u}_2 & ... & \lambda_n\mathbf{u}_n\end{pmatrix}
			   \begin{pmatrix} \mathbf{u}_1^T \\ \mathbf{u}_2^T \\ ... \\ \mathbf{u}_n^T\end{pmatrix}\\
			&=&\lambda_1\mathbf{u}_1\mathbf{u}_1^T+\lambda_2\mathbf{u}_2\mathbf{u}_2^T+...+\lambda_n\mathbf{u}_n\mathbf{u}_n^T
			\end{array}$
	\end{center}
	The last equation is the \textbf{spectral decomposition} of $A$. Each one of the terms $\lambda_i\mathbf{u}_i\mathbf{u}_i^T$ is an $n\times n$ matrix of rank 1 (since all its columns are multiples of $\mathbf{u}_i$). Additionally,
	$\mathbf{u}_i\mathbf{u}_i^T\mathbf{x}$ is the orthogonal projection of $\mathbf{x}$ onto the subspace generated by $\mathbf{u}_i$.
\end{ceudef}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization of symmetric matrices} 
\begin{exampleblock}{Example}
	Write the spectral decomposition of
	\begin{center}
		$A=\begin{pmatrix}  \frac{2}{\sqrt{5}} & -\frac{1}{\sqrt{5}} \\  \frac{1}{\sqrt{5}} &  \frac{2}{\sqrt{5}}\end{pmatrix}
		   \begin{pmatrix}  8 & 0 \\  0 & 3\end{pmatrix}
			 \begin{pmatrix}  \frac{2}{\sqrt{5}} &  \frac{1}{\sqrt{5}} \\ -\frac{1}{\sqrt{5}} &  \frac{2}{\sqrt{5}}\end{pmatrix}$
	\end{center}
	\underline{\textit{Solution}}\\
	Let $\mathbf{u}_1=(\frac{2}{\sqrt{5}},\frac{1}{\sqrt{5}})$ be the first column of $P$ and $\mathbf{u}_2=(-\frac{1}{\sqrt{5}},\frac{2}{\sqrt{5}})$ the second.
	Then
	\begin{center}
		\begin{tabular}{cc}
			$\mathbf{u}_1\mathbf{u}_1^T=\begin{pmatrix}  \frac{4}{5} & \frac{2}{5} \\  \frac{2}{5} &  \frac{1}{5}\end{pmatrix}$ &
			$\mathbf{u}_2\mathbf{u}_2^T=\begin{pmatrix}  \frac{1}{5} & -\frac{2}{5} \\ -\frac{2}{5} &  \frac{4}{5}\end{pmatrix}$
		\end{tabular}
	\end{center}
	The spectral decomposition is therefore
	\begin{center}
		$A=\lambda_1\mathbf{u}_1\mathbf{u}_1^T+\lambda_2\mathbf{u}_2\mathbf{u}_2^T=
		   8\begin{pmatrix}  \frac{4}{5} & \frac{2}{5} \\  \frac{2}{5} &  \frac{1}{5}\end{pmatrix}+
			 3\begin{pmatrix}  \frac{1}{5} & -\frac{2}{5} \\ -\frac{2}{5} &  \frac{4}{5}\end{pmatrix}$
	\end{center}
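	A quick numerical check of the decomposition (the sum below should reproduce $PDP^T$):\\
	MATLAB: {\color{blue}\texttt{u1=[2;1]/sqrt(5); u2=[-1;2]/sqrt(5); 8*u1*u1'+3*u2*u2'}}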
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 7, Section 1:
	\begin{itemize}
		\item 7.1.6
		\item 7.1.7
		\item 7.1.13
		\item 7.1.23
		\item 7.1.27
		\item 7.1.29
		\item 7.1.35
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Quadratic forms (b)} 
\Outline

\begin{frame}\frametitle{Quadratic forms} 
\begin{block}{Introduction}
	Most expressions appearing so far are linear: $A\mathbf{x}$, $\left<\mathbf{w},\mathbf{x}\right>$, $\mathbf{x}^T$, that is, if we construct
	an operator $T(\mathbf{x})$ with them (e.g., $T(\mathbf{x})=A\mathbf{x}$, $T(\mathbf{x})=\left<\mathbf{w},\mathbf{x}\right>$, $T(\mathbf{x})=\mathbf{x}^T$), it satisfies
	\begin{center}
		$T(a\mathbf{x}_1+b\mathbf{x}_2)=aT(\mathbf{x}_1)+bT(\mathbf{x}_2)$
	\end{center}
	However, there are nonlinear expressions like $\mathbf{x}^T\mathbf{x}$. In particular, this one is said to be quadratic. Quadratic expressions normally appear
	in applications of linear algebra to engineering (like optimization) and signal processing (like signal power). They also arise in physics (as potential
	and kinetic energy), differential geometry (as the normal curvature of surfaces) and statistics (as confidence ellipsoids).
\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Quadratic forms} 
\begin{ceudef}[Quadratic forms]
	A quadratic form in $\mathbb{R}^n$ is a function $Q(\mathbf{x}): \mathbb{R}^n \rightarrow \mathbb{R}$ that can be computed as
	\begin{center}
		$Q(\mathbf{x})=\mathbf{x}^TA\mathbf{x}$
	\end{center}
	where $A\in\mathcal{M}_{n\times n}$ is a symmetric matrix.
\end{ceudef}

\begin{exampleblock}{Example}
	\begin{enumerate}
		\item $Q(\mathbf{x})=\mathbf{x}^TI\mathbf{x}=\begin{pmatrix}x_1&x_2\end{pmatrix}\begin{pmatrix}1 & 0 \\ 0 & 1\end{pmatrix}\begin{pmatrix}x_1\\x_2\end{pmatrix}=x_1^2+x_2^2$
		\item $Q(\mathbf{x})=\mathbf{x}^T\begin{pmatrix}4 & 0 \\ 0 & 3\end{pmatrix} \mathbf{x}=4x_1^2+3x_2^2$
		\item $Q(\mathbf{x})=\mathbf{x}^T\begin{pmatrix}3 & -2 \\ -2 & 7\end{pmatrix} \mathbf{x}=3x_1^2+7x_2^2-4x_1x_2$
		\item $Q(\mathbf{x})=\mathbf{x}^T\begin{pmatrix}5 & -\frac{1}{2} & 0 \\ -\frac{1}{2} & 3 & 4 \\ 0 & 4 & 2\end{pmatrix} \mathbf{x}=5x_1^2+3x_2^2+2x_3^2-x_1x_2+8x_2x_3$
	\end{enumerate}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Change of variables in quadratic forms}
%By making a change of variable we can get rid of the cross products (e.g., $x_1x_2$, $x_2x_3$, ...)
\begin{block}{Change of variables}
	A change of variables is an equation of the form $\mathbf{x}=P\mathbf{y}$ or, equivalently, $\mathbf{y}=P^{-1}\mathbf{x}$, where $P$ is an invertible matrix. Since,
	in a quadratic form, $A$ is symmetric, we can orthogonally diagonalize it as $A=PDP^T$. We perform the change of variables
	\begin{center}
		$\mathbf{x}=P\mathbf{y}$
	\end{center}
	to obtain
	\begin{center}
		$Q(\mathbf{x})=(P\mathbf{y})^TA(P\mathbf{y})=\mathbf{y}^TP^TAP\mathbf{y}=Q(\mathbf{y})$
	\end{center}
	But we know
	\begin{center}
		$A=PDP^T \Rightarrow D=P^TAP$
	\end{center}
	Consequently
	\begin{center}
		$Q(\mathbf{y})=\mathbf{y}^TD\mathbf{y}$
	\end{center}
	That is, there is a basis, in which the matrix of the quadratic form is diagonal.
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Change of variables in quadratic forms}
\begin{exampleblock}{Example}
	Consider $Q(\mathbf{x})=\mathbf{x}^TA\mathbf{x}$ with
	\begin{center}
		$A=\begin{pmatrix}  1 & -4 \\  -4 & -5\end{pmatrix}
		  =\begin{pmatrix}  \frac{2}{\sqrt{5}} & \frac{1}{\sqrt{5}} \\  -\frac{1}{\sqrt{5}} &  \frac{2}{\sqrt{5}}\end{pmatrix}
		   \begin{pmatrix}  3 & 0 \\  0 &-7\end{pmatrix}
			 \begin{pmatrix}  \frac{2}{\sqrt{5}} &-\frac{1}{\sqrt{5}} \\  \frac{1}{\sqrt{5}} &  \frac{2}{\sqrt{5}}\end{pmatrix}$
	\end{center}
	That is
	\begin{center}
		$Q(\mathbf{x})=x_1^2-5x_2^2-8x_1x_2$
	\end{center}
	If we make the change of variable
	\begin{center}
		$\mathbf{y}=P^T\mathbf{x}=\begin{pmatrix} \frac{2}{\sqrt{5}}x_1 - \frac{1}{\sqrt{5}}x_2 \\  \frac{1}{\sqrt{5}}x_1 + \frac{2}{\sqrt{5}}x_2\end{pmatrix}$
	\end{center}
	then
	\begin{center}
		$Q(\mathbf{y})=\mathbf{y}^TD\mathbf{y}=3y_1^2-7y_2^2$
	\end{center}
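	We can recover $P$ and $D$ numerically (\texttt{eig} may order the eigenvalues ascending, $-7$ before $3$, and flip the signs of the eigenvectors):\\
	MATLAB: {\color{blue}\texttt{[P,D]=eig([1 -4; -4 -5])}}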
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Change of variables in quadratic forms}
\begin{exampleblock}{}
	Let's check that both ways of calculating the quadratic form are indeed equivalent. To do so, we'll calculate
	the value of $Q(\mathbf{x})$ for $\mathbf{x}=(2,-2)$:
	\begin{center}
		$Q(\mathbf{x})=\mathbf{x}^TA\mathbf{x}=2^2-5\cdot (-2)^2-8\cdot 2 \cdot (-2)=4-20+32=16$
	\end{center}
	If we make the change of variable
	\begin{center}
		$\mathbf{y}=\begin{pmatrix} \frac{2}{\sqrt{5}}2 - \frac{1}{\sqrt{5}}(-2) \\  \frac{1}{\sqrt{5}}2 + \frac{2}{\sqrt{5}}(-2)\end{pmatrix}=
		   \begin{pmatrix} \frac{6}{\sqrt{5}} \\  -\frac{2}{\sqrt{5}}\end{pmatrix}$
	\end{center}
	then
	\begin{center}
		$Q(\mathbf{y})=\mathbf{y}^TD\mathbf{y}=3\left(\frac{6}{\sqrt{5}}\right)^2-7\left( -\frac{2}{\sqrt{5}}\right)^2=
		   3\frac{36}{5}-7\frac{4}{5}=\frac{80}{5}=16$
	\end{center}
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Change of variables in quadratic forms}
\begin{exampleblock}{}
	\begin{center}
		\includegraphics[scale=0.4]{figChangeOfVariables.jpg}
	\end{center}
	
\end{exampleblock}

\begin{ceuthm}[Principal axes theorem]
	Let $A\in\mathcal{M}_{n\times n}$ be a symmetric matrix. Then, there exists a change of variable $\mathbf{x}=P\mathbf{y}$ such that the
	quadratic form $\mathbf{x}^TA\mathbf{x}$ becomes $\mathbf{y}^TD\mathbf{y}$ with $D$ an $n\times n$ diagonal matrix. The columns of $P$ are
	the principal axes.
	\label{thm:principalAxes}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Principal axes}

\begin{block}{A geometric view of the principal axes}
	Consider the quadratic form $Q(\mathbf{x})=\mathbf{x}^TA\mathbf{x}$ with $\mathbf{x}\in\mathbb{R}^2$ and the isocurve $Q(\mathbf{x})=c$. The isocurve is either an ellipse, a circle, a hyperbola, two intersecting
	lines, a point, or contains no points at all. If $A$ is diagonal, then
	\begin{center}
		$Q(\mathbf{x})=a_{11}x_1^2+a_{22}x_2^2=c$
	\end{center}
	The equation of an ellipse is
	\begin{columns}
		\begin{column}{4cm}
			\begin{center}
				$\frac{x_1^2}{a^2}+\frac{x_2^2}{b^2}=1$
			\end{center}
			with $a,b>0$. Therefore
			\begin{center}
				\begin{tabular}{cc}
					$a=\sqrt{\frac{c}{a_{11}}}$ & 
					$b=\sqrt{\frac{c}{a_{22}}}$
				\end{tabular}
			\end{center}
		\end{column}
		\begin{column}{7cm}
			\begin{center}
				\includegraphics[scale=0.35]{figEllipse.jpg}
			\end{center}
		\end{column}
	\end{columns}
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Principal axes}

\begin{block}{}
	The equation of a hyperbola is
	\begin{columns}
		\begin{column}{4cm}
			\begin{center}
				$\frac{x_1^2}{a^2}-\frac{x_2^2}{b^2}=1$
			\end{center}
			with $a,b>0$
		\end{column}
		\begin{column}{7cm}
			\begin{center}
				\includegraphics[scale=0.45]{figHyperbola.jpg}
			\end{center}
		\end{column}
	\end{columns}
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Principal axes}

\begin{block}{}
	If $A$ is not diagonal, then the ellipse or the hyperbola is rotated
	\begin{center}
		\includegraphics[width=11cm]{figRotatedHyperbola.jpg}
	\end{center}
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Principal axes}

\begin{exampleblock}{Example}
	Let's analyze the rotated ellipse
	\begin{center}
		$5x_1^2-4x_1x_2+5x_2^2=48$
	\end{center}
	The corresponding matrix is
	\begin{center}
		$A=\begin{pmatrix}  5 & -2 \\  -2 & 5\end{pmatrix}
		  =\begin{pmatrix}  \frac{1}{\sqrt{2}} & -\frac{1}{\sqrt{2}} \\  \frac{1}{\sqrt{2}} &  \frac{1}{\sqrt{2}}\end{pmatrix}
		   \begin{pmatrix}  3 & 0 \\  0 & 7\end{pmatrix}
			 \begin{pmatrix}  \frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\  -\frac{1}{\sqrt{2}} &  \frac{1}{\sqrt{2}}\end{pmatrix}$
	\end{center}
	So,
	\begin{center}
		\begin{tabular}{cc}
			$a=\sqrt{\frac{c}{a_{11}}}=\sqrt{\frac{48}{3}}=4$ & 
			$b=\sqrt{\frac{c}{a_{22}}}=\sqrt{\frac{48}{7}}\approx 2.62$
		\end{tabular}
	\end{center}
	The change of variable $\mathbf{x}=\begin{pmatrix}  \frac{1}{\sqrt{2}} & -\frac{1}{\sqrt{2}} \\  \frac{1}{\sqrt{2}} &  \frac{1}{\sqrt{2}}\end{pmatrix}\mathbf{y}$ diagonalizes
	the quadratic form (see the new axes in the previous slide).
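	A quick numerical check of the eigenvalues used above (the ordering returned by MATLAB may differ):\\
	MATLAB: {\color{blue}\texttt{eig([5 -2; -2 5])}}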
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Classification of quadratic forms}

\begin{exampleblock}{Example}
	Look at the following surfaces defined as $z=Q(\mathbf{x})$
	\begin{center}
		\includegraphics[width=11cm]{figExampleQuadratics.jpg}
	\end{center}
	The curves seen in $\mathbb{R}^2$ are the intersections of these surfaces with the plane $z=c$. It is obvious that some of the surfaces are always above $z=0$ (a and b), others are always below $z=0$ (d), and still others are sometimes below and sometimes above $z=0$ (c).
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Classification of quadratic forms}

\begin{ceudef}[Classification of quadratic forms]
  \begin{columns}
		\begin{column}{7cm}
			We say $Q(\mathbf{x})$ is
			\begin{itemize}
				\item \textbf{positive definite} if $Q(\mathbf{x})>0 \quad \forall\mathbf{x}\in\mathbb{R}^n, \mathbf{x}\neq\mathbf{0}$
				\item \textbf{negative definite} if $Q(\mathbf{x})<0 \quad \forall\mathbf{x}\in\mathbb{R}^n, \mathbf{x}\neq\mathbf{0}$
				\item \textbf{indefinite} if $Q(\mathbf{x})$ assumes both positive and negative values
				\item \textbf{positive semidefinite} if $Q(\mathbf{x})\geq 0 \quad \forall\mathbf{x}\in\mathbb{R}^n, \mathbf{x}\neq\mathbf{0}$
				\item \textbf{negative semidefinite} if $Q(\mathbf{x})\leq 0 \quad \forall\mathbf{x}\in\mathbb{R}^n, \mathbf{x}\neq\mathbf{0}$
			\end{itemize}
		\end{column}
		\begin{column}{5cm}
			\includegraphics[height=7cm]{figClassificationQuadratics.jpg}
		\end{column}
	\end{columns}
\end{ceudef}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Classification of quadratic forms}

\begin{ceuthm}[Classification of quadratic forms and eigenvalues]
	Let $Q(\mathbf{x})=\mathbf{x}^TA\mathbf{x}$ with $A\in\mathcal{M}_{n\times n}$ and symmetric. Let $\lambda_i$ be the eigenvalues of $A$.
	$Q(\mathbf{x})$ is 
	\begin{itemize}
		\item \textbf{positive definite} iff $\lambda_i>0 \quad \forall i$
		\item \textbf{negative definite} iff $\lambda_i<0 \quad \forall i$
		\item \textbf{indefinite} iff there are positive and negative eigenvalues
		\item \textbf{positive semidefinite} iff $\lambda_i\geq 0 \quad \forall i$
		\item \textbf{negative semidefinite} iff $\lambda_i\leq 0 \quad \forall i$
	\end{itemize}
	\underline{\textit{Proof}}\\
	By the Theorem of Principal Axes (Theorem \ref{thm:principalAxes}), there is a change of variable such that
	\begin{center}
		$Q(\mathbf{y})=\mathbf{y}^TD\mathbf{y}=\lambda_1y_1^2+\lambda_2y_2^2+...+\lambda_ny_n^2$
	\end{center}
	where $\lambda_i$ is the $i$-th eigenvalue. The values of $Q$ depend on $\lambda_i$ in the way that the theorem states (e.g., $\forall \mathbf{y}\neq \mathbf{0}\quad Q(\mathbf{y})>0$
	iff $\lambda_i>0 \quad \forall i$, etc.)
	
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Classification of quadratic forms}

\begin{exampleblock}{Examples}
	\begin{itemize}
		\item $Q(\mathbf{x})=3x_1^2+7x_2^2$ is positive definite because its eigenvalues are 3 and 7 (both greater than 0).
		\item $Q(\mathbf{x})=3x_1^2$ is positive semidefinite because its eigenvalues are 3 and 0 (both greater than or equal to 0).
		\item $Q(\mathbf{x})=3x_1^2-7x_2^2$ is indefinite because its eigenvalues are 3 and -7 (one positive and one negative).
		\item $Q(\mathbf{x})=-3x_1^2-7x_2^2$ is negative definite because its eigenvalues are -3 and -7 (both smaller than 0).
	\end{itemize}
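	In practice we can classify a matrix numerically from its spectrum; for instance, a sketch of a positive definiteness test (up to round-off errors):\\
	MATLAB: {\color{blue}\texttt{lambda=eig(A); all(lambda > 0)}}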
\end{exampleblock}

\begin{ceudef}[Classification of symmetric matrices]
	A symmetric matrix is \textbf{positive definite} if its corresponding quadratic form is positive definite, and analogously for the
	rest of the classification.
\end{ceudef}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Classification of quadratic forms}

\begin{block}{Cholesky factorization}
	The Cholesky factorization factorizes a symmetric, positive definite matrix $A$ as
	\begin{center}
		$A=R^TR$
	\end{center}
	where $R$ is an upper triangular matrix with positive diagonal entries. Conversely, if such a factorization exists, then $A$ is positive definite, so attempting the factorization is a practical test for positive definiteness.
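	MATLAB's \texttt{chol} provides this test: its second output is 0 exactly when the factorization succeeds, that is, when $A$ is positive definite.\\
	MATLAB: {\color{blue}\texttt{[R,p]=chol(A); p==0}}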
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 7, Section 2:
	\begin{itemize}
		\item 7.2.1
		\item 7.2.3
		\item 7.2.5
		\item 7.2.7
		\item 7.2.19
		\item 7.2.23
		\item 7.2.24
		\item 7.2.26
		\item 7.2.27
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Constrained optimization (b)} 
\Outline

\begin{frame}\frametitle{Constrained optimization} 
\begin{block}{Introduction}
	Many problems in engineering or physics are of the form
	\begin{center}
		\begin{tabular}{ccc}
			$\begin{array}{cc}
				 \min & Q(\mathbf{x}) \\
				 \text{subject to} & \|\mathbf{x}\|^2=1 
				\end{array}$ &
			or &
			$\begin{array}{cc}
				 \max & Q(\mathbf{x}) \\
				 \text{subject to} & \|\mathbf{x}\|^2=1 
				\end{array}$
		\end{tabular}
	\end{center}
\end{block}

\begin{exampleblock}{Example}
  Calculate the minimum and maximum of $Q(\mathbf{x})=9x_1^2+4x_2^2+3x_3^2$ subject to $\|\mathbf{x}\|^2=1$.\\
	\underline{\textit{Solution}}\\
	By taking the minimum and maximum coefficient in $Q(\mathbf{x})$ we have
	\begin{center}
		$\begin{array}{rcl}
		    3x_1^2+3x_2^2+3x_3^2 \leq & Q(\mathbf{x}) & \leq 9x_1^2+9x_2^2+9x_3^2 \\
		    3(x_1^2+x_2^2+x_3^2) \leq & Q(\mathbf{x}) & \leq 9(x_1^2+x_2^2+x_3^2) \\
		    3 \leq & Q(\mathbf{x}) & \leq 9 \\
			\end{array}$ 
	\end{center}
	The minimum value $Q(\mathbf{x})=3$ is attained at $\mathbf{x}=(0,0,1)$, while the maximum value $Q(\mathbf{x})=9$ is attained at $\mathbf{x}=(1,0,0)$.
	In fact, the minimum and maximum values that the constrained quadratic form can take are $\lambda_{min}$ and $\lambda_{max}$.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Constrained optimization} 
\begin{exampleblock}{Example}
  Calculate the minimum and maximum of $Q(\mathbf{x})=3x_1^2+7x_2^2$ subject to $\|\mathbf{x}\|^2=1$.\\
	\underline{\textit{Solution}}\\
	$\|\mathbf{x}\|^2=1$ is a cylinder in $\mathbb{R}^3$ while $z=Q(\mathbf{x})$ is a parabolic surface. The minimum and maximum of the constrained problem 
	are attained among those points belonging to the curve that is the intersection of both surfaces.
	\begin{center}
		\includegraphics[width=11cm]{figConstrainedOptimization.jpg}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Constrained optimization} 
\begin{ceuthm}
  Let $A$ be a symmetric matrix and let
	\begin{center}
		$m=\min\left\{\mathbf{x}^TA\mathbf{x}\left|\|\mathbf{x}\|^2=1\right.\right\}$\\
		$M=\max\left\{\mathbf{x}^TA\mathbf{x}\left|\|\mathbf{x}\|^2=1\right.\right\}$
	\end{center}
	Then, $M=\lambda_{max}$ and $m=\lambda_{min}$. $M$ is attained for $\mathbf{x}=\mathbf{u}_{max}$ (the eigenvector associated to $\lambda_{max}$) and
	$m$ is attained for $\mathbf{x}=\mathbf{u}_{min}$ (the eigenvector associated to $\lambda_{min}$).\\
	\underline{\textit{Proof}}\\
	Let's orthogonally diagonalize $A$ as $A=PDP^T$ and make the change of variables $\mathbf{y}=P^T\mathbf{x}$. We already know that
	\begin{center}
		$Q(\mathbf{x})=\mathbf{x}^TA\mathbf{x}=\mathbf{y}^TD\mathbf{y}$
	\end{center}
	Additionally $\|\mathbf{y}\|^2=\|\mathbf{x}\|^2$ because
	\begin{center}
		$\|\mathbf{y}\|^2=\mathbf{y}^T\mathbf{y}=(P^T\mathbf{x})^T(P^T\mathbf{x})=\mathbf{x}^TPP^T\mathbf{x}=\mathbf{x}^T\mathbf{x}=\|\mathbf{x}\|^2$
	\end{center}
	In particular $\|\mathbf{y}\|=1 \Leftrightarrow \|\mathbf{x}\|=1$.
	\label{thm:constrainedOptimization}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Constrained optimization} 
\begin{block}{}
	Then,
	\begin{center}
		$m=\min\left\{\mathbf{y}^TD\mathbf{y}\left|\|\mathbf{y}\|^2=1\right.\right\}$\\
		$M=\max\left\{\mathbf{y}^TD\mathbf{y}\left|\|\mathbf{y}\|^2=1\right.\right\}$
	\end{center}
	Since $D$ is diagonal we have
	\begin{center}
		$\mathbf{y}^TD\mathbf{y}=\lambda_1y_1^2+\lambda_2y_2^2+...+\lambda_ny_n^2$
	\end{center}
	Let's look for the maximum of these values subject to $\|\mathbf{y}\|=1$. Consider the maximum eigenvalue, $\lambda_{max}$, then
	\begin{center}
		$\begin{array}{rcl}
		  \mathbf{y}^TD\mathbf{y}&=&\lambda_1y_1^2+\lambda_2y_2^2+...+\lambda_ny_n^2\\
		     &\leq&\lambda_{max}y_1^2+\lambda_{max}y_2^2+...+\lambda_{max}y_n^2\\
		     &=&\lambda_{max}(y_1^2+y_2^2+...+y_n^2)\\
		     &=&\lambda_{max}\|\mathbf{y}\|^2=\lambda_{max}\\
			\end{array}$
	\end{center}
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Constrained optimization} 
\begin{block}{}
	In fact, the value $\lambda_{max}$ is attained at $\mathbf{y}_{max}=\begin{pmatrix} 0 & 0 & ... & 0 & 1 & 0 & ...  & 0\end{pmatrix}^T$, where
	the 1 is at the location corresponding to $\lambda_{max}$. The corresponding $\mathbf{x}$ is 
	\begin{center}
		$\mathbf{x}=P\mathbf{y}=\begin{pmatrix}\mathbf{u}_1 & \mathbf{u}_2 & ... & \mathbf{u}_{max-1} & \mathbf{u}_{max} & \mathbf{u}_{max+1} & ... &\mathbf{u}_n\end{pmatrix}
		   \begin{pmatrix} 0 \\ 0 \\ ... \\ 0 \\ 1 \\ 0 \\ ...  \\ 0\end{pmatrix}=\mathbf{u}_{max}$
	\end{center}
	We could reason analogously for the minimum.
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Constrained optimization} 
\begin{exampleblock}{Example}
	Let $A=\begin{pmatrix}3 & 2 & 1 \\ 2 & 3 & 1 \\ 1 & 1 & 4\end{pmatrix}$. Solve the following optimization problem
	\begin{center}
		$\begin{array}{cc}
			 \max & Q(\mathbf{x})=\mathbf{x}^TA\mathbf{x} \\
			 \text{subject to} & \|\mathbf{x}\|^2=1 
			\end{array}$
	\end{center}
	\underline{\textit{Solution}}\\
	The characteristic equation is
	\begin{center}
		$|A-\lambda I|=0=-(\lambda-6)(\lambda-3)(\lambda-1)$
	\end{center}
	The maximum eigenvalue is $\lambda=6$ and its corresponding eigenvector is $\mathbf{u}=(\frac{1}{\sqrt{3}},\frac{1}{\sqrt{3}},\frac{1}{\sqrt{3}})$.
	Therefore, the maximum of $Q(\mathbf{x})$ is 6, which is attained at $\mathbf{x}=(\frac{1}{\sqrt{3}},\frac{1}{\sqrt{3}},\frac{1}{\sqrt{3}})$.
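	A quick numerical check (for symmetric matrices MATLAB's \texttt{eig} typically sorts the eigenvalues in ascending order, so the maximizer appears in the last column of \texttt{P}):\\
	MATLAB: {\color{blue}\texttt{[P,D]=eig([3 2 1; 2 3 1; 1 1 4])}}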
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Constrained optimization} 
\begin{ceuthm}
  Let $A$, $\lambda_{max}$ and $\mathbf{u}_{max}$ be defined as in the previous theorem. Then the solution of
	\begin{center}
		$\begin{array}{cc}
			 \max & Q(\mathbf{x})=\mathbf{x}^TA\mathbf{x} \\
			 \text{subject to} & \|\mathbf{x}\|^2=1 \\
			                   & \mathbf{x}\cdot\mathbf{u}_{max}=0 \\
			\end{array}$
	\end{center}
	is given by the second largest eigenvalue, $\lambda_{max-1}$, which is attained at its associated eigenvector $\mathbf{u}_{max-1}$.
	
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 7, Section 3:
	\begin{itemize}
		\item 7.3.1
		\item 7.3.3
		\item 7.3.13
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Singular Value Decomposition (SVD) (c)} 
\Outline

\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{block}{Introduction}
	Unfortunately, not all matrices can be diagonalized and factorized as
	\begin{center}
		$A=PDP^{-1}$
	\end{center}
	However, all of them (even rectangular matrices) can be factorized as
	\begin{center}
		$A=QDP^{-1}$
	\end{center}
	This is called the \textbf{Singular Value Decomposition}. It imitates the property of stretching/shrinking of eigenvalues and eigenvectors. For instance,
	assume $\mathbf{u}$ is an eigenvector, then
	\begin{center}
		$A\mathbf{u}=\lambda\mathbf{u} \Rightarrow \|A\mathbf{u}\|=|\lambda|\|\mathbf{u}\|$
	\end{center}
	If $|\lambda|>1$, then the transformed vector $A\mathbf{u}$ is stretched with respect to $\mathbf{u}$. On the contrary,
	if $|\lambda|<1$, then the transformed vector $A\mathbf{u}$ is shrunk with respect to $\mathbf{u}$.
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{exampleblock}{Example}
	Consider $A=\begin{pmatrix}4 & 11 & 14 \\ 8 & 7 & -2 \end{pmatrix}$ and the linear transformation
	$T(\mathbf{x})=A\mathbf{x}$. It transforms the unit sphere in $\mathbb{R}^3$ onto an ellipse of $\mathbb{R}^2$
	\begin{center}
		\includegraphics[width=11cm]{figSVD.jpg}
	\end{center}
	Look for the direction that maximizes $\|A\mathbf{x}\|$ subject to $\|\mathbf{x}\|=1$.
	\label{example:SVD}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{exampleblock}{}
	\underline{\textit{Solution}}\\
	We may maximize $\|A\mathbf{x}\|^2$ because $\|A\mathbf{x}\|$ is maximum iff $\|A\mathbf{x}\|^2$ is maximum.
	\begin{center}
		$\|A\mathbf{x}\|^2=(A\mathbf{x})^T(A\mathbf{x})=\mathbf{x}^TA^TA\mathbf{x}$
	\end{center}
	which is a quadratic form since $A^TA$ is symmetric: 
	\begin{center}
		$A^TA=\begin{pmatrix}80 & 100 & 40 \\ 100 & 170 & 140 \\ 40 & 140 & 200\end{pmatrix}$
	\end{center}
	By Theorem \ref{thm:constrainedOptimization}, $\max\|A\mathbf{x}\|^2=\lambda_{max}=360$, attained at the associated eigenvector $\mathbf{u}_{max}=(\frac{1}{3},\frac{2}{3},\frac{2}{3})$.
	Consequently, $\max\|A\mathbf{x}\|=\sqrt{360}=6\sqrt{10}$, and the image of the maximizer is
	\begin{center}
		$A\mathbf{u}_{max}=\begin{pmatrix}18 \\ 6\end{pmatrix}$
	\end{center}
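	A quick numerical check of the maximizer and the attained norm ($6\sqrt{10}\approx 18.97$):\\
	MATLAB: {\color{blue}\texttt{A=[4 11 14; 8 7 -2]; u=[1;2;2]/3; norm(A*u)}}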
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{ceudef}[Singular Values of a matrix]
	Let $A\in\mathcal{M}_{m\times n}$. $A^TA$ can always be orthogonally diagonalized. Let $\{\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_n\}$ be a basis of $\mathbb{R}^n$ formed
	by the eigenvectors of $A^TA$ and let $\lambda_1,\lambda_2,...,\lambda_n$ be the corresponding eigenvalues. Then
	\begin{center}
		$\|A\mathbf{v}_i\|^2=(A\mathbf{v}_i)^T(A\mathbf{v}_i)=\mathbf{v}_i^TA^TA\mathbf{v}_i=\mathbf{v}_i^T(\lambda_i \mathbf{v}_i)=\lambda_i\|\mathbf{v}_i\|^2$
	\end{center}
	If we take the square root
	\begin{center}
		$\|A\mathbf{v}_i\|=\sqrt{\lambda_i}\|\mathbf{v}_i\|$
	\end{center}
	That is, $\sqrt{\lambda_i}$ reflects the amount by which $\mathbf{v}_i$ is stretched or shrunk. $\sqrt{\lambda_i}$ is called a \textbf{singular value} and it
	is denoted as $\sigma_i$.
	\label{def:singularValues}
\end{ceudef}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{exampleblock}{Example (continued)}
	\begin{center}
		\includegraphics[width=11cm]{figSVD.jpg}
	\end{center}
	In the example of Slide \ref{example:SVD}, the singular values are $6\sqrt{10}$, $3\sqrt{10}$ and $0$; the nonzero ones are the lengths of the
	semiaxes of the ellipse in $\mathbb{R}^2$. From the singular values we learn that the unit sphere in $\mathbb{R}^3$ (there are 3 singular values)
	is collapsed (one of the singular values is 0) onto a two-dimensional ellipse (the remaining two singular values are different from each other).
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{ceuthm}
	Let $\{\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_n\}$ be a basis of $\mathbb{R}^n$ formed
	by the eigenvectors of $A^TA$, with the corresponding eigenvalues $\lambda_1,\lambda_2,...,\lambda_n$ sorted in descending order. Let us assume that $A$ has
	$r$ non-null singular values. Then
	\begin{center}
		$S=\{A\mathbf{v}_1,A\mathbf{v}_2,...,A\mathbf{v}_r\}$
	\end{center}
	is an orthogonal basis of $\mathrm{Col}\{A\}$ and
	\begin{center}
		$\mathrm{Rank}\{A\}=r$
	\end{center}
	\underline{\textit{Proof}}\\
	By Theorem \ref{thm:orthogonalEigenvectors}, any two eigenvectors are orthogonal to each other if they correspond to different eigenvalues, that is, $\mathbf{v}_i\cdot
	\mathbf{v}_j=0$. Then,
	\begin{center}
		$(A\mathbf{v}_i)\cdot(A\mathbf{v}_j)=\mathbf{v}_i^TA^TA\mathbf{v}_j=\mathbf{v}_i^T(\lambda_j \mathbf{v}_j)=\lambda_j(\mathbf{v}_i^T\mathbf{v}_j)=\lambda_j
		(\mathbf{v}_i\cdot\mathbf{v}_j)=0$
	\end{center}
	That is $A\mathbf{v}_i$ and $A\mathbf{v}_j$ are also orthogonal.
	\label{thm:auxSVD}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{block}{}
	Additionally, if the eigenvectors $\mathbf{v}_i$ are unit vectors, then (see Definition \ref{def:singularValues})
	\begin{center}
		$\sigma_i=\|A\mathbf{v}_i\|$
	\end{center}
	Since there are $r$ non-null singular values, $A\mathbf{v}_i\neq \mathbf{0}$ only for $i=1,2,...,r$. So the set $S$ is a set
	of non-null, orthogonal vectors. To show it is a basis of $\mathrm{Col}\{A\}$ we still need to show that any vector in $\mathrm{Col}\{A\}$
	can be expressed as a linear combination of the vectors in $S$. We know that the eigenvectors of $A^TA$ form a basis of $\mathbb{R}^n$. Then,
	any vector $\mathbf{x}\in\mathbb{R}^n$ can be written with some coefficients $c_1, c_2, ..., c_n$ as
	\begin{center}
		$\mathbf{x}=c_1\mathbf{v}_1+c_2\mathbf{v}_2+...+c_n\mathbf{v}_n$
	\end{center}
	If we transform this vector
	\begin{center}
		$\begin{array}{rcll}
		   A\mathbf{x}&=&A(c_1\mathbf{v}_1+c_2\mathbf{v}_2+...+c_n\mathbf{v}_n)& \\
		   &=&c_1A\mathbf{v}_1+c_2A\mathbf{v}_2+...+c_nA\mathbf{v}_n& \quad\text{[Linear transformation]} \\
		   &=&c_1A\mathbf{v}_1+c_2A\mathbf{v}_2+...+c_rA\mathbf{v}_r& \quad\text{[$A\mathbf{v}_i=\mathbf{0}$ for $i>r$]} \\
		\end{array}$
	\end{center}
\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{block}{}
	That is, any transformed vector $A\mathbf{x}$ can be expressed as a linear combination of the elements in $S$. Consequently,
	$S$ is a basis of $\mathrm{Col}\{A\}$.
	
	Finally, $\mathrm{Rank}\{A\}$ is nothing more than the dimension of $\mathrm{Col}\{A\}$. Since $S$ is a basis of $\mathrm{Col}\{A\}$ and
	it has $r$ vectors, then $\mathrm{Rank}\{A\}=r$.
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{ceuthm}[The Singular Value Decomposition]
	Let $A\in\mathcal{M}_{m\times n}$ be a matrix with rank $r$. Then, there exists a matrix $\Sigma\in\mathcal{M}_{m\times n}$ whose diagonal entries
	are the first $r$ singular values of $A$ sorted in descending order ($\sigma_1 \geq \sigma_2 \geq ... \geq \sigma_r > 0$) and there exist
	orthogonal matrices $U\in\mathcal{M}_{m\times m}$ and $V\in\mathcal{M}_{n\times n}$ such that
	\begin{center}
		$A=U\Sigma V^T$
	\end{center}
	$\Sigma$ is unique but $U$ and $V$ are not. The columns of $U$ are called the left singular vectors, and the columns of $V$ are the right singular vectors.
\end{ceuthm}

\begin{exampleblock}{Example}
	\begin{center}
		$\begin{pmatrix}a_{11} & a_{12} & a_{13} & a_{14} \\
		   a_{21} & a_{22} & a_{23} & a_{24}\end{pmatrix}=
			\begin{pmatrix}u_{11} & u_{12}  \\
		   u_{21} & u_{22}\end{pmatrix}
			\begin{pmatrix}\sigma_1 & 0 & 0 & 0 \\
		   0 & \sigma_2 & 0 & 0\end{pmatrix}
			\begin{pmatrix}v_{11} & v_{21} & v_{31} & v_{41} \\
		   v_{12} & v_{22} & v_{32} & v_{42} \\
			 v_{13} & v_{23} & v_{33} & v_{43} \\
			 v_{14} & v_{24} & v_{34} & v_{44} \\
			\end{pmatrix}$
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{block}{}
	\underline{\textit{Proof}}\\
	Let $\lambda_i$ and $\mathbf{v}_i$ ($i=1,2,...,n$) be the eigenvalues and eigenvectors of $A^TA$. By Theorem \ref{thm:auxSVD} we know that
	$S=\{A\mathbf{v}_1,A\mathbf{v}_2,...,A\mathbf{v}_r\}$ is an orthogonal basis of $\mathrm{Col}\{A\}$. Let's normalize these vectors
	\begin{center}
		$\mathbf{u}_i=\frac{A\mathbf{v}_i}{\sigma_i} \quad i=1,2,...,r$
	\end{center}
	and we extend the set $\{\mathbf{u}_1,\mathbf{u}_2,...,\mathbf{u}_r\}$ to an orthonormal basis of $\mathbb{R}^m$. Let us construct the matrices
	\begin{center}
		$U=\begin{pmatrix}\mathbf{u}_1 & \mathbf{u}_2 & ... & \mathbf{u}_m \end{pmatrix}$\\
		$V=\begin{pmatrix}\mathbf{v}_1 & \mathbf{v}_2 & ... & \mathbf{v}_n \end{pmatrix}$\\
	\end{center}
	By construction $U$ and $V$ are orthogonal, and
	\begin{center}
		$\begin{array}{rcc}AV&=&\begin{pmatrix}A\mathbf{v}_1 & A\mathbf{v}_2 & ... & A\mathbf{v}_r & \mathbf{0} & ... & \mathbf{0} \end{pmatrix}\\
		   		&=&\begin{pmatrix}\sigma_1\mathbf{u}_1 & \sigma_2\mathbf{u}_2 & ... & \sigma_r\mathbf{u}_r & \mathbf{0} & ... & \mathbf{0} \end{pmatrix}\end{array}$
	\end{center}
	
\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{block}{}
	\underline{\textit{Proof (continued)}}\\
	On the other side, let
	\begin{center}
		$\begin{array}{cc}
		   D=\begin{pmatrix}\sigma_1 & 0 & ... & 0 \\ 0 & \sigma_2 & ... & 0 \\ ... & ... & ... & ... \\ 0 & 0 & ... & \sigma_r \end{pmatrix} &
			 \Sigma=\begin{pmatrix} D & 0 \\ 0 & 0 \end{pmatrix}
		\end{array}$
	\end{center}
	Then, 
	\begin{center}
		$U\Sigma=\begin{pmatrix}\mathbf{u}_1 & \mathbf{u}_2 & ... & \mathbf{u}_m \end{pmatrix}\begin{pmatrix} D & 0 \\ 0 & 0 \end{pmatrix}=
		   \begin{pmatrix}\sigma_1\mathbf{u}_1 & \sigma_2\mathbf{u}_2 & ... & \sigma_r\mathbf{u}_r & \mathbf{0} & ... & \mathbf{0} \end{pmatrix}$
	\end{center}
	Therefore,
	\begin{center}
		$U\Sigma=AV \Rightarrow A=U\Sigma V^T$
	\end{center}
	since $V$ is orthogonal.
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{ceuthm}[Properties of the SVD decomposition]
	In a SVD decomposition
	\begin{itemize}
		\item The left singular vectors of $A$ are eigenvectors of $AA^T$.
		\item The right singular vectors of $A$ are eigenvectors of $A^TA$.
		\item The singular values are the square roots of the eigenvalues of both $AA^T$ and $A^TA$ (these two matrices share their non-null eigenvalues).
		\item The singular values are the length of the semiaxes of the mapping of the unit hypersphere in $\mathbb{R}^n$ onto $\mathbb{R}^m$.
		\item The columns of $U$ form an orthonormal basis of $\mathbb{R}^m$.
		\item The columns of $V$ form an orthonormal basis of $\mathbb{R}^n$.
	\end{itemize}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{exampleblock}{Example}
	Let's calculate the SVD decomposition of $A=\begin{pmatrix} 4 & 11 & 14 \\ 8 & 7 & -2 \end{pmatrix}$.\\
	\underline{\textit{Step 1}}: Orthogonally diagonalize $A^TA$ \\
	\begin{center}
		$A^TA=\begin{pmatrix}80 & 100 & 40 \\ 100 & 170 & 140 \\ 40 & 140 & 200\end{pmatrix}$
	\end{center}
	Its eigenvalues and eigenvectors are
	\begin{center}
		\begin{tabular}{rl}
			$\lambda_1=360$ & $\mathbf{v}_1=(\frac{1}{3},\frac{2}{3},\frac{2}{3})$ \\
			$\lambda_2=90$ & $\mathbf{v}_2=(-\frac{2}{3},-\frac{1}{3},\frac{2}{3})$ \\
			$\lambda_3=0$ & $\mathbf{v}_3=(\frac{2}{3},-\frac{2}{3},\frac{1}{3})$ \\
		\end{tabular}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{exampleblock}{}
	\underline{\textit{Step 2}}: Construct $V$ and $\Sigma$ \\
	\begin{center}
		$V=\begin{pmatrix}\mathbf{v}_1 & \mathbf{v}_2 & \mathbf{v}_3\end{pmatrix}=
		   \begin{pmatrix} \frac{1}{3} & -\frac{2}{3} & \frac{2}{3} \\ \frac{2}{3} & -\frac{1}{3} & -\frac{2}{3} \\ \frac{2}{3} & \frac{2}{3} & \frac{1}{3}\end{pmatrix}$\\
		$\Sigma=\begin{pmatrix}\sqrt{\lambda_1} & 0 & 0 \\ 0 & \sqrt{\lambda_2} & 0 \end{pmatrix}=
		        \begin{pmatrix}6\sqrt{10} & 0 & 0 \\ 0 & 3\sqrt{10} & 0\end{pmatrix}$
	\end{center}
	\underline{\textit{Step 3}}: Construct $U$ \\
	\begin{center}
		$\mathbf{u}_1=\frac{A\mathbf{v}_1}{\sigma_1}=(\frac{3}{\sqrt{10}},\frac{1}{\sqrt{10}})$\\
		$\mathbf{u}_2=\frac{A\mathbf{v}_2}{\sigma_2}=(\frac{1}{\sqrt{10}},-\frac{3}{\sqrt{10}})$\\
	\end{center}
	The set $\{\mathbf{u}_1,\mathbf{u}_2\}$ is already a basis of $\mathbb{R}^2$, so there is no need to extend it.
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{exampleblock}{}
	Finally we have
	\begin{center}
		$A=U\Sigma V^T$ \\
		$\begin{pmatrix} 4 & 11 & 14 \\ 8 & 7 & -2 \end{pmatrix}=
		   \begin{pmatrix} \frac{3}{\sqrt{10}} & \frac{1}{\sqrt{10}} \\ \frac{1}{\sqrt{10}} & -\frac{3}{\sqrt{10}} \end{pmatrix}
			 \begin{pmatrix}6\sqrt{10} & 0 & 0 \\ 0 & 3\sqrt{10} & 0 \end{pmatrix}
		   \begin{pmatrix} \frac{1}{3} & \frac{2}{3} & \frac{2}{3} \\ -\frac{2}{3} & -\frac{1}{3} & \frac{2}{3} \\ \frac{2}{3} & -\frac{2}{3} & \frac{1}{3}\end{pmatrix}$
	\end{center}
	MATLAB: {\color{blue}\texttt{[U,S,V]=svd([4 11 14; 8 7 -2])}}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{exampleblock}{Example}
	Let's calculate the SVD decomposition of $A=\begin{pmatrix} 1 & -1 \\ -2 & 2 \\ 2 & -2 \end{pmatrix}$.\\
	\underline{\textit{Step 1}}: Orthogonally diagonalize $A^TA$ \\
	\begin{center}
		$A^TA=\begin{pmatrix}9 & -9 \\ -9 & 9\end{pmatrix}$
	\end{center}
	Its eigenvalues and eigenvectors are
	\begin{center}
		\begin{tabular}{rl}
			$\lambda_1=18$ & $\mathbf{v}_1=(\frac{1}{\sqrt{2}},-\frac{1}{\sqrt{2}})$ \\
			$\lambda_2=0$ & $\mathbf{v}_2=(\frac{1}{\sqrt{2}},\frac{1}{\sqrt{2}})$ \\
		\end{tabular}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{exampleblock}{}
	\underline{\textit{Step 2}}: Construct $V$ and $\Sigma$ \\
	\begin{center}
		$V=\begin{pmatrix}\mathbf{v}_1 & \mathbf{v}_2 \end{pmatrix}=
		   \begin{pmatrix} \frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\ -\frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \end{pmatrix}$\\
		$\Sigma=\begin{pmatrix}\sqrt{\lambda_1} & 0 \\0 & \sqrt{\lambda_2} \\ 0 & 0 \end{pmatrix}=
		        \begin{pmatrix}3\sqrt{2} & 0 \\ 0 & 0 \\ 0 & 0\end{pmatrix}$
	\end{center}
	\underline{\textit{Step 3}}: Construct $U$ \\
	\begin{center}
		$\mathbf{u}_1=\frac{A\mathbf{v}_1}{\sigma_1}=(\frac{1}{3},-\frac{2}{3},\frac{2}{3})$\\
	\end{center}
	The set $\{\mathbf{u}_1\}$ is not yet a basis of $\mathbb{R}^3$, so we need to extend it with orthogonal vectors. All vectors orthogonal to $\mathbf{u}_1$
	fulfill
	\begin{center}
		$\mathbf{u}_1\cdot\mathbf{u}=0=\frac{1}{3}x_1-\frac{2}{3}x_2+\frac{2}{3}x_3\Rightarrow x_1=2x_2-2x_3$
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{exampleblock}{}
	\underline{\textit{Step 3}}: Construct $U$ (continued) \\
	A basis of this space is $\mathbf{w}_2=(2,1,0)$ and $\mathbf{w}_3=(-2,0,1)$. But this basis is not orthogonal. Let's make it orthonormal following
	the Gram-Schmidt procedure
	\begin{center}
		\begin{tabular}{l}
			$\mathbf{u}_2=\frac{\mathbf{w}_2}{\|\mathbf{w}_2\|}=(\frac{2}{\sqrt{5}},\frac{1}{\sqrt{5}},0)$\\
			$\mathbf{w}_3'=\mathbf{w}_3-<\mathbf{w}_3,\mathbf{u}_2>\mathbf{u}_2=(-\frac{2}{5},\frac{4}{5},1)$\\
			$\mathbf{u}_3=\frac{\mathbf{w}_3'}{\|\mathbf{w}_3'\|}=(-\frac{2}{3\sqrt{5}},\frac{4}{3\sqrt{5}},\frac{\sqrt{5}}{3})$\\
		\end{tabular}
	\end{center}
	For $U$ to be orthogonal, all of its columns must be unit vectors. Note that $\mathbf{u}_2$ and $\mathbf{u}_3$ are ``free'' (we construct them
	simply to extend the set of $\mathbf{u}$ vectors to an orthonormal basis of $\mathbb{R}^3$, so any orthonormal extension is valid), but
	$\mathbf{u}_1$ is ``bound'' to the singular value through $\mathbf{u}_1=\frac{A\mathbf{v}_1}{\sigma_1}$.
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Singular Value Decomposition (SVD)} 
\begin{exampleblock}{}
	Finally we have
	\begin{center}
		$A=U\Sigma V^T$ \\
		$\begin{pmatrix} 1 & -1 \\ -2 & 2 \\ 2 & -2 \end{pmatrix}=
		   \begin{pmatrix} \frac{1}{3} & \frac{2}{\sqrt{5}} & -\frac{2}{3\sqrt{5}} \\ -\frac{2}{3} & \frac{1}{\sqrt{5}} & \frac{4}{3\sqrt{5}} \\
			   \frac{2}{3} & 0 & \frac{\sqrt{5}}{3}\end{pmatrix}
			 \begin{pmatrix}3\sqrt{2} & 0 \\ 0 & 0 \\ 0 & 0\end{pmatrix}
		   \begin{pmatrix} \frac{1}{\sqrt{2}} & -\frac{1}{\sqrt{2}} \\ \frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}}\end{pmatrix}$
	\end{center}
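	Since $U$ and $V$ are not unique, a numerical check may return different signs or a different orthonormal extension:\\
	MATLAB: {\color{blue}\texttt{[U,S,V]=svd([1 -1; -2 2; 2 -2])}}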
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Algebraic applications of SVD} 
\begin{block}{Matrix condition number}
	Let $\sigma_1$ and $\sigma_r$ be the largest and smallest singular values of a matrix $A$. The condition number of the matrix is defined as
	\begin{center}
		$\kappa(A)=\frac{\sigma_1}{\sigma_r}$
	\end{center}
	If this condition number is very large, the system of equations $A\mathbf{x}=\mathbf{b}$ is ill-conditioned and small perturbations in $\mathbf{b}$ translate
	into large perturbations in $\mathbf{x}$. As a rule of thumb, if $\kappa(A) = 10^k$, then we may lose up to $k$ digits of accuracy.
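	MATLAB's \texttt{cond} computes exactly this ratio (by default, in the 2-norm):\\
	MATLAB: {\color{blue}\texttt{kappa=cond(A)}}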
\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Algebraic applications of SVD} 
\begin{block}{Bases for fundamental spaces}
	The $U$ and $V$ matrices provide bases for $\mathrm{Row}\{A\}$, $\mathrm{Col}\{A\}=\mathrm{Row}\{A^T\}$, $\mathrm{Nul}\{A\}$ and $\mathrm{Nul}\{A^T\}$
	\begin{center}
		\includegraphics[scale=0.45]{figSVDBases.jpg}
	\end{center}
\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Algebraic applications of SVD} 
\begin{ceuthm}[The Invertible Matrix Theorem (continued)]
	The Invertible Matrix Theorem has been developed in Theorems 5.1 and 11.5 of Chapter 3, Theorem 10.5 of Chapter 5, and Theorem 2.1 of Chapter 6. Here, we give an
	extension: for an invertible $A$, the following statements are equivalent to the previous ones:
	\begin{enumerate}[i.]
		\setcounter{enumi}{26}
		\item $(\mathrm{Col}\{A\})^\perp=\{\mathbf{0}\}$.
		\item $(\mathrm{Nul}\{A\})^\perp=\mathbb{R}^n$.
		\item $(\mathrm{Row}\{A\})=\mathbb{R}^n$.
		\item $A$ has $n$ non-null singular values.
	\end{enumerate}
\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Algebraic applications of SVD} 
\begin{block}{Reduced SVD and pseudoinverse of A}
	If within $U$ and $V$ we distinguish two submatrices, the first one with $r$ columns, we have
	\begin{center}
		$U=\begin{pmatrix} U_r & U_{m-r}\end{pmatrix}$ and
		$V=\begin{pmatrix} V_r & V_{n-r}\end{pmatrix}$
	\end{center}
	Then,
	\begin{center}
		$A=U\Sigma V^T=\begin{pmatrix} U_r & U_{m-r}\end{pmatrix}\begin{pmatrix} D & 0 \\ 0 & 0 \end{pmatrix} \begin{pmatrix} V_r^T \\ V_{n-r}^T\end{pmatrix}=
		   U_rDV_r^T$
	\end{center}
	Despite the fact that we may have removed many columns of $U$ and $V$, we have not lost any information and the recovery of $A$ is exact.
	The Moore-Penrose pseudoinverse is defined as
	\begin{center}
		$A^+=V_rD^{-1}U_r^T$
	\end{center}
	which is an $n\times m$ matrix such that
	\begin{center}
		$A^+AA^+=A^+ \quad AA^+A=A$\\
	\end{center}
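	MATLAB computes the Moore-Penrose pseudoinverse (through the SVD) with:\\
	MATLAB: {\color{blue}\texttt{Aplus=pinv(A)}}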
\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Algebraic applications of SVD} 
\begin{block}{Pseudoinverse of A and Least Squares}
	It can be shown that the least-squares solution of the system of equations $A\mathbf{x}=\mathbf{b}$ (the minimum-norm one, if there are several) is given by
	\begin{center}
		$\hat{\mathbf{x}}=A^+\mathbf{b}$
	\end{center}
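	A minimal sketch in MATLAB (for full-rank overdetermined systems this agrees with \texttt{A\textbackslash b}):\\
	MATLAB: {\color{blue}\texttt{xhat=pinv(A)*b}}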
\end{block}

\begin{block}{Matrix approximation}
	If instead of taking $r$ components in the split of $U$ and $V$ (see the previous slide) we take only $k<r$ (with the singular values
	sorted in descending order), we can reconstruct the rank-$k$ approximation
	\begin{center}
		$A_k=U_kD_kV_k^T$
	\end{center}
	This is the rank-$k$ matrix that minimizes the Frobenius norm of the difference:
	\begin{center}
		$A_k=\arg\min\limits_{\mathrm{Rank}\{B\}=k}\|A-B\|^2_F=\arg\min\limits_{\mathrm{Rank}\{B\}=k}\sum\limits_{i,j}{(a_{ij}-b_{ij})^2}$
	\end{center}
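	A minimal sketch of the rank-$k$ approximation in MATLAB (assuming \texttt{k} is at most $\mathrm{Rank}\{A\}$):\\
	MATLAB: {\color{blue}\texttt{[U,S,V]=svd(A); Ak=U(:,1:k)*S(1:k,1:k)*V(:,1:k)'}}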
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 7, Section 4:
	\begin{itemize}
		\item 7.4.3
		\item 7.4.11
		\item 7.4.15
		\item 7.4.17
		\item 7.4.18
		\item 7.4.19
		\item 7.4.20
		\item 7.4.23
		\item 7.4.24
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Applications of SVD} 
\begin{exampleblock}{Eigengenes and eigenassays}
	SVD is very much used to analyze the response of different genes to different assays or conditions.
	\begin{center}
		\includegraphics[height=6cm]{figSVDGEA.jpg}
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Applications of SVD} 
\begin{exampleblock}{Eigengenes and eigenassays}
	SVD is very much used to analyze the response of different genes to different assays or conditions.
	\begin{center}
		\includegraphics[height=4cm]{figSVDGEA2.png}
	\end{center}
	\begin{tiny}
		Alter, O., Brown, P. O. and Botstein, D. (2000) Proc. Natl. Acad. Sci. USA 97, 10101 
	\end{tiny}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Applications of SVD} 
\begin{exampleblock}{Eigenfaces}
	In this example we see the effect of matrix approximation by the reduced SVD.
	\begin{center}
		\includegraphics[height=6cm]{figEigenfaces.png}
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Applications of SVD} 
\begin{exampleblock}{Document analysis}
	We can also use SVD to automatically analyze documents.
	\begin{center}
		\includegraphics[width=11cm]{figSVDDocuments.png}
	\end{center}
	\begin{tiny}
		P. Marksberry, D. Parsley. \textit{Managing the IE (Industrial Engineering) Mindset: A quantitative investigation of Toyota’s practical thinking shared among employees}.
		J. Industrial Engineering and Management, 4: 771-799 (\textbf{2011})
	\end{tiny}
\end{exampleblock}
\end{frame}

\OutlineFinal

\end{document}