\input{../slidesComun}

\title[6. Eigenvalues and eigenvectors]{Chapter 6. Eigenvalues and eigenvectors}  
\COSS

% ==============================================
\begin{frame}\frametitle{References} 

\begin{figure}
	\includegraphics[scale=0.7]{../lay_linearalgebra.jpg}
\end{figure}
D. Lay. Linear algebra and its applications (3rd ed). Pearson (2006). Chapter 5.

\end{frame}

% ==============================================
\begin{frame}\frametitle{A little bit of history} 

Eigenvalues (or ``proper values'') were first used in the study of the motion of rigid bodies through the inertia matrix by \href{http://en.wikipedia.org/wiki/Leonhard_Euler}{Leonhard Euler} and \href{http://en.wikipedia.org/wiki/Lagrange}{Joseph-Louis Lagrange} in the middle of the XVIIIth century. Then \href{http://en.wikipedia.org/wiki/Augustin_Louis_Cauchy}{Augustin-Louis Cauchy} used them to analyze quadratic surfaces and conic sections in the early XIXth century. Since then, they have found applications in most scientific problems.

\begin{figure}
	\includegraphics[height=3cm]{Euler.png}
	\includegraphics[height=3cm]{Lagrange.jpg}
	\includegraphics[height=3cm]{../Tema4/Cauchy.jpg}
\end{figure}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Applications} 
In this example eigenvalues are used to estimate the size of the carotid artery in a volumetric image.
\begin{center}
	\includegraphics[height=5cm]{figInertiaMatrix.jpg}
\end{center}
\begin{tiny}
Hameeteman, K.; Zuluaga, M. A.; et al. \textit{Evaluation framework for carotid bifurcation lumen segmentation and stenosis grading}. Med Image Anal, \textbf{2011}, 15, 477-488.

\end{tiny}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Applications} 
In this example eigenvalues were used as part of another technique (Principal Component Analysis) to automatically analyze luminescent images.
\begin{center}
	\includegraphics[height=5cm]{figPCA.png}
\end{center}
\begin{tiny}
Spinelli, A.E., Boschi, F. \textit{Unsupervised analysis of small animal dynamic Cerenkov luminescence imaging}. J Biomed Opt, \textbf{2011}, 16, 120506

\end{tiny}

\end{frame}

% ==============================================
\setnextsection{6}
\section{Eigenvalues and eigenvectors} 
\subsection{Definition (a)} 
\Outline

\begin{frame}\frametitle{Eigenvalues and eigenvectors} 
\begin{exampleblock}{Example}
	Consider the linear transformation $T(\mathbf{x})=\begin{pmatrix}3 & -2 \\ 1 & 0\end{pmatrix}\mathbf{x}$ on the vectors $\mathbf{u}=(-1,1)$ and $\mathbf{v}=(2,1)$
	\begin{columns}
		\begin{column}{5.5cm}
			$\begin{array}{rcl}
				T(\mathbf{u})&=&\begin{pmatrix}3 & -2 \\ 1 & 0\end{pmatrix}\begin{pmatrix}-1\\1\end{pmatrix}=\begin{pmatrix}-5\\-1\end{pmatrix} \\
				T(\mathbf{v})&=&\begin{pmatrix}3 & -2 \\ 1 & 0\end{pmatrix}\begin{pmatrix}2\\1\end{pmatrix}=\begin{pmatrix}4\\2\end{pmatrix} \\
			\end{array}$
		\end{column}
		\begin{column}{5.5cm}
			\includegraphics[scale=0.25]{figEigenValue.jpg}
		\end{column}
	\end{columns}
	$\mathbf{u}$ is changing its direction and module, but $\mathbf{v}$ is only changing its module.
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenvalues and eigenvectors} 
\begin{ceudef}[Eigenvalue and eigenvector]
	Given the matrix $A\in\mathcal{M}_{n\times n}$, $\lambda$ is an \textbf{eigenvalue} of $A$ if there exists a non-trivial solution $\mathbf{v}\in\mathbb{R}^n$ of the equation
	\begin{center}
		$A\mathbf{v}=\lambda \mathbf{v}$
	\end{center}
	The solution $\mathbf{v}$ is the \textbf{eigenvector} associated to the eigenvalue $\lambda$.
\end{ceudef}

\begin{exampleblock}{Example (continued)}
	In the previous example, $\mathbf{v}$ was an eigenvector with eigenvalue 2 (because $(2,1)\rightarrow (4,2)$), while $\mathbf{u}$ was not an eigenvector.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenvalues and eigenvectors} 
\begin{exampleblock}{Example}
	Show that $\lambda=7$ is an eigenvalue of $A=\begin{pmatrix}1 & 6 \\ 5 & 2\end{pmatrix}$.\\
	\underline{\textit{Solution}}\\
	We must find a solution of the equation $A\mathbf{v}=\lambda \mathbf{v}$, or what is the same
	\begin{center}
		$A\mathbf{v}-\lambda \mathbf{v}=\mathbf{0} \Rightarrow (A-\lambda I)\mathbf{v}=\mathbf{0}$\\
		$\left(\begin{pmatrix}1 & 6 \\ 5 & 2\end{pmatrix}-7\begin{pmatrix}1 & 0 \\ 0 & 1\end{pmatrix}\right)\begin{pmatrix}v_1\\v_2\end{pmatrix}=
		 \begin{pmatrix}-6 & 6 \\ 5 & -5\end{pmatrix}\begin{pmatrix}v_1\\v_2\end{pmatrix}=\begin{pmatrix}0\\0\end{pmatrix}$
	\end{center}
	Any vector of the form $\mathbf{v}=(v_1,v_1)$ satisfies the previous equation
\end{exampleblock}

\begin{ceuthm}
	In general, eigenvectors are solutions of the equation
	\begin{center}
		$(A-\lambda I)\mathbf{v}=\mathbf{0}$\\
	\end{center}
	That is, all eigenvectors belong to $\mathrm{Nul}\{A-\lambda I\}$. This is called the \textbf{eigenspace}.
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenvalues and eigenvectors} 
\begin{exampleblock}{Example (continued)}
	We see that we have a whole set of vectors associated to $\lambda=7$; this set is the eigenspace of $\lambda=7$:
	\begin{center}
		$\mathrm{Eigenspace}\{7\}=\{(v_1,v_1) \;\forall v_1\in\mathbb{R}\}$
	\end{center}
	It is a line passing through the origin with the direction $(1,1)$.
	
	The other eigenvalue of matrix $A$ is $\lambda=-4$
	\begin{columns}
		\begin{column}{6.5cm}
			$\mathrm{Eigenspace}\{-4\}=\{(v_1,-\frac{5}{6}v_1) \;\forall v_1\in\mathbb{R}\}$
		\end{column}
		\begin{column}{5cm}
			\includegraphics[scale=0.25]{figEigenvalue2.jpg}
		\end{column}
	\end{columns}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenvalues and eigenvectors} 
\begin{exampleblock}{Example}
	Knowing that $\lambda=2$ is an eigenvalue of $A=\begin{pmatrix}4 & -1 & 6\\2 & 1 & 6 \\ 2 & -1 & 8\end{pmatrix}$, find a basis
	of its eigenspace.\\
	\underline{\textit{Solution}}\\
	\begin{center}
		$A-2I=\begin{pmatrix}4 & -1 & 6\\2 & 1 & 6 \\ 2 & -1 & 8\end{pmatrix}-\begin{pmatrix}2 & 0 & 0\\0 & 2 & 0 \\ 0 & 0 & 2\end{pmatrix}=
		      \begin{pmatrix}2 & -1 & 6\\2 & -1 & 6 \\ 2 & -1 & 6\end{pmatrix}\sim \begin{pmatrix}2 & -1 & 6\\0 & 0 & 0 \\ 0 & 0 & 0\end{pmatrix}$
	\end{center}
	So any vector fulfilling this equation must satisfy
	\begin{center}
		$x_1=\frac{1}{2}x_2-3x_3\Rightarrow \mathrm{Eigenspace}\{2\}\ni\mathbf{x}=x_2\begin{pmatrix}\frac{1}{2}\\1\\0\end{pmatrix}+x_3\begin{pmatrix}-3\\0\\1\end{pmatrix}$
	\end{center}
	Finally the basis is formed by the vectors $(\frac{1}{2},1,0)$ and $(-3,0,1)$.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenvalues and eigenvectors} 
\begin{exampleblock}{Example (continued)}
	Within the eigenspace, $A$ acts as a dilation.
	\begin{center}
		\includegraphics[scale=0.5]{figEigenvalue3.jpg}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenvalues and eigenvectors} 
\begin{ceuthm}
	The eigenvalues of a triangular matrix $A$ are the elements of the main diagonal ($a_{ii}$, $i=1,2,...,n$).\\
  \underline{\textit{Proof}}\\
	Consider the matrix $A-\lambda I$
	\begin{center}
		$\begin{pmatrix}a_{11}-\lambda & a_{12} & a_{13} & ... & a_{1n}\\
		                0 & a_{22}-\lambda & a_{23} & ... & a_{2n}\\
										0 & 0 & a_{33}-\lambda & ... & a_{3n} \\
										... & ... &... &... &...\\
										0 & 0 & 0 & ... & a_{nn}-\lambda\end{pmatrix}$
	\end{center}
	The equation system $(A-\lambda I)\mathbf{v}=\mathbf{0}$ has a non-trivial solution if and only if at least one of the entries in the diagonal is 0. Therefore,
	it must be $\lambda=a_{ii}$ for some $i$. Varying $i$ from 1 to $n$ we obtain that all the elements in the main diagonal are the $n$ eigenvalues of the matrix $A$.
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenvalues and eigenvectors} 
\begin{exampleblock}{Example}
	The eigenvalues of $A=\begin{pmatrix}3 & 6 & -8\\ 0 & 0 & 6 \\ 0 & 0 & 2\end{pmatrix}$ are $\lambda=3,0,2$.
\end{exampleblock}

\begin{ceuthm}
	Let $\mathbf{v}_1$, $\mathbf{v}_2$, ..., $\mathbf{v}_r$ be $r$ eigenvectors associated to $r$ different eigenvalues. Then, the set
	$S=\{\mathbf{v}_1, \mathbf{v}_2, ..., \mathbf{v}_r\}$ is linearly independent.\\
	\underline{\textit{Proof}}\\
	Let us assume that $S$ is linearly dependent. Without loss of generality, we may assume that the first $p$ ($p<r$) vectors are linearly independent, and that the $(p+1)$-th vector
	is dependent on the preceding vectors. Then, there must exist $c_1, c_2, ..., c_p$, not all of them zero, such that
		\begin{equation}\mathbf{v}_{p+1}=c_1\mathbf{v}_1+c_2\mathbf{v}_2+...+c_p\mathbf{v}_p\label{eq:thm1}\end{equation}
	\label{thm:linearIndependenceEigenvectors}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenvalues and eigenvectors} 
\begin{block}{}
	If we multiply both sides of the equation by $A$, then we have
		\begin{equation}\begin{array}{rcl}A\mathbf{v}_{p+1}&=&c_1A\mathbf{v}_1+c_2A\mathbf{v}_2+...+c_pA\mathbf{v}_p\\
		\lambda_{p+1}\mathbf{v}_{p+1}&=&c_1\lambda_1\mathbf{v}_1+c_2\lambda_2\mathbf{v}_2+...+c_p\lambda_p\mathbf{v}_p\end{array}
		\label{eq:thm2}
		\end{equation}
	If we multiply Eq. (\ref{eq:thm1}) by $\lambda_{p+1}$ and subtract from Eq. (\ref{eq:thm2}), we have
	\begin{center}
		$\mathbf{0}=c_1(\lambda_1-\lambda_{p+1})\mathbf{v}_1+c_2(\lambda_2-\lambda_{p+1})\mathbf{v}_2+...+c_p(\lambda_p-\lambda_{p+1})\mathbf{v}_p$
	\end{center}
	Since the first $p$ vectors are linearly independent it must be for $i=1,2,...,p$
	\begin{center}
		$c_i(\lambda_i-\lambda_{p+1})=0$
	\end{center}
	Because all eigenvalues are different, then it must be $c_i=0$ ($i=1,2,...,p$). But this is a contradiction with the
	initial hypothesis that not all of them were 0. Consequently, the set $S$ must be linearly independent. (q.e.d.)
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenvalues and eigenvectors} 
\begin{exampleblock}{Difference equations}
	Let us assume we have two populations of cells: stem cells and somatic (mature) cells. Every day we measure the number of them and we observe that:
	\begin{columns}[t]
		\begin{column}{6.5cm}
			\textbf{Stem cells:}
			\begin{small}
			\begin{itemize}
				\item 80\% of them have remained as stem cells
				\item 15\% of them have differentiated into somatic cells
				\item 5\% of them have died
				\item There are 20\% new stem cells.
			\end{itemize}
			\end{small}
		\end{column}
		\begin{column}{5.5cm}
			\textbf{Somatic cells:}
			\begin{small}
			\begin{itemize}
				\item 95\% of them have remained as somatic cells
				\item 5\% of them have died
			\end{itemize}
			\end{small}
		\end{column}
	\end{columns}
	\begin{center}
		\includegraphics[scale=0.25]{figDiffEq.jpg}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenvalues and eigenvectors} 
\begin{exampleblock}{Difference equations (continued)}
	If we call $x_{stem}^{(k)}$ the number of stem cells on the day $k$, and $x_{somatic}^{(k)}$ the number of somatic cells the same day, then the following equation
	reflects the dynamics of the system:
	\begin{center}
		$\begin{pmatrix}x_{stem}^{(k+1)}\\x_{somatic}^{(k+1)}\end{pmatrix}=
		 \begin{pmatrix}1 & 0 \\ 0.15 & 0.95 \end{pmatrix}\begin{pmatrix}x_{stem}^{(k)}\\x_{somatic}^{(k)}\end{pmatrix}$
	\end{center}
	Let us assume that the day $0$, there are $10,000$ stem cells, and $0$ somatic cells. Then, the evolution over time is
	\begin{center}
		$\begin{pmatrix}x_{stem}^{(1)}\\x_{somatic}^{(1)}\end{pmatrix}=
		 \begin{pmatrix}1 & 0 \\ 0.15 & 0.95 \end{pmatrix}\begin{pmatrix}x_{stem}^{(0)}\\x_{somatic}^{(0)}\end{pmatrix}=
		 \begin{pmatrix}1 & 0 \\ 0.15 & 0.95 \end{pmatrix}\begin{pmatrix}10,000\\0\end{pmatrix}=\begin{pmatrix}10,000\\1,500\end{pmatrix}$\\
		$\begin{pmatrix}x_{stem}^{(2)}\\x_{somatic}^{(2)}\end{pmatrix}=
		 \begin{pmatrix}1 & 0 \\ 0.15 & 0.95 \end{pmatrix}\begin{pmatrix}x_{stem}^{(1)}\\x_{somatic}^{(1)}\end{pmatrix}=
		 \begin{pmatrix}1 & 0 \\ 0.15 & 0.95 \end{pmatrix}\begin{pmatrix}10,000\\1,500\end{pmatrix}=\begin{pmatrix}10,000\\2,925\end{pmatrix}$\\
	\end{center}	
		...
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenvalues and eigenvectors} 
\begin{block}{Difference equations}
	The previous model is of the form
	\begin{center}
		$\mathbf{x}^{(k+1)}=A\mathbf{x}^{(k)}$
	\end{center}
	The simplest way of constructing a solution of the previous equation is by taking an eigenvector $\mathbf{x}_1$ and its corresponding eigenvalue, $\lambda_1$:
	\begin{center}
		$\mathbf{x}^{(k)}=\lambda^k_1\mathbf{x}_1$
	\end{center}
	This is actually a solution because:
	\begin{center}
		$\mathbf{x}^{(k+1)}=A\mathbf{x}^{(k)}=A(\lambda^k_1\mathbf{x}_1)=\lambda^k_1(A\mathbf{x}_1)=\lambda^k_1(\lambda_1\mathbf{x}_1)=\lambda^{k+1}_1\mathbf{x}_1$
	\end{center}
	It turns out that any linear combination of eigenvectors is also a solution
	\begin{center}
		$\mathbf{x}^{(k)}=c_1\lambda^k_1\mathbf{x}_1+c_2\lambda^k_2\mathbf{x}_2+...+c_n\lambda^k_n\mathbf{x}_n$
	\end{center}
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 5, Section 1:
	\begin{itemize}
		\item 5.1.1
		\item 5.1.3
		\item 5.1.9
		\item 5.1.17
		\item 5.1.19
		\item 5.1.23
		\item 5.1.25
		\item 5.1.26
		\item 5.1.27
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Characteristic equation (a)} 
\Outline

% ==============================================
\begin{frame}\frametitle{Characteristic equation} 
\begin{exampleblock}{Example}
	Find the eigenvalues of $A=\begin{pmatrix}2 & 3 \\ 3 & -6\end{pmatrix}$\\
	\underline{\textit{Solution}}\\
	We need to find scalar values $\lambda$ such that the equation
	\begin{center}
		$(A-\lambda I)\mathbf{x}=\mathbf{0}$
	\end{center}
	has non-trivial solutions. By the Invertible Matrix theorem we know that this problem is equivalent to that of finding $\lambda$ values such that
	\begin{center}
		$|A-\lambda I|=0$
	\end{center}
	In this case
	\begin{center}
		$\left|\begin{pmatrix}2 & 3 \\ 3 & -6\end{pmatrix}-\begin{pmatrix}\lambda & 0 \\ 0 & \lambda\end{pmatrix}\right|=0$\\
		$\left|\begin{array}{cc}2-\lambda & 3 \\ 3 & -6-\lambda\end{array}\right|=(2-\lambda)(-6-\lambda)-9=\lambda^2+4\lambda-21=0$\\
		
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Characteristic equation} 
\begin{exampleblock}{Example (continued)}
	\begin{center}
		$\lambda^2+4\lambda-21=0 \Rightarrow \lambda=\frac{-4\pm\sqrt{4^2-4\cdot 1 \cdot (-21)}}{2\cdot 1}=\left\{\begin{array}{c}-7\\3\end{array}\right.$
	\end{center}
\end{exampleblock}

\begin{ceuthm}[The invertible matrix theorem (continued)]
	This theorem adds to the Theorems 5.1, 11.5 of Chapter 3 and 10.4 of Chapter 5.
	\begin{enumerate}[i.]
		\setcounter{enumi}{24}
		\item $|A|\neq 0$.
		\item 0 is not an eigenvalue of $A$.
	\end{enumerate}
\end{ceuthm}

\begin{ceudef}[Characteristic equation]
	A scalar $\lambda$ is an eigenvalue of a matrix $A\in\mathcal{M}_{n\times n}$ iff it is solution of the \textbf{characteristic equation}
	\begin{center}
		$|A-\lambda I|=0$
	\end{center}
	The determinant of $A-\lambda I$ is called the \textbf{characteristic polynomial}.
\end{ceudef}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Characteristic equation} 
\begin{exampleblock}{Example}
	Let us calculate the eigenvalues of $A=\begin{pmatrix}5&-2&6&-1\\0&3&-8&0\\0&0&5&4\\0&0&0&1\end{pmatrix}$.
	\begin{center}
		$|A-\lambda I|=\left|\begin{array}{cccc}5-\lambda & -2 &6 &-1\\0 & 3-\lambda & -8 & 0\\0&0&5-\lambda &4\\0 &0 & 0& 1-\lambda\end{array}\right|=(5-\lambda)^2(3-\lambda)(1-\lambda)=0$
	\end{center}
	whose solutions are $\lambda=5$ (with multiplicity 2), $\lambda=3$, and $\lambda=1$.
\end{exampleblock}

\begin{exampleblock}{Example}
	Let us find the eigenvalues of a matrix whose characteristic polynomial is
	\begin{center}
		$|A-\lambda I|=\lambda^6-4\lambda^5-12\lambda^4=\lambda^4(\lambda^2-4\lambda-12)=\lambda^4(\lambda-6)(\lambda+2)=0$
	\end{center}
	whose solutions are $\lambda=0$ (with multiplicity 4), $\lambda=6$, and $\lambda=-2$.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Characteristic equation} 
\begin{ceudef}[Similarity between matrices]
	Given two matrices $A,B\in\mathcal{M}_{n\times n}$, $A$ is \textbf{similar} to $B$ iff there exists an invertible matrix $P\in\mathcal{M}_{n\times n}$ such that
	\begin{center}
		$B=P^{-1}AP$
	\end{center}
\end{ceudef}
Watch out that \textit{similarity} is not the same as \textit{row equivalence} ($A$ and $B$ are row equivalent if there exists an invertible matrix $E$, a product of row operation matrices, such that $B=EA$).
\end{frame}

% ==============================================
\begin{frame}\frametitle{Characteristic equation} 
\begin{ceuthm}
	If $A$ is similar to $B$, then $B$ is similar to $A$.\\
	\underline{\textit{Proof}}\\
	It suffices to take the definition of $A$ similar to $B$ and solve for $A$. If we multiply by $P$ on the left
	\begin{center}
		$B=P^{-1}AP \Rightarrow PB=AP$
	\end{center}
	Now, we multiply by $P^{-1}$ on the right ($P^{-1}$ exists because $P$ is invertible)
	\begin{center}
		$PB=AP \Rightarrow PBP^{-1}=A$
	\end{center}
	\end{center}
	and this is the definition of $B$ being similar to $A$.
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Characteristic equation} 
\begin{ceuthm}
	If $A$ and $B$ are similar matrices, then they have the same characteristic polynomial.\\
	\underline{\textit{Proof}}\\
	If $A$ is similar to $B$, then there exists an invertible matrix $P$ such that
	\begin{center}
		$B=P^{-1}AP$
	\end{center}
	If we subtract on both sides $\lambda I$ we have
	\begin{center}
		$B-\lambda I=P^{-1}AP-\lambda I=P^{-1}AP-\lambda P^{-1}P=P^{-1}(A-\lambda I)P$
	\end{center}
	Now taking the determinant of both sides
	\begin{center}
		$|B-\lambda I|=|P^{-1}(A-\lambda I)P|=|P^{-1}||A-\lambda I||P|=|P|^{-1}|A-\lambda I||P|=|A-\lambda I|$
	\end{center}
	
\end{ceuthm}

\end{frame}

% ==============================================
% NOTE(review): the following frame is an exact duplicate of the previous one — consider removing it.
\begin{frame}\frametitle{Characteristic equation} 
\begin{ceuthm}
	If $A$ and $B$ are similar matrices, then they have the same characteristic polynomial.\\
	\underline{\textit{Proof}}\\
	If $A$ is similar to $B$, then there exists an invertible matrix $P$ such that
	\begin{center}
		$B=P^{-1}AP$
	\end{center}
	If we subtract on both sides $\lambda I$ we have
	\begin{center}
		$B-\lambda I=P^{-1}AP-\lambda I=P^{-1}AP-\lambda P^{-1}P=P^{-1}(A-\lambda I)P$
	\end{center}
	Now taking the determinant of both sides
	\begin{center}
		$|B-\lambda I|=|P^{-1}(A-\lambda I)P|=|P^{-1}||A-\lambda I||P|=|P|^{-1}|A-\lambda I||P|=|A-\lambda I|$
	\end{center}
	
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 5, Section 2:
	\begin{itemize}
		\item 5.2.1
		\item 5.2.9
		\item 5.2.18
		\item 5.2.19
		\item 5.2.20
		\item 5.2.23
		\item 5.2.24
		\item 5.2.28 (computer)
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Diagonalization (b)} 
\Outline

\begin{frame}\frametitle{Diagonalization}
\begin{ceudef}[Diagonalization]
	$A\in\mathcal{M}_{n\times n}$ is \textbf{diagonalizable} if there exist $P,D\in\mathcal{M}_{n\times n}$ (with $P$ invertible and $D$ diagonal) such that
	\begin{center}
		$A=PDP^{-1}$
	\end{center}
\end{ceudef}
Diagonalization simplifies the calculation of powers of $A$ ($A^k$), is used
to decouple dynamic systems, and in multivariate statistics to produce uncorrelated random variables.

\begin{exampleblock}{Example}
	\begin{center}
		\begin{tabular}{ccc}
			$D=\begin{pmatrix}5 & 0 \\ 0 & 3\end{pmatrix}$ & $D^2=\begin{pmatrix}5^2 & 0 \\ 0 & 3^2\end{pmatrix}$ & $D^3=\begin{pmatrix}5^3 & 0 \\ 0 & 3^3\end{pmatrix}$
		\end{tabular}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization} 
\begin{exampleblock}{Example}
	Let us assume that $A=PDP^{-1}$. Let us now calculate the different powers of $A$
	\begin{center}
		\begin{tabular}{l}
			$A^2=A\cdot A=(PDP^{-1})(PDP^{-1})=(PD)(P^{-1}P)(DP^{-1})=PDDP^{-1}=PD^2P^{-1}$\\
			$A^3=A^2\cdot A=(PD^2P^{-1})(PDP^{-1})=PD^3P^{-1}$\\
			...\\
			$A^k=PD^kP^{-1}$\\		
		\end{tabular}
	\end{center}
	Let us particularize this result for $A=\begin{pmatrix}7 & 2 \\ -4 & 1\end{pmatrix}$ that can be factorized with
	$P=\begin{pmatrix}1 & 1 \\ -1 & -2\end{pmatrix}$ and $D=\begin{pmatrix}5 & 0 \\ 0 & 3\end{pmatrix}$ as $A=PDP^{-1}$.
	\begin{center}
			$A^k=PD^kP^{-1}=\begin{pmatrix}1 & 1 \\ -1 & -2\end{pmatrix} \begin{pmatrix}5^k & 0 \\ 0 & 3^k\end{pmatrix} \begin{pmatrix}2 & 1 \\ -1 & -1\end{pmatrix}=
			  \begin{pmatrix}2\cdot 5^k-3^k & 5^k-3^k \\ 2\cdot 3^k-2\cdot 5^k & 2\cdot 3^k-5^k\end{pmatrix} $
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization}
\begin{ceuthm}[Diagonalization theorem]
	$A\in\mathcal{M}_{n\times n}$ is \textbf{diagonalizable} iff $A$ has $n$ linearly independent eigenvectors.\\
	In this case, we may construct $P$ by stacking the $n$ eigenvectors, and $D$ as a diagonal matrix with the corresponding eigenvalues.\\
	\underline{\textit{Proof}}\\
	Consider the columns of $P=\begin{pmatrix}\mathbf{p}_1 & \mathbf{p}_2 & ... & \mathbf{p}_n\end{pmatrix}$ and
	$D=\begin{pmatrix}d_1&0&...&0\\0&d_2&...&0\\...&...&...&...\\0 & 0& ...&d_n\end{pmatrix}$\\
	Let us assume that $A=PDP^{-1}$ and we multiply by $P$ on the right
	\begin{center}
		$\begin{array}{rcl}
		 AP&=&PD\\
		 A\begin{pmatrix}\mathbf{p}_1 & \mathbf{p}_2 & ... & \mathbf{p}_n\end{pmatrix}&=&\begin{pmatrix}\mathbf{p}_1 & \mathbf{p}_2 & ... & \mathbf{p}_n\end{pmatrix}
			\begin{pmatrix}d_1&0&...&0\\0&d_2&...&0\\...&...&...&...\\0 & 0& ...&d_n\end{pmatrix}\\
		 \begin{pmatrix}A\mathbf{p}_1 & A\mathbf{p}_2 & ... & A\mathbf{p}_n\end{pmatrix}&=&\begin{pmatrix}d_1\mathbf{p}_1 & d_2\mathbf{p}_2 & ... & d_n\mathbf{p}_n\end{pmatrix}
		\end{array}$
	\end{center}
	\label{thm:diagonalization}
\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization}
\begin{block}{}
	This implies that 
	\begin{center}
		$A\mathbf{p}_1=d_1\mathbf{p}_1$\\
		$A\mathbf{p}_2=d_2\mathbf{p}_2$\\
		...\\
		$A\mathbf{p}_n=d_n\mathbf{p}_n$\\
	\end{center}
	But this is the definition of eigenvector, so the columns of $P$ ($\mathbf{p}_i$) must be eigenvectors of $A$ and $d_i$ its corresponding eigenvalue. Since
	$P$ is invertible, its columns must be linearly independent.
\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization}
\begin{exampleblock}{Example}
	Diagonalize $A=\begin{pmatrix}1&3&3\\-3&-5&-3\\3&3&1\end{pmatrix}$.\\
	\underline{\textit{Step 1:}} Find the eigenvalues of $A$\\
	\begin{center}
		$|A-\lambda I|=0 \Rightarrow -\lambda^3-3\lambda^2+4=-(\lambda-1)(\lambda+2)^2=0$
	\end{center}
	whose solutions are $\lambda=1$ and $\lambda=-2$ (double).\\
	\underline{\textit{Step 2:}} Find a linearly independent set of eigenvectors \\
	\hspace{0.5cm}\underline{$\lambda=1$}\\
	\begin{center}
		$A-\lambda I=\begin{pmatrix}1&3&3\\-3&-5&-3\\3&3&1\end{pmatrix}-\begin{pmatrix}1&0&0\\0&1&0\\0&0&1\end{pmatrix}=
		   \begin{pmatrix}0&3&3\\-3&-6&-3\\3&3&0\end{pmatrix} \sim
			  \begin{pmatrix}{\color{blue}0}&1&{\color{blue}1}\\{\color{blue}0}&0&{\color{blue}0}\\{\color{blue}1}&1&{\color{blue}0}\end{pmatrix}$
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization}
\begin{exampleblock}{Example (continued)}
	\underline{\textit{Step 2:}} Find a linearly independent set of eigenvectors \\
	\hspace{0.5cm}\underline{$\lambda=1$}\\
	\begin{center}
		$A-\lambda I \sim
			  \begin{pmatrix}{\color{blue}0}&1&{\color{blue}1}\\{\color{blue}0}&0&{\color{blue}0}\\{\color{blue}1}&1&{\color{blue}0}\end{pmatrix}
				\Rightarrow
				\begin{array}{c}
					x_1=-x_2\\
					x_3=-x_2
				\end{array} \Rightarrow \mathbf{v}_1=\begin{pmatrix} 1 \\ -1 \\ 1\end{pmatrix}$
	\end{center}
	\hspace{0.5cm}\underline{$\lambda=-2$}\\
	\begin{center}
		$A-\lambda I=\begin{pmatrix}1&3&3\\-3&-5&-3\\3&3&1\end{pmatrix}-\begin{pmatrix}-2&0&0\\0&-2&0\\0&0&-2\end{pmatrix}=
		   \begin{pmatrix}3&3&3\\-3&-3&-3\\3&3&3\end{pmatrix} \sim
			  \begin{pmatrix}{\color{blue}1}&1&1\\{\color{blue}0}&0&0\\{\color{blue}0}&0&0\end{pmatrix} \Rightarrow
				x_1=-x_2-x_3\Rightarrow \mathbf{v}_2=\begin{pmatrix}-1\\1\\0\end{pmatrix},\mathbf{v}_3=\begin{pmatrix}-1\\0\\1\end{pmatrix}$
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization}
\begin{exampleblock}{Example (continued)}
	\underline{\textit{Step 3:}} Construct $P$ and $D$ \\
	\begin{center}
		\begin{tabular}{cc}
			$P=\begin{pmatrix}1 & -1 & -1 \\ -1 & 1 & 0 \\ 1 & 0 & 1\end{pmatrix}$ & $D=\begin{pmatrix}1 & 0 & 0 \\ 0 & -2 & 0 \\ 0 & 0 & -2\end{pmatrix}$
		\end{tabular}
	\end{center}
	\underline{\textit{Step 4:}} Check everything is correct \\
	\hspace{0.5cm}\underline{$P$ is invertible $|P|\neq 0$}\\
	\begin{center}
			$|P|=1$
	\end{center}
	\hspace{0.5cm}\underline{$A=PDP^{-1} \Rightarrow AP=PD$}\\
	\begin{center}
			$\begin{array}{cc}AP=\begin{pmatrix}1 & 2 & 2\\-1 & -2 & 0\\1 & 0 & -2\end{pmatrix} & 
			                  PD=\begin{pmatrix}1 & 2 & 2\\-1 & -2 & 0\\1 & 0 & -2\end{pmatrix}\end{array}$\\
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization}
\begin{exampleblock}{Example (continued)}
	\underline{\textit{Step 4:}} Check everything is correct \\
	\hspace{0.5cm}\underline{$P$ is invertible $|P|\neq 0$}\\
	MATLAB:\\
	{\color{blue}\texttt{
	P=[1 -1 -1; -1 1 0; 1 0 1];\\
	det(P)
	}}\\
	\hspace{0.5cm}\underline{$A=PDP^{-1} \Rightarrow AP=PD$}\\
	MATLAB:\\
	{\color{blue}\texttt{
	A=[1 3 3; -3 -5 -3; 3 3 1];\\
	P=[1 -1 -1; -1 1 0; 1 0 1];\\
	D=[1 0 0; 0 -2 0; 0 0 -2]; \\
	A*P\\
	P*D
	}}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization}
\begin{exampleblock}{Example}
	Diagonalize $A=\begin{pmatrix}2&4&3\\-4&-6&-3\\3&3&1\end{pmatrix}$.\\
	\underline{\textit{Step 1:}} Find the eigenvalues of $A$\\
	\begin{center}
		$|A-\lambda I|=0 \Rightarrow -\lambda^3-3\lambda^2+4=-(\lambda-1)(\lambda+2)^2=0$
	\end{center}
	whose solutions are $\lambda=1$ and $\lambda=-2$ (double). (Same eigenvalues as in the previous example)\\
	\underline{\textit{Step 2:}} Find a linearly independent set of eigenvectors \\
	\hspace{0.5cm}\underline{$\lambda=1$}\\
	\begin{center}
		$A-\lambda I=\begin{pmatrix}2&4&3\\-4&-6&-3\\3&3&1\end{pmatrix}-\begin{pmatrix}1&0&0\\0&1&0\\0&0&1\end{pmatrix}=
		   \begin{pmatrix}1&4&3\\-4&-7&-3\\3&3&0\end{pmatrix} \sim
			  \begin{pmatrix}{\color{blue}1}&{\color{blue}0}&-1\\{\color{blue}0}&{\color{blue}1}&1\\{\color{blue}0}&{\color{blue}0}&0\end{pmatrix}$
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization}
\begin{exampleblock}{Example (continued)}
	\underline{\textit{Step 2:}} Find a linearly independent set of eigenvectors \\
	\hspace{0.5cm}\underline{$\lambda=1$}\\
	\begin{center}
		$A-\lambda I \sim
			  \begin{pmatrix}{\color{blue}1}&{\color{blue}0}&-1\\{\color{blue}0}&{\color{blue}1}&1\\{\color{blue}0}&{\color{blue}0}&0\end{pmatrix}
				\Rightarrow
				\begin{array}{c}
					x_1=x_3\\
					x_2=-x_3
				\end{array} \Rightarrow \mathbf{v}_1=\begin{pmatrix} 1 \\ -1 \\ 1\end{pmatrix}$
	\end{center}
	(The same eigenspace as in the previous example).\\
	\hspace{0.5cm}\underline{$\lambda=-2$}\\
	\begin{center}
		$A-\lambda I=\begin{pmatrix}2&4&3\\-4&-6&-3\\3&3&1\end{pmatrix}-\begin{pmatrix}-2&0&0\\0&-2&0\\0&0&-2\end{pmatrix}=
		   \begin{pmatrix}4&4&3\\-4&-4&-3\\3&3&3\end{pmatrix} \sim
			  \begin{pmatrix}{\color{blue}1}&1&{\color{blue}\frac{3}{4}}\\{\color{blue}0}&0&{\color{blue}0}\\{\color{blue}0}&0&{\color{blue}\frac{1}{4}}\end{pmatrix} \Rightarrow
				\begin{array}{c}x_1=-x_2-\frac{3}{4}x_3\\ \frac{1}{4}x_3=0\end{array} \Rightarrow \mathbf{v}_2=\begin{pmatrix}-1\\1\\0\end{pmatrix}$
	\end{center}
	($A$ cannot be diagonalized because there are not 3 linearly independent eigenvectors)
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization}
\begin{ceuthm}
	If a $n\times n$ matrix has $n$ different eigenvalues, then it is diagonalizable.\\
	\underline{\textit{Proof}}\\
	Let $\mathbf{v}_1$, $\mathbf{v}_2$, ..., $\mathbf{v}_n$ be the $n$ eigenvectors corresponding to the $n$ different eigenvalues. The set
	\begin{center}
		$\{\mathbf{v}_1,\mathbf{v}_2, ..., \mathbf{v}_n\}$
	\end{center}
	is linearly independent by Theorem \ref{thm:linearIndependenceEigenvectors} and $A$ is diagonalizable by Theorem \ref{thm:diagonalization}.
\end{ceuthm}

\begin{exampleblock}{Example}
	Is $A=\begin{pmatrix}5 & -8 & 1 \\ 0 & 0 & 7 \\ 0 & 0 &-2\end{pmatrix}$ diagonalizable?\\
	\underline{\textit{Solution}}\\
	$A$ is a triangular matrix and its eigenvalues are 5, 0 and -2, all of them distinct, and by the previous theorem $A$ is diagonalizable.
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization}
\begin{ceuthm}
	Let $A\in\mathcal{M}_{n\times n}$ with $p\leq n$ different eigenvalues. Let $d_k$ be the dimension of the eigenspace associated to the eigenvalue $\lambda_k$. Then,
	\begin{enumerate}
		\item $d_k$ is smaller or equal the multiplicity of $\lambda_k$.
		\item $A$ is diagonalizable iff $d_k$ is equal to the multiplicity of $\lambda_k$. In this case,
					\begin{center}
						$\sum\limits_{k=1}^p{d_k}=n$
					\end{center}
		\item If $A$ is diagonalizable and $B_k$ are the bases of each one of the eigenspaces, then $\{B_1, B_2, ..., B_p\}$ is a basis of $\mathbb{R}^n$.
	\end{enumerate}
\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Diagonalization}
\begin{exampleblock}{Example}
	Let $A=\begin{pmatrix}5 & 0 & 0 & 0 \\ 0 & 5 & 0 & 0 \\ 1 & 4 & -3 & 0 \\ -1 & -2 & 0 & -3 \end{pmatrix}$. Let's factorize it as $A=PDP^{-1}$. The eigenvalues and associated eigenvectors are
	\begin{center}
		$\begin{array}{rcll}
			 \lambda_1=5 & \leftrightarrow & \mathbf{v}_1=\begin{pmatrix}-8 \\ 4 \\ 1 \\ 0\end{pmatrix} & \mathbf{v}_2=\begin{pmatrix}-16 \\ 4 \\ 0 \\ 1\end{pmatrix} \\
			 \lambda_2=-3 & \leftrightarrow & \mathbf{v}_3=\begin{pmatrix}0 \\ 0 \\ 1 \\ 0\end{pmatrix} & \mathbf{v}_4=\begin{pmatrix} 0 \\ 0 \\ 0 \\ 1\end{pmatrix}
		\end{array}\Rightarrow
		\begin{array}{c}
			 P=\begin{pmatrix} -8 & -16 & 0 & 0 \\ 4 & 4 & 0 & 0 \\ 1 & 0 & 1 & 0 \\ 0 & 1 & 0 & 1 \end{pmatrix} \\
			 D=\begin{pmatrix} 5 & 0 & 0 & 0 \\ 0 & 5 & 0 & 0 \\ 0 & 0 & -3 & 0 \\ 0 & 0 & 0 & -3 \end{pmatrix}
		\end{array}$\\
	\end{center}
	
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 5, Section 3:
	\begin{itemize}
		\item 5.3.1
		\item 5.3.23
		\item 5.3.27
		\item 5.3.28
		\item 5.3.29
		\item 5.3.31
		\item 5.3.32
		\item 5.3.33 (computer)
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Eigenvectors and linear transformations (b)} 
\Outline

\begin{frame}\frametitle{The matrix of a linear transformation}
The objective of this section is to show that if $A$ is diagonalizable ($A=PDP^{-1}$), then the transformation $T_A(\mathbf{x})=A\mathbf{x}$ is essentially the same as
$T_D(\mathbf{u})=D\mathbf{u}$.

\begin{ceudef}[The matrix of a linear transformation]
	Consider a linear transformation between two vector spaces $T: V \rightarrow W$. Let $B$ be a basis of $V$, and $C$ be a basis of $W$. Let $\mathbf{x}\in V$ and consider
	its coordinates $[\mathbf{x}]_B=(r_1,r_2,...,r_n)$.
	\begin{center}
		\includegraphics[scale=0.32]{figLinearTransformation.jpg}
	\end{center}
\end{ceudef}
\end{frame}

% ==============================================
\begin{frame}\frametitle{The matrix of a linear transformation}
\begin{block}{}
	Let's analyze $\mathbf{x}$ and $T(\mathbf{x})$
	\begin{center}
		$\begin{array}{rcl}
			   \mathbf{x}&=&r_1\mathbf{b}_1+r_2\mathbf{b}_2+...+r_n\mathbf{b}_n \Rightarrow \\
			   T(\mathbf{x})&=&T(r_1\mathbf{b}_1+r_2\mathbf{b}_2+...+r_n\mathbf{b}_n) \quad \text{[T is linear]}\\
			   &=&r_1T(\mathbf{b}_1)+r_2T(\mathbf{b}_2)+...+r_nT(\mathbf{b}_n)
			\end{array}$
	\end{center}
	Now, let us consider the coordinates in $C$ of the transformed vector
	\begin{center}
		$[T(\mathbf{x})]_C=r_1[T(\mathbf{b}_1)]_C+r_2[T(\mathbf{b}_2)]_C+...+r_n[T(\mathbf{b}_n)]_C$
	\end{center}
	We can write this equation in matrix form as
	\begin{center}
		$[T(\mathbf{x})]_C=M[\mathbf{x}]_B$
	\end{center}
	where $M\in \mathcal{M}_{m\times n}$ is a matrix formed by the transformations of each one of the basis vectors in $B$
	\begin{center}
		$M=\begin{pmatrix}[T(\mathbf{b}_1)]_C & [T(\mathbf{b}_2)]_C & ... & [T(\mathbf{b}_n)]_C \end{pmatrix}$
	\end{center}
	Matrix $M$ is called the \textbf{matrix of} $T$ \textbf{relative to the bases} $B$ \textbf{and} $C$.
\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{The matrix of a linear transformation}
\begin{block}{}
	\begin{center}
		\includegraphics[scale=0.35]{figLinearTransformation2.jpg}
	\end{center}
\end{block}

\begin{exampleblock}{Example}
  Let $B=\{\mathbf{b}_1,\mathbf{b}_2\}$ and $C=\{\mathbf{c}_1,\mathbf{c}_2,\mathbf{c}_3\}$ and
	\begin{center}
		$\begin{array}{rcl}
			T(\mathbf{b}_1)&=&3\mathbf{c}_1-2\mathbf{c}_2+5\mathbf{c}_3\\
			T(\mathbf{b}_2)&=&4\mathbf{c}_1+7\mathbf{c}_2-\mathbf{c}_3\\
		\end{array} \Rightarrow M=\begin{pmatrix}3 & 4 \\ -2 & 7 \\ 5 & -1 \end{pmatrix}$
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Transformations from $V$ into $V$}
\begin{ceudef}[$B$-matrix for $T$]
	If $T$ is a transformation from $V$ into $V$ and $B$ is a basis of $V$, then the matrix $M$ is called the $B$\textbf{-matrix of} $T$.
\end{ceudef}

\begin{exampleblock}{Example}
	Consider in the vector space of polynomials of degree 2 ($\mathbb{P}_2$), the derivative transformation
	\begin{center}
		$\begin{array}{rl}
		   T: & \mathbb{P}_2 \rightarrow \mathbb{P}_2 \\
			    & T(a_0+a_1t+a_2t^2)=a_1+2a_2t\end{array}$
	\end{center}
	Consider the standard basis of $\mathbb{P}_2$, $B=\{1,t,t^2\}$.
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Transformations from $V$ into $V$}
\begin{exampleblock}{Example (continued)}
	What is the $B$-matrix of $T$? \\
	\underline{\textit{Solution}}\\
	\begin{center}
		$\begin{array}{rcl}
			T(1)=0 &\rightarrow& [T(1)]_B=\begin{pmatrix}0 \\ 0 \\ 0\end{pmatrix} \\
			T(t)=1 &\rightarrow& [T(t)]_B=\begin{pmatrix}1 \\ 0 \\ 0\end{pmatrix} \\
			T(t^2)=2t &\rightarrow& [T(t^2)]_B=\begin{pmatrix}0 \\ 2 \\ 0\end{pmatrix} \\
		\end{array}
		\Rightarrow M=\begin{pmatrix} 0 & 1 & 0 \\ 0 & 0 & 2 \\ 0 & 0 & 0\end{pmatrix}$
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Transformations from $V$ into $V$}
\begin{exampleblock}{Example (continued)}
	Verify that $[T(\mathbf{x})]_B=M[\mathbf{x}]_B$\\
	\underline{\textit{Solution}}\\
	Given any polynomial $p(t)=a_0+a_1t+a_2t^2$ its coordinates are $[p(t)]_B=(a_0,a_1,a_2)$. The derivative of $p(t)$ is 
	$T(p(t))=a_1+2a_2t$, then
	\begin{center}
		$[T(p(t))]_B=\begin{pmatrix}a_1 \\ 2a_2 \\ 0\end{pmatrix}=\begin{pmatrix}0 & 1 & 0 \\ 0 & 0 & 2 \\ 0 & 0 & 0\end{pmatrix}\begin{pmatrix}a_0\\a_1\\a_2\end{pmatrix}$\\
		\includegraphics[scale=0.27]{figLinearTransformation3.jpg}
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Transformations from $\mathbb{R}^n$ into $\mathbb{R}^n$}
\begin{ceuthm}[Diagonal matrix representation]
	Suppose matrix $A$ is diagonalizable ($A=PDP^{-1}$). If $B$ is the basis of $\mathbb{R}^n$ formed by the columns of $P$, then $D$ is the $B$-matrix of the linear transformation
	$T(\mathbf{x})=A\mathbf{x}$.\\
	\underline{\textit{Proof}} \\
	Let $\mathbf{b}_1, \mathbf{b}_2, ..., \mathbf{b}_n$ be the columns of $P$ so that $B=\{\mathbf{b}_1, \mathbf{b}_2, ..., \mathbf{b}_n\}$ is a basis. We know that for any basis in
	$\mathbb{R}^n$
	\begin{center}
		$\mathbf{x}=P[\mathbf{x}]_B \Rightarrow [\mathbf{x}]_B=P^{-1}\mathbf{x}$
	\end{center}
	Let $[T]_B$ be the transformation matrix in the basis $B$. We know that by definition
	\begin{center}
		$\begin{array}{rcll}
			[T]_B &=& \begin{pmatrix}[T(\mathbf{b}_1)]_B & [T(\mathbf{b}_2)]_B & ... & [T(\mathbf{b}_n)]_B \end{pmatrix} & \quad(T(\mathbf{x})=A\mathbf{x}) \\
			      &=& \begin{pmatrix}[A\mathbf{b}_1]_B & [A\mathbf{b}_2]_B & ... & [A\mathbf{b}_n]_B \end{pmatrix} & \quad\text{(change of coordinates)} \\
			      &=& \begin{pmatrix}P^{-1}A\mathbf{b}_1 & P^{-1}A\mathbf{b}_2 & ... & P^{-1}A\mathbf{b}_n \end{pmatrix} & \quad\text{(matrix multiplication)} \\
			      &=& P^{-1}A\begin{pmatrix}\mathbf{b}_1 & \mathbf{b}_2 & ... & \mathbf{b}_n \end{pmatrix} & \quad\text{(definition of P)} \\
			      &=& P^{-1}AP=D & \\
		\end{array}$
	\end{center}
\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Transformations from $\mathbb{R}^n$ into $\mathbb{R}^n$}
\begin{exampleblock}{Example}
	Let $T(\mathbf{x})=\begin{pmatrix}7 & 2 \\ -4 & 1\end{pmatrix}\mathbf{x}$. Find a basis $B$ in which the $B$-matrix of $T$ is diagonal.\\
	\underline{\textit{Solution}}\\
	We diagonalize $A$ as $A=PDP^{-1}$, with $P=\begin{pmatrix}1 & 1 \\ -1 & -2\end{pmatrix}$ and $D=\begin{pmatrix}5 & 0 \\ 0 & 3\end{pmatrix}$. We may change vectors
	$\mathbf{x}$ to the basis $B=\{(1,-1),(1,-2)\}$ by applying
	\begin{center}
		$\mathbf{u}=P^{-1}\mathbf{x}$
	\end{center}
	Then, in this new basis, $T$ can be applied as 
	\begin{center}
		$T(\mathbf{u})=D\mathbf{u}=DP^{-1}\mathbf{x}$
	\end{center}
	If we now come back to the original basis
	\begin{center}
		$T(\mathbf{x})=PT(\mathbf{u})=PDP^{-1}\mathbf{x}=A\mathbf{x}$
	\end{center}
	Understanding $D$ as the transformation matrix in some basis gives us insight on its effect (in this example, an anisotropic dilation).
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Similar matrices}
\begin{ceudef}[Similar matrices]
	$A$ and $C$ are \textbf{similar matrices} iff there exists another matrix $P$ such that $A=PCP^{-1}$. Given the transformation $T(\mathbf{x})=A\mathbf{x}$, $C$ is the $B$-matrix of the transformation $T$, when $B$ is the basis defined by the columns of the matrix $P$.\\
	\vspace{0.5cm}
	Conversely, if $B$ is any basis and $P$ is the matrix formed by the vectors in the basis $B$, then the $B$-matrix of the transformation $T$ is $P^{-1}AP$.
	\begin{center}
		\includegraphics[scale=0.4]{figLinearTransformation4.jpg}
	\end{center}
\end{ceudef}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Similar matrices}
\begin{exampleblock}{Example}
	Let $A=\begin{pmatrix}4 & -9 \\ 4 & -8\end{pmatrix}$, $T(\mathbf{x})=A\mathbf{x}$ and $\mathbf{b}_1=(3,2)$, $\mathbf{b}_2=(2,1)$. $A$ is not diagonalizable but the basis $B=\{\mathbf{b}_1,\mathbf{b}_2\}$ has the property that $[T]_B$ is triangular (it is said to be in Jordan form). According to the previous definition, the $B$-matrix of the transformation $T$ is
	\begin{center}
		$[T]_B=P^{-1}AP=\begin{pmatrix}-1 & 2 \\ 2 & -3\end{pmatrix}\begin{pmatrix}4 & -9 \\ 4 & -8\end{pmatrix}\begin{pmatrix}3 & 2 \\ 2 & 1\end{pmatrix}=
		   \begin{pmatrix}-2 & 1 \\ 0 & -2\end{pmatrix}$
	\end{center}
\end{exampleblock}

\begin{block}{Numerical note}
	An easy way to compute $P^{-1}AP$ once we have $AP$ is to find a row equivalent matrix
	\begin{center}
		$(\begin{array}{c|c} P & AP\end{array}) \sim(\begin{array}{c|c} I & P^{-1}AP\end{array})$
	\end{center}
\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 5, Section 4:
	\begin{itemize}
		\item 5.4.1
		\item 5.4.3
		\item 5.4.5
		\item 5.4.13
		\item 5.4.18
		\item 5.4.22
		\item 5.4.23
		\item 5.4.25
		\item 5.4.27 (computer)
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Complex eigenvalues (c)} 
\Outline

\begin{frame}\frametitle{Complex eigenvalues} 
Complex eigenvalues are always related to a rotation around a certain axis.
\begin{exampleblock}{Example}
	The linear transformation $T(\mathbf{x})=\begin{pmatrix}0 & -1 \\ 1 & 0 \end{pmatrix}\mathbf{x}$ is a rotation of 90\degree.
	\begin{center}
		\includegraphics[scale=0.3]{figRotation.png}
	\end{center}
	Obviously, there cannot be any real eigenvector since all the vectors are rotating. All eigenvalues are complex:
	\begin{center}
		$|A-\lambda I|=\lambda^2+1=(\lambda-i)(\lambda+i)=0$
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Complex eigenvalues} 
\begin{exampleblock}{Example (continued)}
	Let's see what happens if we allow applying the transformation on complex vectors:
	\begin{center}
		$\begin{pmatrix}0 & -1 \\ 1 & 0 \end{pmatrix}\begin{pmatrix}1\\ -i\end{pmatrix}=i\begin{pmatrix}1\\ -i\end{pmatrix}$\\
		$\begin{pmatrix}0 & -1 \\ 1 & 0 \end{pmatrix}\begin{pmatrix}1\\ i\end{pmatrix}=-i\begin{pmatrix}1\\ i\end{pmatrix}$
	\end{center}

\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Complex eigenvalues} 
\begin{exampleblock}{Example}
	Find the eigenvalues and eigenvectors of $A=\begin{pmatrix}\frac{1}{2} & -\frac{3}{5} \\ \frac{3}{4} & \frac{11}{10}\end{pmatrix}$.\\
	\underline{\textit{Solution}}\\
	To find the eigenvalues we solve the characteristic equation:
	\begin{center}
		$0=|A-\lambda I|=\left|\begin{array}{cc} \frac{1}{2}-\lambda & -\frac{3}{5} \\ \frac{3}{4} & \frac{11}{10}-\lambda \end{array}\right|=
		   \lambda^2-\frac{8}{5}\lambda+1\Rightarrow \lambda=\frac{4}{5}\pm \frac{3}{5}i$
	\end{center}
	MATLAB: {\color{blue}\texttt{A=[1/2 -3/5; 3/4 11/10]; l=eig(A)}}\\
	\label{ex:A}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Complex eigenvalues} 
\begin{exampleblock}{Example (continued)}
	\underline{$\lambda_1=\frac{4}{5}-\frac{3}{5}i$}\\
	\begin{center}
		$\begin{array}{rcl}
			A-\lambda_1 I&=&\left(\begin{array}{cc} \frac{1}{2}-(\frac{4}{5}-\frac{3}{5}i) & -\frac{3}{5} \\ \frac{3}{4} & \frac{11}{10}- (\frac{4}{5}-\frac{3}{5}i)\end{array}\right)=
		                 \left(\begin{array}{cc} -\frac{3}{10}+\frac{3}{5}i & -\frac{3}{5} \\ \frac{3}{4} & \frac{3}{10}+\frac{3}{5}i\end{array}\right)\\
			           &\sim&\left(\begin{array}{cc} 1 & \frac{2}{5}+\frac{4}{5}i \\ 0 & 0\end{array}\right) \Rightarrow x_1=-(\frac{2}{5}+\frac{4}{5}i)x_2 \Rightarrow
								\mathbf{v}_1=\begin{pmatrix}-2-4i\\5\end{pmatrix}
		\end{array}$
	\end{center}
	MATLAB:\\ {\color{blue}\texttt{A\_lI=A-l(1)*eye(2); \\ A\_lI(1,:)=A\_lI(1,:)/A\_lI(1,1) \\  A\_lI(2,:)=A\_lI(2,:)-A\_lI(1,:)*A\_lI(2,1) }}\\
	\underline{$\lambda_2=\frac{4}{5}+\frac{3}{5}i=\lambda_1^*$}\\
	\begin{center}
		$\begin{array}{rcl}
			A-\lambda_2 I&\sim&\left(\begin{array}{cc} 1 & \frac{2}{5}-\frac{4}{5}i \\ 0 & 0\end{array}\right) \Rightarrow x_1=-(\frac{2}{5}-\frac{4}{5}i)x_2 \Rightarrow
								\mathbf{v}_2=\begin{pmatrix}-2+4i\\5\end{pmatrix}=\mathbf{v}_1^*
		\end{array}$
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Complex eigenvalues} 
\begin{exampleblock}{Example (continued)}
	The application of $A$ on $\mathbb{R}^2$ is a rotation. To see this, we may start with $\mathbf{x}_0=(2,0)$ and calculate 
	\begin{center}
		\includegraphics[scale=0.32]{figRotation2.jpg}
	\end{center}
	\label{ex:ellipse}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Complex eigenvalues} 
\begin{ceudef}[Conjugate of a vector and matrix]
	The \textbf{conjugate of a vector} is defined as
	\begin{center}
		$\mathbf{v}=\begin{pmatrix}v_1 \\ v_2 \\ ... \\ v_n\end{pmatrix} \Rightarrow \mathbf{v}^*=\begin{pmatrix}v_1^* \\ v_2^* \\ ... \\ v_n^*\end{pmatrix}$
	\end{center}
	In the same way, the \textbf{conjugate of a matrix} is defined as 
	\begin{center}
		$A=\begin{pmatrix}a_{11} & a_{12} & ... & a_{1n} \\ a_{21} & a_{22} & ... & a_{2n} \\ ... & ... & ... & ... \\ a_{m1} & a_{m2} & ... & a_{mn}\end{pmatrix} \Rightarrow
		 A^*=\begin{pmatrix}a_{11}^* & a_{12}^* & ... & a_{1n}^* \\ a_{21}^* & a_{22}^* & ... & a_{2n}^* \\ ... & ... & ... & ... \\ a_{m1}^* & a_{m2}^* & ... & a_{mn}^*\end{pmatrix}$
	\end{center}
\end{ceudef}

\begin{ceuthm}[Properties]
	\begin{columns}
		\begin{column}{5cm}
			$(r\mathbf{v})^*=r^*\mathbf{v}^*$\\
			$(A\mathbf{v})^*=A^*\mathbf{v}^*$\\
		\end{column}
		\begin{column}{5cm}
			$(AB)^*=A^*B^*$\\
			$(rA)^*=r^*A^*$\\
		\end{column}
	\end{columns}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenanalysis of a real matrix that acts on $\mathbb{C}^n$} 
\begin{ceuthm}
	Let $A\in\mathcal{M}_{n\times n}$ be a matrix with real coefficients.
	If $\lambda$ is an eigenvalue of $A$, then $\lambda^*$ is also an eigenvalue. If $\mathbf{v}$ is an eigenvector associated to $\lambda$, then
	$\mathbf{v}^*$ is an eigenvector associated to $\lambda^*$.\\
	\underline{\textit{Proof}}\\
	If $\lambda$ is an eigenvalue and $\mathbf{v}$ one of its eigenvectors, then we know that
	\begin{center}
		$A\mathbf{v}=\lambda\mathbf{v}$
	\end{center}
	If we now conjugate both sides
	\begin{center}
		$(A\mathbf{v})^*=(\lambda\mathbf{v})^* \Rightarrow A\mathbf{v}^*=\lambda^*\mathbf{v}^*$
	\end{center}
	(Recall that $A$ has real coefficients, which is why $A^*=A$.) \\
	The previous equation means that $\mathbf{v}^*$ is also an eigenvector of $A$ and that $\lambda^*$ is its eigenvalue.
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenanalysis of a real matrix that acts on $\mathbb{C}^n$} 
\begin{exampleblock}{Example}
	Let $A=\begin{pmatrix}a & -b \\ b & a\end{pmatrix}$. Its eigenvalues are $\lambda=a\pm b i$ and the corresponding eigenvectors $\mathbf{v}=\begin{pmatrix}1\\\mp i\end{pmatrix}$.
	\begin{center}
		$\begin{pmatrix}a & -b \\ b & a\end{pmatrix}\begin{pmatrix}1\\ -i\end{pmatrix}=\begin{pmatrix}a+bi\\ b-ai\end{pmatrix}=(a+bi)\begin{pmatrix}1\\ -i\end{pmatrix}$\\
		$\begin{pmatrix}a & -b \\ b & a\end{pmatrix}\begin{pmatrix}1\\ i\end{pmatrix}=\begin{pmatrix}a-bi\\ b+ai\end{pmatrix}=(a-bi)\begin{pmatrix}1\\ i\end{pmatrix}$\\
	\end{center}
	In particular if $a=\cos(\phi)$ and $b=\sin(\phi)$, then we have a rotation matrix whose eigenvalues are
	\begin{center}
		$\begin{pmatrix}\cos(\phi) & -\sin(\phi) \\ \sin(\phi) & \cos(\phi)\end{pmatrix} \Rightarrow \lambda=\cos(\phi)\pm\sin(\phi)i=e^{\pm i \phi}$
	\end{center}
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenanalysis of a real matrix that acts on $\mathbb{C}^n$} 
\begin{exampleblock}{Example on Slide \pageref{ex:A} (continued)}
	Let $A=\begin{pmatrix}\frac{1}{2} & -\frac{3}{5} \\ \frac{3}{4} & \frac{11}{10}\end{pmatrix}$. Consider $\lambda_1=\frac{4}{5}-\frac{3}{5}i$ and its corresponding eigenvector $\mathbf{v}_1=(-2-4i,5)$. Now, we construct the matrix
	\begin{center}
		$P=\begin{pmatrix} \mathrm{Re}\{\mathbf{v}_1\} & \mathrm{Im}\{\mathbf{v}_1\} \end{pmatrix}=\begin{pmatrix}-2 & -4 \\ 5 & 0 \end{pmatrix}$
	\end{center}
	and make a change of basis to the basis whose vectors are the columns of $P$:
	\begin{center}
		$C=P^{-1}AP=\begin{pmatrix}\frac{4}{5} & -\frac{3}{5} \\ \frac{3}{5} & \frac{4}{5} \end{pmatrix}=\begin{pmatrix}\cos(36.87\degree) & -\sin(36.87\degree) \\ \sin(36.87\degree) & \cos(36.87\degree) \end{pmatrix}$
	\end{center}
	That is, $C$ is a pure rotation and thanks to the change of basis we obtain an elliptical rotation as shown in Slide \pageref{ex:ellipse}. 
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenanalysis of a real matrix that acts on $\mathbb{C}^n$} 
\begin{ceuthm}
	Let $A$ be a real, $2\times 2$ matrix with complex eigenvalue $\lambda=a-bi$ ($b\neq 0$) and an associated eigenvector $\mathbf{v}\in\mathbb{C}^2$. Then
	\begin{center}
		$A=PCP^{-1}$
	\end{center}
	where
	\begin{center}
		$P=\begin{pmatrix} \mathrm{Re}\{\mathbf{v}\} & \mathrm{Im}\{\mathbf{v}\} \end{pmatrix}$
	\end{center}
	and
	\begin{center}
		$C=\begin{pmatrix} a & -b \\ b & a \end{pmatrix}$
	\end{center}
	\underline{\textit{Proof}}\\
	It makes use of
	\begin{center}
		$\mathrm{Re}\{A\mathbf{v}\}=A\mathrm{Re}\{\mathbf{v}\}$\\
		$\mathrm{Im}\{A\mathbf{v}\}=A\mathrm{Im}\{\mathbf{v}\}$\\
	\end{center}
	
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Eigenanalysis of a real matrix that acts on $\mathbb{C}^n$} 
\begin{exampleblock}{Example: Rotations extend to higher dimensions}
	Consider $A=\begin{pmatrix}\frac{4}{5} & -\frac{3}{5} & 0 \\ \frac{3}{5} & \frac{4}{5} & 0 \\ 0 & 0 & 1.07\end{pmatrix}$.
	This is the rotation previously described in the $XY$ plane plus a scaling in the $Z$ direction. Any point in the $XY$ plane (for instance, $\mathbf{w}_0=(2,0,0)$) rotates within the plane. Any point outside the plane (for instance, $\mathbf{x}_0=(2,0,1)$) rotates in $XY$ and shifts along $Z$. The following figure shows the successive application of $A$ on $\mathbf{w}_0$ and $\mathbf{x}_0$.
	\begin{center}
		\includegraphics[scale=0.35]{figRotation3.jpg}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 5, Section 5:
	\begin{itemize}
		\item 5.5.1
		\item 5.5.7
		\item 5.5.13
		\item 5.5.23
		\item 5.5.24
		\item 5.5.25
		\item 5.5.26
		\item 5.5.27
	\end{itemize}
\end{exerciseblock}

\end{frame}

\OutlineFinal

\end{document}