\input{../slidesComun}

\title[3. Matrix algebra]{Chapter 3. Matrix algebra}  
\COSS

% ==============================================
\begin{frame}\frametitle{References} 

\begin{figure}
	\includegraphics[scale=0.7]{../lay_linearalgebra.jpg}
\end{figure}
D. Lay. Linear algebra and its applications (3rd ed). Pearson (2006). Chapter 2.

\end{frame}

% ==============================================
\begin{frame}\frametitle{A little bit of history} 

Matrices appeared as regular arrangements of numbers more than 2,000 years ago. However, it was during the 17th, 18th and 19th centuries that they developed into the form we know today. Some important names in their modern development are \href{https://en.wikipedia.org/wiki/Seki_Takakazu}{Seki Takakazu} (1683), \href{http://en.wikipedia.org/wiki/Gottfried_Wilhelm_Leibniz}{Gottfried Leibniz} (1693), \href{http://en.wikipedia.org/wiki/Gabriel_Cramer}{Gabriel Cramer} (1750), \href{http://en.wikipedia.org/wiki/James_Joseph_Sylvester}{James Sylvester} (1850), and \href{http://en.wikipedia.org/wiki/Arthur_Cayley}{Arthur Cayley} (1858). They were applied to all kinds of mathematical problems as a way to organize calculations.

\begin{figure}
	\includegraphics[height=3cm]{Seki.jpg}
	\includegraphics[height=3cm]{Leibniz.jpg}
	\includegraphics[height=3cm]{Cramer.jpg}
	\includegraphics[height=3cm]{Sylvester.jpg}
	\includegraphics[height=3cm]{Cayley.jpg}
\end{figure}

To learn more about the history of matrix algebra, visit
\begin{itemize}
	\item \url{http://www-groups.dcs.st-and.ac.uk/~history/PrintHT/Matrices_and_determinants.html}
\end{itemize}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Applications} 

\textbf{Finite elements} has been one of the most successful approaches to biomechanical modeling. The figure shows one such model of the heart. Using this model, all kinds of local stresses can be calculated.

\begin{figure}
	\includegraphics[width=5.5cm]{figHeartModel.jpg}
	\includegraphics[width=5.5cm]{figHeartModel2.jpg}
\end{figure}

\begin{tiny}
J. Berkley, S. Weghorst, H. Gladstone, G. Raugi, D. Berg, M. Ganter. Banded Matrix Approach to Finite Element Modeling for Soft Tissue Simulation. 
\end{tiny}

\end{frame}

% ==============================================
\setnextsection{3}
\section{Matrix algebra} 
\subsection{Matrix operations (a)} 
\Outline

\begin{frame}\frametitle{Basic definitions} 
\begin{ceudef}[Matrix]
	Informally, we can define a \textbf{matrix} as a regular arrangement of numbers that are laid out in a grid of $m$ rows and $n$ columns.
	More formally, we say that $A \in \mathcal{M}_{m\times n}$. We denote by $\mathbf{a}_j$ its $j$-th \textbf{column}, and by $a_{ij}$ the element in
	the $i$-th row and the $j$-th column.
	\begin{center}
		$A=\begin{pmatrix}\mathbf{a}_1 & \mathbf{a}_2 & ... & \mathbf{a}_n \end{pmatrix} =
		   \begin{pmatrix}a_{11} & a_{12} & ... & a_{1n} \\
			                a_{21} & a_{22} & ... & a_{2n} \\
											... & ... & ... & ... \\
											a_{m1} & a_{m2} & ... & a_{mn} \\
				\end{pmatrix}$
	\end{center}
	The \textbf{main diagonal} is the vector given by $(a_{11},a_{22},...)$. Two important special matrices are the \textbf{identity matrix} ($I \in \mathcal{M}_{n\times n}$), which is zero
	everywhere except on the main diagonal, where it has 1s; and the \textbf{zero matrix} ($0 \in \mathcal{M}_{m\times n}$), which is zero everywhere.
\end{ceudef}

\begin{columns}
	\begin{column}{5cm}
		\begin{exampleblock}{Example}
			\begin{center}
				MATLAB: {\color{blue}\texttt{A=[1 2 3; 4 5 6]}}
			\end{center}
		\end{exampleblock}
	\end{column}
\end{columns}
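\begin{exampleblock}{Example}
	The special matrices above can be created directly in MATLAB with the built-in functions \texttt{eye} and \texttt{zeros}:
	\begin{center}
		MATLAB: {\color{blue}\texttt{I=eye(3)}}, {\color{blue}\texttt{Z=zeros(2,3)}}
	\end{center}
\end{exampleblock}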

\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix operations} 
\begin{ceudef}[Sum with a scalar]
	We define the \textbf{sum operator between a scalar and a matrix} as:
	\begin{center}
		$\begin{array}{ccccc} +: & \mathbb{R} \times \mathcal{M}_{m\times n} & \rightarrow & \mathcal{M}_{m\times n} & \\
					                   & +(k,A) & \rightarrow & B=k+A & | b_{ij}=k+a_{ij} \end{array}$
	\end{center}
	We overload the notation to define the \textbf{sum operator between a matrix and a scalar} as
	\begin{center}
		$\begin{array}{ccccc} +: & \mathcal{M}_{m\times n} \times \mathbb{R} & \rightarrow & \mathcal{M}_{m\times n} & \\
					                   & +(A,k) & \rightarrow & B=A+k & | b_{ij}=a_{ij}+k \end{array}$
	\end{center}
\end{ceudef}

\begin{columns}
	\begin{column}{5.5cm}
		\begin{exampleblock}{Example}
		$\begin{array}{rcl}
			A&=&\begin{pmatrix}1&2&3\\-1&-2&-3\end{pmatrix}\\
			B=1+A&=&\begin{pmatrix}2&3&4\\0&-1&-2\end{pmatrix}\\
		\end{array}$\\
		MATLAB: {\color{blue}\texttt{B=1+A}}
		\end{exampleblock}
	\end{column}
	\begin{column}{6cm}
		\begin{block}{Properties}
		$\begin{array}{rcl}
			k+A&=&A+k\\
			(k_1+k_2)+A&=&k_1+(k_2+A)\\
		\end{array}$
		\end{block}
	\end{column}
\end{columns}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix operations} 
\begin{ceudef}[Multiplication with a scalar]
	We define the \textbf{multiplication operator between a scalar and a matrix} as:
	\begin{center}
		$\begin{array}{ccccc} \cdot: & \mathbb{R} \times \mathcal{M}_{m\times n} & \rightarrow & \mathcal{M}_{m\times n} & \\
					                   & \cdot(k,A) & \rightarrow & B=kA & | b_{ij}=ka_{ij} \end{array}$
	\end{center}
	We overload the notation to define the \textbf{multiplication operator between a matrix and a scalar} as
	\begin{center}
		$\begin{array}{ccccc} \cdot: & \mathcal{M}_{m\times n} \times \mathbb{R} & \rightarrow & \mathcal{M}_{m\times n} & \\
					                   & \cdot(A,k) & \rightarrow & B=Ak & | b_{ij}=a_{ij}k \end{array}$
	\end{center}
\end{ceudef}

\begin{columns}
	\begin{column}{5cm}
		\begin{exampleblock}{Example}
		$\begin{array}{rcl}
			A&=&\begin{pmatrix}1&2&3\\-1&-2&-3\end{pmatrix}\\
			B=2A&=&\begin{pmatrix}2&4&6\\-2&-4&-6\end{pmatrix}\\
		\end{array}$\\
		MATLAB: {\color{blue}\texttt{B=2*A}}
		\end{exampleblock}
	\end{column}
	\begin{column}{5cm}
		\begin{block}{Properties}
		$\begin{array}{rcl}
			kA&=&Ak\\
			(k_1k_2)A&=&k_1(k_2A)\\
			(k_1+k_2)A&=&k_1A+k_2A\\
		\end{array}$
		\end{block}
	\end{column}
\end{columns}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix operations} 
\begin{ceudef}[Sum of two matrices]
	We define the \textbf{sum operator between two matrices} as:
	\begin{center}
		$\begin{array}{ccccc} +: & \mathcal{M}_{m\times n} \times \mathcal{M}_{m\times n} & \rightarrow & \mathcal{M}_{m\times n} & \\
					                   & +(A,B) & \rightarrow & C=A+B & | c_{ij}=a_{ij}+b_{ij} \end{array}$
	\end{center}
\end{ceudef}

\begin{columns}
	\begin{column}{6cm}
		\begin{exampleblock}{Example}
		$\begin{array}{rcl}
			A&=&\begin{pmatrix}1&2&3\\-1&-2&-3\end{pmatrix}\\
			B&=&\begin{pmatrix}4&5&6\\0&1&1\end{pmatrix}\\
			C=A+B&=&\begin{pmatrix}5&7&9\\-1&-1&-2\end{pmatrix}\\
		\end{array}$\\
		MATLAB: {\color{blue}\texttt{C=A+B}}
		\end{exampleblock}
	\end{column}
	\begin{column}{5cm}
		\begin{block}{Properties}
		$\begin{array}{rcl}
			A+B&=&B+A \\
			A+(B+C)&=&(A+B)+C \\
			A+0&=&A \\
			k(A+B)&=&kA+kB \\
		\end{array}$
		\end{block}
	\end{column}
\end{columns}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix operations} 
\begin{block}{Proof of the properties}
	We are not proving all properties, although all of them follow the same strategy. Let's see an example
	\begin{center}
		$k(A+B)=kA+kB$
	\end{center}
	\underline{\textit{Proof}}\\
	Let us develop the left hand side\\
	\begin{center}$\begin{array}{r|l}
		C=A+B & c_{ij}=a_{ij}+b_{ij}\\
		D=kC=k(A+B) & d_{ij}=kc_{ij}=k(a_{ij}+b_{ij})=ka_{ij}+kb_{ij} \\
	\end{array}$\end{center}
	Now, the right hand side\\
	\begin{center}$\begin{array}{r|l}
		E=kA & e_{ij}=ka_{ij} \\
		F=kB & f_{ij}=kb_{ij} \\
		G=E+F=kA+kB & g_{ij}=e_{ij}+f_{ij}=ka_{ij}+kb_{ij} \\
	\end{array}$\end{center}
	It is obvious that $d_{ij}=g_{ij}$, and consequently $k(A+B)=kA+kB$. (q.e.d.)
\end{block}
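\begin{exampleblock}{Example}
	A quick numerical sanity check of this property in MATLAB (the matrices are the ones used in the previous examples):
	\begin{center}
		MATLAB: {\color{blue}\texttt{A=[1 2 3; -1 -2 -3]; B=[4 5 6; 0 1 1]; isequal(2*(A+B),2*A+2*B)}}
	\end{center}
\end{exampleblock}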
\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix operations} 
\begin{ceudef}[Multiplication of two matrices]
	We define the \textbf{multiplication operator between two matrices} as:
	\begin{center}
		$\begin{array}{ccccc} \cdot: & \mathcal{M}_{m\times n} \times \mathcal{M}_{n\times p} & \rightarrow & \mathcal{M}_{m\times p} & \\
					                   & \cdot(A,B) & \rightarrow & C=AB & | c_{ij}=\sum\limits_{k=1}^n{a_{ik}b_{kj}} \end{array}$
	\end{center}
	If we consider the different columns of $B$, then we have 
	\begin{center}
		$B=\begin{pmatrix}\mathbf{b}_1 & \mathbf{b}_2 & ... & \mathbf{b}_p \end{pmatrix} \Rightarrow
		 AB=\begin{pmatrix}A\mathbf{b}_1 & A\mathbf{b}_2 & ... & A\mathbf{b}_p \end{pmatrix}$
	\end{center}
	This can be interpreted as ``the $j$-th column of $AB$ is a weighted sum of the columns of matrix $A$, using the weights defined by the $j$-th column of $B$''.
\end{ceudef}
\begin{columns}
	\begin{column}{5cm}
		\begin{exampleblock}{Example}
			\begin{center}
				MATLAB: {\color{blue}\texttt{A*B}}
			\end{center}
		\end{exampleblock}
	\end{column}
\end{columns}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix operations} 
\begin{exampleblock}{Example}
	Let $A=\begin{pmatrix}2 & 3\\1 & -5\end{pmatrix}$ and $B=\begin{pmatrix} 4&3&6\\1 & -2&3\end{pmatrix}$.
	Then,\\
	$\begin{array}{rcl}
		A\mathbf{b}_1&=&\begin{pmatrix}2 & 3\\1 & -5\end{pmatrix}\begin{pmatrix}4\\1\end{pmatrix}=\begin{pmatrix}11\\-1\end{pmatrix} \\
		A\mathbf{b}_2&=&\begin{pmatrix}2 & 3\\1 & -5\end{pmatrix}\begin{pmatrix}3\\-2\end{pmatrix}=\begin{pmatrix}0\\13\end{pmatrix} \\
		A\mathbf{b}_3&=&\begin{pmatrix}2 & 3\\1 & -5\end{pmatrix}\begin{pmatrix}6\\3\end{pmatrix}=\begin{pmatrix}21\\-9\end{pmatrix} \\
	  AB&=&\begin{pmatrix}A\mathbf{b}_1 & A\mathbf{b}_2 & A\mathbf{b}_3 \end{pmatrix} =\begin{pmatrix} 11&0&21\\-1&13&-9\end{pmatrix}\\
	\end{array}$\\
	To directly compute a specific entry, for instance, $(AB)_{23}$ we have to multiply the 2nd row of $A$ and the third column of $B$\\
	$\begin{array}{rcl}
	(AB)_{23}&=&\left[\begin{pmatrix}2 & 3\\{\color{red}1} & {\color{red}-5}\end{pmatrix} \begin{pmatrix} 4&3&{\color{red}6}\\1 & -2&{\color{red}3}\end{pmatrix}\right]={\color{red}1\cdot 6+(-5)\cdot 3}=-9
	\end{array}$\\
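	We can reproduce this computation in MATLAB; the indexing expression \texttt{A(2,:)*B(:,3)} computes the single entry $(AB)_{23}$:\\
	MATLAB: {\color{blue}\texttt{A=[2 3; 1 -5]; B=[4 3 6; 1 -2 3]; C=A*B, A(2,:)*B(:,3)}}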
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix operations} 
\begin{block}{Geometrical interpretation}
	Consider the linear transformations
	\begin{center}
		$T_A(\mathbf{x})=A\mathbf{x}$\\
		$T_B(\mathbf{x})=B\mathbf{x}$\\
	\end{center}
	that map any input vector using the matrix $A$ or $B$, respectively. Now consider the sequential application of first $T_B$, and then $T_A$, as shown in the following figure:
	\begin{center}
		\includegraphics[scale=0.33]{figMatrixMultiplication.jpg}
	\end{center}
	Matrix multiplication helps us to define a single transformation such that we can transform the original vectors in a single step:
	\begin{center}
		$T_{AB}(\mathbf{x})=(AB)\mathbf{x}=A(B\mathbf{x})=T_A(T_B(\mathbf{x}))$
	\end{center}
	
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix operations} 
\begin{block}{Property}
	$\mathrm{row}_i(AB)=\mathrm{row}_i(A)B$
\end{block}
\begin{exampleblock}{Example (continued)}
	$\mathrm{row}_1(AB)=\mathrm{row}_1(A)B=\begin{pmatrix}2&3\end{pmatrix}\begin{pmatrix} 4&3&6\\1 & -2&3\end{pmatrix}=\begin{pmatrix} 11&0&21\end{pmatrix}$
\end{exampleblock}
\begin{block}{More properties}
	\begin{tabular}{cl}
		 $A(BC)=(AB)C$ & Associativity \\
	   $A(B+C)=AB+AC$ & Left distributivity \\
		 $(A+B)C=AC+BC$ & Right distributivity \\
		 $r(AB)=(rA)B=A(rB)$ & For any scalar $r$ \\
		 $I_mA=A=AI_n$ & For $A\in\mathcal{M}_{m \times n}$
	\end{tabular}
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix operations} 
\begin{block}{}
	\underline{\textit{Proof $A(BC)=(AB)C$}}\\
	Let us consider the column decomposition of matrix $C$.
	\begin{center}
		$C=\begin{pmatrix}\mathbf{c}_1 & \mathbf{c}_2 & ... & \mathbf{c}_p \end{pmatrix} \Rightarrow$ \\
		$BC=\begin{pmatrix}B\mathbf{c}_1 & B\mathbf{c}_2 & ... & B\mathbf{c}_p \end{pmatrix} \Rightarrow$ \\
		$A(BC)=\begin{pmatrix}A(B\mathbf{c}_1) & A(B\mathbf{c}_2) & ... & A(B\mathbf{c}_p) \end{pmatrix}$
	\end{center}
	But we have seen in the geometrical interpretation of matrix multiplication that $A(B\mathbf{c}_i)=(AB)\mathbf{c}_i$, therefore
	\begin{center}
		$A(BC)=\begin{pmatrix}(AB)\mathbf{c}_1 & (AB)\mathbf{c}_2 & ... & (AB)\mathbf{c}_p \end{pmatrix} =(AB)C$
	\end{center}
\end{block}

\begin{block}{Warnings}
	\begin{itemize}
		\item In general, $AB\neq BA$: matrix multiplication is not commutative.
		\item $AB=AC \nRightarrow B=C$.
		\item $AB=0 \nRightarrow A=0$ or $B=0$.
	\end{itemize}
\end{block}
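\begin{exampleblock}{Example}
	A quick MATLAB counterexample for the first warning (any pair of non-commuting matrices would do; this particular pair is chosen just for illustration):
	\begin{center}
		MATLAB: {\color{blue}\texttt{A=[1 1; 0 1]; B=[1 0; 1 1]; A*B, B*A}}
	\end{center}
\end{exampleblock}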

\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix operations} 
\begin{ceudef}[Power of a matrix]
	If $A \in \mathcal{M}_{n\times n}$, then the $k$-th power of the matrix is defined as
	\begin{center}
		$A^k=\underbrace{A\cdot A \cdot ... \cdot A}_\text{k times}$
	\end{center}
	Note: $A^0=I_n$
\end{ceudef}
\begin{columns}
	\begin{column}{5cm}
		\begin{exampleblock}{Example}
			\begin{center}
				MATLAB: {\color{blue}\texttt{A\^{}k}}
			\end{center}
		\end{exampleblock}
	\end{column}
\end{columns}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix operations} 
\begin{ceudef}[Transpose]
	If $A \in \mathcal{M}_{m\times n}$, then the transpose of $A$ ($A^T$) is a matrix in
	$\mathcal{M}_{n\times m}$ such that the rows of $A$ are the columns of $A^T$, or more formally
	\begin{center}
		$(A^T)_{ij}=A_{ji}$
	\end{center}
\end{ceudef}
\begin{columns}
	\begin{column}{6cm}
		\begin{exampleblock}{Example}
			\begin{center}
				$A=\begin{pmatrix}1 & 2 & 3 \\ 4 & 5 & 6 \end{pmatrix} \Rightarrow A^T=\begin{pmatrix}1 & 4\\ 2 & 5 \\ 3 & 6\end{pmatrix}$\\
				MATLAB: {\color{blue}\texttt{A'}}
			\end{center}
		\end{exampleblock}
	\end{column}
	\begin{column}{4cm}
		\begin{block}{Properties}
			\begin{center}
				$(A^T)^T=A$\\
				$(A+B)^T=A^T+B^T$\\
				$(rA)^T=rA^T$ \\
				$(AB)^T=B^TA^T$
			\end{center}
		\end{block}
	\end{column}
\end{columns}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix operations} 
\begin{block}{}
	\underline{\textit{Proof $(AB)^T=B^TA^T$}}\\
	Let $A\in\mathcal{M}_{m\times n}$ and $B\in\mathcal{M}_{n\times p}$
	By the definition of matrix multiplication we know that
	\begin{center}
		$(AB)_{ij}=\sum\limits_{k=1}^n{a_{ik}b_{kj}}$
	\end{center}
	Let $B'=B^T$ and $A'=A^T$. For the same reason
	\begin{center}
		$(B^TA^T)_{ij}=(B'A')_{ij}=\sum\limits_{k=1}^n{b'_{ik}a'_{kj}}$
	\end{center}
	But $b'_{ik}=b_{ki}$ and $a'_{kj}=a_{jk}$, consequently
	\begin{center}
		$(B^TA^T)_{ij}=\sum\limits_{k=1}^n{b_{ki}a_{jk}}=\sum\limits_{k=1}^n{a_{jk}b_{ki}}=(AB)_{ji}$
	\end{center}
	or what is the same
	\begin{center}
		$B^TA^T=(AB)^T$
	\end{center}
\end{block}
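\begin{exampleblock}{Example}
	A numerical spot-check of this identity in MATLAB with random matrices (a sanity check, not a substitute for the proof; the norm of the difference should be 0):
	\begin{center}
		MATLAB: {\color{blue}\texttt{A=rand(2,3); B=rand(3,4); norm((A*B)'-B'*A')}}
	\end{center}
\end{exampleblock}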

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 2, Section 1:
	\begin{columns}
		\begin{column}{5cm}
			\begin{itemize}
				\item 2.1.3
				\item 2.1.10
				\item 2.1.12
				\item 2.1.18
				\item 2.1.19
				\item 2.1.20
				\item 2.1.22
			\end{itemize}
		\end{column}
		\begin{column}{5cm}
			\begin{itemize}
				\item 2.1.23
				\item 2.1.24
				\item 2.1.25
				\item 2.1.26
				\item 2.1.27
				\item 2.1.39 (bring computer)
				\item 2.1.40 (bring computer)
			\end{itemize}
		\end{column}
	\end{columns}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Inverse of a matrix (b)} 
\Outline

\begin{frame}\frametitle{Matrix inverse} 
\begin{exampleblock}{Example}
	The inverse of a number is a clear concept
	\begin{center}
		$5\cdot\frac{1}{5}=5\cdot 5^{-1}=1=5^{-1} \cdot 5$
	\end{center}
\end{exampleblock}

\begin{ceudef}[Inverse of a matrix]
	A matrix $A \in \mathcal{M}_{n\times n}$ is \textbf{invertible} (or \textbf{non-singular}) if there exists another matrix $C  \in \mathcal{M}_{n\times n}$ such that $AC=I_n=CA$. $C$ is called the inverse of
	$A$ and it is denoted as $A^{-1}$. If $A$ is not invertible, it is said to be \textbf{singular}. 
	(MATLAB: {\color{blue}\texttt{inv(A)}})
\end{ceudef}

\begin{block}{Properties}
	The inverse of a matrix is unique.\\
	\underline{\textit{Proof}}\\
	Let us assume that there exist two different inverses $C_1$ and $C_2$. Then,
	\begin{center}
		$C_2=C_2I=C_2(AC_1)=(C_2A)C_1=IC_1=C_1$
	\end{center}
	which is a contradiction and, therefore, the inverse must be unique. (q.e.d.)
\end{block}


\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix inverse} 
\begin{exampleblock}{Example}
	\begin{center}
		Let $A=\begin{pmatrix}2 & 5 \\ -3 & -7 \end{pmatrix}$ and 
		$A^{-1}=\begin{pmatrix}-7 & -5 \\ 3 & 2 \end{pmatrix}$ \\
	\end{center}
	It can easily be verified that
	\begin{center}
		$AA^{-1}=A^{-1}A=I_2=\begin{pmatrix}1 & 0 \\ 0 & 1 \end{pmatrix}$ \\
	\end{center}
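	The same check in MATLAB:
	\begin{center}
		MATLAB: {\color{blue}\texttt{A=[2 5; -3 -7]; inv(A), A*inv(A)}}
	\end{center}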
\end{exampleblock}

\begin{ceuthm}[Inverse of a $2\times 2$ matrix]
	Let $A=\begin{pmatrix}a & b \\ c & d\end{pmatrix}$. If $ad-bc\neq 0$, then $A$ is invertible and its inverse is
	\begin{center}
		$A^{-1}=\frac{1}{ad-bc}\begin{pmatrix}d & -b \\ -c & a \end{pmatrix}$ \\
	\end{center}
	\underline{\textit{Proof}}\\
	It is easy to verify that $AA^{-1}=A^{-1}A=I_2$.
\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix inverse} 
\begin{ceuthm}
	If $A \in \mathcal{M}_{n\times n}$ is invertible, then for every $\mathbf{b}\in\mathbb{R}^n$, the equation $A\mathbf{x}=\mathbf{b}$ has a unique
	solution that is $\mathbf{x}=A^{-1}\mathbf{b}$.\\
	\underline{\textit{Proof}}\\
		\parbox{11cm}{
			\underline{\textit{Proof $\mathbf{x}=A^{-1}\mathbf{b}$ is a solution}}\\
			\leftskip5mm If we substitute the solution in the equation we have
			\begin{center}
				$A\mathbf{x}=A(A^{-1}\mathbf{b})=(AA^{-1})\mathbf{b}=\mathbf{b}$ (q.e.d.)
			\end{center}
		}
		\parbox{11cm}{
			\underline{\textit{Proof $\mathbf{x}=A^{-1}\mathbf{b}$ is the unique solution}}\\
			\leftskip5mm Let us assume that $\mathbf{x}'\neq \mathbf{x}$ is also a solution, then 
			\begin{center}
				$A\mathbf{x}'=\mathbf{b}$
			\end{center}
			If we multiply on the left by $A^{-1}$, we have
			\begin{center}
				$A^{-1}A\mathbf{x}'=A^{-1}\mathbf{b} \Rightarrow \mathbf{x}'=\mathbf{x}$
			\end{center}
			which is obviously a contradiction and, therefore, $\mathbf{x}=A^{-1}\mathbf{b}$ must be the unique solution. (q.e.d.)
		}
		\label{thm:eqSystemInverse}
\end{ceuthm}
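\begin{block}{}
	In MATLAB the solution can be computed as {\color{blue}\texttt{x=inv(A)*b}}, although the backslash operator {\color{blue}\texttt{x=A\textbackslash b}} is generally preferred in practice because it solves the system without explicitly forming $A^{-1}$.
\end{block}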
\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix inverse} 
\begin{exampleblock}{Example}
	\begin{center}
		\includegraphics[width=11cm]{figMatrixInverse.jpg}
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix inverse} 
\begin{exampleblock}{Example (continued)}
	Consider the equation $\mathbf{y}=D\mathbf{f}$, $D=\begin{pmatrix} 1 & \frac{1}{2} & \frac{1}{4} \\ \frac{1}{2} & 1 & \frac{1}{2} \\ \frac{1}{4} & \frac{1}{2} & 1\end{pmatrix}$
	and the fact that 
	\begin{center}
		$D=DI=\begin{pmatrix}D\mathbf{e}_1 &D\mathbf{e}_2 & D\mathbf{e}_3 \end{pmatrix}$
	\end{center}
	Therefore, the $i$-th column of $D$ can be interpreted as the deflection at the different points when a unit force ($\mathbf{e}_i$) is applied onto the $i$-th point.
	In our example when we apply a unit force at point 1, the first column of $D$ is $(1,\frac{1}{2},\frac{1}{4})$ meaning that the first point displaces 1 m.,
	the second point $\frac{1}{2}$ m., and the third point $\frac{1}{4}$ m.
	
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix inverse} 
\begin{exampleblock}{Example (continued)}
	If we now consider that
	$\mathbf{f}=D^{-1}\mathbf{y}$, $D^{-1}=\begin{pmatrix} \frac{4}{3} & -\frac{2}{3} & 0 \\ -\frac{2}{3} & \frac{5}{3} & -\frac{2}{3} \\
	0 & -\frac{2}{3} & \frac{4}{3} \end{pmatrix}$ and the fact that
	\begin{center}
		$D^{-1}=D^{-1}I=\begin{pmatrix}D^{-1}\mathbf{e}_1 &D^{-1}\mathbf{e}_2 & D^{-1}\mathbf{e}_3 \end{pmatrix}$
	\end{center}
	Therefore, the $i$-th column of $D^{-1}$ can be interpreted as the forces needed to be applied at the different points to produce a unit deformation ($\mathbf{e}_i$) at
	the $i$-th point. In our example, to produce a displacement of 1 m. at the first point and none at the other points ($\mathbf{e}_1=(1,0,0)$), we need to push point 1 with a force of
	$\frac{4}{3}$ N., to pull point 2 with a force of $\frac{2}{3}$ N. (that is, apply $-\frac{2}{3}$ N.), and we do not need to apply any force at point 3.
	
\end{exampleblock}
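\begin{block}{}
	We can verify this inverse numerically:
	{\color{blue}\texttt{D=[1 1/2 1/4; 1/2 1 1/2; 1/4 1/2 1]; inv(D)}}
\end{block}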
\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix inverse} 
\begin{ceuthm}
	\begin{enumerate}
		\item If $A$ is invertible, then $A^{-1}$ is also invertible and its inverse is $A$.
		\item If $A$ and $B$ are invertible, then $AB$ is also invertible and its inverse is $B^{-1}A^{-1}$.
		\item If $A$ is invertible, then $A^T$ is also invertible and its inverse is $(A^{-1})^T$.
	\end{enumerate}
	\underline{\textit{Proof 1)}}\\
	The definition of $A^{-1}$ is that it is a matrix such that
	\begin{center}
		$AA^{-1}=A^{-1}A=I$
	\end{center}
	The inverse of $A^{-1}$ must be a matrix $C$ such that
	\begin{center}
		$CA^{-1}=A^{-1}C=I$
	\end{center}
	If we compare this equation with the previous one, we easily see that $C=A$ is the inverse of $A^{-1}$.
	\label{thm:basicInverses}
\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix inverse} 
\begin{block}{}
	\underline{\textit{Proof 2)}}\\
	Let us check that $B^{-1}A^{-1}$ is actually the inverse of $AB$
	\begin{center}
		$(AB)(B^{-1}A^{-1})=A(BB^{-1})A^{-1}=AIA^{-1}=AA^{-1}=I$\\
		$(B^{-1}A^{-1})(AB)=B^{-1}(A^{-1}A)B=B^{-1}IB=B^{-1}B=I$\\
	\end{center}
	\vspace{0.25cm}
	\underline{\textit{Proof 3)}}\\
	Let us check that $(A^{-1})^T$ is actually the inverse of $A^T$
	\begin{center}
		$A^T(A^{-1})^T=[(AB)^T=B^TA^T]=(A^{-1}A)^T=I^T=I$ \\
		$(A^{-1})^TA^T=[(AB)^T=B^TA^T]=(AA^{-1})^T=I^T=I$
	\end{center}
\end{block}

\begin{ceuthm}
	We may generalize the previous theorem and state that
	\begin{center}
		$(A_1A_2 ... A_p)^{-1}=A^{-1}_pA^{-1}_{p-1}...A^{-1}_2A^{-1}_1$
	\end{center}
\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix inverse} 
\begin{block}{}
	\underline{\textit{Proof}}\\
	Let's prove it by weak induction. That is, we know that the statement is true for $p=2$ (by the previous theorem). Let us
	assume it is true for $p-1$, that is
	\begin{center}
		$(A_1A_2 ... A_{p-1})^{-1}=A^{-1}_{p-1}...A^{-1}_2A^{-1}_1$
	\end{center}
	We now show that it also holds for $p$. Let us define $B=A_1A_2 ... A_{p-1}$. Then, we can rewrite the left hand side of the theorem as
	\begin{center}
		$(A_1A_2 ... A_p)^{-1}=(BA_p)^{-1}$
	\end{center}
	This is the inverse of the multiplication of two matrices. We know by the previous theorem that 
		$(BA_p)^{-1}=A_p^{-1}B^{-1}$
	But we presumed that
	\begin{center}
		$B^{-1}=(A_1A_2 ... A_{p-1})^{-1}=A^{-1}_{p-1}...A^{-1}_2A^{-1}_1$
	\end{center}
	And consequently
	\begin{center}
		$(BA_p)^{-1}=A_p^{-1}A^{-1}_{p-1}...A^{-1}_2A^{-1}_1$ (q.e.d.)
	\end{center}
	
	
\end{block}
\end{frame}

% ==============================================
\subsection{Elementary matrices (b)} 
\Outline

\begin{frame}\frametitle{Elementary matrices} 
The elementary operations we can perform on the rows of a matrix are
\begin{enumerate}
	\item Multiply a row by a nonzero scalar
	\item Swap two rows
	\item Replace a row by a linear combination of two or several rows
\end{enumerate}
All these operations can be represented as matrix multiplications.
\begin{exampleblock}{Example}
	Consider the matrix $A=\begin{pmatrix}a & b & c\\d & e & f\\g & h & i\end{pmatrix}$
	\begin{enumerate}
		\item We can multiply the third row by a scalar $r$ by multiplying on the left by the matrix
			\begin{center}
				$E_1=\begin{pmatrix}1 & 0 & 0\\0 & 1 & 0\\0 & 0 & r\end{pmatrix} \Rightarrow E_1A=\begin{pmatrix}a & b & c\\d & e & f\\rg & rh & ri\end{pmatrix}$
			\end{center}
	\end{enumerate}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Elementary matrices} 
\begin{exampleblock}{Example (continued)}
	\begin{enumerate}
		\setcounter{enumi}{1}
		\item We can swap the first and second rows of the matrix by multiplying on the left by the matrix
			\begin{center}
				$E_2=\begin{pmatrix}0 & 1 & 0\\1 & 0 & 0\\0 & 0 & 1\end{pmatrix} \Rightarrow E_2A=\begin{pmatrix}d & e & f\\a & b & c\\g & h & i\end{pmatrix}$
			\end{center}
		\item We can substitute the third row by $\mathbf{r}_3+k_1\mathbf{r}_1$ by multiplying on the left by the matrix
			\begin{center}
				$E_3=\begin{pmatrix}1 & 0 & 0\\0 & 1 & 0\\k_1 & 0 & 1\end{pmatrix} \Rightarrow E_3A=\begin{pmatrix}a & b & c\\d & e & f\\g+k_1a & h+k_1b & i+k_1c\end{pmatrix}$
			\end{center}
	\end{enumerate}
\end{exampleblock}
\begin{ceudef}[Elementary matrix]
	An \textbf{elementary matrix} is one that differs from the identity matrix by a single elementary row operation.
\end{ceudef}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Elementary matrices} 
\begin{ceuthm}
	The inverse of an elementary matrix is another elementary matrix of the same type. That is, row operations can be undone.
\end{ceuthm}
\begin{exampleblock}{Example (continued)}
	\begin{enumerate}
		\item $E_1^{-1}= \begin{pmatrix}1 & 0 & 0\\0 & 1 & 0\\0 & 0 & \frac{1}{r}\end{pmatrix}$
		\item $E_2^{-1}= \begin{pmatrix}0 & 1 & 0\\1 & 0 & 0\\0 & 0 & 1\end{pmatrix}$
		\item 
				\begin{tabular}{ll}
					$E_3^{-1}= \begin{pmatrix}1 & 0 & 0\\0 & 1 & 0\\-k_1 & 0 & 1\end{pmatrix}$ &
					\begin{tabular}{l}
						MATLAB:\\
						{\color{blue}\texttt{syms k1}}\\
						{\color{blue}\texttt{E3=[1 0 0; 0 1 0; k1 0 1];}}\\
						{\color{blue}\texttt{inv(E3)}}
					\end{tabular}
				\end{tabular}
		
	\end{enumerate}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Elementary matrices} 
\begin{ceuthm}
	A matrix $A\in\mathcal{M}_{n\times n}$ is invertible iff it is row-equivalent to $I_n$. In this case, the sequence of operations
	that transforms $A$ into $I_n$ is also the one that transforms $I_n$ into $A^{-1}$.
		\parbox{11cm}{
			\underline{\textit{Proof $\Rightarrow$}}\\
			\leftskip5mm If $A$ is invertible, then by theorem \ref{thm:eqSystemInverse} we know that the equation system $A\mathbf{x}=\mathbf{b}$ has a unique
				solution for every $\mathbf{b}$. If it has a solution for every $\mathbf{b}$, then it must have a pivot in every row, that must be in the diagonal
				and, consequently the reduced echelon form of $A$ must be $I_n$.
		}
		\parbox{11cm}{
			\underline{\textit{Proof $\Leftarrow$}}\\
			\leftskip5mm If $A$ is row-equivalent to $I_n$, then there exists a sequence of elementary matrices that transforms $A$ into $I_n$
			\begin{center}
				$A \sim E_1A \sim E_2E_1A \sim ... \sim E_pE_{p-1}...E_2E_1A = I_n$
			\end{center}
			$E=E_pE_{p-1}...E_2E_1$ is a candidate to be the inverse of $A$. Since each of the elementary matrices is invertible, and
			the product of invertible matrices is invertible, then $E$ is invertible and $A$ must be its (unique) inverse. Equivalently, $E$ is the inverse of $A$ and
			$A$ is invertible.
		}
	\label{thm:elementaryMatrices}
\end{ceuthm}
\end{frame}

% ==============================================
\subsection{An algorithm to invert matrices (b)} 
\Outline

\begin{frame}\frametitle{An algorithm to invert matrices} 
\begin{block}{Algorithm}
	\textbf{Algorithm}: Reduce the augmented matrix $\left(\begin{array}{c|c}A & I\end{array}\right)$\\
	If $A$ is invertible, then $\left(\begin{array}{c|c}A & I\end{array}\right) \sim \left(\begin{array}{c|c}I & A^{-1}\end{array}\right)$.\\
	If $A$ is not invertible, then we will not be able to reduce $A$ to $I$.\\
	This algorithm is widely used in practice because it is numerically stable and rather efficient.
\end{block}

\begin{exampleblock}{Example}
	Let $A=\left(\begin{array}{ccc}0 & 1 & 2 \\ 1 & 0 & 3 \\ 4 & -3 & 8\end{array}\right)$.\\We construct the augmented matrix\\
	\begin{center}
	  $\left(\begin{array}{ccc|ccc}0 & 1 & 2 & 1 & 0 & 0\\ 1 & 0 & 3 & 0 & 1 & 0\\ 4 & -3 & 8 & 0 & 0 & 1\end{array}\right)$
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{An algorithm to invert matrices} 
\begin{exampleblock}{Example (continued)}
	And now we transform it\\
	\begin{center}
		\begin{tabular}{cc}
			& $\left(\begin{array}{ccc|ccc}0 & 1 & 2 & 1 & 0 & 0\\ 1 & 0 & 3 & 0 & 1 & 0\\ 4 & -3 & 8 & 0 & 0 & 1\end{array}\right)$ \\
			$\mathbf{r}_1 \leftrightarrow \mathbf{r}_2$ & $\left(\begin{array}{ccc|ccc}1 & 0 & 3 & 0 & 1 & 0\\ 0 & 1 & 2 & 1 & 0 & 0\\ 4 & -3 & 8 & 0 & 0 & 1\end{array}\right)$ \\
			$\mathbf{r}_3 \leftarrow \mathbf{r}_3-4\mathbf{r}_1$ & $\left(\begin{array}{ccc|ccc}1 & 0 & 3 & 0 & 1 & 0\\ 0 & 1 & 2 & 1 & 0 & 0\\ 0 & -3 & -4 & 0 & -4 & 1\end{array}\right)$ \\
			$\mathbf{r}_3 \leftarrow \mathbf{r}_3+3\mathbf{r}_2$ & $\left(\begin{array}{ccc|ccc}1 & 0 & 3 & 0 & 1 & 0\\ 0 & 1 & 2 & 1 & 0 & 0\\ 0 & 0 & 2 & 3 & -4 & 1\end{array}\right)$ \\
			$\mathbf{r}_3 \leftarrow \frac{1}{2}\mathbf{r}_3$ & $\left(\begin{array}{ccc|ccc}1 & 0 & 3 & 0 & 1 & 0\\ 0 & 1 & 2 & 1 & 0 & 0\\ 0 & 0 & 1 & \frac{3}{2} & -2 & \frac{1}{2}\end{array}\right)$ \\
		\end{tabular}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{An algorithm to invert matrices} 
\begin{exampleblock}{Example (continued)}
	\begin{center}
		\begin{tabular}{cc}
			 & $\left(\begin{array}{ccc|ccc}1 & 0 & 3 & 0 & 1 & 0\\ 0 & 1 & 2 & 1 & 0 & 0\\ 0 & 0 & 1 & \frac{3}{2} & -2 & \frac{1}{2}\end{array}\right)$ \\
			 $\mathbf{r}_2 \leftarrow \mathbf{r}_2-2\mathbf{r}_3$ & $\left(\begin{array}{ccc|ccc}1 & 0 & 3 & 0 & 1 & 0\\ 0 & 1 & 0 & -2 & 4 & -1\\ 0 & 0 & 1 & \frac{3}{2} & -2 & \frac{1}{2}\end{array}\right)$ \\
			 $\mathbf{r}_1 \leftarrow \mathbf{r}_1-3\mathbf{r}_3$ & $\left(\begin{array}{ccc|ccc}1 & 0 & 0 & -\frac{9}{2} & 7 & -\frac{3}{2}\\ 0 & 1 & 0 & -2 & 4 & -1\\ 0 & 0 & 1 & \frac{3}{2} & -2 & \frac{1}{2}\end{array}\right)$ \\
		\end{tabular}
	\end{center}
	Since $A$ is row-equivalent to $I_3$, then $A$ is invertible and its inverse is $A^{-1}=\begin{pmatrix}-\frac{9}{2} & 7 & -\frac{3}{2}\\ -2 & 4 & -1\\ \frac{3}{2} & -2 & \frac{1}{2}
	\end{pmatrix}$. To finalize the exercise we should check that \\ \begin{center}$AA^{-1}=A^{-1}A=I_3$\end{center}
\end{exampleblock}
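\begin{block}{}
	The whole reduction can be reproduced in MATLAB with the built-in function \texttt{rref}:
	{\color{blue}\texttt{A=[0 1 2; 1 0 3; 4 -3 8]; rref([A eye(3)])}}
\end{block}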

\end{frame}

% ==============================================
\begin{frame}\frametitle{An algorithm to invert matrices} 
\begin{block}{A new interpretation of matrix inversion}
	By constructing the augmented matrix $\left(\begin{array}{c|c}A & I\end{array}\right)$ we are simultaneously solving multiple equation systems
	\begin{center}
		\begin{tabular}{cccc}
			$A\mathbf{x}=\mathbf{e}_1$ & $A\mathbf{x}=\mathbf{e}_2$ & $A\mathbf{x}=\mathbf{e}_3$ & ...
		\end{tabular}
	\end{center}
\end{block}

\begin{exampleblock}{Example (continued)}
	\begin{center}
		\begin{tabular}{ccc}
			$\left(\begin{array}{ccc|c}0 & 1 & 2 & 1 \\ 1 & 0 & 3 & 0 \\ 4 & -3 & 8 & 0 \end{array}\right)$ &
			$\left(\begin{array}{ccc|c}0 & 1 & 2 & 0 \\ 1 & 0 & 3 & 1 \\ 4 & -3 & 8 & 0 \end{array}\right)$ &
			$\left(\begin{array}{ccc|c}0 & 1 & 2 & 0 \\ 1 & 0 & 3 & 0 \\ 4 & -3 & 8 & 1 \end{array}\right)$
		\end{tabular}
	\end{center}
\end{exampleblock}

\begin{block}{}
	This note is important because if we want to compute only the $i$-th column of $A^{-1}$ it is enough to solve the equation system
	\begin{center}
		$A\mathbf{x}=\mathbf{e}_i$
	\end{center}
\end{block}
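\begin{exampleblock}{Example (continued)}
	In MATLAB, the first column of $A^{-1}$ can be obtained by solving a single system with the backslash operator:
	{\color{blue}\texttt{A=[0 1 2; 1 0 3; 4 -3 8]; x=A\textbackslash[1;0;0]}}
\end{exampleblock}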

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 2, Section 2:
	\begin{itemize}
		\item 2.2.7
		\item 2.2.11
		\item 2.2.13
		\item 2.2.17
		\item 2.2.19
		\item 2.2.21
		\item 2.2.25
		\item 2.2.36
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Characterization of invertible matrices (c)} 
\Outline

\begin{frame}\frametitle{Characterization of invertible matrices} 
\begin{ceuthm}[The invertible matrix theorem]
	Let $A \in \mathcal{M}_{n\times n}$. The following statements are equivalent (either they are all true or they are all false):
	\begin{enumerate}[i.]
		\item $A$ is invertible.
		\item $A$ is row-equivalent to $I_n$.
		\item $A$ has $n$ pivot positions.
		\item $A\mathbf{x}=\mathbf{0}$ only has the trivial solution $\mathbf{x}=\mathbf{0}$.
		\item The columns of $A$ are linearly independent.
		\item The transformation $T(\mathbf{x})=A\mathbf{x}$ is injective.
		\item The equation $A\mathbf{x}=\mathbf{b}$ has at least one solution for every $\mathbf{b}\in \mathbb{R}^n$.
		\item The columns of $A$ span $\mathbb{R}^n$.
		\item The transformation $T(\mathbf{x})=A\mathbf{x}$ maps $\mathbb{R}^n$ onto $\mathbb{R}^n$.
		\item There exists a matrix $C\in\mathcal{M}_{n\times n}$ such that $CA=I_n$.
		\item There exists a matrix $D\in\mathcal{M}_{n\times n}$ such that $AD=I_n$.
		\item $A^T$ is an invertible matrix
	\end{enumerate}
	\label{thm:characterization}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Characterization of invertible matrices} 
\begin{block}{}
	To prove the theorem we will follow the reasoning scheme below:
	\begin{center}
		\includegraphics[width=4cm]{figTheoremInvertibleMatrix.jpg}
	\end{center}
	\underline{\textit{Proof i $\Rightarrow$ x}}\\
	If i is true, then x is true simply by taking $C=A^{-1}$.\\
	\underline{\textit{Proof x $\Rightarrow$ iv}}\\
	See Exercise 2.1.23 in Lay.\\
	\underline{\textit{Proof iv $\Rightarrow$ iii}}\\
	See Exercise 2.2.23 in Lay. \\
	\underline{\textit{Proof iii $\Rightarrow$ ii}}\\
	If iii is true, then the $n$ pivots have to be in the main diagonal and in this case, the reduced echelon
	form must be $I_n$. \\
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Characterization of invertible matrices} 
\begin{block}{}
	\underline{\textit{Proof ii $\Rightarrow$ i}}\\
	If ii is true, then i is true thanks to Theorem \ref{thm:elementaryMatrices}. \\
	\underline{\textit{Proof i $\Rightarrow$ xi}}\\
	If i is true, then xi is true simply by taking $D=A^{-1}$. \\
	\underline{\textit{Proof xi $\Rightarrow$ vii}}\\
	See Exercise 2.1.24 in Lay.\\
	\underline{\textit{Proof vii $\Rightarrow$ i}}\\
	See Exercise 2.2.24 in Lay.\\
	\underline{\textit{Proof vii $\Leftrightarrow$ viii $\Leftrightarrow$ ix}}\\
	See Theorems 3.2 and 8.2 in Chapter 2.\\
	\underline{\textit{Proof iv $\Leftrightarrow$ v $\Leftrightarrow$ vi}}\\
	See Theorems 3.2, 5.1 and 8.1 in Chapter 2.\\
	\underline{\textit{Proof i $\Rightarrow$ xii}}\\
	See Theorem \ref{thm:basicInverses}.\\
	\underline{\textit{Proof i $\Leftarrow$ xii}}\\
	See Theorem \ref{thm:basicInverses} interchanging the roles of $A$ and $A^T$.\\
\end{block}
	The power of this theorem is that it connects equation systems to invertibility, linear independence and subspace bases.

\end{frame}

% ==============================================
\begin{frame}\frametitle{Characterization of invertible matrices} 
\begin{block}{Corollary}
	\begin{enumerate}
		\item If $A$ is invertible, then $A\mathbf{x}=\mathbf{b}$ has a unique solution for every $\mathbf{b}\in\mathbb{R}^n$.
		\item If $A,B\in\mathcal{M}_{n\times n}$ and $AB=I_n$, then $A$ and $B$ are invertible and $B=A^{-1}$ and $A=B^{-1}$.
	\end{enumerate}
	Note that this corollary applies only to square matrices.\\
\end{block}
\end{frame}

% ==============================================
\subsection{Invertible linear transformations (c)} 
\Outline

\begin{frame}\frametitle{Invertible linear transformations}
Consider the linear transformation
\begin{center}
	$\begin{array}{rcl}
		T:\mathbb{R}^n & \rightarrow & \mathbb{R}^n \\
		\mathbf{x} & \rightarrow & A\mathbf{x}
	\end{array}$
\end{center}
\begin{ceudef}[Inverse transformation]
	$T$ is invertible iff there exists $S:\mathbb{R}^n \rightarrow \mathbb{R}^n$ such that $\forall \mathbf{x}\in\mathbb{R}^n$:
	\begin{center}
		$S(T(\mathbf{x}))=\mathbf{x}=T(S(\mathbf{x}))$
	\end{center}
\end{ceudef}
\begin{exampleblock}{Example}
	$T(\mathbf{x})=\begin{pmatrix}-1 & 0 \\ 0 & 1\end{pmatrix}\mathbf{x}$ is invertible and its inverse is $S(\mathbf{x})=\begin{pmatrix}-1 & 0 \\ 0 & 1\end{pmatrix}\mathbf{x}$.\\
	\underline{\textit{Proof}}\\
	\begin{center}
		$S(T(\mathbf{x}))=S\left(\begin{pmatrix}-1 & 0 \\ 0 & 1\end{pmatrix}\mathbf{x}\right)=
			\begin{pmatrix}-1 & 0 \\ 0 & 1\end{pmatrix}\begin{pmatrix}-1 & 0 \\ 0 & 1\end{pmatrix}\mathbf{x}=\mathbf{x}$\\
		$T(S(\mathbf{x}))=T\left(\begin{pmatrix}-1 & 0 \\ 0 & 1\end{pmatrix}\mathbf{x}\right)=
			\begin{pmatrix}-1 & 0 \\ 0 & 1\end{pmatrix}\begin{pmatrix}-1 & 0 \\ 0 & 1\end{pmatrix}\mathbf{x}=\mathbf{x}$\\
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Invertible linear transformations}
\begin{exampleblock}{Example}
	$T(\mathbf{x})=\begin{pmatrix} 1 & 0 \\ 0 & 0\end{pmatrix}\mathbf{x}$ is not invertible because $T((1,0))=T((1,1))=(1,0)$, so given the ``output'' (1,0), we cannot recover the input vector that originated this output.
\end{exampleblock}
\begin{ceuthm}
	If $T$ is invertible, then it is surjective.\\
	\underline{\textit{Proof}}\\
	Consider any vector $\mathbf{b}\in\mathbb{R}^n$. We can always apply the transformation $S$ to get a new vector $\mathbf{x}=S(\mathbf{b})$, and then recover $\mathbf{b}$ making use of the fact that $T$ is the inverse of $S$, that is, $\mathbf{b}=T(\mathbf{x})$. In other words, any vector $\mathbf{b}$ is in the range of $T$ and, therefore, $T$ is surjective.
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Invertible linear transformations}
\begin{ceuthm}
	$T$ is invertible iff $A$ is invertible. If $T$ is invertible, then the only function that satisfies the previous definition is
	\begin{center}
		$S(\mathbf{x})=A^{-1}\mathbf{x}$
	\end{center}
	\underline{\textit{Proof $\Rightarrow$}}\\
	If $T$ is invertible, then it is surjective (see previous Theorem). Then, $A$ is invertible by Theorem \ref{thm:characterization} (items i and ix).\\
	\underline{\textit{Proof $\Leftarrow$}}\\
	If $A$ is invertible, then we may construct the linear transformation $S(\mathbf{x})=A^{-1}\mathbf{x}$. $S$ is an inverse of $T$ since
	\begin{center}
		$S(T(\mathbf{x}))=S(A\mathbf{x})=A^{-1}(A\mathbf{x})=(A^{-1}A)\mathbf{x}=\mathbf{x}$\\
		$T(S(\mathbf{x}))=T(A^{-1}\mathbf{x})=A(A^{-1}\mathbf{x})=(AA^{-1})\mathbf{x}=\mathbf{x}$\\
	\end{center}	
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Invertible linear transformations}
\begin{block}{}
	\underline{\textit{Proof uniqueness}}\\
	Let us assume that there are two inverses $S_1(\mathbf{x})=B_1\mathbf{x}$ and $S_2(\mathbf{x})=B_2\mathbf{x}$ with $B_1\neq B_2$.
	Let $\mathbf{v} \in \mathbb{R}^n$ and $\mathbf{v}=T(\mathbf{x})$ for some $\mathbf{x} \in \mathbb{R}^n$ (since $T$ is invertible and, therefore,
	surjective, we are guaranteed that there exists at least one such $\mathbf{x}$). Now
	\begin{center}
		$\left.
		\begin{array}{c}
			S_1(\mathbf{v})=B_1\mathbf{v}=B_1A\mathbf{x}=\mathbf{x} \\
			S_2(\mathbf{v})=B_2\mathbf{v}=B_2A\mathbf{x}=\mathbf{x} \\
		\end{array}
		\right\}\Rightarrow B_1\mathbf{v}=B_2\mathbf{v}\; [\forall \mathbf{v}\in\mathbb{R}^n]\; \Rightarrow B_1=B_2$
	\end{center}
	which is a contradiction and, consequently, there exists only one inverse (q.e.d.)
\end{block}

\begin{ceudef}[Ill-conditioned matrix]
	Informally, we say that a matrix $A$ is \textbf{ill-conditioned} if it is ``nearly singular''. In practice, this implies that the equation system
	$A\mathbf{x}=\mathbf{b}$ may have large variations in the solution ($\mathbf{x}$) when $\mathbf{b}$ varies slightly.
\end{ceudef}
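\begin{exampleblock}{Example}
	MATLAB measures conditioning with the built-in function \texttt{cond}; large values indicate an ill-conditioned matrix. The built-in Hilbert matrix is a classical ill-conditioned example:
	\begin{center}
		MATLAB: {\color{blue}\texttt{cond(hilb(10))}}
	\end{center}
\end{exampleblock}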

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 2, Section 3:
	\begin{itemize}
		\item 2.3.13
		\item 2.3.16
		\item 2.3.17
		\item 2.3.33
		\item 2.3.41
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Partitioned matrices (c)} 
\Outline

\begin{frame}\frametitle{Partitioned matrices}
Partitioned matrices sometimes help us to gain insight into the structure of the problem by identifying blocks within the matrix.
\begin{exampleblock}{Example}
	\begin{center}
		$A=\left(\begin{array}{ccc|cc|c}
			3 & 0 & -1 & 5 & 9 & -2 \\
			-5 & 2 & 4 & 0 & -3 & 1 \\
			\hline
			-8 & -6 & 3 & 1 & 7 & -4
		\end{array}\right)=
		\left(\begin{array}{c|c|c}
			A_{11} & A_{12} & A_{13} \\
			\hline
			A_{21} & A_{22} & A_{23} \\
		\end{array}\right)$
	\end{center}
	$A \in \mathcal{M}_{3\times 6}$, \\$A_{11} \in \mathcal{M}_{2\times 3}$, $A_{12} \in \mathcal{M}_{2\times 2}$, $A_{13} \in \mathcal{M}_{2\times 1}$,\\
	$A_{21} \in \mathcal{M}_{1\times 3}$, $A_{22} \in \mathcal{M}_{1\times 2}$, $A_{23} \in \mathcal{M}_{1\times 1}$.\\
	MATLAB:\\
	{\color{blue}\texttt{
		A=[3 0 -1 5 9 -2; -5 2 4 0 -3 1; -8 -6 3 1 7 -4]; \\
		A11=A(1:2,1:3)\\
		A12=A(1:2,4:5)\\
		A13=A(1:2,6)\\
		A21=A(3,1:3)\\
		A22=A(3,4:5)\\
		A23=A(3,6)\\
		}}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Partitioned matrices}
\begin{ceudef}[Sum of partitioned matrices]
	Let $A$ and $B$ be two matrices partitioned in the same way. Then the blocks of $A+B$ are simply the sum of the corresponding blocks.
	\begin{center}
		$A+B=
		\left(\begin{array}{c|c|c}
			  &  &  \\
			\hline
			 & A_{ij} & \\
			\hline
			  &  &  \\
		\end{array}\right)+
		\left(\begin{array}{c|c|c}
			  &  &  \\
			\hline
			 & B_{ij} & \\
			\hline
			  &  &  \\
		\end{array}\right)=
		\left(\begin{array}{c|c|c}
			  &  &  \\
			\hline
			 & A_{ij}+B_{ij} & \\
			\hline
			  &  &  \\
		\end{array}\right)
		$
	\end{center}
	
\end{ceudef}

\begin{ceudef}[Multiplication by scalar]
	The multiplication by a scalar simply multiplies each one of the blocks independently
	\begin{center}
		$rA=
		r\left(\begin{array}{c|c|c}
			  &  &  \\
			\hline
			 & A_{ij} & \\
			\hline
			  &  &  \\
		\end{array}\right)=
		\left(\begin{array}{c|c|c}
			  &  &  \\
			\hline
			 & rA_{ij} & \\
			\hline
			  &  &  \\
		\end{array}\right)
		$
	\end{center}
	
\end{ceudef}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Partitioned matrices}
\begin{ceudef}[Multiplication of partitioned matrices]
	Multiply the different blocks as if they were scalars (but applying matrix multiplication).
\end{ceudef}
\begin{exampleblock}{Example}
	Let $A=\left(\begin{array}{ccc|cc}
			2 & -3 & 1 & 0 & -4  \\
			1 & 5 & -2 & 3 & -1 \\
			\hline
			0 & -4 & -2 & 7 & -1
		\end{array}\right)=
		\left(\begin{array}{c|c}
			A_{11} & A_{12} \\
			\hline
			A_{21} & A_{22} \\
		\end{array}\right)$\\
	and $B=\left(\begin{array}{cc}
			6 & 4\\
			-2 & 1 \\
			-3 & 7 \\
			\hline
			-1 & 3\\
			5 & 2\\
		\end{array}\right)=
		\left(\begin{array}{c}
			B_{1} \\
			\hline
			B_{2} \\
		\end{array}\right)$.\\
	Then, $AB=\left(\begin{array}{cc}
			A_{11} & A_{12} \\
			A_{21} & A_{22} \\
		\end{array}\right)
		\left(\begin{array}{c}
			B_{1} \\
			B_{2} \\
		\end{array}\right)
		=\left(\begin{array}{c}
			A_{11}B_{1}+A_{12}B_2 \\
			A_{21}B_1+A_{22}B_{2} \\
		\end{array}\right)=
		\left(\begin{array}{cc}
			-5 & 4\\
			-6 & 2 \\
			2 & 1\\
		\end{array}\right)$
\end{exampleblock}
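\begin{block}{}
	A MATLAB check of the block computation against the full product (the blocks are taken by indexing, as in the first example of this section):
	{\color{blue}\texttt{A=[2 -3 1 0 -4; 1 5 -2 3 -1; 0 -4 -2 7 -1]; B=[6 4; -2 1; -3 7; -1 3; 5 2];}}\\
	{\color{blue}\texttt{isequal(A*B, A(:,1:3)*B(1:3,:)+A(:,4:5)*B(4:5,:))}}
\end{block}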

\end{frame}

% ==============================================
\begin{frame}\frametitle{Partitioned matrices}
\begin{ceuthm}[Multiplication of matrices]
	Let $A \in \mathcal{M}_{m\times n}$ and $B \in \mathcal{M}_{n \times p}$, then
	\begin{center}
		$AB=\sum\limits_{k=1}^n{\mathrm{column}_k(A)\mathrm{row}_k(B)}$
	\end{center}
	\underline{\textit{Proof}}\\
	Let us analyze each one of the terms in the sum
	\begin{center}
		$\mathrm{column}_k(A)\mathrm{row}_k(B)=\begin{pmatrix}a_{1k} \\ a_{2k} \\ ... \\ a_{mk}\end{pmatrix}
			\begin{pmatrix}b_{k1} & b_{k2} & ... & b_{kp}\end{pmatrix} =
			\begin{pmatrix}a_{1k}b_{k1} & a_{1k}b_{k2} & ... & a_{1k}b_{kp} \\
			               a_{2k}b_{k1} & a_{2k}b_{k2} & ... & a_{2k}b_{kp} \\
										 ... & ... & ... & ... \\
										 a_{mk}b_{k1} & a_{mk}b_{k2} & ... & a_{mk}b_{kp}
			\end{pmatrix}$
	\end{center}
	
\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Partitioned matrices}
\begin{block}{}
	In general, the $ij$-th term is 
	\begin{center}
		$(\mathrm{column}_k(A)\mathrm{row}_k(B))_{ij}=a_{ik}b_{kj}$
	\end{center}
	If we now analyze the $ij$-th element of the sum\\
	\begin{center}
		$\left(\sum\limits_{k=1}^n{\mathrm{column}_k(A)\mathrm{row}_k(B)}\right)_{ij}=\sum\limits_{k=1}^n{(\mathrm{column}_k(A)\mathrm{row}_k(B))_{ij}}=
		   \sum\limits_{k=1}^n{a_{ik}b_{kj}}$
	\end{center}
	But this is the definition of matrix multiplication and, therefore,
	\begin{center}
		$\left(\sum\limits_{k=1}^n{\mathrm{column}_k(A)\mathrm{row}_k(B)}\right)_{ij}=(AB)_{ij}$ (q.e.d.)
	\end{center}
\end{block}
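\begin{exampleblock}{Example}
	The column-row expansion can be checked in MATLAB by accumulating the outer products (using the small matrices from the earlier multiplication example):
	{\color{blue}\texttt{A=[2 3; 1 -5]; B=[4 3 6; 1 -2 3]; C=zeros(2,3);}}\\
	{\color{blue}\texttt{for k=1:2, C=C+A(:,k)*B(k,:); end; isequal(C,A*B)}}
\end{exampleblock}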
\end{frame}

% ==============================================
\begin{frame}\frametitle{Partitioned matrices}
\begin{ceudef}[Transpose of partitioned matrices]
	Transpose the partitioned matrix as if it were composed of scalars, and transpose each one of the blocks.
\end{ceudef}
\begin{exampleblock}{Example}
	\begin{center} $A=\left(\begin{array}{c|c|c}
			A_{11} & A_{12} & A_{13}  \\
			\hline
			A_{21} & A_{22} & A_{23}  \\
			\hline
			A_{31} & A_{32} & A_{33}  \\
		\end{array}\right) \Rightarrow
		A^T=\left(\begin{array}{c|c|c}
			A_{11}^T & A_{21}^T & A_{31}^T  \\
			\hline
			A_{12}^T & A_{22}^T & A_{32}^T  \\
			\hline
			A_{13}^T & A_{23}^T & A_{33}^T  \\
		\end{array}\right)$ \end{center}
\end{exampleblock}

\begin{exampleblock}{Example}
	\begin{center} $A=\left(\begin{array}{ccc|cc}
		2 & -3 & 1 & 0 & -4 \\
		1 &  5 & -2 & 3 & -1 \\
		\hline
		0 & -4 & -2 & 7 & -1 \\
		\end{array}\right) \Rightarrow
		A^T=\left(\begin{array}{cc|c}
		2 & 1 & 0 \\
		-3 & 5 & -4 \\
		1 & -2 & -2 \\
		\hline
		0 & 3 & 7 \\
		-4 & -1 & -1
		\end{array}\right)$ \end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Partitioned matrices}
\begin{ceudef}[Inverse of partitioned matrices]
	The formula for each one of the cases is worked out specifically for that case. Here are a couple of examples.
\end{ceudef}
\begin{exampleblock}{Example}
	Let $A=\left(\begin{array}{c|c|c}
			A_{11} & 0 & 0  \\
			\hline
			0 & A_{22} & 0  \\
			\hline
			0 & 0 & A_{33}  \\
		\end{array}\right)$. \\
	$A \in \mathcal{M}_{n\times n}$, $A_{11}\in \mathcal{M}_{p\times p}$, $A_{22}\in \mathcal{M}_{q\times q}$, $A_{33}\in \mathcal{M}_{r\times r}$ such that
	$p+q+r=n$.\\
	We look for a matrix $B$ such that
	\begin{center}
		$\left(\begin{array}{c|c|c}
			A_{11} & 0 & 0  \\
			\hline
			0 & A_{22} & 0  \\
			\hline
			0 & 0 & A_{33}  \\
		\end{array}\right)
		\left(\begin{array}{c|c|c}
			B_{11} & B_{12} & B_{13}  \\
			\hline
			B_{21} & B_{22} & B_{23}  \\
			\hline
			B_{31} & B_{32} & B_{33}  \\
		\end{array}\right)=
		\left(\begin{array}{c|c|c}
			I_p & 0 & 0  \\
			\hline
			0 & I_q & 0  \\
			\hline
			0 & 0 & I_r  \\
		\end{array}\right)\Rightarrow
		$\\
		$
		\left(\begin{array}{c|c|c}
			A_{11}B_{11} & A_{11}B_{12} & A_{11}B_{13}  \\
			\hline
			A_{22}B_{21} & A_{22}B_{22} & A_{22}B_{23}  \\
			\hline
			A_{33}B_{31} & A_{33}B_{32} & A_{33}B_{33}  \\
		\end{array}\right)=
		\left(\begin{array}{c|c|c}
			I_p & 0 & 0  \\
			\hline
			0 & I_q & 0  \\
			\hline
			0 & 0 & I_r  \\
		\end{array}\right)
		$\\
		
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Partitioned matrices}
\begin{exampleblock}{Example (continued)}
	For each one of the entries we have a set of equations:
	\begin{center}
		\begin{tabular}{l}
			$\forall A_{11}\in\mathcal{M}_{p\times p}\; A_{11}B_{11}=I_p \Rightarrow B_{11}=A_{11}^{-1}$ \\
			$\forall A_{11}\in\mathcal{M}_{p\times p}\; A_{11}B_{12}=0 \Rightarrow B_{12}=0$ \\
			$\forall A_{11}\in\mathcal{M}_{p\times p}\; A_{11}B_{13}=0 \Rightarrow B_{13}=0$ \\
			$\forall A_{22}\in\mathcal{M}_{q\times q}\; A_{22}B_{21}=0 \Rightarrow B_{21}=0$ \\
			$\forall A_{22}\in\mathcal{M}_{q\times q}\; A_{22}B_{22}=I_q \Rightarrow B_{22}=A_{22}^{-1}$ \\
			$\forall A_{22}\in\mathcal{M}_{q\times q}\; A_{22}B_{23}=0 \Rightarrow B_{23}=0$ \\
			$\forall A_{33}\in\mathcal{M}_{r\times r}\; A_{33}B_{31}=0 \Rightarrow B_{31}=0$ \\
			$\forall A_{33}\in\mathcal{M}_{r\times r}\; A_{33}B_{32}=0 \Rightarrow B_{32}=0$ \\
			$\forall A_{33}\in\mathcal{M}_{r\times r}\; A_{33}B_{33}=I_r \Rightarrow B_{33}=A_{33}^{-1}$ \\
		\end{tabular}
	\end{center}
	Finally,
	\begin{center}
		$B=\left(\begin{array}{c|c|c}
			A_{11}^{-1} & 0 & 0  \\
			\hline
			0 & A_{22}^{-1} & 0  \\
			\hline
			0 & 0 & A_{33}^{-1} \\
		\end{array}\right)$
	\end{center}
	
\end{exampleblock}
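\begin{block}{}
	MATLAB builds block-diagonal matrices with the built-in function \texttt{blkdiag}, and \texttt{inv} returns the expected block-diagonal inverse (the particular blocks below are chosen just for illustration):
	{\color{blue}\texttt{A11=[2 1; 1 1]; A22=3; A33=[1 2; 3 4]; inv(blkdiag(A11,A22,A33))}}
\end{block}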

\end{frame}

% ==============================================
\begin{frame}\frametitle{Partitioned matrices}
\begin{exampleblock}{Example}
	Let $A=\left(\begin{array}{c|c}
			A_{11} & A_{12}  \\
			\hline
			0 & A_{22}  \\
		\end{array}\right)$. \\
	$A \in \mathcal{M}_{n\times n}$, $A_{11}\in \mathcal{M}_{p\times p}$, $A_{12}\in \mathcal{M}_{p\times q}$, $A_{22}\in \mathcal{M}_{q\times q}$ such that
	$p+q=n$.\\
	We look for a matrix $B$ such that
	\begin{center}
		$\left(\begin{array}{c|c}
			A_{11} & A_{12}  \\
			\hline
			0 & A_{22}  \\
		\end{array}\right)
		\left(\begin{array}{c|c}
			B_{11} & B_{12}  \\
			\hline
			B_{21} & B_{22}  \\
		\end{array}\right)=
		\left(\begin{array}{c|c}
			I_p & 0   \\
			\hline
			0 & I_q   \\
		\end{array}\right)\Rightarrow
		$\\
		$
		\left(\begin{array}{c|c}
			A_{11}B_{11} +A_{12}B_{21} & A_{11}B_{12}+A_{12}B_{22}  \\
			\hline
			A_{22}B_{21} & A_{22}B_{22} \\
		\end{array}\right)=
		\left(\begin{array}{c|c}
			I_p & 0   \\
			\hline
			0 & I_q   \\
		\end{array}\right)
		$\\
		
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Partitioned matrices}
\begin{exampleblock}{Example (continued)}
	For each one of the entries we have a set of equations:
	\begin{center}
		\begin{tabular}{l}
			$\forall A_{22}\in\mathcal{M}_{q\times q}\; A_{22}B_{21}=0 \Rightarrow B_{21}=0$ \\
			$\forall A_{22}\in\mathcal{M}_{q\times q}\; A_{22}B_{22}=I_q \Rightarrow B_{22}=A_{22}^{-1}$ \\
			$\begin{array}{ll}\forall A_{11}\in\mathcal{M}_{p\times p},A_{12}\in\mathcal{M}_{p\times q}\; & A_{11}B_{11} +A_{12}B_{21}=I_p \Rightarrow [B_{21}=0] \Rightarrow \\ 
				& A_{11}B_{11}=I_p \Rightarrow B_{11}=A_{11}^{-1}\end{array}$ \\
			$\begin{array}{ll}\forall A_{11}\in\mathcal{M}_{p\times p},A_{12}\in\mathcal{M}_{p\times q}\; & A_{11}B_{12}+A_{12}B_{22}=0 \Rightarrow [B_{22}=A_{22}^{-1}] \Rightarrow \\ 
				& A_{11}B_{12}+A_{12}A_{22}^{-1}=0 \Rightarrow A_{11}B_{12}=-A_{12}A_{22}^{-1} \Rightarrow \\
				& B_{12}=-A_{11}^{-1}A_{12}A_{22}^{-1}\end{array}$ \\
		\end{tabular}
	\end{center}
	Finally,
	\begin{center}
		$B=\left(\begin{array}{c|c}
			A_{11}^{-1} & -A_{11}^{-1}A_{12}A_{22}^{-1}  \\
			\hline
			0 & A_{22}^{-1}  \\
		\end{array}\right)$
	\end{center}
	
\end{exampleblock}
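\begin{block}{}
	A MATLAB spot-check of the formula with arbitrary invertible blocks (chosen just for illustration; the norm of the difference should be 0):
	{\color{blue}\texttt{A11=[2 1; 1 1]; A12=[1 0; 0 1]; A22=[3 0; 0 2];}}\\
	{\color{blue}\texttt{norm(inv([A11 A12; zeros(2) A22]) - [inv(A11) -inv(A11)*A12*inv(A22); zeros(2) inv(A22)])}}
\end{block}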

\end{frame}

% ==============================================
\begin{frame}\frametitle{Partitioned matrices}
\begin{exampleblock}{Example}
	Computed Tomography (CT) with multiple detector rows gives a non-block structure for the system matrix that forces the problem to be solved in 3D. However,
	with a single-row detector, the system matrix has a block structure, so the problem can be solved as a series of 2D problems, strongly accelerating the process
	(on the other hand, the redundancy introduced by multiple rows offers better resolution and robustness to noise).
	\begin{center}
		\includegraphics[height=3.25cm]{figSliceCT1.jpg}
		\includegraphics[height=3.25cm]{figSliceCT2.jpg}
	\end{center}	
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 2, Section 4:
	\begin{itemize}
		\item 2.4.15
		\item 2.4.16
		\item 2.4.18
		\item 2.4.19
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{LU factorization (d)} 
\Outline

\begin{frame}\frametitle{LU factorization} 
\begin{exampleblock}{Example}
	Let us presume that we have a collection of equation systems
	\begin{center}
		$A\mathbf{x}=\mathbf{b}_1$ \\
		$A\mathbf{x}=\mathbf{b}_2$ \\
		...
	\end{center}
	where $A$ is not necessarily invertible. What could be an efficient way of solving all of them together?
	Factorize $A$ as $A=LU$ (see below) and solve each system in two steps. In fact, the method
	is so efficient that it is even used to solve a single equation system.
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{LU factorization} 
\begin{ceudef}[LU factorization]
	Let $A \in \mathcal{M}_{m\times n}$ be a matrix that can be reduced to an echelon form without row permutations. We can factorize $A$ as 
		$A=LU$,
	where $L$ is an invertible, lower triangular matrix (with 1s in the main diagonal) of size $m\times m$ and $U$ is an upper triangular matrix of size $m\times n$.\\
	MATLAB: {\color{blue}\texttt{[L,U]=lu(A)}}
\end{ceudef}
\begin{exampleblock}{Example}
	Let $A \in \mathcal{M}_{4\times 5}$. LU factorization will produce two matrices $L$ and $U$ that may have the following structure
	\begin{center}
		$A=LU=\begin{pmatrix}1 & 0 & 0 & 0 \\ 
		         \heartsuit & 1 & 0 & 0 \\
						 \heartsuit & \heartsuit & 1 & 0 \\
						 \heartsuit & \heartsuit & \heartsuit & 1\\ \end{pmatrix}
						\begin{pmatrix}\Diamond & \heartsuit & \heartsuit & \heartsuit & \heartsuit \\ 
		         0 & \Diamond & \heartsuit & \heartsuit & \heartsuit \\
						 0 & 0 & 0 & \Diamond & \heartsuit \\
						 0 & 0 & 0 & 0 & 0\end{pmatrix}$
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{LU factorization} 
\begin{block}{Solving a linear equation system using the LU decomposition}
	Consider the equation system $A \mathbf{x}=\mathbf{b}$, and assume we have decomposed $A$ as $A=LU$. Then,
	we can solve the equation system in two steps:
	\begin{center}
		$A\mathbf{x}=\mathbf{b}\Rightarrow (LU)\mathbf{x}=L(U\mathbf{x})=\mathbf{b}\Rightarrow
		\left\{\begin{array}{cc} L\mathbf{y}=\mathbf{b} \\ U\mathbf{x}=\mathbf{y}\end{array}\right.$ \\
		\includegraphics[scale=0.4]{figLU.jpg}
	\end{center}
\end{block}
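\begin{exampleblock}{Example}
	In MATLAB, once $L$ and $U$ are available, the two triangular systems are solved with the backslash operator, which exploits the triangular structure:
	{\color{blue}\texttt{y=L\textbackslash b; x=U\textbackslash y;}}
\end{exampleblock}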

\end{frame}

% ==============================================
\begin{frame}\frametitle{LU factorization} 
\begin{exampleblock}{Example}
	Consider
	\begin{tiny}\begin{center}
		$A=\left(\begin{array}{rrrr} 3 &-7 &-2 &2\\ -3& 5& 1& 0\\ 6& -4& 0& -5\\ -9& 5& -5& 12\end{array}\right)=
		   \left(\begin{array}{rrrr} 1 &0 &0 &0\\ -1& 1& 0& 0\\ 2& -5& 1& 0\\ -3& 8& 3& 1\end{array}\right)
			 \left(\begin{array}{rrrr} 3 &-7& -2& 2\\ 0& -2& -1& 2\\ 0& 0& -1& 1\\ 0& 0& 0& -1\end{array}\right)$
	\end{center}\end{tiny}
	and $\mathbf{b}=(-9,5,7,11)$. We first solve $L\mathbf{y}=\mathbf{b}$
	\begin{tiny}\begin{center}
		$\left(\begin{array}{rrrr|r} 1 &0 &0 &0 &-9\\ -1& 1& 0& 0 &5\\ 2& -5& 1& 0& 7\\ -3& 8& 3& 1 & 11\end{array}\right)\sim
		 \left(\begin{array}{rrrr|r} 1 &0 &0 &0 &-9\\  0& 1& 0& 0 &-4\\ 0& 0& 1& 0& 5\\ 0& 0& 0& 1 & 1\end{array}\right)$
	\end{center}\end{tiny}
	and now we solve $U\mathbf{x}=\mathbf{y}$
	\begin{tiny}\begin{center}
		$\left(\begin{array}{rrrr|r}  3 &-7& -2& 2 &-9\\ 0& -2& -1& 2&-4\\ 0& 0& -1& 1&5\\ 0& 0& 0& -1&1\end{array}\right)\sim
		 \left(\begin{array}{rrrr|r} 1 &0 &0 &0 &3\\  0& 1& 0& 0 &4\\ 0& 0& 1& 0& -6\\ 0& 0& 0& 1 & -1\end{array}\right)$
	\end{center}\end{tiny}
	The trick is that, thanks to the triangular structure, solving these two equation systems is rather fast.
\end{exampleblock}
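\begin{block}{}
	The same solution in MATLAB (note that \texttt{lu} applies row pivoting, so the computed $L$ and $U$ may differ from the hand factorization above, but the solution is the same):
	{\color{blue}\texttt{A=[3 -7 -2 2; -3 5 1 0; 6 -4 0 -5; -9 5 -5 12]; b=[-9;5;7;11];}}\\
	{\color{blue}\texttt{[L,U]=lu(A); x=U\textbackslash(L\textbackslash b)}}
\end{block}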

\end{frame}

% ==============================================
\begin{frame}\frametitle{An algorithm for simple LU factorizations} 
\begin{block}{Algorithm}
	Let us assume that $A$ is row-equivalent to $U$ using only row replacements that add a multiple of one row to another row below it. Then,
	there must be a sequence of elementary matrices such that
	\begin{center}
		$A\sim U \Rightarrow E_p...E_2E_1A=U \Rightarrow A=(E_p...E_2E_1)^{-1}U$
	\end{center}
	By inspection, we note that $L=(E_p...E_2E_1)^{-1}$. 
\end{block}
In the previous algorithm we are making use of the following theorem:
\begin{ceuthm}
	\begin{enumerate}
		\item The product of two lower triangular matrices is lower triangular.
		\item The inverse of a lower triangular matrix is lower triangular.
	\end{enumerate}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{An algorithm for simple LU factorizations} 
\begin{exampleblock}{Example}
	\begin{center}
		\begin{tabular}{clr}
			& & $A=\begin{pmatrix}2 & 1 & 0 \\1 & 2 & 1\\0 & 1 & 2\end{pmatrix}$ \\
			$\mathbf{r}_2\leftarrow\mathbf{r}_2-\frac{1}{2}\mathbf{r}_1$ & $E_1=\begin{pmatrix}1 & 0 & 0 \\-\frac{1}{2} & 1 & 0\\0 & 0 & 1\end{pmatrix}$ &
				$\begin{pmatrix}2 & 1 & 0 \\0 & \frac{3}{2} & 1\\0 & 1 & 2\end{pmatrix}$ \\
			$\mathbf{r}_3\leftarrow\mathbf{r}_3-\frac{2}{3}\mathbf{r}_2$ & $E_2=\begin{pmatrix}1 & 0 & 0 \\0 & 1 & 0\\0 & -\frac{2}{3}  & 1\end{pmatrix}$ &
				$U=\begin{pmatrix}2 & 1 & 0 \\0 & \frac{3}{2} & 1\\0 & 0 & \frac{4}{3}\end{pmatrix}$ \\
		\end{tabular}
	\end{center}
	Now, we calculate $L$ as
	\begin{center}
		$L=(E_2E_1)^{-1}=E_1^{-1}E_2^{-1}=\begin{pmatrix}1 & 0 & 0 \\ \frac{1}{2} & 1 & 0\\0 & 0 & 1\end{pmatrix}
			\begin{pmatrix}1 & 0 & 0 \\0 & 1 & 0\\0 & \frac{2}{3}  & 1\end{pmatrix}=\begin{pmatrix}1 & 0 & 0 \\\frac{1}{2} & 1 & 0\\0 & \frac{2}{3}  & 1\end{pmatrix}$
	\end{center}
\end{exampleblock}
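	As a check, MATLAB should reproduce these factors, since partial pivoting causes no row interchanges for this particular $A$:\\
	MATLAB: {\color{blue}\texttt{A=[2 1 0; 1 2 1; 0 1 2]; [L,U]=lu(A)}}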

\end{frame}

% ==============================================
\begin{frame}\frametitle{LDU factorization} 
\begin{exampleblock}{Example (continued)}
	Note that the $L$ and $U$ matrices found so far are asymmetric in the sense that $L$ has 1s in its main diagonal, but $U$ does not. We can extract the elements
	in the main diagonal of $U$ into a separate matrix $D$ by simply dividing each row of $U$ by its diagonal element:
	\begin{center}
		$\begin{array}{rll}A&=LU=&\begin{pmatrix}1 & 0 & 0 \\\frac{1}{2} & 1 & 0\\0 & \frac{2}{3}  & 1\end{pmatrix}
		                          \begin{pmatrix}2 & 1 & 0 \\0 & \frac{3}{2} & 1\\0 & 0 & \frac{4}{3}\end{pmatrix}\\
												&=LDU=&\begin{pmatrix}1 & 0 & 0 \\\frac{1}{2} & 1 & 0\\0 & \frac{2}{3}  & 1\end{pmatrix}
		                          \begin{pmatrix}2 & 0 & 0 \\0 & \frac{3}{2} & 0\\0 & 0 & \frac{4}{3}\end{pmatrix}
															\begin{pmatrix}1 & \frac{1}{2} & 0 \\0 & 1 & \frac{2}{3}\\0 & 0 & 1\end{pmatrix}
		\end{array}$
	\end{center}
	where $D$ is always a diagonal matrix (and the new rightmost factor is unit upper triangular).
\end{exampleblock}
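	A sketch of this extraction in MATLAB, assuming \texttt{L} and \texttt{U} are the factors above: {\color{blue}\texttt{D=diag(diag(U)); U1=D\textbackslash{}U;}} Here \texttt{diag(diag(U))} keeps only the main diagonal of $U$, and \texttt{D\textbackslash{}U} divides each row of $U$ by its diagonal element.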

\end{frame}

% ==============================================
\begin{frame}\frametitle{Other factorization examples} 
\begin{block}{Other factorizations}
	There are many other ways to factorize a matrix $A\in\mathcal{M}_{m\times n}$. See \url{http://en.wikipedia.org/wiki/Matrix_decomposition}. Among the most important are:
	\begin{description}
		\item[{QR}:] $A=QR$ where $Q\in\mathcal{M}_{m\times m}$ is orthogonal ($Q^tQ=I_m$) and $R\in\mathcal{M}_{m\times n}$ is upper triangular.
		\item[{SVD}:] $A=UDV^t$ where $U\in\mathcal{M}_{m\times m}$ is orthogonal ($U^tU=I_m$), $D\in\mathcal{M}_{m\times n}$ is diagonal, and $V\in\mathcal{M}_{n\times n}$ is also orthogonal ($V^tV=I_n$).
		\item[{Spectral}:] $A=PDP^{-1}$ (only for diagonalizable square matrices) where $P\in\mathcal{M}_{n\times n}$ is invertible and $D\in\mathcal{M}_{n\times n}$ is diagonal.
	\end{description}
	\end{description}
\end{block}
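	All three factorizations are available as MATLAB built-ins:\\
	{\color{blue}\texttt{[Q,R]=qr(A)}}, \quad {\color{blue}\texttt{[U,D,V]=svd(A)}}, \quad {\color{blue}\texttt{[P,D]=eig(A)}}\\
	(\texttt{eig} yields the spectral factors only when $A$ is diagonalizable.)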

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 2, Section 5:
	\begin{itemize}
		\item 2.5.9
		\item Practice problem of Section 2.5
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{An application to computer graphics and image processing (d)} 
\Outline

\begin{frame}\frametitle{An application to computer graphics and image processing} 
	\begin{exampleblock}{Example}
		In vector graphics, images are described as a set of connected points (whose coordinates are known).
		\begin{center}
			\includegraphics[scale=0.3]{figComputerGraphics.jpg} \\
			\begin{tabular}{m{7cm}m{4cm}}
				We may produce ``italic'' fonts by shearing the standard coordinates $T(\mathbf{x})=A\mathbf{x}$ where $A=\begin{pmatrix}1 & 0.25 \\ 0 & 1\end{pmatrix}$. &
				\includegraphics[scale=0.35]{figComputerGraphics2.jpg} \\
			\end{tabular}
		\end{center}
	\end{exampleblock}
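	A sketch with hypothetical glyph coordinates stored as the columns of \texttt{P}:\\
	MATLAB: {\color{blue}\texttt{A=[1 0.25; 0 1]; P=[0 0 1 1; 0 2 2 0]; A*P}}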
\end{frame}

% ==============================================
\begin{frame}\frametitle{An application to computer graphics and image processing} 
	\begin{exampleblock}{Example}
		Coordinate translations can be expressed as $T(\mathbf{x})=\mathbf{x}+\mathbf{x}_0$. But this is not a linear transformation:
		\begin{center}
			$\begin{array}{rcl}
				T(\mathbf{u})&=&\mathbf{u}+\mathbf{x}_0 \\
				T(\mathbf{v})&=&\mathbf{v}+\mathbf{x}_0 \\
				T(\mathbf{u}+\mathbf{v})&=&\mathbf{u}+\mathbf{v}+\mathbf{x}_0 \\
				T(\mathbf{u})+T(\mathbf{v})&=&(\mathbf{u}+\mathbf{x}_0)+(\mathbf{v}+\mathbf{x}_0)=\mathbf{u}+\mathbf{v}+2\mathbf{x}_0 \\
				T(\mathbf{u}+\mathbf{v})&\neq& T(\mathbf{u})+T(\mathbf{v})
			\end{array}$
		\end{center}
	\end{exampleblock}
	We can solve this problem with homogeneous coordinates.
\end{frame}

% ==============================================
\begin{frame}\frametitle{An application to computer graphics and image processing} 
	\begin{ceudef}[Homogeneous coordinates]
		 Given a point with coordinates $\mathbf{x}$ we can construct its
		\textbf{homogeneous coordinates} as
		\begin{center}
			$\tilde{\mathbf{x}}=\begin{pmatrix}h\mathbf{x}\\ h\end{pmatrix}$
		\end{center}
		In other words, the homogeneous coordinates $\tilde{\mathbf{u}}=\begin{pmatrix}\mathbf{u}\\ h\end{pmatrix}$ represent the point at
		$\frac{\mathbf{u}}{h}$. It is customary to use $h=1$ (although it is not compulsory, and in certain applications
		other values of $h$ are preferable).
	\end{ceudef}
	\begin{exampleblock}{Example}
		The 2D point $(1,2)$ can be represented in homogeneous coordinates as $(1,2,1)$, as $(2,4,2)$ and even as $(-2,-4,-2)$. They all represent
		the same point.
	\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{An application to computer graphics and image processing} 
	\begin{exampleblock}{Example}
		Now, a coordinate translation in homogeneous coordinates is a linear transformation. For instance, in 2D:
		\begin{center}
			$T(\tilde{\mathbf{x}})=A\tilde{\mathbf{x}}=\begin{pmatrix}1 & 0 & \Delta x \\ 0 & 1 &\Delta y \\ 0 & 0 & 1\end{pmatrix}
				\begin{pmatrix}x\\y\\1\end{pmatrix}=
				\begin{pmatrix}x+\Delta x\\y+\Delta y\\1\end{pmatrix}$
		\end{center}
	\end{exampleblock}
	\begin{block}{2D transformations in homogeneous coordinates}
		In general, any 2D transformation of the form $T(\mathbf{x})=A\mathbf{x}$ can be represented in homogeneous coordinates as
		\begin{center}
			$T(\tilde{\mathbf{x}})=\begin{pmatrix}A & \mathbf{0} \\ \mathbf{0} & 1\end{pmatrix}\tilde{\mathbf{x}}$
		\end{center}
	\end{block}
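	A sketch with hypothetical offsets $(\Delta x,\Delta y)=(2,-1)$ applied to the point $(5,3)$:\\
	MATLAB: {\color{blue}\texttt{T=[1 0 2; 0 1 -1; 0 0 1]; T*[5; 3; 1]}}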
\end{frame}

% ==============================================
\begin{frame}\frametitle{An application to computer graphics and image processing} 
	\begin{exampleblock}{Example}
		An application in 3D graphics: \url{http://www.youtube.com/watch?v=EsNmiiKlRXQ}
	\end{exampleblock}
	\begin{exampleblock}{Example}
		Let's say we want to
		\begin{enumerate}
			\item \begin{tabular}{m{5cm}m{5cm}} Rotate a point 30\degree about the $Y$ axis.& \includegraphics[scale=0.3]{figRotation.jpg} \end{tabular}
			\item Then, translate it by $(-6,4,5)$.
		\end{enumerate}
	\end{exampleblock}
	
\end{frame}

% ==============================================
\begin{frame}\frametitle{An application to computer graphics and image processing} 
	\begin{exampleblock}{Example (continued)}
		We need to use the transformation $T(\tilde{\mathbf{x}})=\tilde{A}\tilde{\mathbf{x}}$ with
		\begin{center}
			$\tilde{A}=\begin{pmatrix}1 & 0 & 0 & -6\\0 & 1 & 0 & 4 \\0 & 0 & 1 & 5\\ 0 & 0 & 0 & 1\end{pmatrix}
								 \begin{pmatrix}\cos(30^{\circ}) & 0 & \sin(30^{\circ}) & 0\\0 & 1 & 0 & 0 \\-\sin(30^{\circ}) & 0 & \cos(30^{\circ}) & 0\\ 0 & 0 & 0 & 1\end{pmatrix}$
		\end{center}
		and
		\begin{center}
			$\tilde{\mathbf{x}}=\begin{pmatrix}x\\y\\z\\1\end{pmatrix}$
		\end{center}
		
	\end{exampleblock}
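	In MATLAB (\texttt{cosd} and \texttt{sind} take their arguments in degrees):\\
	{\color{blue}\texttt{R=[cosd(30) 0 sind(30) 0; 0 1 0 0; -sind(30) 0 cosd(30) 0; 0 0 0 1];}}\\
	{\color{blue}\texttt{T=[1 0 0 -6; 0 1 0 4; 0 0 1 5; 0 0 0 1]; Atilde=T*R}}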
\end{frame}

% ==============================================
\begin{frame}\frametitle{An application to computer graphics and image processing} 
	\begin{exampleblock}{Example}
		Let's say we want to produce perspective projections. Let's imagine that the screen is on the $XY$ plane and the viewer's eye is at $(0,0,d)$ (the distance to
		the screen is $d$). Any object between the viewer and the screen is projected onto the screen as in the figure below
		\begin{center}
			\includegraphics[scale=0.24]{figPerspective.jpg}
		\end{center}
		By similar triangles we have
		\begin{center}
			$\tan \alpha=\frac{x^*}{d}=\frac{x}{d-z} \Rightarrow x^*=\frac{x}{1-\frac{z}{d}}$
		\end{center}
	\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{An application to computer graphics and image processing} 
	\begin{exampleblock}{Example (continued)}
		Similarly, $y^*=\frac{y}{1-\frac{z}{d}}$. Using homogeneous coordinates, we want $(x,y,z,1)$ to map onto $\left(\frac{x}{1-\frac{z}{d}},\frac{y}{1-\frac{z}{d}},0,1\right)$,
		or, equivalently, onto $\left(x,y,0,1-\frac{z}{d}\right)$. We can achieve this with the perspective transformation:
		\begin{center}
			$\tilde{P}=\begin{pmatrix}1 & 0 & 0 & 0\\0 & 1 & 0 & 0 \\0 & 0 & 0 & 0\\ 0 & 0 & -\frac{1}{d} & 1\end{pmatrix}$
		\end{center}
		
		
	\end{exampleblock}
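	A sketch for a hypothetical viewer distance $d=10$ and point $(4,2,5)$; dividing by the last homogeneous coordinate recovers $x^*=8$, $y^*=4$:\\
	MATLAB: {\color{blue}\texttt{d=10; P=[1 0 0 0; 0 1 0 0; 0 0 0 0; 0 0 -1/d 1]; v=P*[4;2;5;1]; v/v(4)}}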
\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 2, Section 7:
	\begin{itemize}
		\item 2.7.2
		\item 2.7.3
		\item 2.7.10
		\item 2.7.12
		\item 2.7.22
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Subspaces of $\mathbb{R}^n$ (e)} 
\Outline

\begin{frame}\frametitle{Subspace} 
\begin{ceudef}[Subspace of $\mathbb{R}^n$]
	$H \subseteq \mathbb{R}^n$ is a subspace of $\mathbb{R}^n$ if:
	\begin{enumerate}
		\item $\mathbf{0} \in H$
		\item $\forall \mathbf{u},\mathbf{v}\in H\quad \mathbf{u}+\mathbf{v}\in H$ (H is closed under vector addition)
		\item $\forall \mathbf{u}\in H\;\forall r\in \mathbb{R}\quad r\mathbf{u}\in H$ (H is closed under multiplication by a scalar)
	\end{enumerate}
\end{ceudef}

\begin{exampleblock}{Example: Special subspaces}
	The following two sets are subspaces of $\mathbb{R}^n$:
	\begin{enumerate}
		\item $H=\left\{\mathbf{0}\right\}$
		\item $H=\mathbb{R}^n$
	\end{enumerate}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Subspace} 
\begin{exampleblock}{Example: Plane}
	\begin{tabular}{m{3.5cm}m{9cm}}
		\includegraphics[scale=0.32]{figPlane.jpg} &
		A plane through the origin is defined as\newline $H=\mathrm{Span}\left\{\mathbf{v}_1,\mathbf{v}_2\right\}=\left\{\mathbf{v}\in\mathbb{R}^3\arrowvert\mathbf{v}=\lambda _1\mathbf{v}_1+\lambda _2\mathbf{v}_2\right\}$, with $\mathbf{v}_1$ and $\mathbf{v}_2$ linearly independent. \newline
		This plane is a subspace of $\mathbb{R}^3$\newline
		\underline{\textit{Proof}}\newline
		\begin{enumerate}
			\item \underline{Proof} $\mathbf{0} \in H$\\
						If $\lambda_1=\lambda_2=0$, then $\mathbf{v}=\mathbf{0}$.
			\item \underline{Proof} $\mathbf{u}+\mathbf{v}\in H$\\
						$\mathbf{u}\in H \Rightarrow \mathbf{u}=\lambda_{1u}\mathbf{v}_1+\lambda_{2u}\mathbf{v}_2$\\
						$\mathbf{v}\in H \Rightarrow \mathbf{v}=\lambda_{1v}\mathbf{v}_1+\lambda_{2v}\mathbf{v}_2$\\
						$\begin{array}{rcl}\mathbf{u}+\mathbf{v}&=&(\lambda_{1u}\mathbf{v}_1+\lambda_{2u}\mathbf{v}_2)+(\lambda_{1v}\mathbf{v}_1+\lambda_{2v}\mathbf{v}_2)\\
							&=&(\lambda_{1u}+\lambda_{1v})\mathbf{v}_1+(\lambda_{2u}+\lambda_{2v})\mathbf{v}_2 \in H\end{array}$
			\item \underline{Proof} $r\mathbf{u}\in H$\\
						$\mathbf{u}\in H \Rightarrow \mathbf{u}=\lambda_{1u}\mathbf{v}_1+\lambda_{2u}\mathbf{v}_2$\\
						$\begin{array}{rcl}r\mathbf{u}&=&r(\lambda_{1u}\mathbf{v}_1+\lambda_{2u}\mathbf{v}_2)\\ &=&r\lambda_{1u}\mathbf{v}_1+r\lambda_{2u}\mathbf{v}_2 \in H\end{array}$
		\end{enumerate}
		
	\end{tabular}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Subspace} 
\begin{exampleblock}{Example: Line not through the origin}
	A line $L$ that does not pass through the origin is not a subspace, because
	\begin{enumerate}
		\item $\mathbf{0} \notin L$\\
		\item If we take two points belonging to the line ($\mathbf{u}$ and $\mathbf{v}$), $\mathbf{u}+\mathbf{v} \notin L$.
		\item If we take a point belonging to the line ($\mathbf{w}$), $2\mathbf{w} \notin L$.
	\end{enumerate}
	\begin{center}
		\includegraphics[scale=0.4]{figLine.jpg}
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Subspace} 
\begin{exampleblock}{Example: Line through the origin}
	Consider $\mathbf{v}_1$ and $\mathbf{v}_2=k\mathbf{v}_1$. Then,
	\begin{center}
		$H=\mathrm{Span}\left\{\mathbf{v}_1,\mathbf{v}_2\right\}=\mathrm{Span}\left\{\mathbf{v}_1\right\}$
	\end{center}
	is a line. It is easy to prove that this line is a subspace of $\mathbb{R}^n$.
	\begin{center}
		\includegraphics[scale=0.4]{figLine2.jpg}
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Column space} 
\begin{ceudef}[Column space of a matrix]
	Let $A \in \mathcal{M}_{m\times n}$. Let $\mathbf{a}_i \in \mathbb{R}^m$ be the columns of $A$. The \textbf{column space} of A is defined as
	\begin{center}
		$\mathrm{Col}\{A\}=\mathrm{Span}\left\{\mathbf{a}_1,\mathbf{a}_2,...,\mathbf{a}_n\right\} \subseteq \mathbb{R}^m$
	\end{center}
\end{ceudef}
\begin{ceuthm}
	$\mathrm{Col}\{A\}$ is a subspace of $\mathbb{R}^m$.
\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Column space} 
\begin{exampleblock}{Example}
	Let $A=\left(\begin{array}{rrr}1 & -3 & -4 \\ -4 & 6 & -2 \\ -3 & 7 & 6\end{array}\right)$ and $\mathbf{b}=\left(\begin{array}{r}3\\3 \\ -4\end{array}\right)$.\\
	Determine if $\mathbf{b}$ belongs to $\mathrm{Col}\{A\}$.\\
	\underline{\textit{Solution}}\\
	If $\mathbf{b} \in \mathrm{Col}\{A\}$ there must be some coefficients $x_1$, $x_2$ and $x_3$ such that
	\begin{center}
		$\mathbf{b}=x_1\mathbf{a}_1+x_2\mathbf{a}_2+x_3\mathbf{a}_3$
	\end{center}
	To find these coefficients we simply have to solve the equation system $A\mathbf{x}=\mathbf{b}$.
	\begin{center}
		$\left(\begin{array}{rrr|r}1 & -3 & -4 &3\\ -4 & 6 & -2 &3\\ -3 & 7 & 6&-4\end{array}\right)\sim
		 \left(\begin{array}{rrr|r}1 & -3 & -4 &3\\  0 & -6 & -18 &15\\ 0 & 0 & 0 & 0\end{array}\right)$
	\end{center}
	In fact, there are infinitely many solutions to the equation system and, consequently, $\mathbf{b} \in \mathrm{Col}\{A\}$.
\end{exampleblock}
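	The same check can be done in MATLAB:\\
	{\color{blue}\texttt{A=[1 -3 -4; -4 6 -2; -3 7 6]; b=[3;3;-4]; rref([A b])}}\\
	The absence of a pivot in the last column of the result confirms that the system is consistent, i.e., $\mathbf{b} \in \mathrm{Col}\{A\}$.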

\end{frame}

% ==============================================
\begin{frame}\frametitle{Null space} 
\begin{ceudef}[Null space of a matrix]
	Let $A \in \mathcal{M}_{m\times n}$. The \textbf{null space} of A is defined as
	\begin{center}
		$\mathrm{Nul}\{A\}=\left\{\mathbf{v}\in\mathbb{R}^n \arrowvert A\mathbf{v}=\mathbf{0}\right\}$
	\end{center}
\end{ceudef}
\begin{ceuthm}
	$\mathrm{Nul}\{A\}$ is a subspace of $\mathbb{R}^n$.\\
	\underline{\textit{Proof}}
		\begin{enumerate}
			\item \underline{Proof} $\mathbf{0} \in \mathrm{Nul}\{A\}$\\
						$A\mathbf{0}=\mathbf{0} \Rightarrow \mathbf{0}\in\mathrm{Nul}\{A\}$ (q.e.d.)
			\item \underline{Proof} $\mathbf{u}+\mathbf{v}\in \mathrm{Nul}\{A\}$\\
						$\mathbf{u}\in \mathrm{Nul}\{A\} \Rightarrow A\mathbf{u}=\mathbf{0}$\\
						$\mathbf{v}\in \mathrm{Nul}\{A\} \Rightarrow A\mathbf{v}=\mathbf{0}$\\
						$A(\mathbf{u}+\mathbf{v})=A\mathbf{u}+A\mathbf{v}=\mathbf{0}+\mathbf{0}=\mathbf{0} \Rightarrow \mathbf{u}+\mathbf{v}\in \mathrm{Nul}\{A\}$ (q.e.d.)
			\item \underline{Proof} $r\mathbf{u}\in \mathrm{Nul}\{A\}$\\
						$\mathbf{u}\in \mathrm{Nul}\{A\} \Rightarrow A\mathbf{u}=\mathbf{0}$\\
						$A(r\mathbf{u})=rA\mathbf{u}=r\mathbf{0}=\mathbf{0} \Rightarrow r\mathbf{u}\in \mathrm{Nul}\{A\}$ (q.e.d.)
		\end{enumerate}
\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Basis of a subspace} 
\begin{ceudef}[Basis of a subspace]
	Let $H\subseteq\mathbb{R}^n$. The set of vectors $B$ is a basis of $H$ if:
	\begin{enumerate}
		\item All vectors in $B$ are linearly independent
		\item $H=\mathrm{Span}\{B\}$
	\end{enumerate}
\end{ceudef}
\begin{exampleblock}{Standard basis of $\mathbb{R}^n$}
	Consider the vectors\\
	\begin{center}\begin{tabular}{ccccc}
		$\mathbf{e}_1=\begin{pmatrix}1\\0\\0\\...\\0\end{pmatrix}$ &
		$\mathbf{e}_2=\begin{pmatrix}0\\1\\0\\...\\0\end{pmatrix}$ &
		$\mathbf{e}_3=\begin{pmatrix}0\\0\\1\\...\\0\end{pmatrix}$ &
		... &
		$\mathbf{e}_n=\begin{pmatrix}0\\0\\0\\...\\1\end{pmatrix}$
	\end{tabular}\end{center}
	The set $B=\{\mathbf{e}_1,\mathbf{e}_2,...,\mathbf{e}_n\}$ is the \textbf{standard basis} of $\mathbb{R}^n$.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Basis of a subspace} 
\begin{exampleblock}{Example}
	Find a basis for the null space of $A=\left(\begin{array}{rrrrr}-3&6&-1&1&-7\\1&-2&2&3&-1\\2&-4&5&8&-4\end{array}\right)$.\\
	\underline{\textit{Solution}}\\
	The null space of $A$ consists of all vectors satisfying $A\mathbf{x}=\mathbf{0}$.
	\begin{center}
		$\left(\begin{array}{r|r}A & \mathbf{0}\end{array}\right)\sim\left(\begin{array}{rrrrr|r}1&-2&0&-1&3&0\\0&0&1&2&-2&0\\0&0&0&0&0&0\end{array}\right)$
	\end{center}
	So the solution of the equation system is $\left.\begin{array}{c}x_1=2x_2+x_4-3x_5\\x_3=-2x_4+2x_5\end{array}\right\}\Rightarrow$
	\begin{center}
		$\mathbf{x}=\begin{pmatrix}2x_2+x_4-3x_5\\ x_2 \\-2x_4+2x_5\\x_4\\x_5\end{pmatrix}
		=x_2\begin{pmatrix}2\\ 1 \\0\\0\\0\end{pmatrix}+x_4\left(\begin{array}{r}1\\ 0 \\-2\\1\\0\end{array}\right)+x_5\left(\begin{array}{r}-3\\ 0 \\ 2\\0\\1\end{array}\right)$
	\end{center}
\end{exampleblock}
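	MATLAB should reproduce this ``rational'' basis (as the columns of the result):\\
	{\color{blue}\texttt{A=[-3 6 -1 1 -7; 1 -2 2 3 -1; 2 -4 5 8 -4]; null(A,'r')}}\\
	(Plain \texttt{null(A)} returns an orthonormal basis of the same subspace instead.)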

\end{frame}

% ==============================================
\begin{frame}\frametitle{Null space and equation systems} 
\begin{exampleblock}{Example (continued)}
	The set $B=\{(2,1,0,0,0),(1,0,-2,1,0),(-3,0,2,0,1)\}$ is a basis of $\mathrm{Nul}\{A\}$. By construction, we have chosen them to be linearly independent.
\end{exampleblock}
\begin{exampleblock}{Example: Null space and equation systems }
	Consider $A=\begin{pmatrix}1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 0\end{pmatrix}$
	\begin{itemize}
		\item $\{\mathbf{e}_3\}$ is a basis for $\mathrm{Nul}\{A\}$
		\item Consider $\mathbf{b}=(7,3,0)$. The general solution of $A\mathbf{x}=\mathbf{b}$ is of the form\\
					\begin{center}
						$\mathbf{x}=\mathbf{x}_0+\mathbf{x}_{Nul}$
					\end{center}
					where $\mathbf{x}_0$ is a particular solution of $A\mathbf{x}=\mathbf{b}$ and $\mathbf{x}_{Nul}$ is any vector in $\mathrm{Nul}\{A\}$.
					In this particular case,
					\begin{center}
						$\mathbf{x}=(7,3,0)+x_3\mathbf{e}_3$
					\end{center}
	\end{itemize}
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{ Null space and equation systems} 
\begin{exampleblock}{Example: Null space and equation systems (continued)}
	Let us prove that the general solution is actually a solution of $A\mathbf{x}=\mathbf{b}$
	\begin{center}
		$A\mathbf{x}=A(\mathbf{x}_0+\mathbf{x}_{Nul})=A\mathbf{x}_0+A\mathbf{x}_{Nul}=\mathbf{b}+\mathbf{0}=\mathbf{b}$
	\end{center}
	Intuitively, we can say that the null space is the set of solution components about which we have no measurements. The equation
	system only imposes constraints on those components for which we have measurements.
	This is a problem in real situations, as shown on the following slide.
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Null space and equation systems} 
In this example, the authors describe how the exact location of a tooth fracture is uncertain (Fig. C)
due to the artifacts introduced by the null space of the tomographic problem.

\begin{center}
		\includegraphics[scale=0.4]{figNullSpace2.jpg}
\end{center}

\begin{tiny}
Mora, M. A.; Mol, A.; Tyndall, D. A., Rivera, E. M. \textit{In vitro assessment of local computed tomography for the detection of longitudinal tooth fractures}. Oral Surg Oral Med Oral Pathol Oral Radiol Endod, \textbf{2007}, 103, 825-829.
\end{tiny}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Basis of a subspace} 
\begin{exampleblock}{Example}
	Find a basis for the column space of $B=\left(\begin{array}{rrrrr}1&0&-3&5&0\\0&1&2&-1&0\\0&0&0&0&1\\0&0&0&0&0\end{array}\right)$.\\
	\underline{\textit{Solution}}\\
	From the non-pivot columns of matrix $B$ we learn that \begin{center}$\begin{array}{c}\mathbf{b}_3=-3\mathbf{b}_1+2\mathbf{b}_2\\ \mathbf{b}_4=5\mathbf{b}_1-\mathbf{b}_2\end{array}$\end{center}
	Then,
	\begin{center}
		$\begin{array}{rcl}\mathrm{Col}\{B\}&=&\left\{\mathbf{v}\in\mathbb{R}^4\arrowvert\mathbf{v}=x_1\mathbf{b}_1+x_2\mathbf{b}_2+x_3\mathbf{b}_3+x_4\mathbf{b}_4+x_5\mathbf{b}_5\right\}\\
			&=&\left\{\mathbf{v}\in\mathbb{R}^4\left|\begin{array}{rcl}\mathbf{v}&=&x_1\mathbf{b}_1+x_2\mathbf{b}_2+x_3(-3\mathbf{b}_1+2\mathbf{b}_2)+\\
			                                                                         & &x_4(5\mathbf{b}_1-\mathbf{b}_2)+x_5\mathbf{b}_5\end{array}\right.\right\}\\
			&=&\left\{\mathbf{v}\in\mathbb{R}^4\arrowvert\mathbf{v}=x_1'\mathbf{b}_1+x_2'\mathbf{b}_2+x_5\mathbf{b}_5\right\}\\
			
		\end{array}$
	\end{center}
	And, consequently, $\mathrm{Basis}\{\mathrm{Col}\{B\}\}=\{\mathbf{b}_1,\mathbf{b}_2, \mathbf{b}_5\}$
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Basis of a subspace} 
\begin{exampleblock}{Example}
	Find a basis for the column space of $A=\left(\begin{array}{rrrrr}1&3&3&2&-9\\-2&-2&2&-8&2\\2&3&0&7&1\\3&4&-1&11&-8\end{array}\right)$.\\
	\underline{\textit{Solution}}\\
	It turns out that $A\sim B$ (with $B$ as in the previous example). Since row operations do not affect the linear dependence relations among the columns of a matrix,
	we must have
	\begin{center}
		$\begin{array}{c}\mathbf{a}_3=-3\mathbf{a}_1+2\mathbf{a}_2\\ \mathbf{a}_4=5\mathbf{a}_1-\mathbf{a}_2\end{array}$
	\end{center}
	and $\mathrm{Basis}\{\mathrm{Col}\{A\}\}=\{\mathbf{a}_1,\mathbf{a}_2, \mathbf{a}_5\}$
\end{exampleblock}

\begin{ceuthm}
	The pivot columns of $A$ form a basis of $\mathrm{Col}\{A\}$.
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 2, Section 8:
	\begin{itemize}
		\item 2.8.1
		\item 2.8.2
		\item 2.8.5
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Dimension and rank (e)} 
\Outline

\begin{frame}\frametitle{Coordinate system} 
\begin{ceudef}[Coordinates of a vector in the basis $B$]
	Suppose $B=\{\mathbf{b}_1,\mathbf{b}_2, ..., \mathbf{b}_p\}$ is a basis for the subspace $H \subseteq \mathbb{R}^n$. For each $\mathbf{x} \in H$, the coordinates
	of $\mathbf{x}$ relative to the basis $B$ are the weights $c_i$ such that
	\begin{center}
		$\mathbf{x}=c_1\mathbf{b}_1+c_2\mathbf{b}_2+...+c_p\mathbf{b}_p$
	\end{center}
	The \textbf{coordinates of} $\mathbf{x}$ \textbf{with respect to the basis} $B$ form the vector in $\mathbb{R}^p$
	\begin{center}
		$[\mathbf{x}]_B=\begin{pmatrix}c_1 \\c_2\\...\\c_p\end{pmatrix}$
	\end{center}
\end{ceudef}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Coordinate system} 
\begin{exampleblock}{Example}
	Let $\mathbf{x}=(3,12,7)$, $\mathbf{v}_1=(3,6,2)$, $\mathbf{v}_2=(-1,0,1)$, $B=\{\mathbf{v}_1,\mathbf{v}_2\}$.
	\begin{enumerate}
		\item Show that $B$ is a linearly independent set
		\item Find the coordinates of $\mathbf{x}$ in the coordinate system $B$
	\end{enumerate}
	\underline{\textit{Solution}}
	\begin{enumerate}
		\item We need to prove that the only solution of the equation system $c_1\mathbf{v}_1+c_2\mathbf{v}_2=\mathbf{0}$ is $c_1=c_2=0$.\\
					$\left(\begin{array}{rr|r}3 & -1 & 0 \\ 6 & 0 & 0 \\ 2 & 1 & 0\end{array}\right) \sim 
					 \left(\begin{array}{rr|r}1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 0\end{array}\right)$\\
					And, therefore, the unique solution is $c_1=c_2=0$ (q.e.d.)
		\item We need to find $c_1$ and $c_2$ such that $c_1\mathbf{v}_1+c_2\mathbf{v}_2=\mathbf{x}$\\
					$\left(\begin{array}{rr|r}3 & -1 & 3 \\ 6 & 0 & 12 \\ 2 & 1 & 7\end{array}\right) \sim 
					 \left(\begin{array}{rr|r}1 & 0 & 2 \\ 0 & 1 & 3 \\ 0 & 0 & 0\end{array}\right)$\\
					And, therefore, $[\mathbf{x}]_B=(2,3)$.
		
	\end{enumerate}
\end{exampleblock}
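	In MATLAB, the coordinates can be obtained with the backslash operator (exact here because the system is consistent):\\
	{\color{blue}\texttt{V=[3 -1; 6 0; 2 1]; x=[3;12;7]; c=V\textbackslash{}x}}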
\end{frame}

% ==============================================
\begin{frame}\frametitle{Coordinate system} 
\begin{exampleblock}{Example (continued)}
	The following figure shows how $\mathbf{x}$ is equal to $2\mathbf{v}_1+3\mathbf{v}_2$
	\begin{center}
		\includegraphics[scale=0.4]{figCoordinates.jpg}
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Coordinate system} 
\begin{ceuthm}
	The coordinates of a given vector with respect to a given basis are unique.\\
	\underline{\textit{Proof}}\\
	Let us assume they are not unique. Then, there must be two different sets of coordinates such that
	\begin{center}
		$\mathbf{x}=c_1\mathbf{b}_1+c_2\mathbf{b}_2+...+c_p\mathbf{b}_p$\\
		$\mathbf{x}=c_1'\mathbf{b}_1+c_2'\mathbf{b}_2+...+c_p'\mathbf{b}_p$\\
	\end{center}
	If we subtract both equations, we have
	\begin{center}
		$\mathbf{0}=(c_1-c_1')\mathbf{b}_1+(c_2-c_2')\mathbf{b}_2+...+(c_p-c_p')\mathbf{b}_p$\\
	\end{center}
	But because the basis is a linearly independent set of vectors, we must have
	\begin{center}
	\begin{tiny}
		$c_1-c_1'=0 \Rightarrow c_1=c_1'$\\
		$c_2-c_2'=0 \Rightarrow c_2=c_2'$\\
		...\\
		$c_p-c_p'=0 \Rightarrow c_p=c_p'$\\
	\end{tiny}
	\end{center}
	This contradicts the hypothesis that there were two different sets of coordinates; therefore, the coordinates of the vector $\mathbf{x}$ must be unique.
\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Subspace dimension} 
\begin{block}{Isomorphism to $\mathbb{R}^p$}
	For any given subspace $H$ and any basis $B$ of it (with $p$ vectors), the mapping\\
	\begin{center}
		$\begin{array}{rcl}
			T:H&\rightarrow&\mathbb{R}^p \\
			\mathbf{x} & \rightarrow& [\mathbf{x}]_B
		\end{array}$
	\end{center}
	is a linear, injective transformation that makes $H$ behave as $\mathbb{R}^p$.
\end{block}

\begin{ceudef}[Dimension]
	The \textbf{dimension of a subspace} $H$ ($\mathrm{dim}\{H\}$) is the number of vectors of any of its bases.\\
	The dimension of $H=\{\mathbf{0}\}$ is 0.
\end{ceudef}

\begin{exampleblock}{Example (continued)}
	In our previous example, in which $B=\{\mathbf{v}_1,\mathbf{v}_2\}$, the dimension is 2; in fact, $H$ behaves like a plane (see the previous figure in the example).
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Rank of a matrix} 

\begin{ceudef}[Rank of a matrix]
	The \textbf{rank of a matrix} $A$ is $\mathrm{rank}\{A\}=\mathrm{dim}\{\mathrm{Col\{A\}}\}$, that is, the dimension of the column space of the matrix.\\
	MATLAB: {\color{blue}\texttt{rank(A)}}
\end{ceudef}

\begin{ceuthm}
	The rank of a matrix is the number of pivot columns it has.\\
	\underline{\textit{Proof}}\\
	Since the pivot columns form a basis of the column space of $A$, the number of pivot columns is the rank of the matrix.
\end{ceuthm}

\begin{exampleblock}{Example}
	$A=\left(\begin{array}{rrrrr}1&3&3&2&-9\\-2&-2&2&-8&2\\2&3&0&7&1\\3&4&-1&11&-8\end{array}\right)\sim
		 \left(\begin{array}{rrrrr}{\color{red}1}&{\color{red}0}&-3&5&{\color{red}0}\\{\color{red}0}&{\color{red}1}&2&-1&{\color{red}0}\\
			{\color{red}0}&{\color{red}0}&0&0&{\color{red}1}\\{\color{red}0}&{\color{red}0}&0&0&{\color{red}0}\end{array}\right)$\\
	Therefore, the rank of $A$ is 3.
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Rank of a matrix} 

\begin{ceuthm}[Rank theorem]
	If $A$ has $n$ columns, then
	\begin{center}
		$\mathrm{Rank}\{A\}+\mathrm{dim}\{\mathrm{Nul\{A\}}\}=n$
	\end{center}
\end{ceuthm}
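\begin{exampleblock}{Numerical check}
	For the matrix $A$ of the previous slide ($n=5$, $\mathrm{rank}\{A\}=3$):\\
	{\color{blue}\texttt{A=[1 3 3 2 -9; -2 -2 2 -8 2; 2 3 0 7 1; 3 4 -1 11 -8];}}\\
	{\color{blue}\texttt{rank(A) + size(null(A),2)}}\\
	returns $3+2=5=n$, as the rank theorem predicts.
\end{exampleblock}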

\begin{ceuthm}[Basis theorem]
	Let $H$ be a subspace of dimension $p$. Any linearly independent set of $p$ vectors of $H$ is a basis of $H$. Any set of $p$ vectors that
	span $H$ is a basis of $H$.
\end{ceuthm}

\end{frame}

\begin{frame}\frametitle{Characterization of invertible matrices (continued)} 
\begin{ceuthm}[The invertible matrix theorem]
	Let $A \in \mathcal{M}_{n\times n}$. The following statements are equivalent (either they are all true or they are all false):
	\begin{enumerate}[i.]
		\setcounter{enumi}{12}
		\item The columns of $A$ form a basis of $\mathbb{R}^n$
		\item $\mathrm{Col\{A\}}=\mathbb{R}^n$
		\item $\mathrm{dim}\{\mathrm{Col\{A\}}\}=n$
		\item $\mathrm{Rank}\{A\}=n$
		\item $\mathrm{Nul\{A\}}=\{\mathbf{0}\}$
		\item $\mathrm{dim}\{\mathrm{Nul\{A\}}\}=0$
	\end{enumerate}
	\label{thm:characterization2}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Characterization of invertible matrices} 
\begin{block}{}
	\underline{\textit{Proof v $\Rightarrow$ xiii}}\\
	This is true by the basis theorem.\\
	\underline{\textit{Proof xiii $\Rightarrow$ xiv}}\\
	By the definition of basis.\\
	\underline{\textit{Proof xiii $\Rightarrow$ xv}}\\
	By the definition of dimension.\\
	\underline{\textit{Proof xv $\Rightarrow$ xvi}}\\
	By the definition of rank.\\
	\underline{\textit{Proof xvi $\Rightarrow$ xviii}}\\
	By the rank theorem ($\mathrm{dim}\{\mathrm{Nul}\{A\}\}=n-n=0$).\\
	\underline{\textit{Proof xviii $\Rightarrow$ xvii}}\\
	The only subspace of dimension 0 is $\{\mathbf{0}\}$.\\
	\underline{\textit{Proof xvii $\Rightarrow$ iv}}\\
	By the definition of null space.\\
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 2, Section 9:
	\begin{itemize}
		\item 2.9.1
		\item 2.9.3
		\item 2.9.9
		\item 2.9.19
		\item 2.9.27
	\end{itemize}
\end{exerciseblock}

\end{frame}

\OutlineFinal

\end{document}