\input{../slidesComun}

\title[2. Linear equation systems]{Chapter 2. Linear equation systems}  
\COSS

% ==============================================
\begin{frame}\frametitle{References} 

\begin{figure}
	\includegraphics[scale=0.7]{../lay_linearalgebra.jpg}
\end{figure}
D. Lay. Linear algebra and its applications (3rd ed). Pearson (2006). Chapter 1.

\end{frame}

% ==============================================
\begin{frame}\frametitle{A little bit of history} 

Linear equations in their modern form have been known since the middle of the XVIII$^{th}$ century, and they were strongly developed during the XIX$^{th}$ century with important contributions from people like \href{http://en.wikipedia.org/wiki/Gabriel_Cramer}{Gabriel Cramer} (1750), \href{http://en.wikipedia.org/wiki/Carl_Friedrich_Gauss}{Carl Friedrich Gauss} (1801), Sir \href{http://en.wikipedia.org/wiki/William_Rowan_Hamilton}{William Rowan Hamilton} (1843) and \href{http://en.wikipedia.org/wiki/Wilhelm_Jordan_\%28geodesist\%29}{Wilhelm Jordan} (1873). They were mostly developed to explain the mechanics of celestial objects.

\begin{figure}
	\includegraphics[height=3cm]{Cramer.jpg}
	\includegraphics[height=3cm]{Carl_Friedrich_Gauss.jpg}
	\includegraphics[height=3cm]{William_Rowan_Hamilton.jpg}
	\includegraphics[height=3cm]{Jordan.jpg}
\end{figure}

To know more about the history of linear equations visit
\begin{itemize}
	\item \url{http://hom.wikidot.com/cramer-s-method-and-cramer-s-paradox}
\end{itemize}
\end{frame}

% ==============================================
\begin{frame}\frametitle{A little bit of history} 

Wassily Leontief was a Russian-American economist who worked at Harvard. In 1949 he performed an analysis with the early computers at Harvard, using data from the U.S. Bureau of Labor Statistics to classify the U.S. economy into 500 sectors, later simplified to 42. He modelled the economy as a linear equation system, and it took the Mark II (one of the first computers) 56 hours to solve it. He was awarded the Nobel Prize in Economics in 1973 for his work on input-output tables, which analyze how the outputs of some industries are inputs to other industries.

\begin{figure}
	\includegraphics[height=4cm]{Leontief.jpg}
\end{figure}
\end{frame}

% ==============================================
\begin{frame}\frametitle{A little bit of history} 

Currently, we need about two weeks on a supercomputer (128 cores) to solve for the structure of a macromolecular assembly (in the figure, the HIV virus capsid). The system has about 1,000 million equations and about 3 million unknowns.

\begin{figure}
	\includegraphics[height=6cm]{HIVcapsid.jpg}
\end{figure}
\end{frame}

% ==============================================
\setnextsection{2}
\section{Linear equation system} 
\subsection{Introduction (a)} 
\Outline

\begin{frame}\frametitle{What is a linear equation system?} 
\begin{ceudef}[Linear equation system]
A \textbf{linear equation} is one that can be expressed in the form
\begin{center}
	$\sum\limits_{i=1}^n{a_ix_i}=b$\\
	$a_1x_1+a_2x_2+...+a_nx_n=b$\\
	$\left<\mathbf{a},\mathbf{x}\right>=b$
\end{center}
The unknowns are $x_i$ ($i=1,2,...,n$), while the $a_i$ and $b$ are known coefficients. When we have several of these equations, we have a \textbf{linear equation system}.
\end{ceudef}

\begin{exampleblock}{Example}
	\begin{columns}[T]
		\begin{column}{5cm}
			\underline{Examples of linear equations}
			\begin{center}
				$7x_1-2x_2=4$\\
				$7(x_1-\sqrt{3}x_2)=\frac{1}{\sqrt{2}}x_1 \Rightarrow $\\
				$(7-\frac{1}{\sqrt{2}})x_1-7\sqrt{3}x_2=0$
			\end{center}
		\end{column}
		\begin{column}{5cm}
			\underline{Examples of non-linear equations}
			\begin{center}
				$x_1+x_2+x_1x_2=1$\\
				$\sqrt{x_1}+x_2=1$
			\end{center}
		\end{column}
	\end{columns}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Set of solutions of a linear equation} 
\begin{ceudef}[Set of solutions of a linear equation system]
The set of solutions $S \subseteq \mathbb{R}^n$ of a linear equation system is the set of all values that we can assign to $x_1$, $x_2$, ..., $x_n$ such that all the equations of the system are fulfilled.
\end{ceudef}

\begin{exampleblock}{Example}
	Consider the following equation system\\
	\begin{center}
		$2x_1-x_2=7$\\
		$x_1+2x_2=11$
	\end{center}
	$\mathbf{x}=(5,3)$ is a solution to this equation system because\\
	\begin{center}
		$2\cdot 5-3=7$\\
		$5+2\cdot 3=11$
	\end{center}
	In fact it is its unique solution and, therefore, $S=\left\{(5,3)\right\} \subset \mathbb{R}^2$.
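	\vspace{0.25cm}
	A quick check in Matlab (a sketch; the backslash operator solves the square system):\\
	{\color{blue}
	\texttt{
A=[2 -1; 1 2];\\
b=[7; 11];\\
x=A\textbackslash b \% returns (5, 3)
	}
	}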
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Geometric interpretation} 

\begin{exampleblock}{Example}
	\begin{center}
		\begin{description}
			\item $l_1$: $2x_1-x_2=7 \Rightarrow x_2=2x_1-7 \Rightarrow \mathbf{v}_1=(1,2) $
			\item $l_2$: $x_1+2x_2=11 \Rightarrow x_2=\frac{11}{2}-\frac{1}{2}x_1 \Rightarrow \mathbf{v}_2=(1,-\frac{1}{2}) $
		\end{description}
	\end{center}
	Each one of the equations actually represents a line; in this case, the two lines intersect at the point $(5,3)$, the unique solution
	of this equation system.
	\begin{center}
		\includegraphics[height=4.5cm]{figSolution.eps}
	\end{center}

\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Geometric interpretation} 

\begin{exampleblock}{Example}
	There can be a single solution (left), no solution (middle), or infinitely many solutions ($l_1=l_2$; right)
	\begin{center}
		\includegraphics[height=3cm]{figNumberOfSolutionsp.eps}
	\end{center}

\end{exampleblock}

\begin{block}{In general}
		With linear equations we can represent:
		\begin{description}
			\item[a \textbf{line} in 2D:] $a_1x_1+a_2x_2=b$
			\item[a \textbf{plane} in 3D:] $a_1x_1+a_2x_2+a_3x_3=b$
			\item[a \textbf{hyperplane} in nD:] $a_1x_1+a_2x_2+...+a_nx_n=b$
		\end{description}

\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix notation} 

\begin{exampleblock}{Example}
	The equation system
	\begin{center}
		$\begin{array}{rrrcr}
			x_1& -2x_2& +x_3&=&0\\
			   &  2x_2& -8x_3&=&8\\
			-4x_1& +5x_2& +9x_3&=&-9\\
		\end{array}$
	\end{center}
	can be represented as
	\begin{center}
		$\left(\begin{array}{rrr|r}
			1 & -2&  1 & 0\\
			0 &  2& -8 & 8\\
		 -4 &  5&  9 &-9\\
		\end{array}\right)\;
		[\tilde{A}]$
	\end{center}
	or
	\begin{center}
		$\left(\begin{array}{rrr}
			1 & -2&  1 \\
			0 &  2& -8 \\
		 -4 &  5&  9 \\
		\end{array}\right)
		\left(\begin{array}{r}
			x_1 \\
			x_2 \\
		  x_3 \\
		\end{array}\right)=
		\left(\begin{array}{r}
			0 \\
			8 \\
		  -9 \\
		\end{array}\right)
		\;
		[A\mathbf{x}=\mathbf{b}]
		$
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix notation} 

\begin{block}{In general}
	$A \in \mathcal{M}_{m \times n}$ is called the \textbf{system matrix} of an equation system with $m$ equations and $n$ unknowns.\\
	$\tilde{A} \in \mathcal{M}_{m \times (n+1)}$ is called the \textbf{augmented system matrix} of an equation system with $m$ equations and $n$ unknowns. 
\end{block}

\begin{block}{Basic row operations}
	To solve the equation system with the augmented system matrix, we use the so-called basic row operations:
	\begin{description}
		\item[\textbf{Substitution}:] $\mathbf{r}_i \leftarrow k_i\mathbf{r}_i+k_j\mathbf{r}_j$ ($k_i \neq 0$): Row $i$ is replaced by a linear combination of rows $i$ and $j$
		\item[\textbf{Swapping}:] $\mathbf{r}_i \leftrightarrow \mathbf{r}_j$: Row $i$ is swapped with row $j$
		\item[\textbf{Scaling}:] $\mathbf{r}_i \leftarrow k_i\mathbf{r}_i$ ($k_i \neq 0$): Row $i$ is multiplied by a nonzero scale factor
	\end{description}
	All these operations transform the equation system into an \textbf{equivalent} system (with the same set of solutions). The two matrices (original and transformed)
	are said to be \textbf{row equivalent}.
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Solving the equation system} 

\begin{exampleblock}{Example}
	In the following example we will see how substitution operations actually change the lines represented by the equations (although not their intersection, i.e., the solution set), while scaling leaves each line unchanged.
	\begin{center}
		\newcolumntype{C}{>{\centering\arraybackslash} m{3.5cm} }
		\begin{tabular}{m{3cm} | m{3cm} C}
			$\begin{array}{rrcr}
				2x_1& -x_2&=&7\\
				 x_1&+2x_2&=&11\\
			\end{array}$ &
			$\left(\begin{array}{rr|r}
				2 & -1& 7\\
				1 &  2& 11\\
			\end{array}\right)$ &
			\includegraphics[width=3.5cm]{figBasicRowOperations1.eps} \\

			$\mathbf{r}_1 \leftarrow \frac{1}{2}\mathbf{r}_1$ &
			$\left(\begin{array}{rr|r}
				1 & -\frac{1}{2}& \frac{7}{2} \\
				1 &  2& 11\\
			\end{array}\right)$ &
			\includegraphics[width=3.5cm]{figBasicRowOperations1.eps} \\
			
		\end{tabular}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Solving the equation system} 

\begin{exampleblock}{Example}
	\begin{center}
		\newcolumntype{C}{>{\centering\arraybackslash} m{3.5cm} }
		\begin{tabular}{m{3cm} | m{3cm} C}
			&
			$\left(\begin{array}{rr|r}
				1 & -\frac{1}{2}& \frac{7}{2} \\
				1 &  2& 11\\
			\end{array}\right)$ &
			\includegraphics[width=3.5cm]{figBasicRowOperations1.eps} \\
			
			$\mathbf{r}_2 \leftarrow \mathbf{r}_2-\mathbf{r}_1$ &
			$\left(\begin{array}{rr|r}
				1 & -\frac{1}{2}& \frac{7}{2} \\
				0 & \frac{5}{2}& \frac{15}{2} \\
			\end{array}\right)$ &
			\includegraphics[width=3.5cm]{figBasicRowOperations2.eps} \\
			
		\end{tabular}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Solving the equation system} 

\begin{exampleblock}{Example}
	\begin{center}
		\newcolumntype{C}{>{\centering\arraybackslash} m{3.5cm} }
		\begin{tabular}{m{3cm} | m{3cm} C}
			&
			$\left(\begin{array}{rr|r}
				1 & -\frac{1}{2}& \frac{7}{2} \\
				0 & \frac{5}{2}& \frac{15}{2} \\
			\end{array}\right)$ &
			\includegraphics[width=3.5cm]{figBasicRowOperations2.eps} \\
			
			$\mathbf{r}_2 \leftarrow \frac{2}{5}\mathbf{r}_2$ &
			$\left(\begin{array}{rr|r}
				1 & -\frac{1}{2}& \frac{7}{2} \\
				0 & 1& 3 \\
			\end{array}\right)$ &
			\includegraphics[width=3.5cm]{figBasicRowOperations2.eps} \\
			
		\end{tabular}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Solving the equation system} 

\begin{exampleblock}{Example}
	\begin{center}
		\newcolumntype{C}{>{\centering\arraybackslash} m{3.5cm} }
		\begin{tabular}{m{3cm} | m{3cm} C}
			 &
			$\left(\begin{array}{rr|r}
				1 & -\frac{1}{2}& \frac{7}{2} \\
				0 & 1& 3 \\
			\end{array}\right)$ &
			\includegraphics[width=3.5cm]{figBasicRowOperations2.eps} \\

			$\mathbf{r}_1 \leftarrow \mathbf{r}_1+\frac{1}{2}\mathbf{r}_2$ &
			$\left(\begin{array}{rr|r}
				1 & 0 & 5 \\
				0 & 1 & 3 \\
			\end{array}\right)$ &
			\includegraphics[width=3.5cm]{figBasicRowOperations3.eps} \\
			
		\end{tabular}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Existence and uniqueness of solutions} 

\begin{exampleblock}{Example}
	\begin{center}
		$\begin{array}{rrrcr}
			x_1& -2x_2& +x_3&=&0\\
			   &  2x_2& -8x_3&=&8\\
			-4x_1& +5x_2& +9x_3&=&-9\\
		\end{array}
		\sim\:...\:\sim
		\left(\begin{array}{rrr|r}
			1 & -2&  1 & 0\\
			0 &  1& -4 & 4\\
		  0 &  0&  1 & 3\\
		\end{array}\right)$
	\end{center}
	We can solve for $x_3$ ($x_3=3$), then use this value in the second equation to solve for $x_2$, and finally use these two values in the first equation to solve for $x_1$. Thus, the equation system \textbf{has a solution and it is unique}. We say the equation system is \textbf{compatible determinate}. The set of solutions is $S=\left\{(29,16,3)\right\}$.\\
	\vspace{0.25cm}
		Matlab:\\
		{\color{blue}
		\texttt{
A=[1 -2 1; 0 2 -8; -4 5 9];\\
b=[0; 8; -9];\\
x=A\textbackslash b
		}
		}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Existence and uniqueness of solutions} 

\begin{exampleblock}{Example}
	\begin{center}
		$\begin{array}{rrrcr}
			   &   x_2&-4x_3&=&8\\
		 2x_1& -3x_2&+2x_3&=&1\\
		 5x_1& -8x_2&+7x_3&=&1\\
		\end{array}
		\sim\:...\:\sim
		\left(\begin{array}{rrr|r}
			2 & -3&  2 & 1\\
			0 &  1& -4 & 8\\
		  0 &  0&  0 & \frac{5}{2}\\
		\end{array}\right)$
	\end{center}
	Last equation implies $0=\frac{5}{2}$ which is impossible. Consequently, there is \textbf{no solution} and we say that the equation system is \textbf{incompatible}. The set
	of solutions is $S=\varnothing$.
\end{exampleblock}

\begin{exampleblock}{Example}
	\begin{center}
		$\begin{array}{rrcr}
			x_1& +x_2& =&1\\
		 2x_1& +2x_2&=&2\\
		\end{array}
		\sim\:...\:\sim
		\left(\begin{array}{rr|r}
			1 & 1 &  1\\
			0 & 0 &  0\\
		\end{array}\right)$
	\end{center}
	There are \textbf{infinitely many} solutions. The system is \textbf{compatible indeterminate}. The set of solutions is $S=\left\{(x_1,1-x_1)\; \forall x_1 \in \mathbb{R}\right\}$.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (4th ed.), Chapter 1, Section 1:
	\begin{itemize}
		\item 1.1.11
		\item 1.1.4
		\item 1.1.15
		\item 1.1.18
		\item 1.1.25
		\item 1.1.26
		\item 1.1.33
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Gauss-Jordan algorithm (b)} 
\Outline

% ==============================================
\begin{frame}\frametitle{Echelon matrices} 

\begin{exampleblock}{Example}
	The following matrices are echelon matrices:\\
	\begin{center}
	$A_1=\left(\begin{array}{cccc}
		\Diamond & \heartsuit & \heartsuit & \heartsuit \\
		0 & \Diamond & \heartsuit & \heartsuit \\
		0 & 0 & 0 & 0 \\
		\end{array}\right)$\\
	$A_2=\left(\begin{array}{cccc}
		0 & \Diamond & \heartsuit & \heartsuit \\
		0 & 0 & 0 & \Diamond \\
		\end{array}\right)$
	\end{center}
	In the previous matrices we have marked with $\Diamond$ the \textbf{leading entries} (the first nonzero element of each row), and with $\heartsuit$ the
	rest of the nonzero elements.
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Echelon matrices} 

\begin{ceudef}[Echelon matrix]
	A rectangular matrix has an \textbf{echelon} form iff:
	\begin{enumerate}
		\item Within each row, the first element different from zero (called the \textbf{leading entry}) is in a column to the right 
		      of the leading entry of the previous row.
		\item Within each column, all values below a leading entry are zero.
		\item All rows without a leading entry (i.e., all-zero rows) are below all the rows that have at least one nonzero element.
	\end{enumerate}
\end{ceudef}

\begin{ceudef}[Reduced echelon matrix]
	A rectangular matrix has a \textbf{reduced echelon} form iff:
	\begin{enumerate}
		\item It is echelon.
		\item The leading entry of each row is 1.
		\item Each leading entry is the only nonzero element in its column.
	\end{enumerate}
\end{ceudef}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Echelon matrices} 

\begin{ceuthm}
	Each matrix is row equivalent to one and only one reduced echelon matrix.
\end{ceuthm}

\begin{exampleblock}{Example}
\begin{columns}
	\begin{column}{5cm}
		\begin{tabular}{m{2.5cm} | m{2.5cm}}
			&
			$\left(\begin{array}{rrr}
				1 &  2 & 3 \\
				4 &  5 & 6 \\
			 -1 & -1 & 0 \\
			\end{array}\right)$ \\
			
			$\begin{array}{l}
				\mathbf{r}_2 \leftarrow \mathbf{r}_2-4\mathbf{r}_1\\
				\mathbf{r}_3 \leftarrow \mathbf{r}_3+\mathbf{r}_1\end{array}$ &
			$\left(\begin{array}{rrr}
				1 & 2 & 3 \\
				0 & -3 & -6 \\
				0 & 1 & 3 \\
			\end{array}\right)$ \\
			
			$\begin{array}{l}
				\mathbf{r}_2 \leftrightarrow \mathbf{r}_3\\
				\end{array}$ &
			$\left(\begin{array}{rrr}
				1 & 2 & 3 \\
				0 & 1 & 3 \\
				0 & -3 & -6 \\
			\end{array}\right)$ \\

			$\begin{array}{l}
				\mathbf{r}_3 \leftarrow \mathbf{r}_3+3\mathbf{r}_2\\
				\end{array}$ &
			$\left(\begin{array}{rrr}
				1 & 2 & 3 \\
				0 & 1 & 3 \\
				0 & 0 & 3 \\
			\end{array}\right)$ \\

		\end{tabular}
	\end{column}
	\begin{column}{5cm}
		\begin{tabular}{m{2cm} | m{2.5cm}}
			$\begin{array}{l}
				\mathbf{r}_1 \leftarrow \mathbf{r}_1-2\mathbf{r}_2 \\
				\mathbf{r}_3 \leftarrow \frac{1}{3}\mathbf{r}_3 \\
			\end{array}$ &
			$\left(\begin{array}{rrr}
				1 & 0 & -3 \\
				0 & 1 & 3 \\
				0 & 0 & 1 \\
			\end{array}\right)$ \\
			
			$\begin{array}{l}
				\mathbf{r}_1 \leftarrow \mathbf{r}_1+3\mathbf{r}_3 \\
				\mathbf{r}_2 \leftarrow \mathbf{r}_2-3\mathbf{r}_3 \\
			\end{array}$ &
			$\left(\begin{array}{rrr}
				1 & 0 & 0 \\
				0 & 1 & 0 \\
				0 & 0 & 1 \\
			\end{array}\right)$ \\
			
		\end{tabular}
	\end{column}
\end{columns}
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Echelon matrices} 

\begin{exampleblock}{Example (continued)}
		Matlab:\\
{\color{blue}
\texttt{
A=[1 2 3; 4 5 6; -1 -1 0]\\
A(2,:)=A(2,:)-4*A(1,:)\\
A(3,:)=A(3,:)+A(1,:)\\
aux=A(2,:); A(2,:)=A(3,:); A(3,:)=aux\\
A(3,:)=A(3,:)+3*A(2,:)\\
A(1,:)=A(1,:)-2*A(2,:)\\
A(3,:)=1/3*A(3,:)\\
A(1,:)=A(1,:)+3*A(3,:)\\
A(2,:)=A(2,:)-3*A(3,:)\\
		}
		}	
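	\vspace{0.25cm}
	Matlab's built-in \texttt{rref} computes the reduced echelon form in a single call; since that form is unique, both sequences of operations must end at the same matrix:\\
	{\color{blue}
	\texttt{
rref([1 2 3; 4 5 6; -1 -1 0]) \% returns the identity
	}
	}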
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Echelon matrices} 

\begin{exampleblock}{Example}
Now, we'll repeat the same example using different row operations:
\begin{columns}
	\begin{column}{5cm}
		\begin{tabular}{m{2.5cm} | m{2.5cm}}
			&
			$\left(\begin{array}{rrr}
				1 &  2 & 3 \\
				4 &  5 & 6 \\
			 -1 & -1 & 0 \\
			\end{array}\right)$ \\
			
			$\begin{array}{l}
				\mathbf{r}_1 \leftrightarrow \mathbf{r}_3
				\end{array}$ &
			$\left(\begin{array}{rrr}
			 -1 & -1 & 0 \\
				4 &  5 & 6 \\
				1 &  2 & 3 \\
			\end{array}\right)$ \\
			
			$\begin{array}{l}
				\mathbf{r}_1 \leftarrow -\mathbf{r}_1\\
				\end{array}$ &
			$\left(\begin{array}{rrr}
			  1 &  1 & 0 \\
				4 &  5 & 6 \\
				1 &  2 & 3 \\
			\end{array}\right)$ \\

			$\begin{array}{l}
				\mathbf{r}_2 \leftarrow \mathbf{r}_2-4\mathbf{r}_1\\
				\mathbf{r}_3 \leftarrow \mathbf{r}_3-\mathbf{r}_1\\
				\end{array}$ &
			$\left(\begin{array}{rrr}
			  1 &  1 & 0 \\
				0 &  1 & 6 \\
				0 &  1 & 3 \\
			\end{array}\right)$ \\

		\end{tabular}
	\end{column}
	\begin{column}{5cm}
		\begin{tabular}{m{2cm} | m{2.5cm}}
			$\begin{array}{l}
				\mathbf{r}_1 \leftarrow \mathbf{r}_1-\mathbf{r}_2\\
				\mathbf{r}_3 \leftarrow \mathbf{r}_3-\mathbf{r}_2\\
				\end{array}$ &
			$\left(\begin{array}{rrr}
			  1 &  0 & -6 \\
				0 &  1 & 6 \\
				0 &  0 & -3 \\
			\end{array}\right)$ \\
			
			$\begin{array}{l}
				\mathbf{r}_3 \leftarrow -\frac{1}{3}\mathbf{r}_3\\
				\end{array}$ &
			$\left(\begin{array}{rrr}
			  1 &  0 & -6 \\
				0 &  1 & 6 \\
				0 &  0 & 1 \\
			\end{array}\right)$ \\

			$\begin{array}{l}
				\mathbf{r}_1 \leftarrow \mathbf{r}_1+6\mathbf{r}_3\\
				\mathbf{r}_2 \leftarrow \mathbf{r}_2-6\mathbf{r}_3\\
				\end{array}$ &
			$\left(\begin{array}{rrr}
			  1 &  0 & 0 \\
				0 &  1 & 0 \\
				0 &  0 & 1 \\
			\end{array}\right)$ \\

		\end{tabular}
	\end{column}
\end{columns}
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Gauss-Jordan algorithm} 

\begin{ceudef}[Pivot and pivot column]
	A \textbf{pivot} is an element of a matrix that is used to create zeros in the other rows during certain calculations. For the Gauss-Jordan algorithm it corresponds to the first nonzero element of a given row. A \textbf{pivot column} is a column that contains a pivot.
\end{ceudef}

\begin{block}{Step 1}
	Choose the left-most pivot column. The pivot element (marked in red) is any value within this column different from 0.
	Note: numerically, we should take the one with maximum absolute value to avoid round-off errors (\textit{partial pivoting}).
\end{block}

\begin{exampleblock}{Example}
	\begin{center}
		$\left(\begin{array}{rrrrrr}
			0   &  3 & -6 &  6 & 4 & -5 \\
			3   & -7 &  8 & -5 & 8 &  9 \\
			{\color{red}3} & -9 & 12 & -9 & 6 & 15 \\
		\end{array}\right)$
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Gauss-Jordan algorithm} 

\begin{block}{Step 2}
	Sort rows if necessary so that the pivot is as high as possible.
\end{block}

\begin{exampleblock}{Example}
	\begin{tabular}{m{2cm} | m{2.5cm}}
		$\begin{array}{l}
			\mathbf{r}_3 \leftrightarrow \mathbf{r}_1\\
			\end{array}$ &
		$\left(\begin{array}{rrrrrr}
			{\color{red}3} & -9 & 12 & -9 & 6 & 15 \\
			3   & -7 &  8 & -5 & 8 &  9 \\
			0   &  3 & -6 &  6 & 4 & -5 \\
		\end{array}\right)$
	\end{tabular}
\end{exampleblock}

\begin{block}{Step 3}
	Use row operations to force the elements below the pivot to be 0.
\end{block}

\begin{exampleblock}{Example}
		\begin{tabular}{m{2cm} | m{2.5cm}}
			$\begin{array}{l}
				\mathbf{r}_2 \leftarrow \mathbf{r}_2-\mathbf{r}_1\\
				\end{array}$ &
			$\left(\begin{array}{rrrrrr}
				{\color{red}3} & -9 & 12 & -9 & 6 & 15 \\
				0   &  2 & -4 &  4 & 2 & -6 \\
				0   &  3 & -6 &  6 & 4 & -5 \\
			\end{array}\right)$
		\end{tabular}
\end{exampleblock}
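Steps 1--3 can be sketched in Matlab as follows (a sketch; the variable names are ours, and \texttt{max} happens to pick row 2 rather than row 3 as pivot row, which is equally valid):\\
{\color{blue}
\texttt{
A=[0 3 -6 6 4 -5; 3 -7 8 -5 8 9; 3 -9 12 -9 6 15];\\
[\textasciitilde,p]=max(abs(A(:,1))); \% Step 1: largest pivot candidate\\
A([1 p],:)=A([p 1],:); \% Step 2: swap it to the top\\
for i=2:3, A(i,:)=A(i,:)-A(i,1)/A(1,1)*A(1,:); end \% Step 3
}
}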

\end{frame}

% ==============================================
\begin{frame}\frametitle{Gauss-Jordan algorithm} 

\begin{block}{Step 4}
	Repeat Steps 1 to 3 with the rows below the pivot.
\end{block}

\begin{exampleblock}{Example}
		\begin{tabular}{m{2cm} | m{2.5cm}}
			&
			$\left(\begin{array}{rrrrrr}
				3   & -9 & 12 & -9 & 6 & 15 \\
				0   &  {\color{red}3} & -6 &  6 & 4 & -5 \\
				0   &  2 & -4 &  4 & 2 & -6 \\
			\end{array}\right)$ \\

			$\begin{array}{l}
				\mathbf{r}_3 \leftarrow \mathbf{r}_3-\frac{2}{3}\mathbf{r}_2\\
				\end{array}$ &
			$\left(\begin{array}{rrrrrr}
				3   & -9 & 12 & -9 & 6 & 15 \\
				0   &  {\color{red}3} & -6 &  6 & 4 & -5 \\
				0   &  0 & 0 &  0 & -\frac{2}{3} & -\frac{8}{3} \\
			\end{array}\right)$ \\
			
			 &
			$\left(\begin{array}{rrrrrr}
				3   & -9 & 12 & -9 & 6 & 15 \\
				0   &  3 & -6 &  6 & 4 & -5 \\
				0   &  0 & 0 &  0 &  {\color{red}-\frac{2}{3}} & -\frac{8}{3} \\
			\end{array}\right)$ \\

		\end{tabular}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Gauss-Jordan algorithm} 

\begin{block}{Step 5}
	Starting from the lowest and right-most pivot, force the elements above that pivot to be zero.
	If the pivot is not 1, then rescale the row. Repeat with the next pivot on the left.
\end{block}

\begin{exampleblock}{Example}
\begin{center}\begin{tiny}
		\begin{tabular}{m{2cm} | m{2.5cm}}
			
			$\begin{array}{l}
				\mathbf{r}_3 \leftarrow -\frac{3}{2}\mathbf{r}_3\\
				\end{array}$ &
			$\left(\begin{array}{rrrrrr}
				3   & -9 & 12 & -9 & 6 & 15 \\
				0   &  3 & -6 &  6 & 4 & -5 \\
				0   &  0 & 0 &  0 & {\color{red}1} & 4 \\
			\end{array}\right)$ \\

			$\begin{array}{l}
				\mathbf{r}_2 \leftarrow \mathbf{r}_2-4\mathbf{r}_3\\
				\mathbf{r}_1 \leftarrow \mathbf{r}_1-6\mathbf{r}_3\\
				\end{array}$ &
			$\left(\begin{array}{rrrrrr}
				3   & -9 & 12 & -9 & 0 & -9 \\
				0   &  3 & -6 &  6 & 0 & -21 \\
				0   &  0 & 0 &  0 &  {\color{red}1} & 4 \\
			\end{array}\right)$ \\

			$\begin{array}{l}
				\mathbf{r}_2 \leftarrow \frac{1}{3}\mathbf{r}_2\\
				\end{array}$ &
			$\left(\begin{array}{rrrrrr}
				3   & -9 & 12 & -9 & 0 & -9 \\
				0   &  {\color{red}1} & -2 &  2 & 0 & -7 \\
				0   &  0 & 0 &  0 &  1 & 4 \\
			\end{array}\right)$ \\

			$\begin{array}{l}
				\mathbf{r}_1 \leftarrow \mathbf{r}_1+9\mathbf{r}_2\\
				\end{array}$ &
			$\left(\begin{array}{rrrrrr}
				3   &  0 & -6 & 9 & 0 & -72 \\
				0   &  {\color{red}1} & -2 &  2 & 0 & -7 \\
				0   &  0 & 0 &  0 &  1 & 4 \\
			\end{array}\right)$ \\

			$\begin{array}{l}
				\mathbf{r}_1 \leftarrow \frac{1}{3}\mathbf{r}_1\\
				\end{array}$ &
			$\left(\begin{array}{rrrrrr}
				{\color{red}1}   &  0 & -2 & 3 & 0 & -24 \\
				0   &  1 & -2 &  2 & 0 & -7 \\
				0   &  0 & 0 &  0 &  1 & 4 \\
			\end{array}\right)$ \\

		\end{tabular}
\end{tiny}\end{center}
\end{exampleblock}
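We can verify the whole reduction with Matlab's built-in \texttt{rref}, which returns exactly the last matrix above (the reduced echelon form is unique):\\
{\color{blue}
\texttt{
rref([0 3 -6 6 4 -5; 3 -7 8 -5 8 9; 3 -9 12 -9 6 15])
}
}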

Computing the inverse of an $n\times n$ matrix costs on the order of $n^3$ operations (O($n^3$)). Gaussian elimination is also O($n^3$), but it needs roughly a third of the operations, and once an echelon form is available, back-substitution costs only O($n^2$). This difference becomes more and more important as $n$ grows.

\end{frame}

% ==============================================
\begin{frame}\frametitle{Existence and uniqueness of solutions (revisited)} 
We can now revisit the issue of existence and uniqueness in the light of the reduced echelon matrix.

\begin{exampleblock}{Example}
	\begin{center}
		$\left(\begin{array}{rrr|r}
			1 &  0&  0 & 1\\
			0 &  1&  0 & 4\\
		  0 &  0&  1 & 0\\
		\end{array}\right)$
	\end{center}
	The system is compatible and the set of solutions is formed by a single point $S=\left\{(1,4,0)\right\}$.
\end{exampleblock}

\begin{exampleblock}{Example}
	\begin{center}
		$\left(\begin{array}{rrr|r}
			1 &  0&  0 & 1\\
			0 &  1&  1 & 4\\
		  0 &  0&  0 & 0\\
		\end{array}\right)$
	\end{center}
	There are \textbf{infinitely many} solutions. The system is \textbf{compatible indeterminate}. The set of solutions is $S=\left\{(1,4-x_3,x_3)\; \forall x_3 \in \mathbb{R} \right\}$.
	Because the set of solutions depends on a single free variable, it is a line.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Existence and uniqueness of solutions (revisited)} 
\begin{exampleblock}{Example}
	\begin{center}
		$\left(\begin{array}{rrrr|r}
			1 &  0&  0 & 0 & 1\\
			0 &  1&  1 & 1 & 4\\
		  0 &  0&  0 & 0 & 0\\
		\end{array}\right)$
	\end{center}
	There are \textbf{infinitely many} solutions. The system is \textbf{compatible indeterminate}. The set of solutions is $S=\left\{(1,4-x_3-x_4,x_3,x_4)\; \forall x_3,x_4 \in \mathbb{R} \right\}$. Now the set of solutions depends on two free variables and, consequently, it is a plane.
\end{exampleblock}

\begin{exampleblock}{Example}
	\begin{center}
		$\left(\begin{array}{rrr|r}
			1 &  0&  0 & 1\\
			0 &  1&  1 & 4\\
		  0 &  0&  0 & 1\\
		\end{array}\right)$
	\end{center}
	The system is incompatible since the last equation is $0=1$. The set of solutions is the empty set, $S=\varnothing$.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (4th ed.), Chapter 1, Section 2:
	\begin{itemize}
		\item 1.2.2
		\item 1.2.8
		\item 1.2.19
		\item 1.2.33
		\item 1.2.34
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Interpretation as a subspace (b)} 
\Outline

% ==============================================
\begin{frame}\frametitle{Interpretation as a subspace} 
	\begin{block}{Subspace spanned by columns}
		Consider the equation system given by the matrix equation $A\mathbf{x}=\mathbf{b}$, where $A \in \mathcal{M}_{n\times p}$. Let us denote the $p$ columns of $A$ by
		$\mathbf{c}_i \in \mathbb{R}^n$. The previous equation can be rewritten as
		\begin{center}
			$\left(\mathbf{c}_1\:\mathbf{c}_2\: ...\:\mathbf{c}_p\right)\left(\begin{array}{c}x_1 \\ x_2 \\ ... \\ x_p\end{array}\right)=\mathbf{b}
				\Rightarrow \sum\limits_{i=1}^p{x_i\mathbf{c}_i}=\mathbf{b}$
		\end{center}
		That is, the set of all products $A\mathbf{x}$ is the subspace spanned by the columns of the matrix $A$.
		\begin{center}
			$\mathrm{Span}\left\{\mathbf{c}_1,\mathbf{c}_2, ...,\mathbf{c}_p\right\}=\left\{\mathbf{v}\in \mathbb{R}^n \arrowvert \mathbf{v}=A\mathbf{x}
				\;\forall \mathbf{x}\in\mathbb{R}^p\right\}$
		\end{center}
		The equation system $A\mathbf{x}=\mathbf{b}$ then poses the question: \textbf{Find the weight coefficients} $x_i$ \textbf{such that vector} $\mathbf{b}$ \textbf{belongs to}
			$\mathrm{Span}\left\{\mathbf{c}_1,\mathbf{c}_2, ...,\mathbf{c}_p\right\}$.
	\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Interpretation as a subspace} 
	\begin{exampleblock}{Example}
		The equation system
		\begin{center}
			$\begin{array}{rrrcr}
				x_1& +2x_2& -x_3&=&4\\
					 & -5x_2&+3x_3&=&1\\
			\end{array}$
		\end{center}
		can be represented as
		\begin{center}
			$\left(\begin{array}{rrr}
				1 &  2& -1 \\
				0 & -5&  3 \\
			\end{array}\right)
			\left(\begin{array}{c}x_1 \\ x_2 \\ x_3\end{array}\right)=
			\left(\begin{array}{c}4 \\ 1\end{array}\right)
			$
		\end{center}
		That is, we look for the weight coefficients $x_1$, $x_2$ and $x_3$ such that the vector $(4,1)$ belongs to the subspace spanned by
		the vectors $(1,0)$, $(2, -5)$, and $(-1,3)$.
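		\vspace{0.25cm}
		In Matlab we can test whether $(4,1)$ lies in that span by comparing ranks, and ask for one particular set of weights (a sketch; \texttt{A\textbackslash b} returns just one of the infinitely many solutions):\\
		{\color{blue}
		\texttt{
A=[1 2 -1; 0 -5 3]; b=[4; 1];\\
rank(A)==rank([A b]) \% true: b belongs to the span of the columns\\
x=A\textbackslash b \% one particular choice of weights
		}
		}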
	\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Interpretation as a subspace} 
	\begin{ceuthm}
		The matrix equation $A\mathbf{x}=\mathbf{b}$ has the same solution as the vector equation $\sum\limits_{i=1}^p{x_i\mathbf{c}_i}=\mathbf{b}$
		and as the equation system whose augmented matrix is $\tilde{A}=\left(A|\mathbf{b}\right)$.
	\end{ceuthm}

	\begin{ceuthm}
		For any $A\in \mathcal{M}_{n\times p}$,
			the following four statements are equivalent, that is, $P_1 \Leftrightarrow P_2 \Leftrightarrow P_3 \Leftrightarrow P_4$
		\begin{description}
			\item[$P_1$:] The equation $A\mathbf{x}=\mathbf{b}$ has a solution for every $\mathbf{b} \in \mathbb{R}^n$.
			\item[$P_2$:] Every $\mathbf{b} \in \mathbb{R}^n$ is a linear combination of the columns of $A$.
			\item[$P_3$:] The columns of $A$ span all of $\mathbb{R}^n$, i.e., $\mathrm{Span}\left\{\mathbf{c}_i\right\}=\mathbb{R}^n$.
			\item[$P_4$:] $A$ has a pivot in each row.
		\end{description}
		\end{description}
		\label{thm:interpretationAsSubspace}
	\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (4th ed.), Chapter 1, Section 4:
	\begin{itemize}
		\item 1.4.13
		\item 1.4.18
		\item 1.4.26
		\item 1.4.27
		\item 1.4.32
		\item 1.4.39 (bring computer)
		\item 1.4.41 (bring computer)
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Existence and uniqueness of solutions (c)} 
\Outline
\begin{frame}\frametitle{Existence and uniqueness of solutions (revisited once again)} 

Let us consider the \textbf{homogeneous system} $A\mathbf{x}=\mathbf{0}$. It obviously has the trivial solution $\mathbf{x}=\mathbf{0}$. Non-trivial solutions
can be found through the echelon matrix.

\begin{exampleblock}{Example}
	\begin{center}
		$\begin{array}{rrrcr}
			3x_1& +5x_2& -4x_3&=&0\\
		 -3x_1& -2x_2& +4x_3&=&0\\
		  6x_1& +x_2 & -8x_3&=&0\\
		\end{array}
		\Rightarrow ... \sim
		\left(\begin{array}{rrr|r}
			1 &  0& -\frac{4}{3} & 0\\
			0 &  1& 0 & 0\\
		  0 &  0& 0 & 0\\
		\end{array}\right)$
	\end{center}
	This is a compatible indeterminate system whose set of solutions is $S=\left\{(\frac{4}{3}x_3,0,x_3)\; \forall x_3 \in \mathbb{R}\right\}$, or, equivalently,\\
	\begin{center}
		$S=\mathrm{Span}\left\{(\frac{4}{3},0,1)\right\}$.
	\end{center}
	That is, any of the infinitely many points on the straight line whose direction vector is $(\frac{4}{3},0,1)$ is a solution
	of the equation system.
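	\vspace{0.25cm}
	We can obtain this basis of the solution set in Matlab (\texttt{null} with the \texttt{'r'} option returns a rational basis of the null space):\\
	{\color{blue}
	\texttt{
A=[3 5 -4; -3 -2 4; 6 1 -8];\\
null(A,'r') \% returns (4/3, 0, 1)
	}
	}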
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Existence and uniqueness of solutions (revisited once again)} 

Let us consider the \textbf{non-homogeneous system }$A\mathbf{x}=\mathbf{b}$. 

\begin{exampleblock}{Example}
	\begin{center}
		$\begin{array}{rrrcr}
			3x_1& +5x_2& -4x_3&=&7\\
		 -3x_1& -2x_2& +4x_3&=&-1\\
		  6x_1& +x_2 & -8x_3&=&-4\\
		\end{array}
		\Rightarrow ... \sim
		\left(\begin{array}{rrr|r}
			1 &  0& -\frac{4}{3} & -1\\
			0 &  1& 0 & 2\\
		  0 &  0& 0 & 0\\
		\end{array}\right)$
	\end{center}
	This is a compatible indeterminate system whose set of solutions is $S=\left\{(-1+\frac{4}{3}x_3,2,x_3)\; \forall x_3 \in \mathbb{R}\right\}$, or, equivalently,
	\begin{center}
		$S=\left\{(-1,2,0)+(\frac{4}{3}x_3,0,x_3)\; \forall x_3 \in \mathbb{R}\right\}=(-1,2,0)+\mathrm{Span}\left\{(\frac{4}{3},0,1)\right\}$.
	\end{center}
	That is, any of the infinitely many points on the straight line whose direction vector is $(\frac{4}{3},0,1)$ and which passes through the point $(-1,2,0)$ is a solution
	of the equation system.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Existence and uniqueness of solutions (revisited once again)} 
Consider the following homogeneous equation system
\begin{exampleblock}{Example}
	\begin{center}
		$\begin{array}{rrrcr}
			10x_1& -3x_2& -2x_3&=&0\\
		\end{array}
		\Rightarrow ... \sim
		\left(\begin{array}{rrr|r}
			10 &  -3& -2 & 0\\
		\end{array}\right)$
	\end{center}
	This is a compatible indeterminate system whose set of solutions is $S=\left\{(\frac{3}{10}x_2+\frac{1}{5}x_3,x_2,x_3)\; \forall x_2,x_3 \in \mathbb{R}\right\}$, or, equivalently,
	\begin{center}
		$S=\mathrm{Span}\left\{(\frac{3}{10},1,0),(\frac{1}{5},0,1)\right\}$.
	\end{center}
	That is, any of the infinitely many points in the plane containing the vectors $(\frac{3}{10},1,0)$ and
	$(\frac{1}{5},0,1)$ is a solution of the equation system.
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Existence and uniqueness of solutions (revisited once again)} 
Consider now the following non-homogeneous equation system
\begin{exampleblock}{Example}
	\begin{center}
		$\begin{array}{rrrcr}
			10x_1& -3x_2& -2x_3&=&10\\
		\end{array}
		\Rightarrow ... \sim
		\left(\begin{array}{rrr|r}
			10 &  -3& -2 & 10\\
		\end{array}\right)$
	\end{center}
		This is a compatible indeterminate system whose set of solutions is $S=\left\{(1+\frac{3}{10}x_2+\frac{1}{5}x_3,x_2,x_3)\; \forall x_2,x_3 \in \mathbb{R}\right\}$, or, equivalently,
  \begin{center}
		$S=\left\{(1,0,0)+(\frac{3}{10}x_2+\frac{1}{5}x_3,x_2,x_3)\; \forall x_2,x_3 \in \mathbb{R}\right\}=(1,0,0)+\mathrm{Span}\left\{(\frac{3}{10},1,0),(\frac{1}{5},0,1)\right\}$.
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Existence and uniqueness of solutions (revisited once again)} 
\begin{ceucor}
		Consider the compatible, non-homogeneous equation system given by $A\mathbf{x}=\mathbf{b}$ and its homogeneous counterpart $A\mathbf{x}=\mathbf{0}$. Let $S_h$ be the set of 
		solutions of the homogeneous equation system. Then, the set of solutions of the non-homogeneous equation system is of the form
		\begin{center}
			$S_{nh}=\mathbf{x}_0+S_h$
		\end{center}
		for some $\mathbf{x}_0 \in \mathbb{R}^n$ (any particular solution of the non-homogeneous system).
\end{ceucor}

\begin{ceudef}[Null space of $A$]
		$S_h$ is called the null space of the matrix $A$. It has the property that given an equation system $A\mathbf{x}=\mathbf{b}$, if $\mathbf{x}_0$ is a solution of the equation
		system, then $\mathbf{x}_0+\mathbf{x}_h$ is also a solution, for any $\mathbf{x}_h \in S_h$.
\end{ceudef}
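A Matlab sketch of this corollary (since the system is compatible, \texttt{pinv(A)*b} yields one particular solution $\mathbf{x}_0$):\\
{\color{blue}
\texttt{
A=[3 5 -4; -3 -2 4; 6 1 -8]; b=[7; -1; -4];\\
x0=pinv(A)*b; \% one particular solution\\
xh=null(A,'r'); \% basis of the null space\\
A*(x0+2*xh) \% returns b (up to round-off) for any multiple of xh
}
}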

\end{frame}

% ==============================================
\begin{frame}\frametitle{Existence and uniqueness of solutions (revisited once again)} 
\begin{center}
		\includegraphics[scale=0.3]{figNullSpace0.jpg}
\end{center}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Existence and uniqueness of solutions (revisited once again)} 
In this example, the authors describe how to solve a problem appearing in the tomographic
use of a certain microscope due to the absence of some measurements (which results in a large
null space of the tomographic problem).
\begin{center}
		\includegraphics[scale=0.3]{figNullSpace.jpg}
\end{center}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Existence and uniqueness of solutions (revisited once again)} 
In this example, the authors describe how the exact location of a tooth fracture is uncertain (Fig. C)
due to the artifacts introduced by the null space of the tomographic problem.

\begin{center}
		\includegraphics[scale=0.4]{figNullSpace2.jpg}
\end{center}

\begin{tiny}
Mora, M. A.; Mol, A.; Tyndall, D. A., Rivera, E. M. \textit{In vitro assessment of local computed tomography for the detection of longitudinal tooth fractures}. Oral Surg Oral Med Oral Pathol Oral Radiol Endod, \textbf{2007}, 103, 825-829.
\end{tiny}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (4th ed.), Chapter 1, Section 5:
	\begin{itemize}
		\item 1.5.11
		\item 1.5.13
		\item 1.5.19
		\item 1.5.21
		\item 1.5.25
		\item 1.5.26
		\item 1.5.36
		\item 1.5.39
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Applications (c)} 
\Outline

\begin{frame}\frametitle{Applications} 
In fluorescence microscopy, we can quantitatively measure the amount of fluorescence coming from each source with a linear equation system.
\begin{center}
	\begin{tabular}{m{6cm}m{3cm}}
		\includegraphics[scale=0.2]{figApplication1.jpg} &
		\includegraphics[scale=0.35]{figApplication2.jpg}
	\end{tabular}
\end{center}

\begin{tiny}
C. Calabia-Linares, M. Pérez-Martínez, N. Martín-Cofreces, M. Alfonso-Pérez, C. Gutiérrez-Vázquez, M. Mittelbrunn, S. Ibiza, F.R. Urbano-Olmos, C. Aguado-Ballano, C.O.S. Sorzano, F. Sánchez-Madrid, E. Veiga. \textit{Clathrin drives actin accumulation at the immunological synapse}. J. Cell Science, 124: 820-830 (\textbf{2011})
\end{tiny}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Applications} 
In computed tomography, a simple (but widely used) model for data collection states that each observed datum is the sum of the density values found along the corresponding X-ray path.
\begin{center}
		\includegraphics[scale=0.45]{figApplication3.jpg}
\end{center}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Applications} 
In the circulatory system, at each node the sum of the output flows must equal the sum of the input flows.
\begin{center}
		\includegraphics[scale=0.45]{figApplication4.jpg}
\end{center}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Applications} 
In a very simplified model, respiration is the burning of glucose that can be written as
\begin{center}
		$x_1C_6H_{12}O_6+x_2O_2 \rightarrow x_3 CO_2+x_4H_2O$\\
		\includegraphics[scale=0.4]{figApplication5.jpg}
\end{center}
\begin{description}
	\item[C:] $6x_1=x_3$
	\item[H:] $12x_1=2x_4$
	\item[O:] $6x_1+2x_2=2x_3+x_4$
\end{description}
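We can balance the reaction numerically in Matlab (a sketch: the rows of \texttt{A} encode the C, H and O balances above, and scaling the null-space basis so that $x_1=1$ is our choice):\\
{\color{blue}
\texttt{
A=[6 0 -1 0; 12 0 0 -2; 6 2 -2 -1];\\
v=null(A,'r');\\
x=v/v(1) \% returns (1, 6, 6, 6)
}
}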

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (4th ed.), Chapter 1, Section 6:
	\begin{itemize}
		\item 1.6.5
		\item 1.6.7
		\item 1.6.12
	\end{itemize}
\end{exerciseblock}
\end{frame}

% ==============================================
\subsection{Linear independence (c)} 
\Outline

\begin{frame}\frametitle{Linear independence} 

\begin{ceudef}[Linear independence]
	A set of vectors $\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_p$ is \textbf{linearly independent} if
	\begin{center}
		$x_1\mathbf{v}_1+x_2\mathbf{v}_2+...+x_p\mathbf{v}_p=\mathbf{0} \Rightarrow x_1=x_2=...=x_p=0$
	\end{center}
	That is, the only solution of the previous equation is the trivial solution $\mathbf{x}=\mathbf{0}$.
	The set is \textbf{linearly dependent} if the equation admits a solution in which at least one $x_i$ is different from 0.
	\label{eq:linearIndependence}
\end{ceudef}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear independence} 

\begin{exampleblock}{Example}
	Determine if the vectors $\mathbf{v}_1=(1,2,3)$, $\mathbf{v}_2=(4,5,6)$, and $\mathbf{v}_3=(2,1,0)$ are linearly independent.\\
	\underline{\textit{Solution}}\\
	The augmented matrix associated to the equation system in Definition \ref{eq:linearIndependence} is
	\begin{center}
		$\left(\begin{array}{rrr|r}
			1 &  4&  2 & 0\\
			2 &  5&  1 & 0\\
		  3 &  6&  0 & 0\\
		\end{array}\right)\;
		\sim ... \sim
		\left(\begin{array}{rrr|r}
			1 &  4&  2 & 0\\
			0 & -3& -3 & 0\\
		  0 &  0&  0 & 0\\
		\end{array}\right)$
	\end{center}
	Since the system is compatible indeterminate, there exists a solution apart from the trivial solution and, therefore, the vectors
	are linearly dependent.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear independence} 

\begin{exampleblock}{Example}
	If possible, find a linear relationship among the three vectors.\\
	\underline{\textit{Solution}}\\
	We continue transforming the augmented matrix to its reduced echelon form
	\begin{center}
		$\left(\begin{array}{rrr|r}
			1 &  4&  2 & 0\\
			0 & -3& -3 & 0\\
		  0 &  0&  0 & 0\\
		\end{array}\right)\;
		\sim ... \sim
		\left(\begin{array}{rrr|r}
			1 &  0& -2 & 0\\
			0 &  1&  1 & 0\\
		  0 &  0&  0 & 0\\
		\end{array}\right)$
	\end{center}
	From which $x_1=2x_3$ and $x_2=-x_3$. Simply by choosing $x_3=1$, we obtain that a possible solution
	to the equation system in Definition \ref{eq:linearIndependence} is $x_1=2$, $x_2=-1$ and $x_3=1$. Consequently, we have that
	\begin{center}
		$2\mathbf{v}_1-\mathbf{v}_2+\mathbf{v}_3=\mathbf{0}$
	\end{center}
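	Matlab finds this dependency directly (a sketch; the columns of \texttt{V} are the three vectors):\\
	{\color{blue}
	\texttt{
V=[1 4 2; 2 5 1; 3 6 0];\\
null(V,'r') \% returns (2, -1, 1), i.e. 2v1-v2+v3=0
	}
	}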

\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear independence} 

\begin{exampleblock}{Example}
	$\mathbf{v}_1=(3,1)$ and $\mathbf{v}_2=(6,2)$ are linearly dependent because
	\begin{center}
		$\mathbf{v}_2=2\mathbf{v}_1 \Rightarrow -2\mathbf{v}_1+\mathbf{v}_2=\mathbf{0} \Rightarrow \mathbf{v}_1=\frac{1}{2}\mathbf{v}_2$\\
		\vspace{0.25cm}
		\includegraphics[scale=0.35]{figLinearIndependence1.eps}
	\end{center}
	If two vectors are linearly dependent, then one of them is a multiple of the other.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear independence} 

\begin{exampleblock}{Example}
	$\mathbf{v}_1=(3,2)$ and $\mathbf{v}_2=(6,2)$ are linearly independent
	\begin{center}
		\includegraphics[scale=0.4]{figLinearIndependence2.eps}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear independence} 

\begin{exampleblock}{Example}
	$\mathbf{v}_1=(1,1,0)$, $\mathbf{v}_2=(-1,1,0)$ and $\mathbf{v}_3=(0,2,0)$ are linearly dependent because
	\begin{center}
		$\mathbf{v}_3=\mathbf{v}_1+\mathbf{v}_2$\\
		\vspace{0.25cm}
		\includegraphics[scale=0.4]{figLinearIndependence3.eps}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear independence} 

\begin{exampleblock}{Example}
	$\mathbf{v}_1=(1,1,0)$, $\mathbf{v}_2=(-1,1,0)$ and $\mathbf{v}_3=(0,2,1)$ are linearly independent
	\begin{center}
		\includegraphics[scale=0.4]{figLinearIndependence4.jpg}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear independence} 

\begin{ceuthm}[Linear independence of matrix columns]
	The columns of the matrix $A$ are linearly independent iff the only solution of $A\mathbf{x}=\mathbf{0}$ is the trivial one.\\
	\underline{\textit{Proof}}\\
	Let $A=[\mathbf{a}_1\: \mathbf{a}_2\: ...\: \mathbf{a}_p]$ so that the columns of the matrix $A$ are the vectors $\mathbf{a}_i$. According to Definition
	\ref{eq:linearIndependence} these vectors are linearly independent iff
	\begin{center}
		$x_1\mathbf{a}_1+x_2\mathbf{a}_2+...+x_p\mathbf{a}_p=\mathbf{0} \Rightarrow x_1=x_2=...=x_p=0$
	\end{center}
	or what is the same
	\begin{center}
		$A\mathbf{x}=\mathbf{0} \Rightarrow x_1=x_2=...=x_p=0$
	\end{center}
	as stated by the theorem (q.e.d.)
	\label{thm:linearIndependenceMatrix}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear independence} 

\begin{ceuthm}
	Any set $\left\{\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_p\right\}$ with $\mathbf{v}_i \in \mathbb{R}^n$ is linearly dependent if $p>n$. \\
	\underline{\textit{Proof}}\\
	Let $A=[\mathbf{v}_1\: \mathbf{v}_2\: ...\: \mathbf{v}_p]$ and let us consider the equation system $A\mathbf{x}=\mathbf{0}$. If $p>n$ there are
	more unknowns than equations, and consequently, there are free variables and the system is compatible indeterminate. Thus, there are more solutions apart
	from the trivial one and the set of vectors is linearly dependent.
\end{ceuthm}

\begin{ceuthm}
	If any set $\left\{\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_p\right\}$ with $\mathbf{v}_i \in \mathbb{R}^n$ contains the vector $\mathbf{0}$, then
	the set of vectors is linearly dependent. \\
	\underline{\textit{Proof}}\\
	We can assume, without loss of generality, that $\mathbf{v}_1=\mathbf{0}$. Then, we can set $x_1=1$, $x_2=x_3=...=x_p=0$ so that the following equation
	is met:
	\begin{center}
		$1\mathbf{v}_1+0\mathbf{v}_2+...+0\mathbf{v}_p=\mathbf{0}$ (q.e.d.)
	\end{center}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear independence} 

\begin{ceuthm}
	A set of vectors is linearly dependent iff at least one of the vectors is a linear combination of the rest.\\
	\underline{\textit{Proof}}\\
	\parbox{11cm}{
		\underline{\textit{Proof $\Leftarrow$}}\\
		\leftskip5mm Let us assume that $\mathbf{v}_j$ is a linear combination of the rest of the vectors, that is,
		\begin{center}$\mathbf{v}_j=\sum\limits_{k\neq j}{x_k\mathbf{v}_k}$\end{center}
		Then, we can write
		$\mathbf{v}_j-\sum\limits_{k\neq j}{x_k\mathbf{v}_k}=\mathbf{0} \Rightarrow$\\
		\begin{center}$-x_1\mathbf{v}_1-x_2\mathbf{v}_2-...-x_{j-1}\mathbf{v}_{j-1}+\mathbf{v}_j-x_{j+1}\mathbf{v}_{j+1}-...-x_p\mathbf{v}_p=\mathbf{0}$\end{center}
		And consequently there exists a non-trivial solution of the equation of Definition \ref{eq:linearIndependence}.
	}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear independence} 

\begin{block}{}
	\parbox{11cm}{
		\underline{\textit{Proof $\Rightarrow$}}\\
		\leftskip5mm If $\mathbf{v}_1=\mathbf{0}$, then we already have a vector that is a (trivial) linear combination of the rest
		($\mathbf{v}_1=0\mathbf{v}_2+0\mathbf{v}_3+...+0\mathbf{v}_p$).\\
		If $\mathbf{v}_1\neq\mathbf{0}$, then there exist some coefficients such that
		\begin{center}$x_1\mathbf{v}_1+x_2\mathbf{v}_2+...+x_p\mathbf{v}_p=\mathbf{0}$\end{center}
		Let $j$ be the largest index for which $x_j\neq 0$ (that is, $x_{j+1}=x_{j+2}=...=x_p=0$).\\
		If $j=1$, then $x_1\mathbf{v}_1=\mathbf{0}$ with $x_1\neq 0$, which is not possible because $\mathbf{v}_1\neq\mathbf{0}$.
		Then, $j>1$ and consequently
		\begin{center}
		$x_1\mathbf{v}_1+x_2\mathbf{v}_2+...+x_j\mathbf{v}_j+0\mathbf{v}_{j+1}+...+0\mathbf{v}_p=\mathbf{0} \Rightarrow$ \\
		$x_j\mathbf{v}_j=-x_1\mathbf{v}_1-x_2\mathbf{v}_2-...-x_{j-1}\mathbf{v}_{j-1} \Rightarrow$ \\
		$\mathbf{v}_j=-\frac{x_1}{x_j}\mathbf{v}_1-\frac{x_2}{x_j}\mathbf{v}_2-...-\frac{x_{j-1}}{x_j}\mathbf{v}_{j-1}$ (q.e.d.) \\
		\end{center}
	}
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (4th ed.), Chapter 1, Section 7:
	\begin{itemize}
		\item 1.7.9
		\item 1.7.39
		\item 1.7.40
		\item 1.7.41 (bring computer)
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Linear transformations (d)} 
\Outline

\begin{frame}\frametitle{Linear transformations} 

\begin{ceudef}[Transformation]
	A \textbf{transformation} (or \textbf{function} or \textbf{mapping}), $T$, from $\mathbb{R}^n$ to $\mathbb{R}^m$ is a rule that assigns to each vector of $\mathbb{R}^n$ a vector of $\mathbb{R}^m$.
	\begin{center}
		$\begin{array}{rcl}
			T:\mathbb{R}^n & \rightarrow & \mathbb{R}^m \\
			\mathbf{x} & \rightarrow & T(\mathbf{x})
		\end{array}$
	\end{center}
	$\mathbb{R}^n$ is called the \textbf{domain} of the transformation, and $\mathbb{R}^m$ its \textbf{codomain}. $T(\mathbf{x})$ is the image of vector $\mathbf{x}$ under the action of $T$. The set of all images is the \textbf{range} of T.
	\begin{center}
		\includegraphics[scale=0.3]{figTransformation.jpg}
	\end{center}
\end{ceudef}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear transformations} 

\begin{ceudef}[Matrix transformation]
	$T$ is a \textbf{matrix transformation} iff $T(\mathbf{x})=A\mathbf{x}$ for some matrix $A \in \mathcal{M}_{m\times n}$.
\end{ceudef}

\begin{exampleblock}{Example}
	Let us consider $A=\begin{pmatrix}4 & -3 & 1 & 3 \\ 2 & 0 & 5 & 1\end{pmatrix}$ and the matrix transformation $\mathbf{y}=A\mathbf{x}$.
	For instance, the image of $\mathbf{x}=(1,1,1,1)$ is
	\begin{center}
		$\mathbf{y}=\begin{pmatrix}4 & -3 & 1 & 3 \\ 2 & 0 & 5 & 1\end{pmatrix}\begin{pmatrix}1 \\ 1 \\ 1 \\ 1\end{pmatrix}=\begin{pmatrix}5 \\ 8\end{pmatrix}$
	\end{center}
	The equation system $A\mathbf{x}=\begin{pmatrix}5 \\ 8\end{pmatrix}$ looks for all those $\mathbf{x}$, if any, such that $T(\mathbf{x})=\begin{pmatrix}5 \\ 8\end{pmatrix}$.
	The domain of this transformation is $\mathbb{R}^4$ and its codomain $\mathbb{R}^2$.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear transformations} 

\begin{exampleblock}{Example}
	Let us consider $A=\begin{pmatrix}1 & 0 \\ 0 & 1\\ 0 & 0\end{pmatrix}$ and the matrix transformation $\mathbf{y}=A\mathbf{x}$.
	The domain of this transformation is $\mathbb{R}^2$ and its codomain $\mathbb{R}^3$. However, not every point of $\mathbb{R}^3$ needs to be the image of some point
	$\mathbf{x}\in \mathbb{R}^2$; only a subset of them is. In this case,
	\begin{center}
		$\mathbb{R}^3 \supset \mathrm{Range}(T)=\left<(1,0,0),(0,1,0)\right>$
	\end{center}
	In general, the range of the transformation $T$ is the subspace spanned by the columns of the matrix $A$.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear transformations} 

\begin{exampleblock}{Example}
	Let us consider $A=\begin{pmatrix}1 & -3 \\ 3 & 5\\ -1 & 7\end{pmatrix}$ and the matrix transformation $\mathbf{y}=A\mathbf{x}$.
	\begin{enumerate}
		\item What is the image of $\mathbf{u}=(2, -1)$ under $T$?\\
					$T(\mathbf{u})=A\mathbf{u}=(5,1,9)$
		\item Let $\mathbf{b}=(3,2,-5)$. What is the $\mathbf{x}$ such that $T(\mathbf{x})=\mathbf{b}$?\\
					$\left(\begin{array}{rr|r}
						1 & -3&  3\\
						3 &  5&  2\\
					 -1 &  7& -5\\
					\end{array}\right)\;\sim
					\left(\begin{array}{rr|r}
						1 &  0&  \frac{3}{2}\\
						0 &  1&  -\frac{1}{2}\\
					  0 &  0&  0\\
					\end{array}\right)$\\
					From which we deduce $\mathbf{x}=(\frac{3}{2},-\frac{1}{2})$.
		\item Is there any other $\mathbf{x}$ such that $T(\mathbf{x})=\mathbf{b}$?\\
					No, the previous solution is unique because the equation system is compatible determinate.
	\end{enumerate}
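	A Matlab check of items 1 and 2 (for this overdetermined but compatible system, \texttt{A\textbackslash b} computes a least-squares solution, which here coincides with the exact one):\\
	{\color{blue}
	\texttt{
A=[1 -3; 3 5; -1 7];\\
A*[2; -1] \% item 1: returns (5, 1, 9)\\
A\textbackslash[3; 2; -5] \% item 2: returns (1.5, -0.5)
	}
	}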
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear transformations} 

\begin{exampleblock}{Example}
	\begin{enumerate}
		\setcounter{enumi}{3}
		\item Does $\mathbf{c}=(3,2,5)$ belong to $\mathrm{Range}(T)$?\\
					$\left(\begin{array}{rr|r}
						1 & -3&  3\\
						3 &  5&  2\\
					 -1 &  7&  5\\
					\end{array}\right)\;\sim
					\left(\begin{array}{rr|r}
						1 &  0&  \frac{3}{2}\\
						0 &  1&  -\frac{1}{2}\\
					  0 &  0&  -35\\
					\end{array}\right)$\\
					Since the system is incompatible, we deduce that there is no vector $\mathbf{x}$ whose image is $\mathbf{c}$ and, consequently, $\mathbf{c} \not\in \mathrm{Range}(T)$.
		\item What is the function $\mathbf{y}=T(\mathbf{x})$?\\
					$\begin{pmatrix}y_1 \\ y_2 \\ y_3\end{pmatrix}=\begin{pmatrix}1 & -3 \\ 3 & 5\\ -1 & 7\end{pmatrix}\begin{pmatrix}x_1 \\ x_2\end{pmatrix}=
					  \begin{pmatrix}x_1 -3x_2 \\ 3x_1 +5x_2\\ -x_1+7x_2\end{pmatrix}$
		\item What is $\mathrm{Range}(T)$?\\
					$\mathrm{Range}(T)=\left<(1,3,-1),(-3,5,7)\right>=\left\{\mathbf{y}\in\mathbb{R}^3 \arrowvert \mathbf{y}=x_1\begin{pmatrix}1 \\ 3 \\ -1\end{pmatrix}+x_2
						\begin{pmatrix}-3 \\ 5 \\ 7\end{pmatrix}\:\forall x_1,x_2\in\mathbb{R}\right\}$\\
					Because $(1,3,-1)$ and $(-3,5,7)$ are linearly independent, $\mathrm{Range}(T)$ is a plane.
					
	\end{enumerate}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear transformations} 

\begin{exampleblock}{Example}
	Consider the transformation $T(\mathbf{x})=\begin{pmatrix}1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 0\end{pmatrix} \begin{pmatrix} x_1 \\ x_2 \\ x_3\end{pmatrix}=
		\begin{pmatrix} x_1 \\ x_2 \\ 0 \end{pmatrix}$. This
	is a projection transformation that projects any 3D point onto the $XY$ plane.
	\begin{center}
		\includegraphics[scale=0.3]{figProjection.jpg}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear transformations} 

\begin{ceudef}[Linear transformation]
	$T$ is a \textbf{linear transformation} iff $\forall \mathbf{x}_1,\mathbf{x}_2 \in \mathrm{Dom}(T)$, $\forall c \in \mathbb{R}$
	\begin{enumerate}
		\item $T(\mathbf{x}_1+\mathbf{x}_2)=T(\mathbf{x}_1)+T(\mathbf{x}_2)$
		\item $T(c\mathbf{x}_1)=cT(\mathbf{x}_1)$
	\end{enumerate}
	\label{def:linearTransformation}
\end{ceudef}

\begin{ceuthm}
	If $T(\mathbf{x})$ is a linear transformation, then
	\begin{enumerate}
		\item $T(\mathbf{0})=\mathbf{0}$
		\item $T(c_1\mathbf{x}_1+c_2\mathbf{x}_2)=c_1T(\mathbf{x}_1)+c_2T(\mathbf{x}_2)\; \forall \mathbf{x}_1,\mathbf{x}_2 \in \mathrm{Dom}(T)$, $\forall c_1,c_2 \in \mathbb{R}$
	\end{enumerate}
	
	\underline{\textit{Proof}}\\
	\begin{enumerate}
		\item $T(\mathbf{0})=T(0 \mathbf{x}_1)=$[(2), Def. \ref{def:linearTransformation}]$=0 T(\mathbf{x}_1)=\mathbf{0}$ (q.e.d.)
		\item $T(c_1\mathbf{x}_1+c_2\mathbf{x}_2)=$[(1), Def. \ref{def:linearTransformation}]$=T(c_1\mathbf{x}_1)+T(c_2\mathbf{x}_2)=$[(2), Def. \ref{def:linearTransformation}]
				  $c_1T(\mathbf{x}_1)+c_2T(\mathbf{x}_2)$ (q.e.d.)
	\end{enumerate}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear transformations} 

\begin{ceuthm}
	If $\forall \mathbf{x}_1,\mathbf{x}_2 \in \mathrm{Dom}(T)$, $\forall c_1,c_2 \in \mathbb{R}$ it is verified that
		$T(c_1\mathbf{x}_1+c_2\mathbf{x}_2)=c_1T(\mathbf{x}_1)+c_2T(\mathbf{x}_2)$, then $T(\mathbf{x})$ is a linear transformation.\\
	\underline{\textit{Proof}}\\
	\begin{enumerate}
		\item Let us consider the case $c_1=c_2=1$, then according to the assumption of the theorem we have $T(\mathbf{x}_1+\mathbf{x}_2)=T(\mathbf{x}_1)+T(\mathbf{x}_2)$, which implies
			(1) in Def. \ref{def:linearTransformation}.
		\item Let us consider the case $c_2=0$, then according to the assumption of the theorem we have $T(c_1\mathbf{x}_1)=c_1T(\mathbf{x}_1)$, which implies
			(2) in Def. \ref{def:linearTransformation}.
	\end{enumerate}
	(q.e.d.)
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear transformations} 

\begin{block}{Corollary: Principle of superposition}
	If $T$ is a linear transformation, then $\forall \mathbf{x}_i \in \mathrm{Dom}(T)$, $\forall c_i \in \mathbb{R}$ it is verified that
		$T\left(\sum\limits_i{c_i\mathbf{x}_i}\right)=\sum\limits_i{c_iT(\mathbf{x}_i)}$.\\
	\underline{\textit{Proof}}\\
	Apply the previous theorem multiple times. (q.e.d.)
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear transformations} 

\begin{exampleblock}{Example}
	Show that $T(\mathbf{x})=\begin{pmatrix}1 & 0 \\ 0 & -1 \end{pmatrix}\begin{pmatrix}x_1 \\ x_2 \end{pmatrix}$ is a linear transformation.\\
	\underline{\textit{Proof}}\\
	\begin{enumerate}
		\item Show that $T(\mathbf{x}_1+\mathbf{x}_2)=T(\mathbf{x}_1)+T(\mathbf{x}_2)$\\
				  On one side we have $T(\mathbf{x}_1+\mathbf{x}_2)=\begin{pmatrix}1 & 0 \\ 0 & -1 \end{pmatrix}\begin{pmatrix}x_{11}+x_{21} \\ x_{12}+x_{22} \end{pmatrix}
						=\begin{pmatrix}x_{11}+x_{21} \\ -x_{12}-x_{22} \end{pmatrix}$\\
					On the other side we have $T(\mathbf{x}_1)+T(\mathbf{x}_2)=\begin{pmatrix}1 & 0 \\ 0 & -1 \end{pmatrix}\begin{pmatrix}x_{11} \\ x_{12} \end{pmatrix}+
						\begin{pmatrix}1 & 0 \\ 0 & -1 \end{pmatrix}\begin{pmatrix}x_{21} \\ x_{22} \end{pmatrix}=
						\begin{pmatrix}x_{11} \\ -x_{12} \end{pmatrix}+\begin{pmatrix}x_{21} \\ -x_{22} \end{pmatrix}=\begin{pmatrix}x_{11}+x_{21} \\ -x_{12}-x_{22} \end{pmatrix}$
					Obviously, these two calculations give the same result.
		\item Show that $T(c_1\mathbf{x}_1)=c_1T(\mathbf{x}_1)$\\
				  $\begin{array}{rcl}T(c_1\mathbf{x}_1)&=&\begin{pmatrix}1 & 0 \\ 0 & -1 \end{pmatrix}\begin{pmatrix}c_1x_{11} \\ c_1x_{12} \end{pmatrix}
						=\begin{pmatrix}c_1x_{11} \\ -c_1x_{12} \end{pmatrix} =c_1\begin{pmatrix}x_{11} \\ -x_{12} \end{pmatrix}\\ &=&c_1
						\begin{pmatrix}1 & 0 \\ 0 & -1 \end{pmatrix}\begin{pmatrix}x_{11} \\ x_{12} \end{pmatrix}=c_1T(\mathbf{x}_1)\end{array}$
	\end{enumerate}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear transformations} 

\begin{ceuthm}
	Any matrix transformation is a linear transformation.\\
	\underline{\textit{Proof}}\\
	\begin{enumerate}
		\item Show that $T(\mathbf{x}_1+\mathbf{x}_2)=T(\mathbf{x}_1)+T(\mathbf{x}_2)$\\
				  $T(\mathbf{x}_1+\mathbf{x}_2)=A(\mathbf{x}_1+\mathbf{x}_2)=A\mathbf{x}_1+A\mathbf{x}_2=T(\mathbf{x}_1)+T(\mathbf{x}_2)$ (q.e.d.)
		\item Show that $T(c_1\mathbf{x}_1)=c_1T(\mathbf{x}_1)$\\
				  $T(c_1\mathbf{x}_1)=A(c_1\mathbf{x}_1)=c_1(A\mathbf{x}_1)=c_1T(\mathbf{x}_1)$ (q.e.d.)
	\end{enumerate}
\end{ceuthm}

\end{frame}
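
% ==============================================
\begin{frame}[fragile]\frametitle{Linear transformations} 

\begin{exampleblock}{Superposition check (sketch)}
	The proof above translates directly into a numerical experiment. This illustrative sketch (the random matrix and random coefficients are choices made here) checks the superposition principle with several terms at once:
{\footnotesize
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
A = rng.standard_normal((2, 5))
T = lambda x: A @ x

cs = rng.standard_normal(4)        # coefficients c_i
xs = rng.standard_normal((4, 5))   # vectors x_i in R^5

lhs = T(sum(c*x for c, x in zip(cs, xs)))   # T(sum_i c_i x_i)
rhs = sum(c*T(x) for c, x in zip(cs, xs))   # sum_i c_i T(x_i)
print(np.allclose(lhs, rhs))                # True
\end{verbatim}
}
\end{exampleblock}

\end{frame}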

% ==============================================
\begin{frame}\frametitle{Reinterpreting the columns of a matrix} 

\begin{exampleblock}{Example}
	Consider $T(\mathbf{x})=A\mathbf{x}$ with $A=\begin{pmatrix}4&-3&1&3\\2&0&5&1\end{pmatrix}$. Consider the canonical (standard) basis of $\mathbb{R}^4$, formed by the vectors
	$\mathbf{e}_1=(1,0,0,0)$, $\mathbf{e}_2=(0,1,0,0)$, $\mathbf{e}_3=(0,0,1,0)$, and $\mathbf{e}_4=(0,0,0,1)$. Let us transform each one of these vectors\\
	$\begin{array}{cccc}
		T(\mathbf{e}_1)=\begin{pmatrix}4\\2\end{pmatrix} &
	  T(\mathbf{e}_2)=\begin{pmatrix}-3\\0\end{pmatrix} &
	  T(\mathbf{e}_3)=\begin{pmatrix}1\\5\end{pmatrix} &
		T(\mathbf{e}_4)=\begin{pmatrix}3\\1\end{pmatrix}
	\end{array}$\\
	In general, the image of $\mathbf{e}_i$ is exactly the $i$-th column of the matrix $A$.
\end{exampleblock}

\begin{block}{Corollary}
	The columns of the matrix $A \in \mathcal{M}_{m\times n}$ can be understood as the transformations of the canonical basis of $\mathbb{R}^n$:
	\begin{center}
		$A=\begin{pmatrix}\mathbf{a}_1 & \mathbf{a}_2 & ... & \mathbf{a}_n\end{pmatrix}=\begin{pmatrix}T(\mathbf{e}_1) & T(\mathbf{e}_2) & ... & T(\mathbf{e}_n)\end{pmatrix}$
	\end{center}
\end{block}

\end{frame}
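
% ==============================================
\begin{frame}[fragile]\frametitle{Reinterpreting the columns of a matrix} 

\begin{exampleblock}{Numerical check (sketch)}
	A short sketch of the corollary with the matrix $A$ of the example: multiplying $A$ by each canonical basis vector returns the corresponding column.
{\footnotesize
\begin{verbatim}
import numpy as np

A = np.array([[4., -3., 1., 3.],
              [2.,  0., 5., 1.]])

for i in range(4):
    e = np.zeros(4)
    e[i] = 1.0               # canonical basis vector e_{i+1}
    print(A @ e, A[:, i])    # T(e_i) equals the i-th column of A
\end{verbatim}
}
\end{exampleblock}

\end{frame}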

% ==============================================
\begin{frame}\frametitle{Reinterpreting the columns of a matrix} 

\begin{exampleblock}{Example (continued)}
	In the previous example consider transforming the vector $\mathbf{x}=(1,-2,3,5)$. This vector is equal to
	\begin{center}
		$\mathbf{x}=\mathbf{e}_1-2\mathbf{e}_2+3\mathbf{e}_3+5\mathbf{e}_4$
	\end{center}
	Then, we have
	\begin{center}
		$\begin{array}{rcl}T(\mathbf{x})&=&T(\mathbf{e}_1-2\mathbf{e}_2+3\mathbf{e}_3+5\mathbf{e}_4)=T(\mathbf{e}_1)-2T(\mathbf{e}_2)+3T(\mathbf{e}_3)+5T(\mathbf{e}_4)\\
			&=&\begin{pmatrix}4\\2\end{pmatrix}-2\begin{pmatrix}-3\\0\end{pmatrix} +3\begin{pmatrix}1\\5\end{pmatrix}+5\begin{pmatrix}3\\1\end{pmatrix}=
			   \begin{pmatrix}28\\22\end{pmatrix}
		\end{array}$
	\end{center}
\end{exampleblock}

\end{frame}
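
% ==============================================
\begin{frame}[fragile]\frametitle{Reinterpreting the columns of a matrix} 

\begin{exampleblock}{Numerical check (sketch)}
	The computation of the example can be reproduced in two equivalent ways: directly as $A\mathbf{x}$, or as the linear combination of the columns of $A$ weighted by the coordinates of $\mathbf{x}$ (an illustrative sketch):
{\footnotesize
\begin{verbatim}
import numpy as np

A = np.array([[4., -3., 1., 3.],
              [2.,  0., 5., 1.]])
x = np.array([1., -2., 3., 5.])

direct = A @ x                                # [28. 22.]
combo = sum(x[i]*A[:, i] for i in range(4))   # same result
print(direct, combo)
\end{verbatim}
}
\end{exampleblock}

\end{frame}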

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (4th ed.), Chapter 1, Section 8:
	\begin{itemize}
		\item 1.8.23
		\item 1.8.25
		\item 1.8.26
		\item 1.8.30
		\item 1.8.34
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Geometrical transformations (e)} 
\Outline

\begin{frame}\frametitle{Geometrical transformations}
	Certain matrix transformations map the unit square onto different shapes. The following figures show some of these transformations.
	\begin{center}
		\includegraphics[scale=0.35]{figUnitSquare.jpg} \\
		\includegraphics[scale=0.35]{figTransformations1.jpg}
	\end{center}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Geometrical transformations}
	\begin{center}
		\includegraphics[scale=0.35]{figTransformations2.jpg}
	\end{center}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Geometrical transformations}
	\begin{center}
		\includegraphics[scale=0.35]{figTransformations3.jpg}
	\end{center}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Geometrical transformations}
	\begin{center}
		\includegraphics[scale=0.32]{figTransformations4.jpg}
	\end{center}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Geometrical transformations}
	\begin{center}
		\includegraphics[scale=0.35]{figTransformations5.jpg}
	\end{center}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Geometrical transformations}
	\begin{center}
		\includegraphics[scale=0.35]{figTransformations6.jpg}
	\end{center}
\end{frame}
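
% ==============================================
\begin{frame}[fragile]\frametitle{Geometrical transformations}

\begin{exampleblock}{Transforming the unit square (sketch)}
	As an illustration (the matrices below are standard examples of this kind of transformation, not necessarily the ones shown in the figures), a matrix transformation can be applied to the four corners of the unit square:
{\footnotesize
\begin{verbatim}
import numpy as np

# Corners of the unit square, one corner per column
square = np.array([[0., 1., 1., 0.],
                   [0., 0., 1., 1.]])

reflect = np.array([[1., 0.],
                    [0., -1.]])   # reflection across the x1-axis
shear = np.array([[1., 2.],
                  [0., 1.]])      # horizontal shear

print(reflect @ square)   # corners of the reflected square
print(shear @ square)     # corners of the sheared parallelogram
\end{verbatim}
}
\end{exampleblock}

\end{frame}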

% ==============================================
\subsection{Classification of functions (e)} 
\Outline

\begin{frame}\frametitle{Classification of functions}
	\begin{ceudef}
		Functions can be classified as \textbf{surjective, injective or bijective}:
		\begin{description}
			\item[\textbf{Surjective}:] A function is surjective if every point of the codomain has \textbf{at least one} point of the domain that maps onto it. They are also called \textbf{onto} functions.
			\item[\textbf{Injective}:] A function is injective if every point of the codomain has \textbf{at most one} point in the domain that maps onto it. They are also called \textbf{one-to-one} functions.
			\item[\textbf{Bijective}:] A function is bijective if it is injective and surjective. 
		\end{description}
		\begin{center}
			\includegraphics[scale=0.35]{figClassification.jpg}
		\end{center}
		
	\end{ceudef}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Classification of functions}
	\begin{exampleblock}{Example}
		Here are some examples of this classification applied to linear transformations:
		\begin{center}
			\includegraphics[scale=0.32]{figClassification1.jpg}\\
			\includegraphics[scale=0.32]{figClassification2.jpg}
		\end{center}
		
	\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Classification of functions}
	\begin{exampleblock}{Example}
		Consider $T(\mathbf{x})=A\mathbf{x}$ with $A=\begin{pmatrix}1&-4&8&1\\0&2&-1&3\\0&0&0&5\end{pmatrix}$. This is a transformation from $\mathbb{R}^4$ to $\mathbb{R}^3$. The columns $\mathbf{a}_1$, $\mathbf{a}_2$, and $\mathbf{a}_4$ of $A$ are linearly independent and span $\mathbb{R}^3$ (that is, the function is surjective). Since there are four unknowns but only three pivot columns, there must be points in $\mathbb{R}^3$ that come from several points in $\mathbb{R}^4$ (the function is not injective). Let us find some of these points.
		\begin{center}
				$\left(\begin{array}{rrrr|c}
					1 & -4&  8 & 1 & y_1\\
					0 &  2& -1 & 3 & y_2\\
				  0 &  0&  0 & 5 & y_3\\
				\end{array}\right)\;\sim
				\left(\begin{array}{rrrr|c}
					1 &  0&  6 & 0 & y_1+2y_2-\frac{7}{5}y_3\\
					0 &  1& -\frac{1}{2} & 0 & \frac{1}{2}y_2-\frac{3}{10}y_3\\
				  0 &  0&  0 & 1 & \frac{1}{5}y_3\\
				\end{array}\right)\Rightarrow$\\
				$\begin{array}{l}
					x_1=y_1+2y_2-\frac{7}{5}y_3-6x_3 \\
					x_2=\frac{1}{2}y_2-\frac{3}{10}y_3+\frac{1}{2}x_3 \\
					x_4=\frac{1}{5}y_3
				\end{array}$
		\end{center}
		Since $x_3$ is a free variable, for each point in the codomain there is a whole straight line in $\mathbb{R}^4$ that maps onto it (its parametric equations are the ones given above).
	\end{exampleblock}
\end{frame}
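
% ==============================================
\begin{frame}[fragile]\frametitle{Classification of functions}

\begin{exampleblock}{Numerical check (sketch)}
	The conclusions of the example can be verified numerically. In this illustrative sketch, the test point $\mathbf{y}=(1,2,3)$ and the use of a least-squares solver to obtain one particular preimage are choices made here:
{\footnotesize
\begin{verbatim}
import numpy as np

A = np.array([[1., -4.,  8., 1.],
              [0.,  2., -1., 3.],
              [0.,  0.,  0., 5.]])
print(np.linalg.matrix_rank(A))             # 3: the columns span R^3

y = np.array([1., 2., 3.])
x0, *_ = np.linalg.lstsq(A, y, rcond=None)  # one preimage of y
n = np.array([-6., 0.5, 1., 0.])            # null-space direction (x3 free)

for t in (0.0, 1.0, -2.5):                  # a few points on the line
    print(A @ (x0 + t*n))                   # always equals y
\end{verbatim}
}
\end{exampleblock}

\end{frame}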

% ==============================================
\begin{frame}\frametitle{Classification of functions}
	\begin{ceuthm}
		Let $T(\mathbf{x})$ be a linear transformation. $T(\mathbf{x})$ is an injective function iff $T(\mathbf{x})=\mathbf{0}$ has only the trivial solution $\mathbf{x}=\mathbf{0}$.\\
		\underline{\textit{Proof}}\\
		\parbox{11cm}{
			\underline{\textit{Proof $\Rightarrow$}}\\
			\leftskip5mm If $T$ is injective then, by definition, every point of the codomain, and in particular $\mathbf{0}$, is the image of at most one point in the domain. We already
				know that for any linear transformation $T(\mathbf{0})=\mathbf{0}$; therefore, $\mathbf{x}=\mathbf{0}$ must be the unique solution of the equation
				$T(\mathbf{x})=\mathbf{0}$.
		}
		\label{thm:classification}
	\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Classification of functions}
	\begin{block}{}
		\parbox{11cm}{
			\underline{\textit{Proof $\Leftarrow$}}\\
			\leftskip5mm For any linear transformation we know that $T(\mathbf{0})=\mathbf{0}$. Let us assume that the statement is false; that is, $T(\mathbf{x})=\mathbf{0}$ has only
			the trivial solution, but $T$ is not injective. If $T$ is not injective, there exists a point $\mathbf{y}$ in the codomain that is the image of two different points in the domain
			\begin{center}
				$T(\mathbf{x}_1)=\mathbf{y}$\\
				$T(\mathbf{x}_2)=\mathbf{y}$\\
			\end{center}
			If we now subtract the two equations, we have
			\begin{center}
				\begin{tabular}{cl}
					$T(\mathbf{x}_1)-T(\mathbf{x}_2)=\mathbf{0}$ & \\
					$T(\mathbf{x}_1-\mathbf{x}_2)=\mathbf{0}$ & $T$ is linear \\
					$\mathbf{x}_1-\mathbf{x}_2=\mathbf{0}$ & There is only one solution of $T(\mathbf{x})=\mathbf{0}$ \\
					$\mathbf{x}_1=\mathbf{x}_2$ & contradiction (q.e.d.)\\
				\end{tabular}
			\end{center}
		}
	\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Classification of functions}
	\begin{ceuthm}
		Let $T(\mathbf{x})=A\mathbf{x}$ be a linear transformation. Then:
		\begin{enumerate}
			\item $\mathrm{Range}(T)=\mathbb{R}^m$ iff $\mathrm{Span}(\mathbf{a}_1,\mathbf{a}_2,...,\mathbf{a}_n)=\mathbb{R}^m$.
			\item $T$ is injective iff all columns of $A$ are linearly independent.
		\end{enumerate}
		\underline{\textit{Proof}}\\
		\begin{enumerate}
			\item According to Theorem \ref{thm:interpretationAsSubspace}, the columns of $A$ span $\mathbb{R}^m$ iff for each $\mathbf{b}\in\mathbb{R}^m$ the equation $A\mathbf{x}=\mathbf{b}$
					  is consistent, that is, iff there exists at least one solution of $T(\mathbf{x})=\mathbf{b}$. This is precisely the statement $\mathrm{Range}(T)=\mathbb{R}^m$.
			\item According to Theorem \ref{thm:classification}, $T$ is injective iff $T(\mathbf{x})=\mathbf{0}$ has only the trivial solution or, what is the same,
					  iff $A\mathbf{x}=\mathbf{0}$ has only the trivial solution. This happens iff the columns of $A$ are linearly independent, as stated by Theorem
						\ref{thm:linearIndependenceMatrix}.
		\end{enumerate}		
	\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Classification of functions}
	\begin{exampleblock}{Example}
		Let $T(\mathbf{x})=\begin{pmatrix}3x_1+x_2 \\ 5x_1+7x_2 \\ x_1+3x_2\end{pmatrix}$:
		\begin{enumerate}
			\item Show that it is a linear transformation
			\item Does it map $\mathbb{R}^2$ onto $\mathbb{R}^3$?
		\end{enumerate}
		\underline{\textit{Solution}}\\
		\begin{enumerate}
			\item The transformation is of the form $T(\mathbf{x})=A\mathbf{x}$ with $A=\begin{pmatrix}3 & 1 \\ 5 & 7 \\ 1 & 3 \end{pmatrix}$ and, therefore, the transformation is linear.
			\item The columns of $A$ are linearly independent (they are not multiples of each other); hence, by the previous theorem, the transformation is injective.
						However, they do not span all of $\mathbb{R}^3$ (two vectors span at most a plane; spanning $\mathbb{R}^3$ requires at least three vectors). Consequently,
						the transformation is not surjective, and it does not map $\mathbb{R}^2$ onto $\mathbb{R}^3$.
		\end{enumerate}		
	\end{exampleblock}
\end{frame}
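
% ==============================================
\begin{frame}[fragile]\frametitle{Classification of functions}

\begin{exampleblock}{Rank test (sketch)}
	Both conditions of the previous theorem can be tested through the rank of the matrix. A sketch for the matrix of this example:
{\footnotesize
\begin{verbatim}
import numpy as np

A = np.array([[3., 1.],
              [5., 7.],
              [1., 3.]])
m, n = A.shape
r = np.linalg.matrix_rank(A)
print(r == n)   # True:  columns independent     -> T injective
print(r == m)   # False: columns do not span R^3 -> T not surjective
\end{verbatim}
}
\end{exampleblock}

\end{frame}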

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (4th ed.), Chapter 1, Section 9:
	\begin{itemize}
		\item 1.9.1
		\item 1.9.3
		\item 1.9.17
		\item 1.9.33
		\item 1.9.36
		\item 1.9.37
		\item 1.9.39
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{More applications (e)} 
\Outline

\begin{frame}\frametitle{More applications}
	\begin{exampleblock}{Construction of a diet}
		Given the following nutritional information:
		\begin{center}
			\includegraphics[scale=0.35]{figApplication6.jpg}
		\end{center}
		What amounts of nonfat milk, soy flour, and whey are needed to provide the protein, carbohydrate, and fat planned for one day?\\
		\underline{\textit{Solution}}\\
		$\left(\begin{array}{rrr|r}
			36 & 51&  13 & 33\\
			52 & 34&  74 & 45\\
		   0 &  7&  1.1 & 3\\
		\end{array}\right)\;\sim
		\left(\begin{array}{rrr|r}
			 1 &  0&  0 & 0.277\\
			 0 &  1&  0 & 0.392\\
		   0 &  0&  1 & 0.233\\
		\end{array}\right)$\\
		That is, since the amounts are expressed in units of 100 g, we need $x_1=0.277\cdot 100$ g $=27.7$ g of nonfat milk, $x_2=39.2$ g of soy flour, and $x_3=23.3$ g of whey.
		
	\end{exampleblock}
\end{frame}
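
% ==============================================
\begin{frame}[fragile]\frametitle{More applications}

\begin{exampleblock}{Construction of a diet (sketch)}
	The same system can be solved in one call with NumPy (a sketch of the computation above; the amounts are in units of 100 g):
{\footnotesize
\begin{verbatim}
import numpy as np

A = np.array([[36., 51., 13. ],   # protein per 100 g of each ingredient
              [52., 34., 74. ],   # carbohydrate
              [ 0.,  7.,  1.1]])  # fat
b = np.array([33., 45., 3.])      # daily targets

print(np.linalg.solve(A, b))      # approx. [0.277 0.392 0.233]
\end{verbatim}
}
\end{exampleblock}

\end{frame}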

% ==============================================
\begin{frame}\frametitle{More applications}
	\begin{exampleblock}{Dynamic systems: difference equations}
		In a simplified model, red blood cells (erythrocytes) are created in the bone marrow; some of them then pass into the bloodstream and, after some time, old red blood cells are destroyed in the spleen.
		\begin{center}
			\includegraphics[scale=0.75]{figApplication7.jpg}
		\end{center}
	\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{More applications}
	\begin{exampleblock}{Dynamic systems: difference equations (continued)}
		Suppose that during each time interval:
		\begin{itemize}
			\item 5\% of the erythrocytes in the marrow pass into the bloodstream.
			\item 2\% of the erythrocytes in the bloodstream are destroyed by the spleen.
			\item $10^6$ new red blood cells are created in the marrow.
		\end{itemize}
		The following equation determines the number of erythrocytes in each compartment at any time step
		\begin{center}
		$\begin{pmatrix}x_{marrow}^{(k+1)} \\ x_{blood}^{(k+1)}\end{pmatrix}=
		 \begin{pmatrix}0.95 & 0 \\ 0.05 & 0.98\end{pmatrix}\begin{pmatrix}x_{marrow}^{(k)} \\ x_{blood}^{(k)}\end{pmatrix}
		 +\begin{pmatrix}10^6 \\ 0\end{pmatrix}$
		\end{center}
		Models of this kind are called difference equations.
	\end{exampleblock}
\end{frame}
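
% ==============================================
\begin{frame}[fragile]\frametitle{More applications}

\begin{exampleblock}{Dynamic systems: difference equations (sketch)}
	As an illustration (not part of the original example), the recurrence can be iterated directly, and its steady state $\mathbf{x}^*$ can be obtained by solving $(I-M)\mathbf{x}^*=\mathbf{b}$:
{\footnotesize
\begin{verbatim}
import numpy as np

M = np.array([[0.95, 0.00],
              [0.05, 0.98]])
b = np.array([1e6, 0.0])

x = np.zeros(2)          # start with no erythrocytes
for _ in range(500):     # x^(k+1) = M x^(k) + b
    x = M @ x + b
print(x)                 # approaches the steady state

print(np.linalg.solve(np.eye(2) - M, b))   # steady state [2.0e7 5.0e7]
\end{verbatim}
}
	At the steady state the marrow holds $2\times 10^7$ erythrocytes and the bloodstream $5\times 10^7$.
\end{exampleblock}

\end{frame}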



\OutlineFinal

\end{document}