\input{../slidesComun}

\title[5. Vector spaces]{Chapter 5. Vector spaces}  
\COSS

% ==============================================
\begin{frame}\frametitle{References} 

\begin{figure}
	\includegraphics[scale=0.7]{../lay_linearalgebra.jpg}
\end{figure}
D. Lay. Linear algebra and its applications (3rd ed). Pearson (2006). Chapter 4.

\end{frame}

% ==============================================
\begin{frame}\frametitle{A little bit of history} 

Vectors were first used about 1636 in 2D and 3D to describe geometrical operations by \href{https://en.wikipedia.org/wiki/Ren\%C3\%A9_Descartes}{René Descartes} and \href{http://en.wikipedia.org/wiki/Pierre_de_Fermat}{Pierre de Fermat}. In 1857 the notation of vectors and matrices was unified by \href{http://en.wikipedia.org/wiki/Arthur_Cayley}{Arthur Cayley}. \href{http://en.wikipedia.org/wiki/Giuseppe_Peano}{Giuseppe Peano} was the first to give the modern definition of vector space in 1888, and \href{http://en.wikipedia.org/wiki/Henri_Lebesgue}{Henri Lebesgue} (about 1900) applied this theory to describe functional spaces as vector spaces.

\begin{figure}
	\includegraphics[height=3cm]{Descartes.jpg}
	\includegraphics[height=3cm]{Fermat.jpg}
	\includegraphics[height=3cm]{../Tema3/Cayley.jpg}
  \includegraphics[height=3cm]{Peano.jpg}
  \includegraphics[height=3cm]{Lebesgue.jpg}
\end{figure}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Applications} 

It is difficult to think of a mathematical tool with more applications than vector spaces. Thanks to them we may sum forces, control devices, model complex systems, denoise images, ... They underlie all these processes and it is thanks to them that we can ``nicely'' operate with vectors. They are a mathematical structure that generalizes many other useful structures.

\begin{center}
	\includegraphics[height=5cm]{figVectorSpace.png}
\end{center}

\end{frame}

% ==============================================
\setnextsection{5}
\section{Vector spaces} 
\subsection{Definition (a)} 
\Outline

\begin{frame}\frametitle{Vector space} 
\begin{ceudef}[Vector space]
	A \textbf{vector space} is a non-empty set, $V$, of objects (called \textbf{vectors}) in which we define two operations: the sum among vectors and the multiplication by a scalar (an element of any field, $\mathbb{K}$), and that $\forall \mathbf{u},\mathbf{v},\mathbf{w}\in V$ and $\forall c,d \in \mathbb{K}$ it is verified that
	\begin{enumerate}
		\item $\mathbf{u}+\mathbf{v}\in V$
		\item $\mathbf{u}+\mathbf{v}=\mathbf{v}+\mathbf{u}$
		\item $(\mathbf{u}+\mathbf{v})+\mathbf{w}=\mathbf{u}+(\mathbf{v}+\mathbf{w})$
		\item $\exists \mathbf{0}\in V | \mathbf{u}+\mathbf{0}=\mathbf{u}$
		\item $\forall\mathbf{u}\in V \quad \exists ! \mathbf{w}\in V | \mathbf{u}+\mathbf{w}=\mathbf{0}$ (we normally write $\mathbf{w}=-\mathbf{u}$)
		\item $c\mathbf{v}\in V$
		\item $c(\mathbf{u}+\mathbf{v})=c\mathbf{u}+c\mathbf{v}$
		\item $(c+d)\mathbf{u}=c\mathbf{u}+d\mathbf{u}$
		\item $c(d\mathbf{u})=(cd)\mathbf{u}$
		\item $1\mathbf{u}=\mathbf{u}$
	\end{enumerate}
\end{ceudef}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Vector space} 
\begin{ceuthm}[Other properties]
	\begin{enumerate}
		\setcounter{enumi}{10}
		\item $0\mathbf{u}=\mathbf{0}$
		\item $c\mathbf{0}=\mathbf{0}$
		\item $-\mathbf{u}=(-1)\mathbf{u}$
	\end{enumerate}
\end{ceuthm}
Watch out that $0$ and $1$ refer respectively to the neutral elements of the sum and multiplication in the field $\mathbb{K}$. $-1$ is the opposite number in $\mathbb{K}$ of $1$ with respect to the sum of scalars.

\begin{exampleblock}{Example: $\mathbb{R}^n$}
	\begin{columns}
		\begin{column}{4cm}
			\includegraphics[height=3cm]{figR3.png}
		\end{column}
		\begin{column}{6cm}
			$\mathbb{R}^n$ is a vector space of finite dimension for any $n$. As well as $\mathbb{C}^n$.
		\end{column}
	\end{columns}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Vector space} 
\begin{exampleblock}{Example: Force fields in Physics}
	Consider $V$ to be the set of all arrows (directed line segments) in 3D. Two arrows are regarded as equal if they have the same length and direction.
	Define the sum of arrows and the multiplication by a scalar as shown below:
	\begin{center}
		\includegraphics[height=3cm]{figVectorSum.jpg}
		\includegraphics[height=3cm]{figVectorMultiplication.jpg}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Vector space} 
\begin{exampleblock}{Example: Force fields in Physics (continued)}
	Here is an example of the application of some of the properties of vector spaces
	\begin{center}
		\includegraphics[width=9cm]{figSomeArrowProperties.jpg}
	\end{center}
	With a force field we may define at every point in 3D space, which is the force that is applied.
	\begin{columns}
		\begin{column}{5cm}
			\href{http://mathinsight.org/conservative_vector_field_introduction}{Conservative force field}
		\end{column}
		\begin{column}{5cm}
			\includegraphics[height=3cm]{figVectorField.png}
		\end{column}
	\end{columns}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Vector space} 
\begin{exampleblock}{Example: Infinite sequences}
	Let $S$ be the set of all infinite sequences of numbers
	\begin{center}
		$\mathbf{u}=(...,u_{-2},u_{-1},u_0,u_1,u_2,...)$
	\end{center}
	Define the sum among two vectors and the multiplication by a scalar as
	\begin{center}
		$\mathbf{u}+\mathbf{v}=(...,u_{-2}+v_{-2},u_{-1}+v_{-1},u_0+v_0,u_1+v_1,u_2+v_2,...)$\\
		$c\mathbf{u}=(...,cu_{-2},cu_{-1},cu_0,cu_1,cu_2,...)$
	\end{center}

	\begin{columns}
		\begin{column}{3cm}
			Digital Signal Processing
		\end{column}
		\begin{column}{7cm}
			\includegraphics[height=4cm]{figDSP.png}
		\end{column}
	\end{columns}
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Vector space} 
\begin{exampleblock}{Example: Polynomials of degree at most $n$ ($\mathbb{P}_n$)}
	Let $\mathbb{P}_n$ be the set of all polynomials of degree at most $n$
	\begin{center}
		$u(x)=u_0+u_1x+u_2x^2+...+u_nx^n$
	\end{center}
	Define the sum among two vectors and the multiplication by a scalar as
	\begin{center}
		$(u+v)(x)=(u_0+v_0)+(u_1+v_1)x+(u_2+v_2)x^2+...+(u_n+v_n)x^n$\\
		$(cu)(x)=cu_0+cu_1x+cu_2x^2+...+cu_nx^n$
	\end{center}

	\begin{columns}
		\begin{column}{3cm}
			Legendre polynomials
		\end{column}
		\begin{column}{7cm}
			\includegraphics[height=4cm]{figLegendrePolynomials.png}
		\end{column}
	\end{columns}
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Vector space} 
\begin{exampleblock}{Example: Set of real functions defined in some domain}
	Let $\mathbb{F}$ be the set of all real valued functions defined in some domain ($f:D \rightarrow \mathbb{R}$)
	Define the sum among two vectors and the multiplication by a scalar as
	\begin{center}
		$(u+v)(x)=u(x)+v(x)$\\
		$(cu)(x)=cu(x)$
	\end{center}

	\begin{columns}
		\begin{column}{4cm}
		  Ex: $u(x)=3+x$\\
			Ex: $v(x)=\sin x$\\
			Ex: Zernike polynomials
		\end{column}
		\begin{column}{6cm}
			\includegraphics[height=4.5cm]{figZernikePolynomials.png}
		\end{column}
	\end{columns}
	
\end{exampleblock}

\end{frame}

% ==============================================
\subsection{Vector subspace (a)} 
\Outline

\begin{frame}\frametitle{Vector subspace} 
Sometimes we don't need to deal with the whole vector space, but only a part of it. It would be nice if it also has the space properties.
\begin{ceudef}[Vector subspace]
	Let $V$ be a vector space, and $H\subseteq V$ a part of it. $H$ is a \textbf{vector subspace} iff
	\begin{enumerate}[a)]
		\item $\mathbf{0}\in H$
		\item $\forall\mathbf{u},\mathbf{v}\in H \quad \mathbf{u}+\mathbf{v}\in H$ ($H$ is closed with respect to sum)
		\item $\forall\mathbf{u}\in H, \; \forall c\in \mathbb{K} \quad c\mathbf{u}\in H$ ($H$ is closed with respect to scalar multiplication)
	\end{enumerate}
\end{ceudef}

\begin{exampleblock}{Example}
	$H=\{\mathbf{0}\}$ is a subspace.
\end{exampleblock}

\begin{exampleblock}{Example}
	The vector space of polynomials (of any degree), $\mathbb{P} \subset \mathbb{F}(\mathbb{R})$, is a vector subspace of the vector space of real valued functions defined over $\mathbb{R}$ ($\mathbb{F}(\mathbb{R})=\{f:\mathbb{R}\rightarrow\mathbb{R}\}$).
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Vector subspace} 
\begin{exampleblock}{Example}
	$H=\mathbb{R}^2$ is not a subspace of $\mathbb{R}^3$ because $\mathbb{R}^2\not\subset\mathbb{R}^3$, for instance, the vector $\mathbf{u}=\begin{pmatrix}1\\2\end{pmatrix}\in\mathbb{R}^2$, but $\mathbf{u}\notin\mathbb{R}^3$.
\end{exampleblock}

\begin{exampleblock}{Example}
	$H=\mathbb{R}^2 \times \{0\}$ is a subspace of $\mathbb{R}^3$ because all vectors of $H$ are of the form $\mathbf{u}=\begin{pmatrix}x_1\\x_2\\0\end{pmatrix}\in\mathbb{R}^3$. It is obvious that $H$ ``looks like'' $\mathbb{R}^2$. This resemblance is mathematically called \textbf{isomorphism}.
\end{exampleblock}

\begin{exampleblock}{Example}
	Any plane in 3D passing through the origin is a subspace of $\mathbb{R}^3$.\\
	Any plane in 3D \underline{not} passing through the origin is \underline{not} a subspace of $\mathbb{R}^3$, because $\mathbf{0}$ does not belong to the plane.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Vector subspace} 
\begin{ceuthm}
	If $H$ is a vector subspace, then $H$ is a vector space.\\
	\underline{\textit{Proof}}\\
		\hspace{0.5cm}a) $\Rightarrow$ 4\\
			\hspace{1cm}$a\equiv \mathbf{0}\in H$\\
			\hspace{1cm}$4\equiv \exists \mathbf{0}\in V | \mathbf{u}+\mathbf{0}=\mathbf{u}$\\
		\hspace{0.5cm}b) $\Rightarrow$ 1\\
			\hspace{1cm}$b\equiv \forall\mathbf{u},\mathbf{v}\in H \quad \mathbf{u}+\mathbf{v}\in H$\\
			\hspace{1cm}$1\equiv \mathbf{u}+\mathbf{v}\in V$\\
		\hspace{0.5cm}Since $H\subset V$ and thanks to b) $\Rightarrow$ 2,3,7,8,9,10\\
			\hspace{1cm}$2\equiv \mathbf{u}+\mathbf{v}=\mathbf{v}+\mathbf{u}$\\
			\hspace{1cm}$3\equiv (\mathbf{u}+\mathbf{v})+\mathbf{w}=\mathbf{u}+(\mathbf{v}+\mathbf{w})$\\
			\hspace{1cm}$7\equiv c(\mathbf{u}+\mathbf{v})=c\mathbf{u}+c\mathbf{v}$\\
			\hspace{1cm}$8\equiv (c+d)\mathbf{u}=c\mathbf{u}+d\mathbf{u}$\\
			\hspace{1cm}$9\equiv c(d\mathbf{u})=(cd)\mathbf{u}$\\
			\hspace{1cm}$10\equiv 1\mathbf{u}=\mathbf{u}$\\
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Vector subspace} 
\begin{block}{}
	\underline{\textit{Proof (continued)}}\\
		\hspace{0.5cm}c) $\Rightarrow$ 6\\
			\hspace{1cm}$c\equiv \forall\mathbf{u}\in H, \; \forall c\in \mathbb{K} \quad c\mathbf{u}\in H$\\
			\hspace{1cm}$6\equiv c\mathbf{v}\in V$\\
		\hspace{0.5cm}Proof of 5\\
			\hspace{1cm}Since $H$ is a subset of $V$, we know that for every $\mathbf{u}\in H$ there exists \\
			\hspace{1cm}a unique $\mathbf{w}\in V | \mathbf{u}+\mathbf{w}=\mathbf{0}$. The problem is whether\\
			\hspace{1cm}or not $\mathbf{w}$ is in $H$. We also know that $\mathbf{w}=(-1)\mathbf{u}$, and \\
			\hspace{1cm}by c), $\mathbf{w}\in H$. \\
		(q.e.d.)
\end{block}

\end{frame}

% ==============================================
\subsection{Subspace spanned by a set of vectors (a)} 
\Outline

\begin{frame}\frametitle{Subspace spanned by a set of vectors} 
\begin{exampleblock}{Example}
	Let $\mathbf{v}_1,\mathbf{v}_2\in V$ be two vectors of a vector space, $V$. The subset
	\begin{center}
		$H=\mathrm{Span}\{\mathbf{v}_1,\mathbf{v}_2\}$
	\end{center}
	is a subspace of $V$.\\
	\underline{\textit{Proof}}\\
	Any vector of $H$ is of the form $\mathbf{v}=\lambda_1\mathbf{v}_1+\lambda_2\mathbf{v}_2$ for some $\lambda_1,\lambda_2\in\mathbb{K}$.
	\hspace{0.25cm}
		\parbox{11cm}{
			\underline{\textit{Proof a) $\mathbf{0}\in H$}}\\
			\leftskip5mm Simply by setting $\lambda_1=\lambda_2=0$, we get $\mathbf{0}\in H$
		}
	\hspace{0.25cm}
		\parbox{11cm}{
			\underline{\textit{Proof b) $\mathbf{u}+\mathbf{v}\in H$}}\\
			\leftskip5mm Let $\mathbf{u},\mathbf{v}\in H\Rightarrow \left. \begin{array}{c}\mathbf{u}=\lambda_{1u}\mathbf{v}_1+\lambda_{2u}\mathbf{v}_2\\
					                        \mathbf{v}=\lambda_{1v}\mathbf{v}_1+\lambda_{2v}\mathbf{v}_2\end{array}
					 \right\}\Rightarrow$
				\begin{center}
					$\begin{array}{rcl}
						\mathbf{u}+\mathbf{v}&=&(\lambda_{1u}\mathbf{v}_1+\lambda_{2u}\mathbf{v}_2)+(\lambda_{1v}\mathbf{v}_1+\lambda_{2v}\mathbf{v}_2)\\
						&=&(\lambda_{1u}+\lambda_{1v})\mathbf{v}_1+(\lambda_{2u}+\lambda_{2v})\mathbf{v}_2 \in H\end{array}$
				\end{center}
		}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Subspace spanned by a set of vectors} 
\begin{exampleblock}{}
	\hspace{0.25cm}
		\parbox{11cm}{
			\underline{\textit{Proof c) $c\mathbf{u}\in H$}}\\
			\leftskip5mm Let $\mathbf{u}\in H\Rightarrow$
				\begin{center}
					$\mathbf{u}=\lambda_{1}\mathbf{v}_1+\lambda_{2}\mathbf{v}_2 \Rightarrow
					c\mathbf{u}=c(\lambda_{1}\mathbf{v}_1+\lambda_{2}\mathbf{v}_2)=c\lambda_{1}\mathbf{v}_1+c\lambda_{2}\mathbf{v}_2 \in H$
				\end{center}
		}
\end{exampleblock}

\begin{ceuthm}
	Let $\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_p\in V$ be $p$ vectors of a vector space, $V$. The subset
	\begin{center}
		$H=\mathrm{Span}\{\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_p\}$
	\end{center}
	is a subspace of $V$.\\
	\underline{\textit{Proof}}\\
	Analogous to the previous example.
	\label{thm:subspacespanned}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Subspace spanned by a set of vectors} 
\begin{exampleblock}{Example}
	Consider the set of vectors $\mathbb{R}^4\supset H=\left\{(a-3b,b-a,a,b)\; \forall a,b\in\mathbb{R}\right\}$. Is it a vector subspace?\\
	\underline{\textit{Solution}}\\
	All vectors of $H$ can be written as
	\begin{center}
		$H\ni\mathbf{u}=\begin{pmatrix}a-3b\\b-a\\a\\b\end{pmatrix}=a\begin{pmatrix}1\\-1\\1\\0\end{pmatrix}+b\begin{pmatrix}-3\\1\\0\\1\end{pmatrix}$
	\end{center}
	Therefore, $H=\mathrm{Span}\{(1,-1,1,0),(-3,1,0,1)\}$ and by the previous theorem, it is a vector subspace.
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 4, Section 1:
	\begin{itemize}
		\item 4.1.1
		\item 4.1.4
		\item 4.1.5
		\item 4.1.6
		\item 4.1.19
		\item 4.1.32
		\item 4.1.37 (computer)
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Null space and column space of a matrix (b)} 
\Outline

\begin{frame}\frametitle{Null space of a matrix} 
\begin{exampleblock}{Example}
	Consider the matrix
	\begin{center}
		$A=\begin{pmatrix} 1 & -3 & -2 \\ -5 & 9 & 1 \end{pmatrix}$
	\end{center}
	The point $\mathbf{x}=(5,3,-2)$ has the property that $A\mathbf{x}=\mathbf{0}$.
\end{exampleblock}

\begin{ceudef}[Null space]
	The \textbf{null space of a matrix} $A\in\mathcal{M}_{m\times n}$ is the set of vectors
	\begin{center}
		$\mathrm{Nul}\{A\}=\{\mathbf{x}\in\mathbb{R}^n | A\mathbf{x}=\mathbf{0}\}$\\
		\includegraphics[scale=0.4]{figNullSpace.jpg}
	\end{center}
\end{ceudef}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Null space of a matrix} 
\begin{exampleblock}{Example (continued)}
	\begin{center}
		$\left(\begin{array}{rrr|r} 1 & -3 & -2 & 0 \\ -5 & 9 & 1 & 0\end{array}\right)\sim
		 \left(\begin{array}{rrr|r} 1 & 0 & \frac{5}{2} & 0 \\  0 & 1 & \frac{3}{2} & 0\end{array}\right)$
	\end{center}
	Therefore \begin{center}$\mathrm{Nul}\{A\}=\{(-\frac{5}{2}x_3,-\frac{3}{2}x_3,x_3) \forall x_3\in\mathbb{R}\}$\end{center}
	The previous example ($\mathbf{x}=(5,3,-2)$) is the point we obtain for $x_3=-2$.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Null space of a matrix} 
\begin{ceuthm}
	$\mathrm{Nul}\{A\}$ is a vector subspace of $\mathbb{R}^n$.\\
\underline{\textit{Proof}}\\
	It is obvious that $\mathrm{Nul}\{A\}\subseteq\mathbb{R}^n$ because $A$ has $n$ columns
	\hspace{0.25cm}
		\parbox{11cm}{
			\underline{\textit{Proof a) $\mathbf{0}\in \mathrm{Nul}\{A\}$}}\\
			\leftskip5mm \hspace{0.5cm} $A\mathbf{0}_n=\mathbf{0}_m \Rightarrow \mathbf{0}_n\in \mathrm{Nul}\{A\}$
		}
	\hspace{0.25cm}
		\parbox{11cm}{
			\underline{\textit{Proof b) $\mathbf{u}+\mathbf{v}\in \mathrm{Nul}\{A\}$}}\\
			\leftskip5mm \hspace{0.5cm} Let $\mathbf{u},\mathbf{v}\in \mathrm{Nul}\{A\}\Rightarrow \left. \begin{array}{c}A\mathbf{u}=\mathbf{0}\\
					                        A\mathbf{v}=\mathbf{0}\end{array}
					 \right\}\Rightarrow$
				\begin{center}
					$A(\mathbf{u}+\mathbf{v})=A\mathbf{u}+A\mathbf{v}=\mathbf{0}+\mathbf{0}=\mathbf{0}\Rightarrow \mathbf{u}+\mathbf{v}\in \mathrm{Nul}\{A\}$
				\end{center}
		}
	\hspace{0.25cm}
		\parbox{11cm}{
			\underline{\textit{Proof c) $c\mathbf{u}\in \mathrm{Nul}\{A\}$}}\\
			\leftskip5mm Let $\mathbf{u}\in \mathrm{Nul}\{A\}\Rightarrow$
				\begin{center}
					$A\mathbf{u}=\mathbf{0} \Rightarrow
					A(c\mathbf{u})=c(A\mathbf{u})=c\mathbf{0}=\mathbf{0}\Rightarrow c\mathbf{u}\in \mathrm{Nul}\{A\}$
				\end{center}
		}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Null space of a matrix} 
\begin{exampleblock}{Example}
	Let $H=\left\{(a,b,c,d)\in\mathbb{R}^4\left|\begin{array}{l}a-2b+5c=d\\c-a=b\end{array}\right.\right\}$. Is $H$ a vector subspace of $\mathbb{R}^4$?\\
\underline{\textit{Solution}}\\
	We may rewrite the conditions of belonging to $H$ as
	\begin{center}
		$\begin{array}{l}a-2b+5c=d\\c-a=b\end{array}\Rightarrow \left(\begin{array}{rrrr}1 &-2& 5 &-1\\-1&-1& 1 & 0\end{array}\right)\begin{pmatrix}a\\b\\c\\d\end{pmatrix}=\mathbf{0}$
	\end{center}
	and, thanks to the previous theorem, $H$ is a vector subspace of $\mathbb{R}^4$. 
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Null space of a matrix} 
\begin{exampleblock}{Example (continued)}
We can even provide a basis for $H$
	\begin{center}
		$\left(\begin{array}{rrrr}1 &-2& 5 &-1\\-1&-1& 1 & 0\end{array}\right)\sim
		 \left(\begin{array}{rrrr}1 & 0& 1 &-\frac{1}{3}\\ 0& 1& -2 & \frac{1}{3}\end{array}\right)$
	\end{center}
	The solutions of $A\mathbf{x}=\mathbf{0}$ are all points of the form
	\begin{center}
		$\begin{pmatrix}a\\b\\c\\d\end{pmatrix}=\begin{pmatrix}-c+\frac{1}{3}d\\2c-\frac{1}{3}d\\c\\d\end{pmatrix}=
		   c\begin{pmatrix}-1\\2\\1\\0\end{pmatrix}+\frac{d}{3}\begin{pmatrix}1\\-1\\0\\3\end{pmatrix}$
	\end{center}
	Consequently $H=\mathrm{Span}\{(-1,2,1,0),(1,-1,0,3)\}$.
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Column space of a matrix} 
\begin{ceudef}[Column space]
	Let $A\in\mathcal{M}_{m\times n}$ a matrix and $\mathbf{a}_i \in\mathbb{R}^m$ ($i=1,2,...n$) its columns. The \textbf{column space of the matrix} $A$ is defined as
	\begin{center}
		$\mathrm{Col}\{A\}=\mathrm{Span}\{\mathbf{a}_1,\mathbf{a}_2,...,\mathbf{a}_n\}=\{\mathbf{b}\in\mathbb{R}^m | A\mathbf{x}=\mathbf{b} \textrm{ for some }\mathbf{x}\in\mathbb{R}^n\}$\\
	\end{center}
\end{ceudef}

\begin{ceuthm}
	The column space of a matrix is a subspace of $\mathbb{R}^m$.\\
	\underline{\textit{Proof}}\\
		$\mathrm{Col}\{A\}$ is a set generated by a number of vectors and by Theorem \ref{thm:subspacespanned} it is a subspace of $\mathbb{R}^m$.
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Column space of a matrix} 
\begin{exampleblock}{Example}
	Find a matrix $A$ such that $\mathrm{Col}\{A\}=\{(6a-b,a+b,-7a) \forall a,b\in\mathbb{R}\}$\\
	\underline{\textit{Solution}}\\
	We can express the points in $\mathrm{Col}\{A\}$ as
	\begin{center}
		$\mathrm{Col}\{A\}\ni \mathbf{x}=\begin{pmatrix}6a-b\\a+b\\-7a\end{pmatrix}=a\begin{pmatrix}6\\1\\-7\end{pmatrix}+b\begin{pmatrix}-1\\1\\0\end{pmatrix}$
	\end{center}
	Therefore, $\mathrm{Col}\{A\}=\mathrm{Span}\{(6,1,-7),(-1,1,0)\}$. That is, these must be the two columns of $A$
	\begin{center}
		$A=\begin{pmatrix}6 & -1 \\1 & 1\\-7& 0\end{pmatrix}$
	\end{center}
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Comparison between the Null and the Column spaces} 
\begin{center}
	\includegraphics[height=8cm]{figNullColSpaceComparison.jpg}
\end{center}
\end{frame}

% ==============================================
\subsection{Kernel and range of a linear transformation (b)} 
\Outline

\begin{frame}\frametitle{Linear transformation}
We have said that $T(\mathbf{x})=A\mathbf{x}$ is a linear transformation, but it is not the only one.

\begin{ceudef}[Linear transformation]
	The transformation $T:V\rightarrow W$ between two vectors spaces $V$ and $W$ is a rule that for each vector $\mathbf{v}\in V$ assigns a unique
	vector $\mathbf{w}=T(\mathbf{v})\in W$, such that
	\begin{enumerate}
		\item $T(\mathbf{v}_1+\mathbf{v}_2)=T(\mathbf{v}_1)+T(\mathbf{v}_2)\quad \forall \mathbf{v}_1,\mathbf{v}_2\in V$
		\item $T(c\mathbf{v})=cT(\mathbf{v})\quad \forall \mathbf{v}\in V,\forall c\in\mathbb{K}$
	\end{enumerate}
\end{ceudef}

\begin{exampleblock}{Example}
	For a matrix $A\in\mathcal{M}_{m\times n}$, we have that
	\begin{center}
		$\begin{array}{rcl}T:\mathbb{R}^n &\rightarrow&\mathbb{R}^m\\ \mathbf{x} & \rightarrow & A\mathbf{x}\end{array}$
	\end{center}
	is a linear transformation (we can easily verify that $T$ meets the two required conditions).
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear transformation}
\begin{exampleblock}{Example}
	Consider the space of all continuous, real-valued functions defined over $\mathbb{R}$ all of whose derivatives are also continuous. We will refer
	to this space as $C^\infty(\mathbb{R})$. For instance, all polynomials belong to this space, as well as the $\sin$ and $\cos$ functions. It can be proved that
	$C^\infty(\mathbb{R})$ is a vector space.
	
	The transformation that assigns to each function in $C^\infty(\mathbb{R})$ its derivative,
	\begin{center}
		$\begin{array}{rcl}D:C^\infty(\mathbb{R}) &\rightarrow&C^\infty(\mathbb{R})\\ f & \rightarrow & D(f)\end{array}$
	\end{center}
	is a linear transformation.\\
	\underline{\textit{Proof}}\\
	\begin{enumerate}
		\item $D(f+g)=D(f)+D(g)$
		\item $D(cf)=cD(f)$
	\end{enumerate}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Kernel and range of transformation}
\begin{ceudef}[Kernel (Núcleo)]
	The \textbf{kernel of a transformation} $T$ is the set of all vectors such that 
	\begin{center}
		$\mathrm{Ker}\{T\}=\left\{\mathbf{v}\in V\left| T(\mathbf{v})=\mathbf{0}\right.\right\}$
	\end{center}
\end{ceudef}
\begin{ceudef}[Range (Imagen)]
	The \textbf{range of a transformation} $T$ is the set of all vectors such that 
	\begin{center}
		$\mathrm{Range}\{T\}=\left\{\mathbf{w}\in W\left| \exists \mathbf{v}\in V \; T(\mathbf{v})=\mathbf{w}\right.\right\}$
	\end{center}
\end{ceudef}
\begin{center}
	\includegraphics[height=3cm]{figKernel.jpg}
\end{center}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Kernel and range of transformation}
\begin{exampleblock}{Example (continued)}
	$\mathrm{Ker}\{T\}=\mathrm{Nul}\{A\}$\\
	$\mathrm{Ker}\{D\}=\{f(x)=c\}$ because $D(c)=0$\\
\end{exampleblock}

\begin{ceuthm}
	If $T(\mathbf{x})=A\mathbf{x}$, then
	\begin{center}
		$\begin{array}{rcl}\mathrm{Ker}\{T\}&=&\mathrm{Nul}\{A\}\\
		 \mathrm{Range}\{T\}&=&\mathrm{Col}\{A\}\end{array}$
	\end{center}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 4, Section 2:
	\begin{itemize}
		\item 4.2.3
		\item 4.2.9
		\item 4.2.11
		\item 4.2.30
		\item 4.2.31
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Linearly independent sets and bases (b)} 
\Outline

\begin{frame}\frametitle{Linear independence}
\begin{ceudef}[Linear independence]
	A set of vectors $\left\{\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_p\right\}$ is \textbf{linearly independent} iff the only solution to the equation
	\begin{center}
		$c_1\mathbf{v}_1+c_2\mathbf{v}_2+...+c_p\mathbf{v}_p=\mathbf{0}$
	\end{center}
	is the trivial solution ($c_1=c_2=...=c_p=0$). The set is linearly dependent if there exists another solution to the equation.	
\end{ceudef}
Watch out that we cannot simply put all vectors as columns of a matrix $A$ and solve $A\mathbf{c}=\mathbf{0}$ because this is only
valid for vectors in $\mathbb{R}^n$, but it is not valid for any vector space.

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear independence}
\begin{exampleblock}{Example}
	\begin{itemize}
		\item $\left\{\mathbf{v}_1\right\}$ is linearly dependent if $\mathbf{v}_1=\mathbf{0}$.
		\item $\left\{\mathbf{v}_1,\mathbf{v}_2\right\}$ is linearly dependent if $\mathbf{v}_2=c\mathbf{v}_1$.
		\item $\left\{\mathbf{0}, \mathbf{v}_1,\mathbf{v}_2, ...,\mathbf{v}_p\right\}$ is linearly dependent.
	\end{itemize}
\end{exampleblock}

\begin{exampleblock}{Example}
	In the vector space of continuous functions over $\mathbb{R}$, $C(\mathbb{R})$, the vectors $f_1(x)=\sin x$ and $f_2(x)=\cos x$ are independent because
	\begin{center}
		$f_2(x)\neq c f_1(x)$\\
		\includegraphics[scale=0.3]{figSineCosine.png}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear independence}
\begin{ceuthm}
	A set of vectors $\left\{\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_p\right\}$, with $\mathbf{v}_1\neq\mathbf{0}$ is \textbf{linearly dependent} if any of the vectors $\mathbf{v}_j$ ($j>1$) is a linear combination of the previous ones $\left\{\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_{j-1}\right\}$.
\end{ceuthm}
\begin{exampleblock}{Example}
	In the vector space of polynomials, consider the vectors $p_0(x)=1$, $p_1(x)=x$, $p_2(x)=4-x$. The set $\{p_0(x),p_1(x),p_2(x)\}$ is linearly
  dependent because
	\begin{center}
		$p_2(x)=4p_0(x)-p_1(x) \Rightarrow p_1(x)-4p_0(x)+p_2(x)=0$
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear independence}
\begin{exampleblock}{Example}
		In the vector space of continuous functions, consider the vectors $f_1(x)=\sin(x)\cos(x)$ and $f_2(x)=\sin(2x)$. The set $\{f_1(x),f_2(x)\}$ is linearly
		dependent because $f_2(x)=2f_1(x)$\\
		\begin{columns}
		\begin{column}{4cm}
				MATLAB:\\ {\color{blue}\texttt{x=[-pi:0.001:pi]\\
		f1=sin(x).*cos(x);\\
		f2=sin(2*x);\\
		plot(x,f1,x,f2)\\
		}}
		\end{column}
		\begin{column}{7cm}
			\includegraphics[height=5cm]{figSineCosine2.jpg}
		\end{column}
		\end{columns}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Basis of a subspace}
\begin{ceudef}[Basis of a subspace]
	A set of vectors $B=\left\{\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_p\right\}$ is a basis of the vector subspace $H$ iff
	\begin{enumerate}
		\item $B$ is a linearly independent set of vectors
		\item $H=\mathrm{Span}\{B\}$
	\end{enumerate}
	In other words, a basis is a non-redundant set of vectors that span $H$.
\end{ceudef}

\begin{exampleblock}{Example}
		Let $A$ be an invertible matrix. By Theorem 5.1 and 11.5 of Chapter 3 (the invertible matrix theorem), we know that the columns of $A$ span $\mathbb{R}^n$ and that they 
		are linearly independent. Consequently, the columns of $A$ are a basis of $\mathbb{R}^n$.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Basis of a subspace}
\begin{exampleblock}{Example}
		The standard basis of $\mathbb{R}^n$ is formed by the columns of $I_n$
		\begin{center}
			\begin{tabular}{cccc}
				$\mathbf{e}_1=\begin{pmatrix}1\\0\\\vdots\\0\end{pmatrix}$ &
				$\mathbf{e}_2=\begin{pmatrix}0\\1\\\vdots\\0\end{pmatrix}$ &
				$\dots$ &
				$\mathbf{e}_n=\begin{pmatrix}0\\0\\\vdots\\1\end{pmatrix}$
			\end{tabular}
		\end{center}
\end{exampleblock}

\begin{exampleblock}{Example}
		Let $\mathbf{v}_1=(3,0,-6)$, $\mathbf{v}_2=(-4,1,7)$, $\mathbf{v}_3=(-2,1,5)$. Is $\{\mathbf{v}_1,\mathbf{v}_2,\mathbf{v}_3\}$ a basis of $\mathbb{R}^3$?\\
		\underline{\textit{Solution}}\\
		This question is the same as whether $A$ is invertible with
		\begin{center}
			$A=\left(\begin{array}{rrr}3 & -4 & -2 \\ 0 & 1 & 1 \\ -6 & 7 & 5 \end{array}\right) \Rightarrow |A|=6 \Rightarrow \exists A^{-1}$
		\end{center}
		Because $A$ is invertible, we have that $\{\mathbf{v}_1,\mathbf{v}_2,\mathbf{v}_3\}$ is a basis of $\mathbb{R}^3$.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Basis of a subspace}
\begin{exampleblock}{Example: DNA Structure}
	In 1953, \href{http://en.wikipedia.org/wiki/Rosalind_Franklin}{Rosalind Franklin}, \href{http://en.wikipedia.org/wiki/James_Watson}{James Watson} and \href{http://en.wikipedia.org/wiki/Francis_Crick}{Francis Crick} determined the 3D structure of DNA using data coming from X-ray diffraction of crystallized DNA. Watson and Crick received
	the Nobel prize in physiology and medicine in 1962 (Franklin died 1958).
	\begin{center}
		\includegraphics[height=5cm]{figDNAStructure.png}
		\includegraphics[height=3.5cm]{figDNAcrystal.jpg}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Basis of a subspace}
\begin{exampleblock}{Example: DNA Structure (continued)}
	Three-dimensional crystals repeat a certain motif all over the space following a crystal lattice. The vectors that define the crystal lattice are a basis of $\mathbb{R}^3$
	\begin{center}
		\includegraphics[height=6cm]{figCrystallography.png}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Basis of a subspace}
\begin{exampleblock}{Example}
	$B=\{1,x,x^2,x^3,...\}$ is the standard basis of the vector space of polynomials $\mathbb{P}$.\\
	\underline{\textit{Proof}}\\
	\begin{enumerate}
		\item $B$ is linearly independent:\\
				  \begin{center}
						$\forall x\in\mathbb{R}\quad c_01+c_1x+c_2x^2+c_3x^3+...=0\Rightarrow c_0=c_1=c_2=...=0$
					\end{center}
					The only way that a polynomial of arbitrary degree is 0 for all values of $x$ is that the coefficients of the polynomial are all 0.
		\item $\mathbb{P}=\mathrm{Span}\{B\}$:\\
				  It is obvious that any polynomial can be written as a linear combination of elements of $B$ (in fact, this is the way we normally write polynomials).
	\end{enumerate}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Basis of a subspace}
\begin{exampleblock}{Example}
	$H=\mathrm{Span}\{\mathbf{v}_1,\mathbf{v}_2,\mathbf{v}_3\}$ with $\mathbf{v}_1=(0,2,-1)$, $\mathbf{v}_2=(2,2,0)$, $\mathbf{v}_3=(6,16,-5)$. Find a basis of $H$\\
	\underline{\textit{Solution}}\\
	All vectors in $H$ are of the form:
	\begin{center}
		$H\ni \mathbf{x}=c_1\mathbf{v}_1+c_2\mathbf{v}_2+c_3\mathbf{v}_3$
	\end{center}
	We realize that $\mathbf{v}_3=5\mathbf{v}_1+3\mathbf{v}_2$, therefore, $\mathbf{v}_3$ is redundant:
	\begin{center}
		$\begin{array}{rcl}H\ni \mathbf{x}&=&c_1\mathbf{v}_1+c_2\mathbf{v}_2+c_3(5\mathbf{v}_1+3\mathbf{v}_2)\\
		                                  &=&(c_1+5c_3)\mathbf{v}_1+(c_2+3c_3)\mathbf{v}_2\\
																			&=&c_1'\mathbf{v}_1+c_2'\mathbf{v}_2\end{array}$
	\end{center}
	It suffices to construct our basis with $\mathbf{v}_1$ and $\mathbf{v}_2$.
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Basis of a subspace}
\begin{ceuthm}[Spanning set theorem (conjunto generador)]
	Let $S=\{\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_p\}$ be a set of vectors and $H=\mathrm{Span}\{S\}$. Then,
	\begin{enumerate}
		\item If $\mathbf{v}_k$ is a linear combination of the rest, then the set $S-\{\mathbf{v}_k\}$ still generates $H$.
		\item If $H\neq\{\mathbf{0}\}$, then some subset of $S$ is a basis of $H$.
	\end{enumerate}
	\underline{\textit{Proof}}\\
	\begin{enumerate}
		\item Assume that the linear combination that explains $\mathbf{v}_k$ is
				  \begin{center}
						$\mathbf{v}_k=a_1\mathbf{v}_1+...+a_{k-1}\mathbf{v}_{k-1}+a_{k+1}\mathbf{v}_{k+1}+...+a_p\mathbf{v}_p$
					\end{center}
					Consider any vector in $H$
				  \begin{center}
						$\begin{array}{rcl}\mathbf{x}&=&c_1\mathbf{v}_1+c_2\mathbf{v}_2+...+c_p\mathbf{v}_p\\
							&=&(c_1+c_ka_1)\mathbf{v}_1+...+(c_{k-1}+c_ka_{k-1})\mathbf{v}_{k-1}+\\
							& &(c_{k+1}+c_ka_{k+1})\mathbf{v}_{k+1}+...+(c_p+c_ka_p)\mathbf{v}_p\\
						\end{array}$
					\end{center}
					That is, we can express $\mathbf{x}$ without using $\mathbf{v}_k$.
		\item \underline{Step 1}: If $S$ is a linearly independent set, then $S$ is the basis of $H$.\\
		      \underline{Step 2}: If $S$ is not, using the previous point we can remove a vector to produce $S'$ that still generates $H$ (go to Step 1).
	\end{enumerate}
	
\end{ceuthm}

\end{frame}

% ==============================================
\subsection{Bases for $\mathrm{Nul}\{A\}$ and $\mathrm{Col}\{A\}$ (c)} 
\Outline

\begin{frame}\frametitle{Basis for $\mathrm{Nul}\{A\}$}
\begin{exampleblock}{Example}
	Let $A=\left(\begin{array}{rrrrr} -3&6&-1&1&-7\\1&-2&2&3&-1\\2&-4&5&8&-4\end{array}\right)$\\
	We solve the equation system $A\mathbf{x}=\mathbf{0}$ to find 
	\begin{center}
		$(A|\mathbf{0})\sim\left(\begin{array}{rrrrr|r} {\color{blue}1}&-2&{\color{blue}0}&-1&3&0\\
		{\color{blue}0}&0&{\color{blue}1}&2&-2&0\\{\color{blue}0}&0&{\color{blue}0}&0&0&0\end{array}\right)$
	\end{center}
	we have coloured the pivot columns, from which we learn
	\begin{center}
		$\begin{array}{l}
			x_1=2x_2+x_4-3x_5\\
			x_3=-2x_4+2x_5
		\end{array}\Rightarrow
		   \mathrm{Nul}\{A\} \ni \mathbf{x}=\begin{pmatrix}2x_2+x_4-3x_5\\x_2\\-2x_4+2x_5\\x_4\\x_5\end{pmatrix}$
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Basis for $\mathrm{Nul}\{A\}$}
\begin{exampleblock}{Example (continued)}
	\begin{center}
		   $\mathrm{Nul}\{A\} \ni \mathbf{x}=\begin{pmatrix}2x_2+x_4-3x_5\\x_2\\-2x_4+2x_5\\x_4\\x_5\end{pmatrix}=
				x_2\begin{pmatrix}2\\1\\0\\0\\0\end{pmatrix}+
				x_4\begin{pmatrix}1\\0\\-2\\1\\0\end{pmatrix}+
				x_5\begin{pmatrix}-3\\0\\2\\0\\1\end{pmatrix}$
	\end{center}
	Finally, a basis for $\mathrm{Nul}\{A\}$ is
	\begin{center}
		   $\mathrm{Nul}\{A\} =\mathrm{Span}\left\{
				\begin{pmatrix}2\\1\\0\\0\\0\end{pmatrix},
				\begin{pmatrix}1\\0\\-2\\1\\0\end{pmatrix},
				\begin{pmatrix}-3\\0\\2\\0\\1\end{pmatrix}\right\}$
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Basis for $\mathrm{Col}\{A\}$}
\begin{exampleblock}{Example}
	Consider $A$ as in the previous example. We had
	\begin{center}
		$A\sim\left(\begin{array}{rrrrr} {\color{blue}1}&-2&{\color{blue}0}&-1&3\\{\color{blue}0}&0&{\color{blue}1}&2&-2\\{\color{blue}0}&0&{\color{blue}0}&0&0\end{array}\right)=B$
	\end{center}
	Let's call this latter matrix $B$. Non-pivot columns of $B$ can be written as a linear combination of the pivot columns:
	\begin{center}
		$\begin{array}{rcl}
			\mathbf{b}_2&=&-2\mathbf{b}_1\\
			\mathbf{b}_4&=&-\mathbf{b}_1+2\mathbf{b}_3\\
			\mathbf{b}_5&=&3\mathbf{b}_1-2\mathbf{b}_3\\
		 \end{array}$
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Basis for $\mathrm{Col}\{A\}$}
\begin{exampleblock}{Example (continued)}
	Since row operations do not change the linear dependences among matrix columns, we can derive the same relationships for matrix $A$
	\begin{center}
		$\begin{array}{rcl}
			\mathbf{a}_2&=&-2\mathbf{a}_1\\
			\mathbf{a}_4&=&-\mathbf{a}_1+2\mathbf{a}_3\\
			\mathbf{a}_5&=&3\mathbf{a}_1-2\mathbf{a}_3\\
		 \end{array}$
	\end{center}
	Finally, a basis of $\mathrm{Col}\{A\}$ is $\{\mathbf{a}_1,\mathbf{a}_3\}$.
	\begin{center}
		$\mathrm{Col}\{A\}=\mathrm{Span}\left\{\mathbf{a}_1,\mathbf{a}_3\right\}=\mathrm{Span}\left\{\begin{pmatrix}-3\\1\\2\end{pmatrix},\begin{pmatrix}-1\\2\\5\end{pmatrix}\right\}$
	\end{center}
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Basis for $\mathrm{Col}\{A\}$}
\begin{ceuthm}
		The pivot columns of $A$ constitute a basis for $\mathrm{Col}\{A\}$.\\
		\underline{\textit{Proof}}\\
		Let $B$ be the reduced echelon form of $A$.
		\begin{enumerate}
			\item The pivot columns of $B$ form a linearly independent set because none of its elements
		can be expressed as a linear combination of the elements before each one of them.
			\item The dependence relationships among columns are not affected by row operations. Therefore, the corresponding
			pivot columns of $A$ are also linearly independent and, consequently, a basis of $\mathrm{Col}\{A\}$.
		\end{enumerate}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Two views of a basis}
\begin{block}{As small as possible, as large as possible}
	\begin{enumerate}
		\item The Spanning Set Theorem states that the basis is as small as possible as long as it spans the required subspace. 
		\item The basis has the maximum amount of vectors spanning the required subspace. If we add one more, the new set is not linearly independent.
	\end{enumerate}
\end{block}
\begin{exampleblock}{Example}
		\begin{itemize}
			\item $\{(1,0,0),(2,3,0)\}$ is a set of 2 linearly independent vectors. But it cannot span $\mathbb{R}^3$ because for this we need 3 vectors.
			\item $\{(1,0,0),(2,3,0),(4,5,6)\}$ is a set of 3 linearly independent vectors that spans $\mathbb{R}^3$, so it is a basis of $\mathbb{R}^3$.
			\item $\{(1,0,0),(2,3,0),(4,5,6),(7,8,9)\}$ is a set of 4 linearly dependent vectors that spans $\mathbb{R}^3$, so it cannot be a basis.
		\end{itemize}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 4, Section 3:
	\begin{itemize}
		\item 4.3.1
		\item 4.3.2
		\item 4.3.8
		\item 4.3.12
		\item 4.3.24
		\item 4.3.31
		\item 4.3.32
		\item 4.3.33
		\item 4.3.37 (computer)
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Coordinate system (c)} 
\Outline

\begin{frame}\frametitle{Coordinate system}
An important reason to assign a basis to a vector space $V$ is that it makes $V$ to ``behave'' as $\mathbb{R}^n$ through, what is called, a coordinate system.
\begin{ceuthm}[The unique representation theorem]
	Let $B=\{\mathbf{b}_1,\mathbf{b}_2,...,\mathbf{b}_n\}$ be a basis of the vector space $V$, and consider any vector $\mathbf{v}\in V$. There exists a unique set of scalars such that
	\begin{center}
		$\mathbf{v}=c_1\mathbf{b}_1+c_2\mathbf{b}_2+...+c_n\mathbf{b}_n$
	\end{center}
	\underline{\textit{Proof}}\\
	Let us assume that there exists another set of scalars such that
	\begin{center}
		$\mathbf{v}=c_1'\mathbf{b}_1+c_2'\mathbf{b}_2+...+c_n'\mathbf{b}_n$
	\end{center}
	Subtracting both equations we have
	\begin{center}
		$\mathbf{0}=(c_1-c_1')\mathbf{b}_1+(c_2-c_2')\mathbf{b}_2+...+(c_n-c_n')\mathbf{b}_n$
	\end{center}
	But since the vectors $\mathbf{b}_i$ form a basis and are linearly independent, it must be
	\begin{center}
		$(c_1-c_1')=(c_2-c_2')=...=(c_n-c_n')=0$
	\end{center}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Coordinate system}
\begin{block}{}
	\underline{\textit{Proof (continued)}}\\
	Finally, $c_1=c_1'$, $c_2=c_2'$, ..., $c_n=c_n'$ which is a contradiction with the hypothesis that there were two different sets of scalars representing the vector.
	Consequently, the set of scalars must be unique.
\end{block}

\begin{ceudef}[Coordinates]
	Let $B=\{\mathbf{b}_1,\mathbf{b}_2,...,\mathbf{b}_n\}$ be a basis of the vector space $V$, and consider any vector $\mathbf{v}\in V$. The \textbf{coordinates} of $\mathbf{v}$ in $B$ are the
	$c_i$ coefficients such that
	\begin{center}
		$\mathbf{v}=c_1\mathbf{b}_1+c_2\mathbf{b}_2+...+c_n\mathbf{b}_n \Rightarrow \left[\mathbf{v}\right]_B=\begin{pmatrix}c_1\\c_2\\\vdots\\c_n\end{pmatrix}$
	\end{center}
	The transformation $T:V\rightarrow \mathbb{R}^n$ such that $T(\mathbf{x})=\left[\mathbf{x}\right]_B$ is called the \textbf{coordinate mapping}.
\end{ceudef}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Coordinate system}
\begin{exampleblock}{Example}
	Let $B=\{(1,0),(1,2)\}$ be a basis of $\mathbb{R}^2$ and $\left[\mathbf{x}\right]_B=(-2,3)$, then
	\begin{center}
		$\mathbf{x}=-2\mathbf{b}_1+3\mathbf{b}_2=-2\begin{pmatrix}1\\0\end{pmatrix}+3\begin{pmatrix}1\\2\end{pmatrix}=\begin{pmatrix}1\\6\end{pmatrix}$
	\end{center}
	In fact $(1,6)$ are the coordinates of $\mathbf{x}$ in the standard basis $\{\mathbf{e}_1,\mathbf{e}_2\}$
	\begin{center}
		$\mathbf{x}=1\mathbf{e}_1+6\mathbf{e}_2=1\begin{pmatrix}1\\0\end{pmatrix}+6\begin{pmatrix}0\\1\end{pmatrix}=\begin{pmatrix}1\\6\end{pmatrix}$
	\end{center}
	That is, the point $\mathbf{x}$ does not change, but depending on the coordinate system employed, we ``see'' it with different coordinates.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Coordinate system}
\begin{exampleblock}{Example (continued)}
	\begin{center}
		\includegraphics[scale=0.4]{figCoordinates.jpg}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Coordinate system}
\begin{exampleblock}{Example: X-ray diffraction}
	In this figure we see how an X-ray diffraction pattern of a crystal is ``indexed''.
	\begin{center}
		\includegraphics[scale=0.7]{figCoordinatesDiffraction.jpg}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Coordinates in $\mathbb{R}^n$}
If we have a point $\mathbf{x}$ in $\mathbb{R}^n$ we can easily find its coordinates in any basis, as in the following example.
\begin{exampleblock}{Example}
	Let $\mathbf{x}=(4,5)$ and the basis $B=\{(2,1),(-1,1)\}$. We need to find $c_1$ and $c_2$ such that
	\begin{center}
		$\mathbf{x}=c_1\mathbf{b}_1+c_2\mathbf{b}_2 \Rightarrow
		 \begin{pmatrix}4\\5\end{pmatrix}=c_1\begin{pmatrix}2\\1\end{pmatrix}+c_2\begin{pmatrix}-1\\1\end{pmatrix}=\begin{pmatrix}2 & -1\\1&1\end{pmatrix}
			\begin{pmatrix}c_1\\c_2\end{pmatrix}$
	\end{center}
	\begin{columns}
		\begin{column}{5cm}
			From which we can easily derive that $c_1=3$ and $c_2=2$.
		\end{column}
		\begin{column}{6cm}
			\includegraphics[scale=0.3]{figCoordinates2.jpg}
		\end{column}
	\end{columns}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Change of basis}
\begin{block}{Change from the standard basis to an arbitrary basis}
	Note that the previous equation system is of the form
	\begin{center}
		$\mathbf{x}=P_B[\mathbf{x}]_B$
	\end{center}
	where $P_B$ is called the \textbf{change-of-coordinates matrix} and its columns are the vectors of the basis B (consequently, it is invertible). We find the coordinates of the vector $\mathbf{x}$ in the basis $B$ as
	\begin{center}
		$[\mathbf{x}]_B=P_B^{-1}\mathbf{x}$
	\end{center}
	
\end{block}

\begin{block}{Change between two arbitrary bases}
	Let's say we know the coordinates of a point in some basis, $B_1$, and we want to know its coordinates in some other basis, $B_2$. We may use
	\begin{center}
		$\mathbf{x}=P_{B_1}[\mathbf{x}]_{B_1}=P_{B_2}[\mathbf{x}]_{B_2} \Rightarrow [\mathbf{x}]_{B_2}=P_{B_2}^{-1}P_{B_1}[\mathbf{x}]_{B_1}$
	\end{center}
\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Coordinate mapping}
\begin{ceuthm}[The coordinate mapping is an isomorphism between $V$ and $\mathbb{R}^n$]
	The coordinate mapping is a bijective, linear transformation.
	\begin{center}
		\includegraphics[scale=0.35]{figCoordinateMapping.jpg}
	\end{center}
\end{ceuthm}
\begin{block}{Corollary}
	Since the coordinate mapping is a linear transformation it extends to linear combinations
	\begin{center}
		$\left[a_1\mathbf{u}_1+a_2\mathbf{u}_2+...+a_p\mathbf{u}_p\right]_B=a_1[\mathbf{u}_1]_B+a_2[\mathbf{u}_2]_B+...+a_p[\mathbf{u}_p]_B$
	\end{center}
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Coordinate mapping}
\begin{block}{Consequences}
		Any operation in $V$ can be performed in $\mathbb{R}^n$ and then go back to $V$.\\
		For spaces of functions, this opens a new door to analyze functions (signals, images, ...) in $\mathbb{R}^n$ using
		the appropriate basis: Fourier transform, wavelet transform, Discrete Cosine Transform, ...
		\begin{center}
			\includegraphics[scale=0.65]{figFourierTransform.jpg}
		\end{center}
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Coordinate mapping}
\begin{exampleblock}{Example}
	Consider the space of polynomials of degree at most 2, $\mathbb{P}_2$. Any polynomial in this space is of the form
	\begin{center}
		$p(t)=a_0+a_1t+a_2t^2$
	\end{center}
	If we choose the standard basis in $\mathbb{P}_2$ that is 
	\begin{center}
		$B=\{1,t,t^2\}$
	\end{center}
	Then, we have the coordinate mapping
	\begin{center}
		$T(p(t))=[p]_B=\begin{pmatrix}a_0\\a_1\\a_2\end{pmatrix}$
	\end{center}
	that is an isomorphism from $\mathbb{P}_2$ onto $\mathbb{R}^3$.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Coordinate mapping}
\begin{exampleblock}{Example (continued)}
	Now we can perform any reasoning in $\mathbb{P}_2$ by studying an analogous problem in $\mathbb{R}^3$. For instance, let's study if the following polynomials are linearly independent
	\begin{center}
		$\begin{array}{rclcl}
			p_1(t)&=&1+2t^2 &\Rightarrow& [p_1(t)]_B=(1,0,2)\\
			p_2(t)&=&4+t+5t^2 &\Rightarrow& [p_2(t)]_B=(4,1,5)\\
			p_3(t)&=&3+2t &\Rightarrow& [p_3(t)]_B=(3,2,0)\\
		\end{array}$
	\end{center}
	We simply need to see if the corresponding coordinates in $\mathbb{R}^3$ are linearly independent
	\begin{center}
		$\begin{pmatrix}1 & 4 & 3 \\ 0 & 1 & 2 \\ 2 & 5 & 0\end{pmatrix}\sim
		 \begin{pmatrix}1 & 0 & -5 \\ 0 & 1 & 2 \\ 0 & 0 & 0\end{pmatrix}$
	\end{center}
	Looking at the non-pivot columns we learn that
	\begin{center}
		$p_3(t)=-5p_1(t)+2p_2(t)$
	\end{center}
	Finally, we conclude that the 3 polynomials are not linearly independent.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Coordinate mapping}
\begin{exampleblock}{Example}
	Consider $\mathbf{v}_1=(3,6,2)$, $\mathbf{v}_2=(-1,0,1)$, $B=\{\mathbf{v}_1,\mathbf{v}_2\}$, and $H=\mathrm{Span}\{B\}$. $H$ is isomorphic to $\mathbb{R}^2$ (because its points
	have only 2 coordinates). For instance, the coordinates of $\mathbf{x}=(3,12,7)\in H$ are $[\mathbf{x}]_B=(2,3)$.
	\begin{center}
		\includegraphics[scale=0.4]{figIsomorphism.jpg}
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
% NOTE(review): this frame is an exact duplicate of the previous one — consider removing it.
\begin{frame}\frametitle{Coordinate mapping}
\begin{exampleblock}{Example}
	Consider $\mathbf{v}_1=(3,6,2)$, $\mathbf{v}_2=(-1,0,1)$, $B=\{\mathbf{v}_1,\mathbf{v}_2\}$, and $H=\mathrm{Span}\{B\}$. $H$ is isomorphic to $\mathbb{R}^2$ (because its points
	have only 2 coordinates). For instance, the coordinates of $\mathbf{x}=(3,12,7)\in H$ are $[\mathbf{x}]_B=(2,3)$.
	\begin{center}
		\includegraphics[scale=0.4]{figIsomorphism.jpg}
	\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 4, Section 4:
	\begin{columns}
		\begin{column}{4cm}
			\begin{itemize}
				\item 4.4.3
				\item 4.4.8
				\item 4.4.9
				\item 4.4.13
				\item 4.4.17
				\item 4.4.19
				\item 4.4.22
				\item 4.4.24
				\item 4.4.25
			\end{itemize}
		\end{column}
	\end{columns}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Dimension of a vector space (d)} 
\Outline

\begin{frame}\frametitle{Dimension of a vector space} 
We have just said that if the basis of a vector space $V$ has $n$ elements, then $V$ is isomorphic to $\mathbb{R}^n$. $n$ is a characteristic number of each space called the dimension.

\begin{ceuthm}
	Let $V$ be a vector space with a basis given by $B=\{\mathbf{b}_1,\mathbf{b}_2,...,\mathbf{b}_n\}$. Then, any subset of $V$ with more than $n$ elements is linearly dependent.\\
	\underline{\textit{Proof}}\\
	Let $S$ be a subset of $V$ with $p>n$ vectors
	\begin{center}
		$S=\{\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_p\}$
	\end{center}
	We now consider the set of coordinates of these vectors.
	\begin{center}
		$\left\{[\mathbf{v}_1]_B,[\mathbf{v}_2]_B,...,[\mathbf{v}_p]_B\right\}$
	\end{center}
	They are $p>n$ vectors in $\mathbb{R}^n$ and, therefore, necessarily linearly dependent. That is, there exist $c_1,c_2,...,c_p$, not all of them 0, such that
	\begin{center}
		$c_1[\mathbf{v}_1]_B+c_2[\mathbf{v}_2]_B+...+c_p[\mathbf{v}_p]_B=\mathbf{0} \in \mathbb{R}^n$
	\end{center}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Dimension of a vector space} 

\begin{block}{}
	\underline{\textit{Proof (continued)}}\\
	If we now exploit the fact that the coordinate mapping is linear, then we have
	\begin{center}
		$\left[c_1\mathbf{v}_1+c_2\mathbf{v}_2+...+c_p\mathbf{v}_p\right]_B=\mathbf{0} \in \mathbb{R}^n$
	\end{center}
	Finally, we make use of the fact that the coordinate mapping is bijective
	\begin{center}
		$c_1\mathbf{v}_1+c_2\mathbf{v}_2+...+c_p\mathbf{v}_p=\mathbf{0} \in V$
	\end{center}
	And, consequently, we have shown that the $p$ vectors in $S$ are linearly dependent.
\end{block}

\begin{ceuthm}
	If a basis of a vector space has $n$ vectors, then all other bases also have $n$ vectors.\\
	\underline{\textit{Proof}}\\
	Let $B_1$ be a basis with $n$ vectors of a vector space $V$, and let $B_2$ be another basis of $V$. By the previous theorem (applied to the basis $B_1$), $B_2$ has at most $n$ vectors. Let us now assume that
	$B_2$ has fewer than $n$ vectors; then, by the previous theorem (applied to the basis $B_2$), the $n$ vectors of $B_1$ would be linearly dependent and $B_1$ would not be a basis. This is a contradiction with the fact that $B_1$ is a basis and, consequently,
	$B_2$ cannot have fewer than $n$ vectors.
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Dimension of a vector space} 

\begin{ceudef}
	If the vector space $V$ is spanned by a finite set of vectors, then $V$ is \textbf{finite-dimensional} and its \textbf{dimension} ($\mathrm{dim}\{V\}$) is the number of elements of any of its bases. The dimension of $V=\{\mathbf{0}\}$ is 0. If $V$ is not generated by a finite set of vectors, then it is \textbf{infinite-dimensional}.
\end{ceudef}

\begin{exampleblock}{Example}
	$\mathrm{dim}\{\mathbb{R}^n\}=n$\\
	$\mathrm{dim}\{\mathbb{P}_2\}=3$ because one of its bases is $\{1,t,t^2\}$\\
	$\mathrm{dim}\{\mathbb{P}\}=\infty$\\
	$\mathrm{dim}\{\mathrm{Span}\{\mathbf{v}_1,\mathbf{v}_2\}\}=2$ if $\mathbf{v}_1$ and $\mathbf{v}_2$ are linearly independent\\
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Dimension of a vector space} 
\begin{exampleblock}{Example: in $\mathbb{R}^3$}
	There is a single subspace of dimension 0 ($\{\mathbf{0}\}$)\\
  There are infinite subspaces of dimension 1 (all lines going through the origin)\\
	There are infinite subspaces of dimension 2 (all planes going through the origin)\\
	There is a single subspace of dimension 3 ($\mathbb{R}^3$)\\
	\begin{center}
		\includegraphics[scale=0.4]{figSubspaces.jpg}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Dimension of a vector space} 
\begin{ceuthm}
	Let $H\subseteq V$ be a vector subspace of a vector space $V$. Then,
	\begin{center}
		$\mathrm{dim}\{H\}\leq \mathrm{dim}\{V\}$
	\end{center}
\end{ceuthm}

\begin{ceuthm}
	Let $V$ be an $n$-dimensional vector space ($n\geq 1$).
	\begin{itemize}
		\item Any linearly independent subset of $V$ with $n$ elements is a basis.
		\item Any subset of $V$ with $n$ elements that spans $V$ is a basis.
	\end{itemize}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Dimension of a vector space} 
\begin{ceuthm}
	Consider any matrix $A\in\mathcal{M}_{m\times n}$. 
	\begin{itemize}
		\item $\mathrm{dim}\{\mathrm{Nul}\{A\}\}$ is the number of free variables in the equation $A\mathbf{x}=\mathbf{0}$.
		\item $\mathrm{dim}\{\mathrm{Col}\{A\}\}$ is the number of pivot columns of $A$.
	\end{itemize}
	\label{thm:dimNuldimCol}
\end{ceuthm}

\begin{exampleblock}{Example}
	\begin{center}
		$A=\left(\begin{array}{rrrrr} -3&6&-1&1&-7\\1&-2&2&3&-1\\2&-4&5&8&-4\end{array}\right) \sim
		\left(\begin{array}{rrrrr} {\color{blue}1}&-2&{\color{blue}0}&-1&3\\
		{\color{blue}0}&0&{\color{blue}1}&2&-2\\{\color{blue}0}&0&{\color{blue}0}&0&0\end{array}\right)$
	\end{center}
	The number of pivot columns of $A$ is $2=\mathrm{dim}\{\mathrm{Col}\{A\}\}$ (in blue), while the number of free variables is $3=\mathrm{dim}\{\mathrm{Nul}\{A\}\}$
	(the free variables are $x_2$, $x_4$ and $x_5$).
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 4, Section 5:
	\begin{itemize}
		\item 4.5.1
		\item 4.5.13
		\item 4.5.21
		\item 4.5.25
		\item 4.5.26
		\item 4.5.27
		\item 4.5.28
		\item 4.5.31
		\item 4.5.32
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Rank of a matrix (d)} 
\Outline

\begin{frame}\frametitle{Rank of a matrix} 

	The \textbf{rank of a matrix} is the number of linearly independent rows of that matrix. It can also be defined as the number of linearly independent columns of that matrix because both definitions yield the same number. We'll see a more formal definition below.

\begin{ceudef}[Row space of a matrix]
	Given a matrix $A\in\mathcal{M}_{m\times n}$, the \textbf{row space} of $A$ is the space spanned by all rows of $A$ ($\mathrm{Row}\{A\}\subseteq \mathbb{R}^n$).
\end{ceudef}

\begin{ceuthm}
	\begin{center}
		$\mathrm{Row}\{A\}=\mathrm{Col}\{A^T\}$
	\end{center}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Rank of a matrix} 

\begin{ceuthm}
	If a matrix $A$ is row equivalent to another matrix $B$, then $\mathrm{Row}\{A\}=\mathrm{Row}\{B\}$.\\
	If $B$ is in a reduced echelon form, then the non-null rows of $B$ form a basis of $\mathrm{Row}\{A\}$\\
	\underline{\textit{Proof}}\\
	\hspace{0.25cm}
		\parbox{11cm}{
			\underline{\textit{Proof $\mathrm{Row}\{A\}\supseteq\mathrm{Row}\{B\}$}}\\
			\leftskip5mm Since the rows of $B$ are obtained by row operations on the rows of $A$, then any linear combination of the rows of $B$ can be obtained
			   as linear combinations of the rows of $A$.
		}\\
	\hspace{0.25cm}
		\parbox{11cm}{
			\underline{\textit{Proof $\mathrm{Row}\{A\}\subseteq\mathrm{Row}\{B\}$}}\\
			\leftskip5mm Since the row operations are reversible, then any linear combination of the rows of $A$ can be obtained
			   as linear combinations of the rows of $B$.
		}\\
	\hspace{0.25cm}
		\parbox{11cm}{
			\underline{\textit{Proof non-null rows of $B$ form a basis}}\\
			\leftskip5mm They are linearly independent because any non-null row of $B$ cannot be obtained as a linear combination of the rows below (because it is in echelon form and
			there are numbers in early columns that have 0s below)
		}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Rank of a matrix} 

\begin{exampleblock}{Example}
\begin{center}
	$A=\left(\begin{array}{rrrrr}-2&-5&8&0&-17\\1&3&-5&1&5\\3&11&-19&7&1\\1&7&-13&5&-3\end{array}\right)\sim
	 B=\left(\begin{array}{rrrrr}{\color{blue}1}&{\color{blue}3}&-5&{\color{blue}1}&5\\{\color{blue}0}&{\color{blue}1}&-2&{\color{blue}2}&-7\\
	{\color{blue}0}&{\color{blue}0}&0&{\color{blue}-4}&20\\{\color{blue}0}&{\color{blue}0}&0&{\color{blue}0}&0\end{array}\right)$
\end{center}
Pivot columns have been highlighted in blue. At this point we can already construct a basis for the row and column spaces of $A$
\begin{center}
	$\begin{array}{rcl}
		\mathbb{R}^5\supset\mathrm{Row}\{A\}&=&\mathrm{Span}\{(1,3,-5,1,5),(0,1,-2,2,-7),(0,0,0,-4,20)\}\\
		\mathbb{R}^4\supset\mathrm{Col}\{A\}&=&\mathrm{Span}\{(-2,1,3,1),(-5,3,11,7),(0,1,7,5)\}\\
	\end{array}$
\end{center}
To calculate the null space of $A$ we need the reduced echelon form
\begin{center}
	$A\sim\left(\begin{array}{rrrrr}1&0&1&0&1\\0&1&-2&0&3\\0&0&0&1&-5\\0&0&0&0&0\end{array}\right)$
\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Rank of a matrix} 

\begin{exampleblock}{Example (continued)}
\begin{center}
	$A\sim\left(\begin{array}{rrrrr}1&0&1&0&1\\0&1&-2&0&3\\0&0&0&1&-5\\0&0&0&0&0\end{array}\right) \Rightarrow$\\
	$\begin{array}{rcl}
		x_1&=&-x_3-x_5\\
		x_2&=&2x_3-3x_5\\
		x_4&=&5x_5
	\end{array} \Rightarrow \mathrm{Nul}\{A\}\ni \mathbf{x}=x_3\begin{pmatrix}-1\\2\\1\\0\\0\end{pmatrix}+x_5\begin{pmatrix}-1\\-3\\0\\5\\1\end{pmatrix}$
\end{center}
Finally,
\begin{center}
	$\begin{array}{rcl}
		\mathbb{R}^5\supset\mathrm{Nul}\{A\}&=&\mathrm{Span}\{(-1,2,1,0,0),(-1,-3,0,5,1)\}\\
	\end{array}$
\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Rank of a matrix} 

\begin{ceudef}[Rank of a matrix]
	\begin{center}
		$\mathrm{Rank}\{A\}=\mathrm{dim}\{\mathrm{Col}\{A\}\}$
	\end{center}
	That is, by definition, $\mathrm{Rank}\{A\}$ is the number of pivot columns of $A$.
\end{ceudef}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Rank of a matrix} 

\begin{ceuthm}[Rank theorem]
	For any matrix $A\in\mathcal{M}_{m\times n}$
	\begin{enumerate}
		\item $\mathrm{dim}\{\mathrm{Row}\{A\}\}=\mathrm{dim}\{\mathrm{Col}\{A\}\}$
		\item $\mathrm{Rank}\{A\}+\mathrm{dim}\{\mathrm{Nul}\{A\}\}=n$
	\end{enumerate}
	\underline{\textit{Proof}}\\
	\begin{enumerate}
		\item Let $B$ be the reduced echelon form of $A$. By definition $\mathrm{Rank}\{A\}$ is the number of pivot columns in $A$ (that is the same as the number of pivot columns in $B$). 
			    Since $B$ is in reduced echelon form, each of its non-zero rows has a column pivot and, consequently, the number of non-zero rows coincides with the number of pivot columns.
					The basis of $\mathrm{Row}\{B\}=\mathrm{Row}\{A\}$ must have as many elements as pivot columns.
		\item From Theorem \ref{thm:dimNuldimCol} we know that $\mathrm{dim}\{\mathrm{Nul}\{A\}\}$ is the number of free variables in $A\mathbf{x}=\mathbf{0}$, that is, the number of non-pivot columns of 
		      $B$. Consequently, we have
					\begin{center}
						$\mathrm{dim}\{\mathrm{Col}\{A\}\}+\mathrm{dim}\{\mathrm{Nul}\{A\}\}=n$
					\end{center}
					But by definition, $\mathrm{Rank}\{A\}=\mathrm{dim}\{\mathrm{Col}\{A\}\}$, which proves the theorem.
	\end{enumerate}
	\label{thm:rank}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Rank of a matrix} 

\begin{exampleblock}{Example}
	Let $A\in\mathcal{M}_{7\times 9}$. We know $\mathrm{dim}\{\mathrm{Nul}\{A\}\}=2$. What is $\mathrm{Rank}\{A\}$?\\
	According to the previous theorem
	\begin{center}
		$\mathrm{Rank}\{A\}=n-\mathrm{dim}\{\mathrm{Nul}\{A\}\}=9-2=7$
	\end{center}
\end{exampleblock}

\begin{exampleblock}{Example}
	Let $A\in\mathcal{M}_{6\times 9}$. Can it be $\mathrm{dim}\{\mathrm{Nul}\{A\}\}=2$?\\
	Let us presume that it can be $\mathrm{dim}\{\mathrm{Nul}\{A\}\}=2$, then
	\begin{center}
		$\mathrm{Rank}\{A\}=n-\mathrm{dim}\{\mathrm{Nul}\{A\}\}=9-2=7$
	\end{center}
	But since $A$ has only 6 rows, the maximum rank can only be 6 (not 7), and therefore, it must be $\mathrm{dim}\{\mathrm{Nul}\{A\}\}\geq 3$.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Rank of a matrix} 

\begin{exampleblock}{Example}
	\begin{center}
		$A=\left(\begin{array}{rrr}3&0&-1\\3&0&-1\\4&0&5\end{array}\right)\Rightarrow
		   \begin{array}{l}
			   \mathrm{Nul}\{A\}=\{(0,x_2,0) \quad \forall x_2\in\mathbb{R}\}\\
			   \mathrm{Row}\{A\}=\{(x_1,0,x_3) \quad \forall x_1,x_3\in\mathbb{R}\}\\
			   \mathrm{Col}\{A\}=\{(x_2,x_2,x_3) \quad \forall x_2,x_3\in\mathbb{R}\}\\
			   \mathrm{Nul}\{A^T\}=\{(x_1,-x_1,0) \quad \forall x_1\in\mathbb{R}\}\\
				\end{array}$\\
			\includegraphics[scale=0.4]{figRank.jpg}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Rank of a matrix} 

\begin{ceuthm}[The invertible matrix theorem (continued)]
		The following statements are equivalent to those in Theorems 5.1 and 11.5 of Chapter 3 (the invertible matrix theorem). Let $A \in \mathcal{M}_{n\times n}$
	\begin{enumerate}[i.]
		\setcounter{enumi}{18}
		\item The columns of $A$ form a basis of $\mathbb{R}^n$.
		\item $\mathrm{Col}\{A\}=\mathbb{R}^n$.
		\item $\mathrm{dim}\{\mathrm{Col}\{A\}\}=n$
		\item $\mathrm{Rank}\{A\}=n$
		\item $\mathrm{Nul}\{A\}=\{\mathbf{0}\}$.
		\item $\mathrm{dim}\{\mathrm{Nul}\{A\}\}=0$.
	\end{enumerate}
	\underline{\textit{Proof vii $\Leftrightarrow$ xx}}\\
	vii$\equiv$The equation $A\mathbf{x}=\mathbf{b}$ has at least one solution for every $\mathbf{b}\in \mathbb{R}^n$.\\
	But $\mathrm{Col}\{A\}$ is the set of all $\mathbf{b}$'s for which $A\mathbf{x}=\mathbf{b}$ has a solution. Therefore, vii $\Leftrightarrow$ xx.\\
	\underline{\textit{Proof xx $\Leftrightarrow$ xxi $\Leftrightarrow$ xxii}}\\
	Because of the definition of rank.\\
\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Rank of a matrix} 

\begin{block}{}
	\underline{\textit{Proof v,viii $\Leftrightarrow$ xix}}\\
	v$\equiv$The columns of $A$ are linearly independent.\\
	viii$\equiv$The columns of $A$ span $\mathbb{R}^n$.\\
	But both together are the definition of a basis for $\mathbb{R}^n$.\\
	\underline{\textit{Proof xxi $\Leftrightarrow$ xxiv}}\\
	Knowing xxi and thanks to the rank theorem \ref{thm:rank}, we can infer that $\mathrm{dim}\{\mathrm{Nul}\{A\}\}=n-n=0$\\
	\underline{\textit{Proof xxiv $\Leftrightarrow$ xxiii}}\\
	The only subspace with null dimension is $\{\mathbf{0}\}$.
\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 4, Section 6:
	\begin{itemize}
		\item 4.6.1
		\item 4.6.13
		\item 4.6.15
		\item 4.6.19
		\item 4.6.26
		\item 4.6.28
		\item 4.6.29
		\item 4.6.33
		\item 4.6.35
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Change of basis (d)} 
\Outline

\begin{frame}\frametitle{Change of basis} 
\begin{exampleblock}{Example}
	Let us assume we have a vector $\mathbf{x}$ that has two different coordinates in two different coordinate systems $B$ and $C$.
	\begin{center}
		$[\mathbf{x}]_B=(3,1)$ and $[\mathbf{x}]_C=(6,4)$\\
		\includegraphics[scale=0.4]{figChangeOfBasis.jpg}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Change of basis} 
\begin{exampleblock}{Example (continued)}
	Presume that for our example
	\begin{center}
		$\begin{array}{rcl}
			\mathbf{b}_1&=&4\mathbf{c}_1+\mathbf{c}_2\\
			\mathbf{b}_2&=&-6\mathbf{c}_1+\mathbf{c}_2
		\end{array}$
	\end{center}
	We can calculate the coordinates of the basis vectors $B$ in the $C$ coordinate system as
	\begin{center}
		$\begin{array}{rcl}
			\left[\mathbf{b}_1\right]_C&=&(4,1)\\
			\left[\mathbf{b}_2\right]_C&=&(-6,1)\\
		\end{array}$
	\end{center}
	The coordinates of $\mathbf{x}$ in the basis $B$ tell us
	\begin{center}
		$\mathbf{x}=3\mathbf{b}_1+\mathbf{b}_2$
	\end{center}
	If we now apply the coordinate mapping transformation we have
	\begin{center}
		$[\mathbf{x}]_C=3[\mathbf{b}_1]_C+[\mathbf{b}_2]_C=3\left(\begin{array}{r}4\\1\end{array}\right)+\left(\begin{array}{r}-6\\1\end{array}\right)=
		   \left(\begin{array}{rr}4&-6\\1&1\end{array}\right)\left(\begin{array}{r}3\\1\end{array}\right)=
		   \left(\begin{array}{r}6\\4\end{array}\right)$
	\end{center}
	
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Change of basis} 
\begin{exampleblock}{Example (continued)}
	Note that the columns of the matrix
	\begin{center}
		$\left(\begin{array}{rr}4&-6\\1&1\end{array}\right)$
	\end{center}
	are the coordinates of each one of the elements of the basis $B$ expressed in the coordinate system $C$, and that 
	the overall change of coordinates has the form
	\begin{center}
		$[\mathbf{x}]_C=\left(\begin{array}{rr}4&-6\\1&1\end{array}\right)[\mathbf{x}]_B$
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Change of basis} 
\begin{ceuthm}[Change of basis]
	Let $B=\{\mathbf{b}_1,\mathbf{b}_2,...,\mathbf{b}_n\}$ and $C=\{\mathbf{c}_1,\mathbf{c}_2,...,\mathbf{c}_n\}$ be two bases of the 
	vector space $V$. We can transform coordinates from one coordinate system to the other by multiplying by a single, invertible $n\times n$ matrix, called
	$P_{C\leftarrow B}$ whose columns are the coordinates of the vectors of $B$ in the basis $C$.
	\begin{center}
		$[\mathbf{x}]_C=P_{C\leftarrow B}[\mathbf{x}]_B$\\
		\includegraphics[scale=0.4]{figChangeOfBasis2.jpg}
	\end{center}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Change of basis} 
\begin{block}{Corollary}
	To convert from $C$ coordinates back to $B$ coordinates we simply have to invert the transformation.
	\begin{center}
		$P_{B\leftarrow C}=P_{C\leftarrow B}^{-1}$
	\end{center}
\end{block}

\begin{block}{Corollary}
	Consider the standard basis in $V$ given by $E=\{\mathbf{e}_1,\mathbf{e}_2,...,\mathbf{e}_n\}$. The matrix to convert the coordinates from $B$ to $E$ is simply
	\begin{center}
		$P_{E\leftarrow B}=\begin{pmatrix}\mathbf{b}_1 & \mathbf{b}_2 &...&\mathbf{b}_n\end{pmatrix}$
	\end{center}
	Consequently, we have that for two different bases
	\begin{center}
		$\mathbf{x}=P_{E\leftarrow B}[\mathbf{x}]_B=P_{E\leftarrow C}[\mathbf{x}]_C$
	\end{center}
	Finally,
	\begin{center}
		$[\mathbf{x}]_C=P_{E\leftarrow C}^{-1}P_{E\leftarrow B}[\mathbf{x}]_B$
	\end{center}
\end{block}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Change of basis} 
\begin{block}{Numerical trick}
	Given the two bases $B$ and $C$ we can easily find the coordinates of $B$ in the basis $C$ in the following way.
	Let us define two matrices $\mathcal{B}$ and $\mathcal{C}$ whose columns are the elements of the basis. Then
	\begin{center}
		$(\mathcal{C} | \mathcal{B}) \sim (I_n|P_{C\leftarrow B})$
	\end{center}
\end{block}

\begin{exampleblock}{Example}
	Let's say we are given $\mathbf{b}_1=(-9,1)$, $\mathbf{b}_2=(-5,-1)$, $\mathbf{c}_1=(1,-4)$, $\mathbf{c}_2=(3,-5)$.
	\begin{center}
		$\left(\begin{array}{rr|rr}
			1&3&-9&-5\\
			-4&-5&1&-1
		\end{array}\right)\sim
		\left(\begin{array}{rr|rr}
			1&0&6&4\\
			0&1&-5&-3
		\end{array}\right)$
	\end{center}
	Then, $P_{C\leftarrow B}=\left(\begin{array}{rr}
			6&4\\-5&-3
		\end{array}\right)$.
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 4, Section 7:
	\begin{itemize}
		\item 4.7.1
		\item 4.7.9
	\end{itemize}
\end{exerciseblock}

\end{frame}

\OutlineFinal

\end{document}