\input{../slidesComun}

\title[4. Determinant of a matrix]{Chapter 4. Determinant of a matrix}  
\COSS

% ==============================================
\begin{frame}\frametitle{References} 

\begin{figure}
	\includegraphics[scale=0.7]{../lay_linearalgebra.jpg}
\end{figure}
D. Lay. \textit{Linear Algebra and Its Applications} (3rd ed.). Pearson (2006). Chapter 3.

\end{frame}

% ==============================================
\begin{frame}\frametitle{A little bit of history} 

The determinant of a matrix was first proposed by \href{https://en.wikipedia.org/wiki/Seki_Takakazu}{Seki Takakazu} (1683) and \href{http://en.wikipedia.org/wiki/Gottfried_Wilhelm_Leibniz}{Gottfried Leibniz} (1693). Later, \href{http://en.wikipedia.org/wiki/Gabriel_Cramer}{Gabriel Cramer} (1750) and \href{http://en.wikipedia.org/wiki/Augustin-Louis_Cauchy}{Augustin Cauchy} (1812) used determinants to solve
problems in analytical geometry. Nowadays they are seldom used in large-scale numerical computations, but they provide important insight into the structure of a matrix.

\begin{figure}
	\includegraphics[height=3cm]{../Tema3/Seki.jpg}
	\includegraphics[height=3cm]{../Tema3/Leibniz.jpg}
	\includegraphics[height=3cm]{../Tema3/Cramer.jpg}
  \includegraphics[height=3cm]{Cauchy.jpg}
\end{figure}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Applications} 

The determinant plays an important role in the analysis of Brownian motion, first described by Robert Brown in 1827 while observing pollen grains in water. In 1905 Albert Einstein published a paper explaining Brownian motion as the result of water molecules colliding with the larger suspended particles. This served as the theoretical basis for a later experiment by Jean Perrin that confirmed the existence of atoms; Perrin was awarded the Nobel Prize in Physics in 1926.

\begin{figure}
  \includegraphics[height=3cm]{figBrownianMotion.jpg}
\end{figure}

See video at \url{https://www.youtube.com/watch?v=hy-clLi8gHg}

\end{frame}

% ==============================================
\setnextsection{4}
\section{Determinant of a matrix} 
\subsection{Introduction} 
\Outline

\begin{frame}\frametitle{Cofactor} 
\begin{ceudef}[Cofactor]
	The \textbf{cofactor} of the $ij$-th element of the matrix $A$ is
	\begin{center}
		$C_{ij}=(-1)^{i+j}\left|A_{ij}\right|$
	\end{center}
	where $A_{ij}$ is the matrix that results after eliminating the $i$-th row and the $j$-th column from matrix $A$.
\end{ceudef}

\begin{exampleblock}{Example}
	In the following example we calculate $A_{32}$
	\begin{center}
		\includegraphics[scale=0.4]{figCofactor.jpg}
	\end{center}
\end{exampleblock}
\end{frame}
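
% ==============================================
\begin{frame}[fragile]\frametitle{Cofactor}
As a quick illustration, a minimal MATLAB sketch of the definition (the matrix is an arbitrary example): delete the $i$-th row and the $j$-th column, and apply the sign $(-1)^{i+j}$ to the determinant of the resulting submatrix.
\begin{footnotesize}
\begin{verbatim}
A = [1 5 0; 2 4 -1; 0 -2 0];   % arbitrary example matrix
i = 3; j = 2;
Aij = A;
Aij(i,:) = [];                 % delete the i-th row
Aij(:,j) = [];                 % delete the j-th column
Cij = (-1)^(i+j) * det(Aij)    % cofactor C_ij
\end{verbatim}
\end{footnotesize}
\end{frame}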

% ==============================================
\begin{frame}\frametitle{Determinant of a matrix} 
\begin{ceudef}[Determinant of a matrix]
	The \textbf{determinant} of a square $n\times n$ matrix $A$ (written $\left|A\right|$ or $\mathrm{det}\{A\}$) is a mapping from $\mathcal{M}_{n\times n}$ to $\mathbb{R}$ such that
	\begin{center}
		$\left|A\right|=\left\{ \begin{array}{cc} a_{11} & n=1 \\
		    a_{11}C_{11}+a_{12}C_{12}+...+a_{1n}C_{1n} & n\geq 2\end{array}\right.$
	\end{center}
	where $a_{ij}$ is the $ij$-th element of matrix $A$.\\
	MATLAB: {\color{blue}\texttt{det(A)}}
\end{ceudef}

\begin{exampleblock}{Example}
	\begin{center}\begin{tiny}
		\begin{tabular}{l}
		$\begin{array}{rcl}\mathrm{det}\left\{\left(\begin{array}{rrr}1 & 5 & 0 \\ 2 & 4 & -1 \\ 0 & -2 & 0\end{array}\right)\right\}&=&
			1\mathrm{det}\left\{\left(\begin{array}{rr} 4 & -1 \\ -2 & 0\end{array}\right)\right\}-
			5\mathrm{det}\left\{\left(\begin{array}{rr}  2 & -1 \\ 0 & 0\end{array}\right)\right\}+
			0\mathrm{det}\left\{\left(\begin{array}{rr}  2 & 4 \\ 0 & -2\end{array}\right)\right\}\\&=&1\cdot (-2)-5\cdot 0+0\cdot(-4)=-2\\
			\mathrm{det}\left\{\left(\begin{array}{rr} 4 & -1 \\ -2 & 0\end{array}\right)\right\}&=&4\mathrm{det}\{0\}-(-1)\mathrm{det}\{-2\}=4\cdot 0-(-1)\cdot(-2)=-2\\
			\mathrm{det}\left\{\left(\begin{array}{rr}  2 & -1 \\ 0 & 0\end{array}\right)\right\}&=&2\mathrm{det}\{0\}-(-1)\mathrm{det}\{0\}=2\cdot 0-(-1)\cdot 0=0\\
		  \mathrm{det}\left\{\left(\begin{array}{rr}  2 & 4 \\ 0 & -2\end{array}\right)\right\}&=&2\mathrm{det}\{-2\}-4\mathrm{det}\{0\}=2\cdot (-2)-4\cdot 0=-4\\
			\end{array}$
		\end{tabular}
	\end{tiny}\end{center}
\end{exampleblock}

\end{frame}
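
% ==============================================
\begin{frame}[fragile]\frametitle{Determinant of a matrix}
As a quick illustration, a minimal MATLAB sketch of the recursive definition, expanding along the first row (\texttt{mydet} is not a built-in, just an illustrative name; it mirrors the definition and is not meant to be efficient).
\begin{footnotesize}
\begin{verbatim}
function d = mydet(A)
% Determinant by cofactor expansion along the first row
n = size(A,1);
if n == 1
    d = A(1,1);
else
    d = 0;
    for j = 1:n
        Asub = A(2:end, [1:j-1, j+1:n]);  % delete row 1, column j
        d = d + (-1)^(1+j) * A(1,j) * mydet(Asub);
    end
end
end
\end{verbatim}
\end{footnotesize}
For the matrix of the example, \texttt{mydet([1 5 0; 2 4 -1; 0 -2 0])} should return $-2$, the same value as \texttt{det}.
\end{frame}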

% ==============================================
\begin{frame}\frametitle{Determinant of a matrix} 

\begin{ceuthm}
	For $n\geq 2$, the determinant can be computed as a weighted sum of cofactors along any fixed row $i$ or any fixed column $j$:
	\begin{center}
		$\left|A\right|=\sum\limits_{j=1}^n{a_{ij}C_{ij}}=\sum\limits_{i=1}^n{a_{ij}C_{ij}}$
	\end{center}
\end{ceuthm}

\begin{exampleblock}{Example (continued)}
	\begin{center}
		\begin{tabular}{l}
		$\begin{array}{rcl}\mathrm{det}\left\{\left(\begin{array}{rrr}1 & 5 & {\color{blue}0} \\ 2 & 4 & {\color{blue}-1} \\ 0 & -2 & {\color{blue}0}\end{array}\right)\right\}&=&
			0\cdot C_{13}+(-1)\cdot C_{23}+0\cdot C_{33}=-2\\
			C_{23}&=&(-1)^{2+3}\left|\begin{array}{rr}  1 & 5 \\ 0 & -2\end{array}\right|=-(1\cdot(-2)-5\cdot 0)=2\\
			\end{array}$
		\end{tabular}
	\end{center}
\end{exampleblock}

\end{frame}
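
% ==============================================
\begin{frame}[fragile]\frametitle{Determinant of a matrix}
As a quick illustration, expanding along the third column of the example matrix in MATLAB gives the same value as expanding along the first row:
\begin{footnotesize}
\begin{verbatim}
A = [1 5 0; 2 4 -1; 0 -2 0]; n = 3;
j = 3; d = 0;                      % expansion along the j-th column
for i = 1:n
    M = A; M(i,:) = []; M(:,j) = [];
    d = d + A(i,j) * (-1)^(i+j) * det(M);
end
d                                  % -2, as before
\end{verbatim}
\end{footnotesize}
\end{frame}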

% ==============================================
\begin{frame}\frametitle{Determinant of a matrix} 
\begin{ceuthm}[Useful particular cases]
	\begin{itemize}
		\item For $n=2$, \\$|A|=a_{11}a_{22}-a_{12}a_{21}$
		\item For $n=3$, \\$|A|=a_{11}a_{22}a_{33}+a_{12}a_{23}a_{31}+a_{13}a_{21}a_{32}-a_{11}a_{23}a_{32}-a_{12}a_{21}a_{33}-a_{13}a_{22}a_{31}$
	\end{itemize}
	\begin{center}
		\includegraphics[height=1.5cm]{figDeterminant2x2.jpg}\\
		\includegraphics[height=3cm]{figDeterminant3x3.jpg}
	\end{center}
\end{ceuthm}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Determinant of a matrix} 
\begin{ceuthm}[Useful particular cases (continued)]
	\begin{itemize}
		\item For triangular matrices,\\
			$|A|=\prod\limits_{i=1}^n{a_{ii}}$
	\end{itemize}
\end{ceuthm}

\begin{exampleblock}{Example}
	\begin{center}
		$\left|\begin{array}{rrrr}{\color{blue}1} & 4 & \frac{3}{5} & 2 \\ {\color{blue}0} & 1 & 2 & -10 \\ {\color{blue}0} & 0 & 1 & 12 \\ {\color{blue}0} & 0 & 0 & 1\end{array}\right|
		 =1\left|\begin{array}{rrr}{\color{blue}1} & 2 & -10 \\  {\color{blue}0} & 1 & 12 \\  {\color{blue}0} & 0 & 1\end{array}\right|
		 =1\cdot 1 \left|\begin{array}{rr} {\color{blue}1} & 12 \\ {\color{blue}0} & 1\end{array}\right|
		 =1\cdot 1 \cdot 1 \left|{\color{blue}1}\right|=1
		$
	\end{center}
\end{exampleblock}

Computing the determinant through the cofactor expansion requires $O(n!)$ operations. Much faster algorithms ($O(n^3)$) first reduce the matrix by row operations to a triangular matrix whose determinant is the same up to a known factor, and then apply this theorem.

\end{frame}
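
% ==============================================
\begin{frame}[fragile]\frametitle{Determinant of a matrix}
As a quick illustration, a MATLAB sketch of the $O(n^3)$ idea: reduce $A$ to an upper triangular matrix by row operations, keeping track of the sign changes produced by row interchanges, and multiply the diagonal entries (\texttt{detByElimination} is just an illustrative name).
\begin{footnotesize}
\begin{verbatim}
function d = detByElimination(A)
n = size(A,1); s = 1;
for k = 1:n-1
    [~, p] = max(abs(A(k:n,k))); p = p + k - 1;  % partial pivoting
    if A(p,k) == 0, d = 0; return; end           % singular matrix
    if p ~= k, A([k p],:) = A([p k],:); s = -s; end  % swap flips sign
    for r = k+1:n
        A(r,:) = A(r,:) - (A(r,k)/A(k,k))*A(k,:);    % det unchanged
    end
end
d = s * prod(diag(A));                           % triangular case
end
\end{verbatim}
\end{footnotesize}
For the triangular example above it should return $1$, and for random matrices it should agree with \texttt{det} up to rounding errors.
\end{frame}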

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 3, Section 1:
	\begin{itemize}
		\item 3.1.42
		\item 3.1.43 (with computer; MATLAB: {\color{blue}\texttt{A=rand(4)}})
		\item 3.1.44 (with computer)
		\item 3.1.45 (with computer)
		\item 3.1.46 (with computer)
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Properties of determinants} 
\Outline

\begin{frame}\frametitle{Properties of determinants} 
\begin{ceuthm}[Determinant of a product]
	$\mathrm{det}\{AB\}=\mathrm{det}\{A\}\mathrm{det}\{B\}$\\
	$\mathrm{det}\{kA\}=k^n\mathrm{det}\{A\}$
\end{ceuthm}
Note: In general, $\mathrm{det}\{A+B\}\neq\mathrm{det}\{A\}+\mathrm{det}\{B\}$
\begin{ceuthm}[Determinant of row operations]
	\begin{enumerate}
		\item If a multiple of one row of a matrix $A$ is added to another row to obtain a matrix $B$, then $\mathrm{det}\{B\}=\mathrm{det}\{A\}$.
		\item If two rows of a matrix $A$ are interchanged to obtain a matrix $B$, then $\mathrm{det}\{B\}=-\mathrm{det}\{A\}$.
		\item If a row of a matrix $A$ is multiplied by $k$ to obtain a matrix $B$, then $\mathrm{det}\{B\}=k\mathrm{det}\{A\}$.
	\end{enumerate}
\end{ceuthm}

\end{frame}
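
% ==============================================
\begin{frame}[fragile]\frametitle{Properties of determinants}
A quick numerical check of these properties in MATLAB on random matrices:
\begin{footnotesize}
\begin{verbatim}
n = 4; A = rand(n); B = rand(n); k = 3;
det(A*B) - det(A)*det(B)      % ~0 (up to rounding)
det(k*A) - k^n*det(A)         % ~0
det(A+B) - (det(A)+det(B))    % in general, NOT close to 0
\end{verbatim}
\end{footnotesize}
\end{frame}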

% ==============================================
\begin{frame}\frametitle{Properties of determinants} 
\begin{exampleblock}{Example}
	Consider the following row operations, each written as a product $B=EA$ with an elementary matrix $E$
	\begin{enumerate}
		\item $B=\begin{pmatrix}1 & 0 & 0 & 0\\ k&1&0&0 \\ 0&0&1&0 \\ 0&0&0&1 \end{pmatrix}A \Rightarrow |B|=|E||A|=1|A|$
		\item $B=\begin{pmatrix}0 & 1 & 0 & 0\\ 1&0&0&0 \\ 0&0&1&0 \\ 0&0&0&1 \end{pmatrix}A \Rightarrow |B|=|E||A|=-1|A|$
		\item $B=\begin{pmatrix}k & 0 & 0 & 0\\ 0&1&0&0 \\ 0&0&1&0 \\ 0&0&0&1 \end{pmatrix}A \Rightarrow |B|=|E||A|=k|A|$
	\end{enumerate}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Properties of determinants} 
\begin{exampleblock}{Example}
	\begin{center}
		\begin{tabular}{c|c|l}
			& $A=\left(\begin{array}{rrr}2 & 4 & 6\\ 3 & 5 & 7 \\ 1 & 2 & 3 \end{array}\right)$ & $|A|$ \\
			$\mathbf{r}_1\leftarrow\frac{1}{2}\mathbf{r}_1$& $B_1=\left(\begin{array}{rrr}1 & 2 & 3\\ 3 & 5 & 7 \\ 1 & 2 & 3 \end{array}\right)$ & $|B_1|=\frac{1}{2}|A|\Rightarrow |A|=2|B_1|$\\
			$\begin{array}{l}\mathbf{r}_2\leftarrow\mathbf{r}_2-3\mathbf{r}_1\\
			                 \mathbf{r}_3\leftarrow\mathbf{r}_3-\mathbf{r}_1\end{array}$
											& $B_2=\left(\begin{array}{rrr}1 & 2 & 3\\ 0 & -1 & -2 \\ 0 & 0 & 0 \end{array}\right)$ & $\begin{array}{l}|B_2|=|B_1|\Rightarrow\\ |A|=2|B_2|=2(1\cdot (-1) \cdot 0)=0\end{array}$
		\end{tabular}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Properties of determinants} 
\begin{ceuthm}
	$A$ is invertible iff $|A|\neq 0$. In that case, $|A^{-1}|=|A|^{-1}$.
\end{ceuthm}

\begin{block}{Corollary}
	If $|A|=0$, then the columns of $A$ are not linearly independent.
\end{block}

\begin{ceuthm}
	For any matrix $A\in \mathcal{M}_{n\times n}$, it is verified that 
		$|A|=|A^T|$.\\
\end{ceuthm}

\begin{block}{Corollary}
	The effect of column operations on the determinant is the same as the effect of row operations.
\end{block}

\end{frame}
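
% ==============================================
\begin{frame}[fragile]\frametitle{Properties of determinants}
A quick numerical check of these results in MATLAB:
\begin{footnotesize}
\begin{verbatim}
A = rand(4);
det(A') - det(A)          % ~0, since |A| = |A^T|
det(inv(A)) - 1/det(A)    % ~0, since |A^{-1}| = |A|^{-1}
B = [1 2; 2 4];           % second column = 2 * first column
det(B)                    % 0, so B is not invertible
\end{verbatim}
\end{footnotesize}
\end{frame}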

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 3, Section 2:
	\begin{itemize}
		\item 3.2.14
		\item 3.2.15
		\item 3.2.18
		\item 3.2.19
		\item 3.2.24
		\item 3.2.31
		\item 3.2.32
		\item 3.2.33
		\item 3.2.45 (computer)
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Cramer's rule} 
\Outline

\begin{frame}\frametitle{Cramer's rule} 
Cramer's rule is useful for understanding what the determinant is and for deriving its properties, but it is not an efficient way to solve linear systems numerically.

\begin{ceuthm}[Cramer's rule]
	Let $A\in\mathcal{M}_{n\times n}$ be an invertible matrix. For every $\mathbf{b}\in\mathbb{R}^n$ the $i$-th entry of the unique solution $\mathbf{x}$ of $A\mathbf{x}=\mathbf{b}$ is
	\begin{center}
		$x_i=\frac{\mathrm{det}\{A_i(\mathbf{b})\}}{\mathrm{det}\{A\}}$
	\end{center}
	where $A_i(\mathbf{b})$ is the matrix $A$ in which the $i$-th column has been replaced by $\mathbf{b}$, that is,
	\begin{center}
		$A_i(\mathbf{b})=\begin{pmatrix} \mathbf{a}_1 & \mathbf{a}_2 & ... & \mathbf{a}_{i-1} & \mathbf{b} & \mathbf{a}_{i+1} & ... & \mathbf{a}_n \end{pmatrix}$
	\end{center}
	\underline{\textit{Proof}}\\
	Let $\mathbf{e}_i$ ($i=1,2,...,n$) be the columns of the identity matrix $I_n$. Consider the product
	\begin{center}
		$\begin{array}{rcl}AI_i(\mathbf{x})&=&\begin{pmatrix} A\mathbf{e}_1 & A\mathbf{e}_2 & ... & A\mathbf{e}_{i-1} & A\mathbf{x} & A\mathbf{e}_{i+1} & ... & A\mathbf{e}_n \end{pmatrix}=\\
			&=&\begin{pmatrix} \mathbf{a}_1 & \mathbf{a}_2 & ... & \mathbf{a}_{i-1} & \mathbf{b} & \mathbf{a}_{i+1} & ... & \mathbf{a}_n \end{pmatrix}=A_i(\mathbf{b})\end{array}$
	\end{center}
\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Cramer's rule} 

\begin{block}{}
	Now we take the determinant on both sides
	\begin{center}
		$|A_i(\mathbf{b})|=|AI_i(\mathbf{x})|=|A||I_i(\mathbf{x})|=|A|x_i \Rightarrow x_i=\frac{|A_i(\mathbf{b})|}{|A|}$
	\end{center}
\end{block}

\begin{exampleblock}{Example}
	Consider the equation system $\begin{pmatrix} 3s & -2 \\ -6 & s \end{pmatrix}\begin{pmatrix}x_1\\x_2\end{pmatrix}=\begin{pmatrix}4\\1\end{pmatrix}$. Its solution is given by
	\begin{center}
		$\begin{array}{rcl}
			x_1&=&\frac{\left|\begin{array}{rr} 4 & -2 \\ 1 & s\end{array}\right|}{\left|\begin{array}{rr} 3s & -2 \\ -6 & s\end{array}\right|} =
				\frac{4s+2}{3s^2-12} = \frac{4(s+\frac{1}{2})}{3(s-2)(s+2)}\\
			x_2&=&\frac{\left|\begin{array}{rr} 3s & 4 \\ -6 & 1\end{array}\right|}{\left|\begin{array}{rr} 3s & -2 \\ -6 & s\end{array}\right|} =
			  \frac{3s+24}{3s^2-12} = \frac{s+8}{(s-2)(s+2)}
		\end{array}$
	\end{center}
\end{exampleblock}
\end{frame}
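
% ==============================================
\begin{frame}[fragile]\frametitle{Cramer's rule}
A minimal MATLAB sketch of Cramer's rule (\texttt{cramer} is not a built-in, just an illustrative name); in practice \texttt{A\textbackslash b} is preferable for solving linear systems.
\begin{footnotesize}
\begin{verbatim}
function x = cramer(A, b)
% Solve A*x = b, assuming det(A) ~= 0
n = size(A,1);
dA = det(A);
x = zeros(n,1);
for i = 1:n
    Ai = A; Ai(:,i) = b;      % replace the i-th column by b
    x(i) = det(Ai) / dA;      % x_i = |A_i(b)| / |A|
end
end
\end{verbatim}
\end{footnotesize}
For instance, with $s=1$, \texttt{cramer([3 -2; -6 1], [4; 1])} should return $(-2/3,\,-3)$, in agreement with the formulas of the example.
\end{frame}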

% ==============================================
\subsection{Matrix inversion} 
\Outline

\begin{frame}\frametitle{Matrix inversion} 
\begin{block}{Algorithm to invert a matrix}
	We know that the inverse is a matrix such that $AA^{-1}=I_n$. If we denote by $\mathbf{x}_i$ the $i$-th column of $A^{-1}$, then we have
	\begin{center}
		$AA^{-1}=A\begin{pmatrix} \mathbf{x}_1 & \mathbf{x}_2 & ... & \mathbf{x}_n \end{pmatrix}=\begin{pmatrix} \mathbf{e}_1 & \mathbf{e}_2 & ... & \mathbf{e}_n \end{pmatrix}$
	\end{center}
	i.e., we are simultaneously solving $n$ linear systems of the form $A\mathbf{x}_j=\mathbf{e}_j$. By Cramer's rule, the $i$-th entry of the $j$-th column is
	\begin{center}
		$x_{ij}=\frac{|A_i(\mathbf{e}_j)|}{|A|}$
	\end{center}
	If we now compute the determinant in the numerator by expanding along the $i$-th column (whose only nonzero entry is the $1$ in row $j$), we get $|A_i(\mathbf{e}_j)|=(-1)^{i+j}|A_{ji}|$, where $A_{ji}$ is the submatrix that
	results after eliminating the $j$-th row and the $i$-th column of $A$; in other words, $|A_i(\mathbf{e}_j)|$ is the cofactor of the $ji$-th element, $C_{ji}$.
	\begin{center}
		$x_{ij}=\frac{(-1)^{i+j}|A_{ji}|}{|A|}=\frac{C_{ji}}{|A|}$
	\end{center}
	
\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix inversion} 

\begin{ceudef}[Adjoint (adjugate, \textit{adjunta}) of a matrix]
	Let $A\in\mathcal{M}_{n\times n}$ be a square matrix. The adjoint of $A$ is the $n\times n$ matrix, denoted by $A^*$, whose entries are the cofactors of $A$:
	\begin{center}
		$A_{ij}^*=C_{ij}$
	\end{center}
\end{ceudef}

\begin{block}{Algorithm to invert a matrix (continued)}
	Finally we have
	\begin{center}
		$A^{-1}=\frac{1}{|A|}\begin{pmatrix} C_{11} & C_{21} & ... & C_{n1} \\ C_{12} & C_{22} & ... & C_{n2}\\...&...&...&...\\C_{1n} & C_{2n} & ... & C_{nn} \end{pmatrix}$
	\end{center}
	Note that the indices of the cofactors appear transposed with respect to the standard order, i.e., the matrix above is the transpose of the cofactor matrix. Consequently
	\begin{center}
		$A^{-1}=\frac{1}{|A|}(A^T)^*$
	\end{center}
\end{block}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Matrix inversion} 
\begin{ceuthm}
	\begin{center}
		$(A^T)^*=(A^*)^T$
	\end{center}
\end{ceuthm}

\begin{exampleblock}{Example}
	\begin{center}
		$A=\begin{pmatrix}2&1&0\\1&1&0\\0&0&1\end{pmatrix} \Rightarrow $\\
		\begin{tiny}
			\begin{tabular}{ccc}
				&$|A|=1$&\\
				$C_{11}=(-1)^{1+1}\left|\begin{array}{cc}1&0\\0&1\end{array}\right|=1$ &
				$C_{12}=(-1)^{1+2}\left|\begin{array}{cc}1&0\\0&1\end{array}\right|=-1$ &
				$C_{13}=(-1)^{1+3}\left|\begin{array}{cc}1&1\\0&0\end{array}\right|=0$ \\
				$C_{21}=(-1)^{2+1}\left|\begin{array}{cc}1&0\\0&1\end{array}\right|=-1$ &
				$C_{22}=(-1)^{2+2}\left|\begin{array}{cc}2&0\\0&1\end{array}\right|=2$ &
				$C_{23}=(-1)^{2+3}\left|\begin{array}{cc}2&1\\0&0\end{array}\right|=0$ \\
				$C_{31}=(-1)^{3+1}\left|\begin{array}{cc}1&0\\1&0\end{array}\right|=0$ &
				$C_{32}=(-1)^{3+2}\left|\begin{array}{cc}2&0\\1&0\end{array}\right|=0$ &
				$C_{33}=(-1)^{3+3}\left|\begin{array}{cc}2&1\\1&1\end{array}\right|=1$ \\
			\end{tabular}
			\begin{center}
				$A^*=\begin{pmatrix}1&-1&0\\-1&2&0\\0&0&1\end{pmatrix} \Rightarrow
					A^{-1}=\frac{1}{|A|}(A^*)^T=\begin{pmatrix}1&-1&0\\-1&2&0\\0&0&1\end{pmatrix}^T=\begin{pmatrix}1&-1&0\\-1&2&0\\0&0&1\end{pmatrix}$
			\end{center}
		\end{tiny}
	\end{center}
	
\end{exampleblock}

\end{frame}
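
% ==============================================
\begin{frame}[fragile]\frametitle{Matrix inversion}
A MATLAB sketch of the cofactor formula for the inverse (\texttt{invByAdjugate} is just an illustrative name); in practice \texttt{inv(A)} or Gaussian elimination are used instead.
\begin{footnotesize}
\begin{verbatim}
function Ainv = invByAdjugate(A)
% A^{-1} = (1/|A|) * (cofactor matrix)^T
n = size(A,1); C = zeros(n);
for i = 1:n
    for j = 1:n
        M = A; M(i,:) = []; M(:,j) = [];   % delete row i, column j
        C(i,j) = (-1)^(i+j) * det(M);      % cofactor C_ij
    end
end
Ainv = C' / det(A);
end
\end{verbatim}
\end{footnotesize}
Applied to the matrix of the example, \texttt{invByAdjugate([2 1 0; 1 1 0; 0 0 1])} should reproduce the inverse computed above.
\end{frame}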

% ==============================================
\subsection{Areas and volumes} 
\Outline

\begin{frame}\frametitle{Areas and volumes} 
\begin{ceuthm}[Area of a parallelogram, Volume of a parallelepiped]
	If $A$ is a $2\times 2$ matrix, then $|\det\{A\}|$ is the area of the parallelogram formed by the columns of $A$.
	If $A$ is a $3\times 3$ matrix, then $|\det\{A\}|$ is the volume of the parallelepiped formed by the columns of $A$.
\end{ceuthm}
\begin{exampleblock}{Example}
	Consider the parallelogram $ABCD$ with vertices $A=(-2,-2)$, $B=(0,3)$, $C=(4,-1)$, $D=(6,4)$.
	\begin{columns}
		\begin{column}{3.75cm}
			\includegraphics[scale=0.275]{figParallelogram.eps}
		\end{column}
		\begin{column}{6.5cm}
			The area can be calculated as
				$\left|\det\left(\begin{array}{cc}\mathbf{B}-\mathbf{A} & \mathbf{C}-\mathbf{A}\end{array}\right)\right|=$\\
				$\left|\det\left(\begin{array}{cc}\begin{pmatrix}0\\3\end{pmatrix}-\begin{pmatrix}-2\\-2\end{pmatrix} & 
				                        \begin{pmatrix}4\\-1\end{pmatrix}-\begin{pmatrix}-2\\-2\end{pmatrix}\end{array}\right)\right|=$ \\
				$\left|\det\left(\begin{array}{cc}2 & 6\\5&1\end{array}\right)\right|=|-28|=28$ \\
		\end{column}
	\end{columns}
\end{exampleblock}
\end{frame}
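
% ==============================================
\begin{frame}[fragile]\frametitle{Areas and volumes}
The computation of the example area in MATLAB:
\begin{footnotesize}
\begin{verbatim}
A = [-2; -2]; B = [0; 3]; C = [4; -1];   % vertices A, B, C
area = abs(det([B-A, C-A]))              % = 28
\end{verbatim}
\end{footnotesize}
\end{frame}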

% ==============================================

\begin{frame}\frametitle{Areas and volumes} 
\begin{ceuthm}[Area after a linear transformation]
	Consider the transformation $T(\mathbf{x})=A\mathbf{x}$.\\
	If $A\in\mathcal{M}_{2\times 2}$ and $S$ is a parallelogram in $\mathbb{R}^2$, then \begin{center}$\mathrm{Area}\{T(S)\}=|\det A|\mathrm{Area}\{S\}$\end{center}
	If $A\in\mathcal{M}_{3\times 3}$ and $S$ is a parallelepiped in $\mathbb{R}^3$, then the volume of $T(S)$ is \begin{center}$\mathrm{Volume}\{T(S)\}=|\det A|\mathrm{Volume}\{S\}$\end{center}
	\underline{\textit{Proof}}\\
	Let's prove it for the 2D case (the 3D one is analogous).\\
	Consider the columns of $A$, $A=\begin{pmatrix}\mathbf{a}_1 & \mathbf{a}_2\end{pmatrix}$. Without loss of generality we may assume that $S$ has one vertex at the origin and sides given by
	$\mathbf{b}_1$ and $\mathbf{b}_2$:
	\begin{center}
		$S=\left\{\mathbf{x}\in\mathbb{R}^2\arrowvert \mathbf{x}=s_1\mathbf{b}_1+s_2\mathbf{b}_2 \:\forall s_1,s_2\in[0,1]\right\}$
	\end{center}
\end{ceuthm}
\end{frame}

% ==============================================

\begin{frame}\frametitle{Areas and volumes} 
\begin{block}{}
	The image of $S$ by $T$ is
	\begin{center}
		$T(S)=\left\{\mathbf{y}\in\mathbb{R}^2\arrowvert \mathbf{y}=A\mathbf{x}=s_1A\mathbf{b}_1+s_2A\mathbf{b}_2 \:\forall s_1,s_2\in[0,1]\right\}$
	\end{center}
	which is another parallelogram. Therefore, the area of $T(S)$ is
	\begin{center}
		$\begin{array}{rcl}\mathrm{Area}\{T(S)\}&=&\left|\det\begin{pmatrix}A\mathbf{b}_1 & A\mathbf{b}_2\end{pmatrix}\right|=
			\left|\det\left\{A\begin{pmatrix}\mathbf{b}_1 & \mathbf{b}_2\end{pmatrix}\right\}\right|=\left|\det\left\{AB\right\}\right|\\
			&=&|\det A||\det B|=|\det A|\mathrm{Area}\{S\}\end{array}$
	\end{center}
	(q.e.d.)
\end{block}
\end{frame}

% ==============================================

\begin{frame}\frametitle{Areas and volumes} 
\begin{ceuthm}
	The previous theorem is valid for any closed region in $\mathbb{R}^2$ or $\mathbb{R}^3$ with finite area or volume.\\
	\underline{\textit{Proof (hint)}}\\
	We only need to divide the region into arbitrarily small parallelograms (or parallelepipeds) and apply the previous theorem
	to each of the pieces.
		\begin{center}
			\includegraphics[scale=0.37]{figParallelogram2.jpg}
		\end{center}
\end{ceuthm}
\end{frame}

% ==============================================

\begin{frame}\frametitle{Areas and volumes} 
\begin{exampleblock}{Example}
	Suppose that the unit disk defined as
	\begin{center}
		$D=\left\{\mathbf{u}\in\mathbb{R}^2 \arrowvert u_1^2+u_2^2\leq 1\right\}$
	\end{center}
	is transformed with the transformation \begin{center}$T(\mathbf{u})=\begin{pmatrix}a&0\\0&b\end{pmatrix}\mathbf{u}$\end{center}
	to produce
	\begin{center}
		$E\equiv T(D)=\left\{\mathbf{x}\in\mathbb{R}^2 \left| \mathbf{x}=\begin{pmatrix}a&0\\0&b\end{pmatrix}\mathbf{u}=\begin{pmatrix}au_1\\bu_2\end{pmatrix},\ \mathbf{u}\in D\right.\right\}$
	\end{center}
	Since $x_1=au_1 \Rightarrow u_1=\frac{x_1}{a}$ and $x_2=bu_2  \Rightarrow u_2=\frac{x_2}{b}$, we may also characterize the transformed region as
	\begin{center}
		$E=\left\{\mathbf{x}\in\mathbb{R}^2 \left| \left(\frac{x_1}{a}\right)^2+\left(\frac{x_2}{b}\right)^2\leq 1 \right.\right\}$
	\end{center}
	which is a solid ellipse.
	
\end{exampleblock}
\end{frame}

% ==============================================

\begin{frame}\frametitle{Areas and volumes} 
\begin{exampleblock}{Example (continued)}
	\begin{columns}
		\begin{column}{3.2cm}
			\includegraphics[scale=0.37]{figEllipse.jpg}
		\end{column}
		\begin{column}{7.5cm}
			$\begin{array}{rcl}\mathrm{Area}\{E\}&=&|\det A|\mathrm{Area}\{D\}=(a b)(\pi (1)^2)\\&=&\pi a b\end{array}$
		\end{column}
	\end{columns}
\end{exampleblock}
\end{frame}
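
% ==============================================
\begin{frame}[fragile]\frametitle{Areas and volumes}
A MATLAB sketch that approximates the area of the transformed disk by a polygon and compares it with $|\det A|\,\pi$ (the values of $a$ and $b$ are arbitrary choices):
\begin{footnotesize}
\begin{verbatim}
a = 3; b = 2; A = [a 0; 0 b];
t = linspace(0, 2*pi, 10000);
U = [cos(t); sin(t)];            % boundary of the unit disk D
X = A*U;                         % boundary of E = T(D)
polyarea(X(1,:), X(2,:))         % ~ pi*a*b
abs(det(A))*pi                   % = pi*a*b
\end{verbatim}
\end{footnotesize}
\end{frame}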

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (3rd ed.), Chapter 3, Section 3:
	\begin{itemize}
		\item 3.3.1
		\item 3.3.7
		\item 3.3.11
		\item 3.3.21
		\item 3.3.25
		\item 3.3.26
		\item 3.3.29
		\item 3.3.32
	\end{itemize}
\end{exerciseblock}
\end{frame}

% ==============================================
%\begin{frame}\frametitle{Exercises} 
%
%\begin{exerciseblock}{Exercises}
	%From Lay (3rd ed.), Chapter 3, Supplementary Exercises:
	%\begin{itemize}
		%\item 3.Sup.7
		%\item 3.Sup.9
		%\item 3.Sup.10
		%\item 3.Sup.14
		%\item 3.Sup.15
		%\item 3.Sup.16
	%\end{itemize}
%\end{exerciseblock}
%\end{frame}

\OutlineFinal

\end{document}