\input{../slidesComun}

\title[1. Vectors]{Chapter 1. Vectors}  
\COSS
\note{
In this chapter we'll learn:
	\begin{itemize}
		\item The basic operations with vectors
		\item What a linear combination is
		\item What an inner product is
		\item What the length of a vector is
		\item What the angle between two vectors is
	\end{itemize}
}

% ==============================================
\begin{frame}\frametitle{References} 

\begin{figure}
	%\pgfuseimage{myfigure}
	\includegraphics[scale=1]{strang_linearalgebra.jpg}
\end{figure}
G. Strang. Introduction to linear algebra (4th ed). Wellesley-Cambridge Press (2009). Chapter 1.

\end{frame}

% ==============================================
\begin{frame}\frametitle{References} 

\begin{figure}
	\includegraphics[scale=0.7]{../lay_linearalgebra.jpg}
\end{figure}
D. Lay. Linear algebra and its applications (3rd ed). Pearson (2006). Chapter 1.

\end{frame}

% ==============================================
\begin{frame}\frametitle{A little bit of history} 

Vectors were developed during the 19th century by mathematicians and physicists like \href{http://en.wikipedia.org/wiki/Carl_Friedrich_Gauss}{Carl Friedrich Gauss} (1799), \href{http://en.wikipedia.org/wiki/William_Rowan_Hamilton}{William Rowan Hamilton} (1837), and \href{http://en.wikipedia.org/wiki/James_Clerk_Maxwell}{James Clerk Maxwell} (1873), first as a tool to represent complex numbers and later as a tool for geometrical reasoning. Their modern algebra was formalized by \href{https://en.wikipedia.org/wiki/Josiah_Willard_Gibbs}{Josiah Willard Gibbs} (1901), a professor at Yale.
\begin{figure}
	\includegraphics[height=3cm]{Carl_Friedrich_Gauss.jpg}
	\includegraphics[height=3cm]{William_Rowan_Hamilton.jpg}
	\includegraphics[height=3cm]{Maxwell.jpg}
	\includegraphics[height=3cm]{Gibbs.jpg}
\end{figure}

To learn more about the history of vectors, visit
\begin{itemize}
	\item \url{http://www.math.mcgill.ca/labute/courses/133f03/VectorHistory.html}
	\item \url{https://www.math.ucdavis.edu/~temple/MAT21D/SUPPLEMENTARY-ARTICLES/Crowe_History-of-Vectors.pdf}
\end{itemize}
\end{frame}

% ==============================================
\setnextsection{1}
\section{Vectors} 
\subsection{Vectors and basic operations (a)} 
\Outline

\begin{frame}\frametitle{What is a vector?} 
\begin{ceudef}
Informally, a \textbf{vector} is a collection of $n$ numbers of the same type. We say it has $n$ components, indexed $1,2,\ldots,n$.
\end{ceudef}
We'll see that this definition is terribly simplistic since many other things (like functions, infinite sequences, etc.) can be vectors. But, for the time being, let's stick to this simple definition.

\begin{exampleblock}{Example}
	\begin{tabular}{cl}
		$\left(\begin{array}{c} -1 \\ 0 \\ 1 \\ \end{array}\right) \in \mathbb{Z}^3$ & is a collection of 3 integer numbers\\
		$\left(\begin{array}{c} -1.1 \\ 1.1 \\ \end{array}\right) \in \mathbb{Q}^2$  & is a collection of 2 rational numbers \\
		$\left(\begin{array}{c} -1.1 \\ \sqrt{2} \\ \end{array}\right) \in \mathbb{R}^2$  & is a collection of 2 real numbers \\
	\end{tabular}
	\\
		Matlab:\\
		{\color{blue}
		\leftskip5mm
		\texttt{
			[-1.1; sqrt(2)]
		}
		}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Transpose} 

We distinguish between column vectors (for instance $\mathbf{v}$ below) and row vectors ($\mathbf{w}$). In the first case, we say $\mathbf{v}$ is an $n\times 1$ vector, while in the second, we say $\mathbf{w}$ is a $1\times n$ vector.
\begin{center}
	$\mathbf{v}=\left(\begin{array}{c} v_1 \\ v_2 \\ ... \\ v_n \\ \end{array}\right)$ and 
	$\mathbf{w}=\left(w_1\; w_2\; ...\; w_n \right)$.
\end{center}

\begin{ceudef}
	The \textbf{transpose} is the operation that transforms a column vector into a row vector and vice versa.
\end{ceudef}
\begin{exampleblock}{Example}
	\begin{center}
		$\left(-1\; 1\right) ^T=\left(\begin{array}{c} -1 \\ 1 \\ \end{array}\right)$ \\
	\end{center}
	Matlab:\\
	{\color{blue}
	\leftskip5mm
	\texttt{
		[-1 1]'
	}}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Addition of vectors} 
\begin{ceudef}
Given two vectors $\mathbf{v}=\left(\begin{array}{c} v_1 \\ v_2 \\ ... \\ v_n \\ \end{array}\right)$ and $\mathbf{w}=\left(\begin{array}{c} w_1 \\ w_2 \\ ... \\ w_n \\ \end{array}\right)$ the \textbf{sum} of these two vectors is another vector defined as
	$\mathbf{v}+\mathbf{w}=\left(\begin{array}{c} v_1+w_1 \\ v_2+w_2 \\ ... \\ v_n+w_n \\ \end{array}\right)$. Note that you can only add two column vectors or two row vectors, but not a column and a row vector.
\end{ceudef}
\begin{columns}[t]
	\begin{column}{7cm}
		\begin{exampleblock}{Example}
			$\left(\begin{array}{c} -1.1 \\ 1.1 \\ \end{array}\right)+\left(\begin{array}{c} -1.1 \\ \sqrt{2} \\ \end{array}\right)=
				\left(\begin{array}{c} -2.2 \\ 1.1+\sqrt{2} \\ \end{array}\right)$
			Matlab:\\
			{\color{blue}
			\leftskip5mm
			\texttt{
				[-1.1; 1.1]+[-1.1; sqrt(2)]
			}}
		\end{exampleblock}
	\end{column}
	\begin{column}{3cm}
		\begin{ceuprop}
			Commutativity: $\mathbf{v}+\mathbf{w}=\mathbf{w}+\mathbf{v}$
		\end{ceuprop}
	\end{column}
\end{columns}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Addition of vectors} 
		\begin{exampleblock}{Example}
		\begin{center}
			$\left(\begin{array}{c} 4 \\ 2 \\ \end{array}\right)+\left(\begin{array}{c} -1 \\ 2 \\ \end{array}\right)=
				\left(\begin{array}{c} 3 \\ 4 \\ \end{array}\right)$\\
			\vspace{0.25cm}
			\includegraphics[scale=0.4]{figAddVectors.eps}
		\end{center}
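		We can verify this sum in Matlab:\\
		{\color{blue}
		\leftskip5mm
		\texttt{
			[4; 2]+[-1; 2]
		}}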
		\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Product by scalar} 
\begin{ceudef}
Given a vector $\mathbf{v}$ and a scalar $c$, the \textbf{multiplication} of $c$ and $\mathbf{v}$ is defined as
	\begin{center}$c\mathbf{v}=\left(\begin{array}{c} cv_1 \\ cv_2 \\ ... \\ cv_n \\ \end{array}\right)$\end{center}
\end{ceudef}
\begin{exampleblock}{Example}
	\begin{center}
		$2\left(\begin{array}{c} -1.1 \\ 1.1 \\ \end{array}\right)=\left(\begin{array}{c} -2.2 \\ 2.2 \\ \end{array}\right)$\\
		\vspace{0.25cm}
		$-\left(\begin{array}{c} -1.1 \\ 1.1 \\ \end{array}\right)=\left(\begin{array}{c}  1.1 \\ -1.1 \\ \end{array}\right)$
	\end{center}
	Matlab:\\
	{\color{blue}
	\leftskip5mm
	\texttt{
		2*[-1.1; 1.1] \\
		-[-1.1; 1.1]
	}}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Product by scalar} 
		\begin{exampleblock}{Example}
		\begin{center}
			$\mathbf{w}=\left(\begin{array}{c} -1 \\ 2 \\ \end{array}\right)$\\
			\vspace{0.25cm}
				\begin{minipage}{4.5cm}
					What is the shape of all scaled vectors of the form $c\mathbf{w}$?\\
					If $\mathbf{w}=\mathbf{0}$, then it is a single point ($\mathbf{0}$).
					If $\mathbf{w}\neq\mathbf{0}$, then it is the straight line that passes through $\mathbf{0}$ and $\mathbf{w}$.
				\end{minipage}
				\hspace{0.5cm}
				\begin{minipage}{6cm}
					\includegraphics[scale=0.4]{figMultiplyByScalar.eps}
				\end{minipage}
		\end{center}
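		A minimal Matlab sketch to visualize some scaled copies of $\mathbf{w}$ (the range of $c$ is an illustrative choice; all the copies lie on a line through the origin):\\
		{\color{blue}
		\leftskip5mm
		\texttt{
			w = [-1; 2]; c = -2:0.5:2;\\
			P = w*c;\\
			plot(P(1,:), P(2,:))
		}}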
		\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Properties}
	For simplicity we present them as properties of $\mathbb{R}^n$, but they apply to all vector spaces. Given any three vectors $\mathbf{u},\mathbf{v},\mathbf{w} \in \mathbb{R}^n$ and any two scalars $c,d \in \mathbb{R}$, we have
	\begin{block}{Vector operation properties}
		Regarding the sum of vectors:
		\begin{enumerate}
			\item $\mathbf{u}+\mathbf{v}=\mathbf{v}+\mathbf{u}$ Commutativity
			\item $(\mathbf{u}+\mathbf{v})+\mathbf{w}=\mathbf{u}+(\mathbf{v}+\mathbf{w})$ Associativity
			\item $\mathbf{u}+\mathbf{0}=\mathbf{0}+\mathbf{u}=\mathbf{u}$ Existence of neutral element
			\item $\mathbf{u}+(-\mathbf{u})=(-\mathbf{u})+\mathbf{u}=\mathbf{0}$ Existence of symmetric element
		\end{enumerate}
		Regarding the sum of vectors and scalar product:
		\begin{enumerate}
			\setcounter{enumi}{4}
			\item $c(\mathbf{u}+\mathbf{v})=c\mathbf{u}+c\mathbf{v}$ Distributivity with respect to the sum of vectors
			\item $(c+d)\mathbf{u}=c\mathbf{u}+d\mathbf{u}$ Distributivity with respect to the sum of scalars
		\end{enumerate}
		Regarding the scalar product:
		\begin{enumerate}
			\setcounter{enumi}{6}
			\item $c(d\mathbf{u})=(cd)\mathbf{u}$ Associativity
			\item $1\mathbf{u}=\mathbf{u}$ Existence of neutral element
		\end{enumerate}
	\end{block}
\end{frame}

% ==============================================
\subsection{Linear combination (a)} 
\Outline

\begin{frame}\frametitle{Linear combination} 
\begin{ceudef}
	Given a collection of $p$ scalars ($x_i$, $i=1,2,...,p$) and $p$ vectors ($\mathbf{v}_i$), the \textbf{linear combination} of the $p$ vectors using the
	\textbf{weights} given by the $p$ scalars is defined as
	\begin{center}$\sum\limits_{i=1}^p{x_i\mathbf{v}_i}=x_1\mathbf{v}_1+x_2\mathbf{v}_2+...+x_p\mathbf{v}_p$\end{center}
\end{ceudef}
\begin{exampleblock}{Example}
	\begin{center}
		$\frac{1}{2}\left(\begin{array}{c} -1 \\ 1 \\ \end{array}\right)-\frac{2}{3}\left(\begin{array}{c} 2 \\ 2 \\ \end{array}\right)=\left(\begin{array}{c} -\frac{11}{6} \\ -\frac{5}{6} \\ \end{array}\right)$\\
	\end{center}
	Matlab:\\
	{\color{blue}
	\leftskip5mm
	\texttt{
		format rational \\
		1/2*[-1; 1]-2/3*[2; 2]
	}}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear combination} 
		\begin{exampleblock}{Example}
		A very basic model of the activity of neurons is
		\begin{center}
			$output=f(\sum\limits_{i}{weight_i input_i})$
		\end{center}
		where $f(x)$ is a non-linear function. In fact, this is the model used in artificial neural networks.
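		As a sketch (the weights, inputs, and the logistic choice of $f$ below are just illustrative assumptions), one such neuron can be evaluated in Matlab as:\\
		{\color{blue}
		\leftskip5mm
		\texttt{
			w = [0.5; -1; 2]; x = [1; 0.2; 0.1]; \% illustrative weights and inputs\\
			f = @(z) 1./(1+exp(-z)); \% one possible non-linear f (logistic)\\
			output = f(dot(w, x))
		}}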
		\begin{center}
			\includegraphics[scale=1.2]{neuron.jpg}
		\end{center}
		The human brain has on the order of $10^{11}$ neurons and about $10^{14}$--$10^{15}$ connections (synapses).
		See \url{https://www.youtube.com/watch?v=zLp-edwiGUU}.
		\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear combination} 
		\begin{exampleblock}{Example}
		\begin{center}
				$\mathbf{v}=\left(\begin{array}{c} 4 \\ 2 \\ \end{array}\right)$ 
				$\mathbf{w}=\left(\begin{array}{c} -1 \\ 2 \\ \end{array}\right)$\\
			\vspace{0.25cm}
			\includegraphics[scale=0.4]{figLinearCombination.eps}
		\end{center}
		We may think of the weight coefficients as ``travelling'' instructions. For instance, for the figure on the right, the instructions say:
		\textit{``Travel $\frac{1}{3}$ of $\mathbf{v}$ along $\mathbf{v}$, then travel $\frac{1}{2}$ of $\mathbf{w}$ along $\mathbf{w}$''}.
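		Following these instructions in Matlab gives the point reached:\\
		{\color{blue}
		\leftskip5mm
		\texttt{
			1/3*[4; 2]+1/2*[-1; 2]
		}}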
		\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear combination} 
		\begin{exampleblock}{What is the shape of all linear combinations of the form $c\mathbf{v}+d\mathbf{w}$?}
			If the two vectors are not collinear (i.e., $\mathbf{w}\neq k\mathbf{v}$), then it is the whole plane passing through $\mathbf{0}$, $\mathbf{v}$ and $\mathbf{w}$.
			We can think of it as the sum of all vectors belonging to the lines $\overline{\mathbf{0}\mathbf{v}}$ and $\overline{\mathbf{0}\mathbf{w}}$.\\
			\vspace{0.25cm}
			\begin{minipage}{5cm}
				The plane generated by $\mathbf{v}$ and $\mathbf{w}$ is the set of all vectors that can be generated as a linear combination of both vectors.\\
				$\Pi=\left\{\mathbf{r} \arrowvert \mathbf{r}=c\mathbf{v}+d\mathbf{w}\:\forall c,d \in \mathbb{R}\right\}$
			\end{minipage}
			\hspace{0.25cm}
			\begin{minipage}{6cm}
				\includegraphics[scale=0.5]{figLinearCombinationPlane.eps}
			\end{minipage}
		\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear combination} 
	The previous example prompts the following definition:
	\begin{ceudef}[Spanned subspace]
		The subspace spanned by the vectors $\mathbf{v}_i$, $i=1,2,...,p$, is the set of all vectors that can be expressed as a linear combination of them. Formally, \\
		\begin{center}
			$\left<\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_p\right>=\mathrm{Span}\left\{\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_p\right\} \triangleq 
				\left\{\mathbf{v} \in \mathbb{R}^n \arrowvert \mathbf{v}=x_1\mathbf{v}_1+x_2\mathbf{v}_2+...+x_p\mathbf{v}_p,\; x_i \in \mathbb{R}\right\}$
		\end{center}
	\end{ceudef}
	\begin{columns}
		\begin{column}{6cm}
			\begin{exampleblock}{Example}
				Assuming all vectors below are linearly independent:\\
				$\mathrm{Span}\left\{\mathbf{v}_1\right\}$ is a straight line. \\
				$\mathrm{Span}\left\{\mathbf{v}_1,\mathbf{v}_2\right\}$ is a plane. \\
				$\mathrm{Span}\left\{\mathbf{v}_1,\mathbf{v}_2,...,\mathbf{v}_{n-1}\right\}$ is a hyperplane. \\
			\end{exampleblock}
		\end{column}

		\begin{column}{3cm}
			\begin{block}{Properties}
			\begin{center}
				$\mathbf{0} \in \mathrm{Span}\left\{\cdot\right\}$
			\end{center}
			\end{block}
		\end{column}
	\end{columns}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear combination} 
		\begin{exampleblock}{Outside the plane}
			Let $\mathbf{v}=(1,1,0)$ and $\mathbf{w}=(0,1,1)$. The linear combinations of $\mathbf{v}$ and $\mathbf{w}$ fill a plane in 3D. All points belonging to this plane
			are of the form \\
			\vspace{0.25cm}
			$\Pi=\left\{\mathbf{r} \arrowvert \mathbf{r}=c(1,1,0)+d(0,1,1)\:\forall c,d \in \mathbb{R}\right\} = \left\{\mathbf{r}=(c,c+d,d)\:\forall c,d \in \mathbb{R}\right\}$\\
			\vspace{0.25cm}
			It is clear that the vector $\mathbf{r}'=(0,1,0) \notin \Pi$ (it would require $c=0$ and $d=0$, but then $c+d=0\neq 1$); therefore, it lies outside the plane.
			\begin{center}
				\includegraphics[scale=0.4]{figOutsideThePlane.eps}
			\end{center}
		\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear combination} 
		\begin{exampleblock}{Sets of points}
			Let $\mathbf{v}=(1,0)$.
			\begin{enumerate}
				\item $S_1=\left\{\mathbf{r}=c\mathbf{v}\; \forall c\in \mathbb{Z}\right\}$ is a discrete set of points
				\item $S_2=\left\{\mathbf{r}=c\mathbf{v}\; \forall c\in \mathbb{R}^+\right\}$ is a half-line (ray)
				\item $S_3=\left\{\mathbf{r}=c\mathbf{v}\; \forall c\in \mathbb{R}\right\}$ is a line
			\end{enumerate}
			\begin{center}
				\includegraphics[scale=0.4]{figSets1.eps}
			\end{center}
		\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear combination} 
		\begin{exampleblock}{Sets of points}
			Let $\mathbf{v}=(1,0)$ and $\mathbf{w}=(0,1)$.
			\begin{enumerate}
				\item $S_1=\left\{\mathbf{r}=c\mathbf{v}+d\mathbf{w}\; \forall c\in \mathbb{Z},\forall d\in \mathbb{R}\right\}$ is a set of lines
				\item $S_2=\left\{\mathbf{r}=c\mathbf{v}+d\mathbf{w}\; \forall c\in \mathbb{R}^+,\forall d\in \mathbb{R}\right\}$ is a half-plane
				\item $S_3=\left\{\mathbf{r}=c\mathbf{v}+d\mathbf{w}\; \forall c,d\in \mathbb{R}\right\}$ is a plane
			\end{enumerate}
			\begin{center}
				\includegraphics[scale=0.4]{figSets2.eps}
			\end{center}
		\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Linear combination} 
		\begin{exampleblock}{Combination coefficients}
			Let $\mathbf{v}=(2,-1)$, $\mathbf{w}=(-1,2)$ and $\mathbf{b}=(1,0)$. Find $c$ and $d$ such that $\mathbf{b}=c\mathbf{v}+d\mathbf{w}$.\\
		\underline{\textit{Solution}}\\
		We need to find $c$ and $d$ such that
		\begin{center}
		$\left(\begin{array}{c}1 \\ 0\end{array}\right)=c\left(\begin{array}{c}2 \\ -1\end{array}\right)+d\left(\begin{array}{c}-1 \\ 2\end{array}\right)=
			\left(\begin{array}{c}2c-d \\ 2d-c\end{array}\right)$
		\end{center}
		This gives a simple system of equations\\
		\begin{center}
		$\begin{array}{c}2c-d = 1\\ 2d-c=0\end{array}$\\
		\end{center}
		whose solution is $c=\frac{2}{3}$ and $d=\frac{1}{3}$. We can easily check it with Matlab:\\
		{\color{blue}\leftskip5mm
		\texttt{
			2/3*[2 -1]'+1/3*[-1 2]'
		}}
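		Alternatively (a sketch using the matrix notation introduced at the end of this chapter), we can let Matlab solve the system for us:\\
		{\color{blue}\leftskip5mm
		\texttt{
			A = [2 -1; -1 2]; b = [1; 0];\\
			sol = A\textbackslash b
		}}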
		\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Exercises} 

\begin{exerciseblock}{Exercises}
	From Lay (4th ed.), Chapter 1, Section 3:
	\begin{itemize}
		\item 1.3.1
		\item 1.3.3
		\item 1.3.6
		\item 1.3.7
		\item 1.3.25
		\item 1.3.27
		\item 1.3.29
		\item 1.3.31
	\end{itemize}
\end{exerciseblock}

\end{frame}

% ==============================================
\subsection{Inner product or dot product (b)} 
\Outline

\begin{frame}\frametitle{Inner product} 
\begin{ceudef}
	Given two vectors $\mathbf{v}$ and $\mathbf{w}$ the \textbf{inner or dot product} between $\mathbf{v}$ and $\mathbf{w}$ is defined as
	\begin{center}$\left<\mathbf{v},\mathbf{w}\right> = \mathbf{v} \cdot \mathbf{w} \triangleq \mathbf{v}^T \mathbf{w} = \sum\limits_{i=1}^n{v_iw_i}=v_1w_1+v_2w_2+...+v_nw_n$\end{center}
\end{ceudef}
Mathematically, the concept of inner product is much more general, and this operational definition is just a particularization for vectors in $\mathbb{R}^n$. Although the inner product introduced here is the most common one, it is not the only one that can be defined on $\mathbb{R}^n$. But let's leave these generalizations for later.
\begin{columns}[T]
	\begin{column}{7.5cm}
		\begin{exampleblock}{Example}
			\begin{center}
				$\left(\begin{array}{c} 4 \\ 2 \\ \end{array}\right)\cdot\left(\begin{array}{c} -1 \\ 2 \\ \end{array}\right)=4\cdot(-1)+2\cdot 2=0$\\
			\end{center}
			Matlab:\\
			{\color{blue}
			\leftskip5mm
			\texttt{
				dot([4; 2],[-1; 2])
			}}
		\end{exampleblock}
	\end{column}

	\begin{column}{3.5cm}
		\begin{ceuprop}
			\begin{center}
				Commutativity: $\mathbf{v} \cdot \mathbf{w} = \mathbf{w} \cdot \mathbf{v}$
			\end{center}
		\end{ceuprop}
	\end{column}
\end{columns}
\end{frame}

% ==============================================
%\begin{frame}\frametitle{Inner product} 
%\begin{exampleblock}{Example}
%	Let a set of masses be distributed as in the drawing. Let $\mathbf{m}=(4,2)$ be the vector of masses, and $\mathbf{d}=(-1,2)$ be the vector of distances. The moment of the system is given by $\mathbf{m} \cdot \mathbf{d}=0$. Since it has null moment, the system is at equilibirum. *** Entender qué significa este equilibrio \\
%	\vspace{0.25cm}
%	\begin{center}
%		\includegraphics[scale=0.4]{figInner1.eps}
%	\end{center}
%\end{exampleblock}
%\end{frame}

% ==============================================
\subsection{Norm, vector length and unit vectors (b)} 
\Outline

\begin{frame}\frametitle{Vector norm and vector length} 
\begin{ceudef}
	Given a vector $\mathbf{v}$, its \textbf{length or norm} is defined as
	\begin{center}$\left\|\mathbf{v}\right\|\triangleq \sqrt{\left<\mathbf{v},\mathbf{v}\right>}$\end{center}
	In the particular case of working with the previously introduced inner product, this definition boils down to
	\begin{center}$\left\|\mathbf{v}\right\|\triangleq \sqrt{\mathbf{v}^T\mathbf{v}}=\sqrt{\sum\limits_{i=1}^n{v_i^2}}$\end{center}
	that is known as the \textbf{Euclidean norm} of vector $\mathbf{v}$.
\end{ceudef}
\begin{columns}[T]
	\begin{column}{4cm}
		\begin{ceuprop}
			\begin{center}
				$\begin{array}{rcl}
					\|-\mathbf{v}\|&=&\|\mathbf{v}\|\\
					\|c\mathbf{v}\|&=&|c|\|\mathbf{v}\|\\
				 \end{array}$
			\end{center}
		\end{ceuprop}
	\end{column}
\end{columns}
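A numerical spot-check of these properties in Matlab (the vector and the scalar are arbitrary):\\
{\color{blue}
\leftskip5mm
\texttt{
	v = [-1; 0; 1]; c = -3;\\
	norm(-v) - norm(v) \% 0\\
	norm(c*v) - abs(c)*norm(v) \% 0 (up to rounding)
}}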
\end{frame}

% ==============================================
\begin{frame}\frametitle{Vector norm and vector length} 
	\begin{exampleblock}{Example}
		\begin{minipage}{6.5cm}
			\begin{center}
				$\left\|(-1,0,1)\right\|=\sqrt{(-1)^2+0^2+1^2}=\sqrt{2} $
			\end{center}
			\begin{minipage}{5cm}
			Matlab:\\
			{\color{blue}
			\leftskip5mm
			\texttt{
				norm([-1;0;1])
			}}
			\end{minipage}
		\end{minipage}
		\begin{minipage}{5cm}
			\includegraphics[scale=0.3]{figNorm.eps}
		\end{minipage}
	\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Unit vectors} 
\begin{ceudef}
	$\mathbf{v}$ is a \textbf{unit vector} (we also say it is \textbf{unitary}) iff $\left\|\mathbf{v}\right\|=1$.
\end{ceudef}
	\begin{exampleblock}{Example}
		\begin{minipage}{6.5cm}
			$\mathbf{e}_1=(1,0)$\\
			$\mathbf{e}_2=(0,1)$\\
			$\mathbf{e}_\theta=(\cos(\theta),\sin(\theta))$\\
			Matlab:\\
			{\color{blue}
			\begin{small}{
			\texttt{
				theta=pi/4;\\
				e\_theta=[cos(theta);sin(theta)];\\
				norm(e\_theta)\\
			}}\end{small}}
		\end{minipage}
		\begin{minipage}{5cm}
			\includegraphics[scale=0.35]{figNorm2.eps}
		\end{minipage}
	\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Unit vectors} 
\begin{ceudef}[Construction of a unit vector]
	Given any vector $\mathbf{v}$ with non-zero norm, we can always construct a unit vector with the same
	direction as $\mathbf{v}$: $\mathbf{u}_{\mathbf{v}}=\frac{\mathbf{v}}{\left\|\mathbf{v}\right\|}$.
	\label{def:constructionUnitVector}
\end{ceudef}
	\begin{exampleblock}{Example}
		\begin{minipage}{6.5cm}
			$\mathbf{v}=(1,1)$\\
			$\mathbf{u}_{\mathbf{v}}=\frac{\mathbf{v}}{\left\|\mathbf{v}\right\|}=\frac{(1,1)}{\sqrt{2}}=\left(\frac{1}{\sqrt{2}},\frac{1}{\sqrt{2}}\right)$\\
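			A quick check in Matlab:\\
			{\color{blue}
			\leftskip5mm
			\texttt{
				v = [1; 1];\\
				u = v/norm(v) \% norm(u) is 1
			}}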
		\end{minipage}
		\begin{minipage}{5cm}
			\includegraphics[scale=0.35]{figNorm3.eps}
		\end{minipage}
	\end{exampleblock}

\end{frame}

% ==============================================
\subsection{Distances and angles (b)} 
\Outline

\begin{frame}\frametitle{Distance and angle between two vectors} 
\begin{ceudef}
		\begin{minipage}{6.5cm}
			Given two vectors $\mathbf{v}$ and $\mathbf{w}$, the \textbf{distance} between them is defined as
			\begin{center}$d(\mathbf{v},\mathbf{w})\triangleq \|\mathbf{v}-\mathbf{w}\|$\end{center}
			and their \textbf{angle} is
			\begin{center}$\angle(\mathbf{v},\mathbf{w})\triangleq \mathrm{acos}\frac{\mathbf{v}\cdot\mathbf{w}}{\|\mathbf{v}\|\|\mathbf{w}\|}=\theta$\end{center}
		\end{minipage}
		\begin{minipage}{5cm}
			\includegraphics[scale=0.35]{figDistance.eps}
		\end{minipage}
\end{ceudef}
\begin{ceudef}
	Two vectors are \textbf{orthogonal} (perpendicular) iff their inner product is 0. We then write $\mathbf{v} \perp \mathbf{w}$. In this case, $\angle(\mathbf{v},\mathbf{w})=\frac{\pi}{2}$.
\end{ceudef}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Distance and angle between two vectors} 
\begin{exampleblock}{Example}
	Let $\mathbf{v}=(-\frac{2}{5},\frac{2}{3})$ and $\mathbf{w}=(1,\frac{2}{3})$. The angle between these two vectors can be calculated as\\
		\begin{minipage}{6.5cm}
			$\mathbf{v}\cdot\mathbf{w}=(-\frac{2}{5})1 + \frac{2}{3}\frac{2}{3}=\frac{2}{45}$ \\
			$\|\mathbf{v}\|=\sqrt{(-\frac{2}{5})^2+(\frac{2}{3})^2}=%\sqrt{\frac{4}{25}+\frac{4}{9}}=\sqrt{\frac{136}{225}}=
			\frac{\sqrt{136}}{15}$ \\
			$\|\mathbf{w}\|=\sqrt{(1)^2+(\frac{2}{3})^2}=%\sqrt{\frac{4}{25}+\frac{4}{9}}=\sqrt{\frac{136}{225}}=
			\frac{\sqrt{13}}{3}$ \\
			$\angle(\mathbf{v},\mathbf{w}) = \mathrm{acos}\frac{\frac{2}{45}}{\frac{\sqrt{136}}{15}\frac{\sqrt{13}}{3}}=87.27\degree$ \\
			$\mathbf{v}$ and $\mathbf{w}$ are almost orthogonal.
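			Matlab check (acosd returns degrees):\\
			{\color{blue}
			\leftskip5mm
			\texttt{
				v = [-2/5; 2/3]; w = [1; 2/3];\\
				acosd(dot(v,w)/(norm(v)*norm(w)))
			}}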
		\end{minipage}
		\begin{minipage}{5cm}
			\includegraphics[scale=0.35]{figAngles.eps}
		\end{minipage}
\end{exampleblock}
\begin{exampleblock}{Example}
	Let $\mathbf{v}=(1,0,0,1,0,0,1,0,0,1)$ and $\mathbf{w}=(0,1,1,0,1,1,0,1,1,0)$. These two vectors in a 10-dimensional space are orthogonal because
	$\mathbf{v}\cdot\mathbf{w}=1\cdot 0+0\cdot 1+0 \cdot 1+1\cdot 0+0\cdot 1+0 \cdot 1+1\cdot 0+0\cdot 1+0 \cdot 1+1\cdot 0=0$
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Distance and angle between two vectors} 
\begin{exampleblock}{Example}
	Find a vector that is orthogonal to $\mathbf{v}=(-\frac{2}{5},\frac{2}{3})$.\\
	\underline{\textit{Solution}}\\
	Let the vector $\mathbf{w}=(w_1,w_2)$ be such a vector. Since it is orthogonal to $\mathbf{v}$, it must satisfy\\
	\begin{center}$\left<\mathbf{v},\mathbf{w}\right>=0=(-\frac{2}{5})w_1+\frac{2}{3}w_2 \Rightarrow w_2=\frac{3}{5}w_1 $\end{center}
	That is, any vector of the form $\mathbf{w}=(w_1,\frac{3}{5}w_1)=w_1(1,\frac{3}{5})$ is perpendicular to $\mathbf{v}$. This is the line passing through the origin with direction $(1, 
	\frac{3}{5})$. In particular, for $w_1=\frac{2}{3}$ we have that $\mathbf{w}=(\frac{2}{3},\frac{2}{5})$ and for $w_1=-\frac{2}{3}$ we have $\mathbf{w}=(-\frac{2}{3},-\frac{2}{5})$. \\
	\vspace{0.5cm}
	This is a general rule in 2D. Given a vector $\mathbf{v}=(a,b)$, the vectors $\mathbf{w}=(b,-a)$ and $\mathbf{w}=(-b,a)$ are orthogonal to $\mathbf{v}$.\\
	\begin{center}
		$(a,b)\perp(b,-a)$ and $(a,b)\perp(-b,a)$
	\end{center}
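	A quick Matlab check with $w_1=\frac{2}{3}$ (the result is 0 up to rounding):\\
	{\color{blue}\leftskip5mm
	\texttt{
		dot([-2/5; 2/3], [2/3; 2/5])
	}}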
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Distance and angle between two vectors} 
\begin{ceuthm}[Pythagorean theorem]
	If $\mathbf{v}\perp\mathbf{w}$, then $\|\mathbf{v}-\mathbf{w}\|^2=\|\mathbf{v}\|^2+\|\mathbf{w}\|^2$.\\
	\underline{\textit{Proof}}\\
	$\|\mathbf{v}-\mathbf{w}\|^2=(\mathbf{v}-\mathbf{w})^T(\mathbf{v}-\mathbf{w})=\mathbf{v}^T\mathbf{v}-\mathbf{v}^T\mathbf{w}-\mathbf{w}^T\mathbf{v}+\mathbf{w}^T\mathbf{w}=
		\|\mathbf{v}\|^2+\|\mathbf{w}\|^2-2\left<\mathbf{v},\mathbf{w}\right>$\\
	\vspace{0.25cm}
	But, because $\mathbf{v}\perp\mathbf{w}$, we have $\left<\mathbf{v},\mathbf{w}\right>=0$, and consequently\\
	\vspace{0.25cm}
	$\|\mathbf{v}-\mathbf{w}\|^2=\|\mathbf{v}\|^2+\|\mathbf{w}\|^2$ (q.e.d.)\\
\end{ceuthm}

\begin{ceucor}
	\begin{itemize}
		\item If $\left<\mathbf{v},\mathbf{w}\right> < 0$, then $\frac{\pi}{2}<\theta\leq \pi$.
		\item If $\left<\mathbf{v},\mathbf{w}\right> > 0$, then $0\le \theta< \frac{\pi}{2}$.
		\item For two unit vectors, $\mathbf{u}_1$ and $\mathbf{u}_2$, we have $\cos \theta =\left<\mathbf{u}_1,\mathbf{u}_2\right>$, and as a consequence
				$-1\leq \left<\mathbf{u}_1,\mathbf{u}_2\right> \leq 1$.
	\end{itemize}
	\label{cor:angles}
\end{ceucor}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Distance and angle between two vectors} 
\begin{ceuthm}[Cosine formula]
	For any two vectors, $\mathbf{v}$ and $\mathbf{w}$, such that $\|\mathbf{v}\|\neq 0$ and $\|\mathbf{w}\|\neq 0$, we have
	\begin{center}
		$\left<\mathbf{v},\mathbf{w}\right>=\|\mathbf{v}\|\|\mathbf{w}\|\cos\theta$
	\end{center}
	\underline{\textit{Proof}}\\
	By use of Definition \ref{def:constructionUnitVector}, we can construct the unit vectors associated to $\mathbf{v}$ and $\mathbf{w}$, that is
	$\mathbf{u}_\mathbf{v}$ and $\mathbf{u}_\mathbf{w}$. Then by Corollary \ref{cor:angles} we know that
	\begin{center}
		$\cos \theta =\left<\mathbf{u}_\mathbf{v},\mathbf{u}_\mathbf{w}\right>=\left(\frac{\mathbf{v}}{\|\mathbf{v}\|}\right)^T\left(\frac{\mathbf{w}}{\|\mathbf{w}\|}\right)
			= \frac{1}{\|\mathbf{v}\|\|\mathbf{w}\|}\mathbf{v}^T\mathbf{w} = \frac{\left<\mathbf{v},\mathbf{w}\right>}{\|\mathbf{v}\|\|\mathbf{w}\|}$
	\end{center}
	From this point it is trivial to deduce that $\left<\mathbf{v},\mathbf{w}\right>=\|\mathbf{v}\|\|\mathbf{w}\|\cos\theta$ (q.e.d.)
	\label{thm:cosineFormula}
\end{ceuthm}
\end{frame}

%% ==============================================
\begin{frame}\frametitle{Distance and angle between two vectors} 
\begin{exampleblock}{Example}
	To compute the knee flexion angle, we need to calculate the dot product between the vectors aligned with the leg segments above and below the knee.
	\begin{center}
		\includegraphics[scale=0.4]{kneeFlexionAngle.jpg}
	\end{center}
\end{exampleblock}

\end{frame}

% ==============================================
\begin{frame}\frametitle{Distance and angle between two vectors} 
\begin{ceuthm}[Cauchy-Schwarz inequality]
	For any two vectors, $\mathbf{v}$ and $\mathbf{w}$, it is verified that
	\begin{center}
		$|\left<\mathbf{v},\mathbf{w}\right>|\leq\|\mathbf{v}\|\|\mathbf{w}\|$
	\end{center}
	\underline{\textit{Proof}}\\
	From the cosine formula (Theorem \ref{thm:cosineFormula}), we know that \\
	\begin{center}
		$\begin{array}{rcl}
			\left<\mathbf{v},\mathbf{w}\right>&=&\|\mathbf{v}\|\|\mathbf{w}\|\cos\theta \Rightarrow \\
			|\left<\mathbf{v},\mathbf{w}\right>|&=&\left|\|\mathbf{v}\|\|\mathbf{w}\|\cos\theta\right| 
			=\|\mathbf{v}\|\|\mathbf{w}\|\left|\cos\theta\right| 
			\leq \|\mathbf{v}\|\|\mathbf{w}\|\\
		\end{array}$
	\end{center}
	\label{thm:CauchySchwarz}
\end{ceuthm}
\begin{exampleblock}{Example}
	Let $\mathbf{v}=(-\frac{2}{5},\frac{2}{3})$ and $\mathbf{w}=(1,\frac{2}{3})$. We already know that 
	$\mathbf{v}\cdot\mathbf{w}=\frac{2}{45}$, $\|\mathbf{v}\|=\frac{\sqrt{136}}{15}$, and 
	$\|\mathbf{w}\|=\frac{\sqrt{13}}{3}$. Let us check Cauchy-Schwarz inequality\\
	\vspace{0.25cm}
	\begin{center}
		$|\frac{2}{45}|<\frac{\sqrt{136}}{15}\frac{\sqrt{13}}{3} \Leftrightarrow 0.0444 < 0.9344$
	\end{center}
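	The same check in Matlab (the comparison returns logical 1, i.e.\ true):\\
	{\color{blue}\leftskip5mm
	\texttt{
		v = [-2/5; 2/3]; w = [1; 2/3];\\
		abs(dot(v,w)) <= norm(v)*norm(w)
	}}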
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Distance and angle between two vectors} 
\begin{exampleblock}{Example}
	Show that for any two positive numbers, $x$ and $y$, the geometric mean ($\sqrt{xy}$) is always less than or equal to the arithmetic mean ($\frac{x+y}{2}$).
	For instance, the statement is verified for $x=2$ and $y=3$: $\sqrt{6} \leq \frac{5}{2} \Leftrightarrow 2.4495 \leq 2.5$.\\
	\underline{\textit{Proof}}\\
	Let there be vectors $\mathbf{v}=(a,b)$ and $\mathbf{w}=(b,a)$. Then, by Cauchy-Schwarz inequality we know that\\
	\begin{center}$|\left<\mathbf{v},\mathbf{w}\right>|\leq\|\mathbf{v}\|\|\mathbf{w}\| \Rightarrow |2ab|\leq a^2+b^2$\end{center}
	Since $x$ and $y$ are positive numbers, we may consider them to be $x=a^2$ and $y=b^2$. Consequently, we can rewrite the previous expression as
	\begin{center}$2\sqrt{x}\sqrt{y}\leq x+y \Rightarrow \sqrt{xy}\leq \frac{x+y}{2}$ (q.e.d.)\end{center}
	In fact, the geometric mean is nothing more than the arithmetic mean in logarithmic units
	\begin{center}$\log(\sqrt{xy})=\log\left((xy)^{\frac{1}{2}}\right)=\frac{1}{2}\log(xy)=\frac{1}{2}(\log x + \log y)=\frac{\log x+\log y}{2}$\end{center}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Distance and angle between two vectors} 
\begin{ceuthm}[Triangle inequality]
	For any two vectors, $\mathbf{v}$ and $\mathbf{w}$, it is verified that
	\begin{center}
		$\|\mathbf{v}+\mathbf{w}\|\leq \|\mathbf{v}\|+\|\mathbf{w}\|$
	\end{center}
	\underline{\textit{Proof}}\\
	By definition we know that
	\begin{center}
		$\|\mathbf{v}+\mathbf{w}\|^2=(\mathbf{v}+\mathbf{w})^T(\mathbf{v}+\mathbf{w})=\|\mathbf{v}\|^2+\|\mathbf{w}\|^2+2\left<\mathbf{v},\mathbf{w}\right>$
	\end{center}
	Applying the Cauchy-Schwarz inequality (Theorem \ref{thm:CauchySchwarz}), we have
	\begin{center}
		$\|\mathbf{v}+\mathbf{w}\|^2 \leq \|\mathbf{v}\|^2+\|\mathbf{w}\|^2+2\|\mathbf{v}\|\|\mathbf{w}\|=(\|\mathbf{v}\|+\|\mathbf{w}\|)^2$
	\end{center}
	Taking the square root we have
	\begin{center}
		$\|\mathbf{v}+\mathbf{w}\| \leq \|\mathbf{v}\|+\|\mathbf{w}\|$
	\end{center}
	
\end{ceuthm}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Distance and angle between two vectors} 
\begin{exampleblock}{Example}
	Let $\mathbf{v}=(-\frac{2}{5},\frac{2}{3})$ and $\mathbf{w}=(1,\frac{2}{3})$. We already know that 
	$\|\mathbf{v}\|=\frac{\sqrt{136}}{15}$ and 
	$\|\mathbf{w}\|=\frac{\sqrt{13}}{3}$. Let us check the triangle inequality\\
	\vspace{0.25cm}
	\begin{minipage}{6.5cm}
	  $\mathbf{v}+\mathbf{w}=(\frac{3}{5},\frac{4}{3}) \Rightarrow \|\mathbf{v}+\mathbf{w}\|=\frac{\sqrt{481}}{15}$\\
		\vspace{0.25cm}
		$\frac{\sqrt{481}}{15}\leq\frac{\sqrt{136}}{15}+\frac{\sqrt{13}}{3} \Leftrightarrow 1.4621 \leq 1.9793$
	\end{minipage}
	\begin{minipage}{5cm}
		\includegraphics[scale=0.35]{figTriangular.eps}
	\end{minipage}
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Distance and angle between two vectors} 
\begin{block}{Orthogonal projections}
	Let us consider the orthogonal projection of $\mathbf{v}$ onto $\mathbf{w}$. \\
	\begin{minipage}{6.5cm}
		\begin{center}
			$\mathbf{v}'=\left<\mathbf{v},\mathbf{w}\right>\frac{\mathbf{w}}{\|\mathbf{w}\|^2}=\frac{\left<\mathbf{v},\mathbf{w}\right>}{\|\mathbf{w}\|}\frac{\mathbf{w}}{\|\mathbf{w}\|}$
		\end{center}
		The (signed) length of this projection is $\frac{\left<\mathbf{v},\mathbf{w}\right>}{\|\mathbf{w}\|}$
	\end{minipage}
	\begin{minipage}{5cm}
		\includegraphics[scale=0.35]{figInner2.eps}
	\end{minipage}
\end{block}
\begin{exampleblock}{Example}
	Let $\mathbf{v}=(\frac{5}{2},1)$ and $\mathbf{w}=(3,0)$. Then, $\mathbf{v}'=\frac{\frac{5}{2}\cdot 3+1\cdot 0}{3}(1,0)=(\frac{5}{2},0)$. See the figure above.
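	A Matlab check of this projection:\\
	{\color{blue}\leftskip5mm
	\texttt{
		v = [5/2; 1]; w = [3; 0];\\
		vp = (dot(v,w)/dot(w,w))*w
	}}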
\end{exampleblock}

\end{frame}

% ==============================================
\subsection{Multiplication by matrices (b)} 
\Outline

\begin{frame}\frametitle{Multiplication by matrices} 
\begin{exampleblock}{Example}
	Let's consider three vectors $\begin{tiny}\mathbf{v}_1=\left(\begin{array}{c}1\\-1\\0\end{array}\right)\end{tiny}$,
	$\begin{tiny}\mathbf{v}_2=\left(\begin{array}{c}0\\1\\-1\end{array}\right)\end{tiny}$ and 
	$\begin{tiny}\mathbf{v}_3=\left(\begin{array}{c}0\\0\\1\end{array}\right)\end{tiny}$. Let's consider the linear combination
	\begin{center}
		$\begin{tiny}\mathbf{y}=x_1\mathbf{v}_1+x_2\mathbf{v}_2+x_3\mathbf{v}_3=x_1\left(\begin{array}{c}1\\-1\\0\end{array}\right)+x_2\left(\begin{array}{c}0\\1\\-1\end{array}\right)+x_3\left(\begin{array}{c}0\\0\\1\end{array}\right)=\left(\begin{array}{c}x_1\\x_2-x_1\\x_3-x_2\end{array}\right)\end{tiny}$
	\end{center}
	We can obtain the same result by constructing a matrix
	\begin{center}
		$\begin{tiny}A=(\mathbf{v}_1\; \mathbf{v}_2\; \mathbf{v}_3)=\left(\begin{array}{ccc} 1 & 0 & 0 \\ -1 & 1 & 0 \\ 0 & -1 & 1 \end{array}\right)\end{tiny}$.
	\end{center}
	And performing the multiplication
	\begin{center}
		$\begin{tiny}\mathbf{y}=A\left(\begin{array}{c}x_1\\x_2\\x_3\end{array}\right)\end{tiny}=
			(\mathbf{v}_1\; \mathbf{v}_2\; \mathbf{v}_3)\left(\begin{array}{c}x_1\\x_2\\x_3\end{array}\right)=\left(\begin{array}{ccc} 1 & 0 & 0 \\ -1 & 1 & 0 \\ 0 & -1 & 1 \end{array}\right)
			\left(\begin{array}{c}x_1\\x_2\\x_3\end{array}\right)=\left(\begin{array}{c}x_1\\x_2-x_1\\x_3-x_2\end{array}\right)$
	\end{center}
	
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Multiplication by matrices} 
\begin{exampleblock}{Example}
	We can also obtain the same result by calculating $\mathbf{y}$ as the inner products of the rows of the matrix $A$ with the weight vector.
	\begin{center}
		$\begin{tiny}\mathbf{y}=\left(\begin{array}{c}
		\left<(1,0,0),(x_1,x_2,x_3)\right>\\
		\left<(-1,1,0),(x_1,x_2,x_3)\right>\\
		\left<(0,-1,1),(x_1,x_2,x_3)\right>\\
		\end{array}\right)=\left(\begin{array}{c}x_1\\x_2-x_1\\x_3-x_2\end{array}\right)\end{tiny}$
	\end{center}
		Matlab:\\
		{\color{blue}
		%\leftskip5mm
		\texttt{
syms x1 x2 x3 \\
x=[x1; x2; x3] \\
A=[1 0 0; -1 1 0; 0 -1 1];\\
y=A*x
		}
		}
	
\end{exampleblock}
\end{frame}

% ==============================================
\begin{frame}\frametitle{Multiplication by matrices} 
\begin{block}{Matrix multiplication as a linear combination}
	This is a general rule: a matrix-vector multiplication can be seen as a linear combination of the columns of the matrix, with weights given by the entries of the vector.
	\begin{center}
		$A=(\mathbf{c}_1\; \mathbf{c}_2\; ... \mathbf{c}_p) \Rightarrow
		\mathbf{y}=A\mathbf{x}=\sum\limits_{i=1}^p{x_i\mathbf{c}_i}$
	\end{center}	
\end{block}

\begin{block}{Matrix multiplication as inner products}
	Also, a matrix-vector multiplication can be seen as the dot products of the rows of the matrix with the weight vector.
	\begin{center}
		$A=\left(\begin{array}{c}\mathbf{r}^T_1\\\mathbf{r}^T_2\\ ...\\\mathbf{r}^T_n\end{array}\right) \Rightarrow
		\mathbf{y}=A\mathbf{x}=\left(\begin{array}{c}
		\left<\mathbf{r}_1,\mathbf{x}\right>\\
		\left<\mathbf{r}_2,\mathbf{x}\right>\\
		...\\
		\left<\mathbf{r}_n,\mathbf{x}\right>\\
		\end{array}\right)$
	\end{center}	
\end{block}
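A compact Matlab check of both views (the matrix and the vector are arbitrary):\\
{\color{blue}
\begin{small}
\texttt{
	A = [1 0 0; -1 1 0; 0 -1 1]; x = [2; 3; 5];\\
	A*x\\
	x(1)*A(:,1)+x(2)*A(:,2)+x(3)*A(:,3) \% column view\\
	[A(1,:)*x; A(2,:)*x; A(3,:)*x] \% row view
}
\end{small}}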

\end{frame}

% ==============================================
\begin{frame}\frametitle{Multiplication by matrices} 
\begin{block}{Properties of multiplication by matrices}
	\begin{center}
		$A(\mathbf{u}+\mathbf{v})=A\mathbf{u}+A\mathbf{v}$\\
		$A(c\mathbf{u})=c(A\mathbf{u})$\\
	\end{center}	
\end{block}
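A numerical spot-check in Matlab (the matrix, vectors, and scalar are chosen arbitrarily):\\
{\color{blue}
\leftskip5mm
\texttt{
	A = [1 2; 3 4]; u = [1; -1]; v = [0; 2]; c = 3;\\
	A*(u+v) - (A*u+A*v) \% zero vector\\
	A*(c*u) - c*(A*u) \% zero vector
}}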

\end{frame}

\OutlineFinal

\end{document}