\begin{frame}[label=grassman-manifold]{Grassmann Manifold}
  \begin{itemize}
   \item A \textcf{Grassmann Manifold} is a collection of linear subspaces
         of a vector space. It is denoted $G(r, V)$ and is equal to the set
         of $r$-dimensional linear subspaces of $V$. Sometimes $G(r, n)$ is
         used, where $n$ is the dimensionality of $V$.
   \item Each point on the Grassmann manifold represents a subspace.
   \item For example, $G(1, \mathbb{R}^3)$ is the set of lines
         (1-dimensional) through the origin in $\mathbb{R}^3$:
  %\includegraphics[width=\textwidth]{}
  \end{itemize}
\end{frame}

%\frame{ \frametitle{} \includegraphics[width=\textwidth]{grassmann-manifold}}

\begin{frame}[label=geodesics]{Geodesics}
  \begin{itemize}
   \item For points $A$ and $B$ along a surface $S$, a \textcf{geodesic}
         is a shortest-length curve connecting $A$ and $B$ along $S$.
   \item In the simplest case, $S$ is a plane, and the geodesic is a
         straight line. In more complex cases, the geodesic can be
         solved for using the \emph{calculus of variations}, which
         deals with maximizing or minimizing functionals.
   \item A \textcf{geodesic path} is a constant-velocity curve along
         a manifold.
  \end{itemize}
\end{frame}

\frame{ \frametitle{} \includegraphics[width=\textwidth]{geodesic}}

\begin{frame}[label=orthogonal-complement]{Orthogonal Completion}
  \begin{itemize}
   \item An \textcf{orthogonal completion} $Q$ of a subspace $W$ is a set
         of vectors $q$ which are orthogonal to all the vectors
         in $W$. $Q$ is also unitary; its vectors $q$ are
         orthonormal.
   \item An orthogonal completion can be found using the \emph{QR
         decomposition}:
   \begin{equation}
      W = QR,
   \end{equation}
   Here, $R$ is a right-triangular matrix.  One algorithm for
   QR decomposition uses what is called the \emph{Gram--Schmidt}
   process.
  \end{itemize}
\end{frame}

\frame{ \frametitle{} \includegraphics[width=\textwidth]{orthogonal-complement}}

\begin{frame}[label=cs-decomposition]{Cosine Sine Decomposition}
  \begin{itemize}
   \item The \textcf{cosine sine decomposition} of a
         unitary matrix
         $U$ is
   \begin{equation}
    U =
      \begin{bmatrix} L_0 & 0 \\ 0 & L_1 \end{bmatrix}
      D
      \begin{bmatrix} R_0 & 0 \\ 0 & R_1 \end{bmatrix},
   \end{equation}
   \item where $D$ is given by:
   \begin{equation}
   D =
      \begin{bmatrix} D_{00} & D_{10} \\ D_{01} & D_{11} \end{bmatrix}.
   \end{equation}
   \item $D_{00} = D_{11} = \mathrm{diag}(C_1, C_2, \ldots, C_{N/2})$, and
   \item $D_{10} = -D_{01} = \mathrm{diag}(S_1, S_2, \ldots, S_{N/2})$.
   \item $\forall i \in \{1, \ldots, N/2\}$,
   $S_i = \sin(\theta_i)$, $C_i = \cos(\theta_i)$.
  \end{itemize}
\end{frame}

\begin{frame}[label=exponential-map]{Exponential Map}
  \begin{itemize}
   \item If $v$ is a tangent vector to a point $p$ on a manifold $M$
         which has a parameterized
         geodesic
         $\gamma(t)$ between $p$
         and $q$ such that $\gamma(0)=p$ and $\gamma(1)=q$, then the
          \textcf{exponential map} given $v$ is $\exp_p(v) = \gamma(1)$.
   \item Essentially, the exponential map takes a tangent vector to
         a point (which describes the \emph{geodesic flow} at that
         point) and maps it to the geodesic it follows for a unit time.
  \end{itemize}
\end{frame}

\frame{ \frametitle{}
  \begin{center}
    \includegraphics[width=.7\textwidth]{exponential-map}
  \end{center}
}

% \frame{ \frametitle{} \includegraphics[width=\textwidth]{quotient_space.gif}}

\begin{frame}[label=svd]{Singular Value Decomposition}
  \begin{itemize}
   \item The \textcf{singular value decomposition} is the decomposition
         of a matrix $M$ into components $U, \Sigma, V^*$ as follows:
   \begin{equation}
         M = U\Sigma V^*
   \end{equation}
   \item where $U$ and $V$ are $N \times N$
         unitary matrices,
         and $\Sigma$ is an $N \times N$ diagonal matrix.
   \item One can think of $U$ as being a polar rotation, $V$ as an
         azimuthal rotation, and $\Sigma$ as a scaling matrix.
  \end{itemize}
\end{frame}

\frame{ \frametitle{} \includegraphics[width=0.9 \textwidth]{svd-matrices}}
\frame{ \frametitle{} \includegraphics[width=0.9 \textwidth]{svd-gene-example}}

\begin{frame}[label=unitary-matrix]{Unitary Matrix}
  \begin{itemize}
   \item A \textcf{unitary matrix} is a square matrix $U$ that satisfies
         the condition:
   \begin{equation}
         U^*U = UU^* = I,
   \end{equation}
   \item where $U^*$ is the
         conjugate transpose
         of $U$.
  \end{itemize}
\end{frame}

\frame{ \frametitle{} \includegraphics[width=\textwidth]{unitary-mx}}

\begin{frame}[label=conjugate-transpose]{Conjugate Transpose}
  \begin{itemize}
   \item The \textcf{conjugate transpose} $U^*$ of $U$ is the complex
         conjugate of the transpose of $U$. That is,
 %\end{itemize}
   \begin{equation}
         (U^*)_{ij} = \bar{U}_{ji}
   \end{equation}
 %\begin{itemize}
   \item Recall that the conjugate of a complex number $a+bi$ is
         defined as $a-bi$.
  \end{itemize}
\end{frame}

\begin{frame}[label=pca]{Principal Component Analysis}
  \begin{itemize}
   \item A \textcf{principal component analysis} uses an orthogonal
         transformation to convert a set of possibly correlated
         data points into uncorrelated data points.
   \item For the new dimensionality $d$ of the data, the highest
         variance lies along the first dimension, the second highest
         on the second dimension, etc.
  \end{itemize}
\end{frame}

\begin{frame}
  \begin{center}
  \includegraphics[width=.6\textwidth]{pca}
  \end{center}
\end{frame}

\begin{frame}[label=matrix-exponential]{Matrix Exponential}
  \begin{itemize}
    \item A \textcf{matrix exponential} is given by:
    \begin{equation}
       e^A = \exp(A) = \sum_{k=0}^{\infty} \frac{A^k}{k!}.
    \end{equation}
    \item Here $A^k$ is the matrix $A$ multiplied by itself
          $k$ times.  The matrix exponential always converges.
  \end{itemize}
\end{frame}

