% Copyright (C) 2017 Koz Ross <koz.ross@retro-freedom.nz>

% This work is licensed under the Creative Commons Attribution-ShareAlike 4.0 
% International License. To view a copy of this license, visit 
% http://creativecommons.org/licenses/by-sa/4.0/ or send a letter to Creative 
% Commons, PO Box 1866, Mountain View, CA 94042, USA.

\documentclass{beamer}

% NOTE: fontspec requires XeLaTeX or LuaLaTeX; inputenc must not be loaded
% alongside it (the engine reads UTF-8 natively).
\usecolortheme{seahorse}
\usefonttheme{professionalfonts}
\usepackage{fontspec}
\setmainfont{DejaVu Sans}
\setbeamertemplate{navigation symbols}{}
\usepackage{caption}
\usepackage[font=itshape, begintext=``, endtext='']{quoting}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[noend]{algpseudocode}

\title{Hylomorphisms}
\subtitle{Or: intermediate structures in a non-awful way}
\titlegraphic{\includegraphics[scale=0.8]{img/logo.pdf}}
\author{Koz Ross}
\date{14th September, 2017}

\begin{document}

\begin{frame}[c]
  \titlepage{}
\end{frame}

\begin{frame}[c]
  \frametitle{Outline}
  \tableofcontents
\end{frame}

\section{Introduction}

\begin{frame}[c]
  \frametitle{Intermediate structures}
  \pause{}
  \begin{definition}
    We say that a data structure is {\em intermediate\/} if it is used in an
    algorithm, but not given as an input or returned as an output.
  \end{definition}
  \pause{}
  \bigskip{}
  Essentially, it's a data structure which we {\em use}, but never {\em see}.
\end{frame}

\begin{frame}[c, fragile]
  \frametitle{An example}
  \begin{definition}
    The {\em Fibonacci numbers\/} $(F_0, F_1, \ldots)$ are a sequence of natural
    numbers, such that:\pause{}
    \begin{itemize}
      \item $F_0 = F_1 = 1$\pause{}
      \item For any $n > 1$, $F_n = F_{n - 1} + F_{n - 2}$
    \end{itemize}
  \end{definition}\pause{}

  If we wanted to write an algorithm for computing $F_n$ given an input $n$, we
  can convert this definition directly:\pause{}
  
  \bigskip{}

  \begin{algorithmic}
    \Function{$\mathrm{fib}$}{$n$}
    \If{$n = 0$ or $n = 1$}
    \State{}\Return{}$1$
    \Else{}
    \State{}\Return{}\Call{$\mathrm{fib}$}{$n - 1$} $+$ \Call{$\mathrm{fib}$}{$n - 2$}
    \EndIf{}
    \EndFunction{}
  \end{algorithmic}
\end{frame}

\begin{frame}[c]
  \frametitle{Why this is awful}
  Let's see what happens if we call $\mathrm{fib}(4)$:\pause{}

  \begin{enumerate}
    \item $\mathrm{fib}(3) + \mathrm{fib}(2)$\pause{}
    \item $\mathrm{fib}(2) + \mathrm{fib}(1) + \mathrm{fib}(2)$\pause{}
    \item $\mathrm{fib}(1) + \mathrm{fib}(0) + \mathrm{fib}(1) +
      \mathrm{fib}(2)$\pause{}
    \item $1 + \mathrm{fib}(0) + \mathrm{fib}(1) + \mathrm{fib}(2)$\pause{}
    \item $1 + 1 + \mathrm{fib}(1) + \mathrm{fib}(2)$\pause{}
    \item $1 + 1 + 1 + \mathrm{fib}(2)$\pause{}
    \item $1 + 1 + 1 + \mathrm{fib}(1) + \mathrm{fib}(0)$\pause{}
    \item $1 + 1 + 1 + 1 + \mathrm{fib}(0)$\pause{}
    \item $1 + 1 + 1 + 1 + 1$\pause{}
    \item $5$\pause{}
  \end{enumerate}

  That's a {\em lot\/} of wasted effort --- for example, we have to compute
  $\mathrm{fib}(2)$ twice.\pause{} Only gets worse for higher $n$!
\end{frame}

\begin{frame}[c]
  \frametitle{How awful is it?}

  Every call to $\mathrm{fib}(n)$ produces two recursive calls: one to
  $\mathrm{fib}(n - 1)$ and another to $\mathrm{fib}(n - 2)$. We stop when 
  $n = 0, 1$.\pause{} That means that $\mathrm{fib}(n)$ makes $2F_n - 1$ calls
  in total (for example, $\mathrm{fib}(4)$ makes $9$ calls), and $F_n$ itself
  grows exponentially in $n$.\pause{} This means our algorithm is
  $O(2^n)$.\pause{} This is \alert{atrocious}.\pause{}

  \bigskip{}

  One reason why it's so bad is because we waste a lot of work --- in order to
  compute $\mathrm{fib}(n - 1)$, we have to compute $\mathrm{fib}(n - 2)$, only
  to throw it away and do it again!\pause{} If only we kept a hold of
  $\mathrm{fib}(n - 2)$, we could reduce the number of recursive calls required
  from $2$ to $1$.\pause{} Let's try using an intermediate data structure for
  that\ldots
\end{frame}

\begin{frame}[c, fragile]
  \frametitle{Take 2}
  Let's use an array to hold the intermediate computations:\pause{}

  \bigskip

  \begin{algorithmic}
    \Function{$\mathrm{fib}^{\prime}$}{$n$}
      \State{}$\mathrm{arr} \gets$ an empty array of length $n$
      \State{}$\mathrm{arr}[0] \gets 1$
      \State{}$\mathrm{arr}[1] \gets 1$
      \For{$i \in 2,3,\ldots,n - 1$}
        \State{}$\mathrm{arr}[i] \gets \mathrm{arr}[i - 1] + \mathrm{arr}[i -
        2]$
      \EndFor{}
      \State{}\Return{}$\mathrm{arr}[n - 1]$
    \EndFunction{}
  \end{algorithmic}\pause{}

  \bigskip

  Since we have a (nearly) $n$-length loop in $\mathrm{fib}^{\prime}(n)$, this
  is clearly $O(n)$ --- much better! All thanks to the intermediate array.
\end{frame}

\begin{frame}[c]
  \frametitle{Intermediate structures are {\em everywhere\/}}
  \pause{}
  \begin{itemize}
    \item Graph processing algorithms (DFS, BFS, Dijkstra's, Prim's, Kruskal's,
      \ldots)\pause{}
    \item Graphics pipeline (rasterization, translations, shading,
      \ldots)\pause{}
    \item Compilers (of any sort)\pause{}
    \item Every time you do recursion (implicitly)\pause{}
    \item And so many more\ldots
  \end{itemize}

  \pause{}

  However, it's not all brilliant --- intermediate structures have two major
  issues.
\end{frame}

\begin{frame}[c]
  \frametitle{Issue One: ad-hoc-ism}

  Every time we employ an intermediate data structure, we have to devise the
  algorithm `from first principles'. This has several downsides:\pause{}
  
  \begin{itemize}
    \item Correctness is harder to determine\pause{}
    \item Structure and content are entangled (no general treatment)\pause{}
    \item Incidental complexity from the structure adds to the complexity of the
      algorithm\pause{}
  \end{itemize}

  This is mostly because we're being forced to consider a specific solution
  every time we want an intermediate structure to be built up, then torn down.
\end{frame}

\begin{frame}[c, fragile]
  \frametitle{Issue Two: efficiency}

  Let's look at $\mathrm{fib}^{\prime}$ again:\pause{}

  \bigskip

  \begin{algorithmic}
    \Function{$\mathrm{fib}^{\prime}$}{$n$}
      \State{}$\mathrm{arr} \gets$ an empty array of length $n$
      \State{}$\mathrm{arr}[0] \gets 1$
      \State{}$\mathrm{arr}[1] \gets 1$
      \For{$i \in 2,3,\ldots,n - 1$}
        \State{}$\mathrm{arr}[i] \gets \mathrm{arr}[i - 1] + \mathrm{arr}[i -
        2]$
      \EndFor{}
      \State{}\Return{}$\mathrm{arr}[n - 1]$
    \EndFunction{}
  \end{algorithmic}\pause{}

  \bigskip
  
  At no point in the algorithm are we interested in the whole array --- just the
  last two elements.\pause{} However, we can't get rid of any part of the
  structure until the {\em very\/} end, even though it's useless to us!
\end{frame}

\begin{frame}[c]
  \frametitle{Can we do better?}

  What we really want is:\pause{}

  \begin{itemize}
    \item A general way of handling {\em any\/} algorithm which involves
      intermediate structures\pause{}
    \item A guarantee of {\em some\/} correctness properties (from a
      formal/mathematical point of view)\pause{}
    \item Some way of automatically only building up as much structure as we
      need\pause{}
  \end{itemize}

  Sounds impossible, right?\pause{} However: there's a morphism for that!
\end{frame}

\section{Hylomorphisms}

\begin{frame}[fragile, c]
  \frametitle{Where we last left recursion schemes\ldots}\pause{}

  \begin{Verbatim}[fontsize=\small, commandchars=\\\{\}]
\color{gray}-- our knotting hack
data Knot f = Tie \{ untie :: f (Knot f) \} 

\color{gray}-- pronounced `then', for easier composition
>>> :: (a -> b) -> (b -> c) -> (a -> c)
f >>> g = g \circ f 

type Algebra f a = f a -> a

\color{gray}-- catamorphism - tears structures down
cata :: Functor f => Algebra f a -> Knot f -> a
cata f = untie >>> map (cata f) >>> f

type Coalgebra f a = a -> f a

\color{gray}-- anamorphism - builds structures up
ana :: Functor f => Coalgebra f a -> a -> Knot f
ana f = f >>> map (ana f) >>> Tie
  \end{Verbatim}
\end{frame}

\begin{frame}[fragile, c]
  \frametitle{Trying to use recursion schemes}

  When we use an intermediate structure, we first build it up, then tear it
  down. What would happen if we tried to write this using recursion
  schemes?\pause{}

  \begin{Verbatim}[fontsize=\small, commandchars=\\\{\}]
\color{gray}-- what's the type of this?
huh coalg alg = ana coalg >>> cata alg \pause{}
huh :: Functor f => (a -> f a) -> (f b -> b) -> a -> b
\color{gray}-- remember, (a -> f a) is Coalgebra f a
\color{gray}-- and (f b -> b) is Algebra f b
  \end{Verbatim}
  \pause{}

  Now, {\tt huh} is a very silly name. We should really fix our terminology
  again\ldots
\end{frame}

\begin{frame}[fragile, c]
  \frametitle{The humble hylomorphism}

  \begin{Verbatim}[fontsize=\small, commandchars=\\\{\}]
hylo :: Functor f => Coalgebra f a -> Algebra f b -> a -> b
hylo f g = ana f >>> cata g
  \end{Verbatim}

  \bigskip

  The term `hylomorphism' comes from the Greek root {\em hylo\/} (meaning
  `wood' or `matter'), and its behaviour can be seen as tree-like (building up
  is like the tree growing, and tearing down is like the tree being turned
  into matches).\pause{}
  
  \bigskip{}

  Note: The intermediate structure being operated on by the
  algebra and coalgebra must be {\em the same}, but the data can be different.
\end{frame}

\section{Using hylomorphisms}

\begin{frame}[fragile, c]
  \frametitle{Example: mergesort}
  \pause{}
  \begin{itemize}
    \item A well-known {\em comparison-sorting\/} algorithm for arrays\pause{}
    \item An example of a {\em divide-and-conquer\/} algorithm\pause{}
    \item Relies on the following two principles:\pause{}
      \begin{enumerate}
        \item An array of length $1$ is always sorted\pause{}
        \item We can merge two sorted arrays into one sorted array\pause{}
      \end{enumerate}
    \item Relies on a three-step {\em merge procedure\/}:\pause{}
      \begin{enumerate}
        \item Create an array big enough to hold the contents of both inputs\pause{}
        \item Walk over the new array, inserting the smallest element from either
        input at each stage\pause{}
      \item When we've finished, return the result\pause{}
      \end{enumerate}
    \item Implicitly builds up a binary tree of deferred sort operations
  \end{itemize}
\end{frame}

\begin{frame}[fragile,c]
  \frametitle{The merge procedure, in detail}
  \begin{algorithmic}
    \Function{$\mathrm{merge}$}{$a_1, a_2$}
      \State{}$n \gets$ \Call{$\mathrm{len}$}{$a_1$} $+$ \Call{$\mathrm{len}$}{$a_2$}
      \State{}$a \gets$ a new array of length $n$
      \State{}$f_1, f_2 \gets 0$
      \For{$i \in 0,1,\ldots,n - 1$}
        \If{$f_1 =$ \Call{$\mathrm{len}$}{$a_1$}}
          \State{}$a[i] \gets a_2[f_2]$
          \State{}$f_2 \gets f_2 + 1$
        \ElsIf{$f_2 =$ \Call{$\mathrm{len}$}{$a_2$}}
          \State{}$a[i] \gets a_1[f_1]$
          \State{}$f_1 \gets f_1 + 1$
        \Else{}
          \State{}$e \gets \min\{a_1[f_1], a_2[f_2]\}$
          \State{}$a[i] \gets e$
          \State{}Increment corresponding $f$
        \EndIf{}
      \EndFor{}
      \State{}\Return{}$a$
    \EndFunction{}
  \end{algorithmic}
\end{frame}

\begin{frame}[fragile,c]
  \frametitle{Viewing mergesort as a hylomorphism}
    \begin{itemize}
      \item When we do the `divide' step of mergesort, we build up a binary
        tree:\pause{}
        \begin{itemize}
          \item The leaves are singleton arrays\pause{}
          \item The internal nodes are deferred merges of the children\pause{}
        \end{itemize}
      \item Once we've built up this tree, we can tear it down bottom-up by
        realizing the deferred computations based on their children\pause{}
      \item At the end, we have a single, sorted array, and the tree is
        gone\pause{}
    \end{itemize}

    \bigskip{}

    Let's see how this works in practice: we will sort the array {\tt [4, 2, 6,
    1]}.
\end{frame}

\begin{frame}[fragile,c]
  \frametitle{Worked example}
  \begin{overprint}
    \onslide<1>\centerline{\includegraphics[scale=1]{img/merge1.pdf}}%
    \onslide<2>\centerline{\includegraphics[scale=1]{img/merge2.pdf}}%
    \onslide<3>\centerline{\includegraphics[scale=1]{img/merge3.pdf}}%
    \onslide<4>\centerline{\includegraphics[scale=1]{img/merge4.pdf}}%
    \onslide<5>\centerline{\includegraphics[scale=1]{img/merge5.pdf}}%
    \onslide<6>\centerline{\includegraphics[scale=1]{img/merge6.pdf}}%
    \onslide<7>\centerline{\includegraphics[scale=1]{img/merge7.pdf}}%
    \onslide<8>\centerline{\includegraphics[scale=1]{img/merge8.pdf}}%
  \end{overprint}
\end{frame}

\begin{frame}[fragile,c]
  \frametitle{Putting it into a hylo}
  First, we will need an intermediate tree structure:\pause{}

  \begin{Verbatim}[commandchars=\\\{\}]
data Tree a = Leaf a | Internal (Tree a) (Tree a)
  \end{Verbatim}
  \pause{}

  It then needs a slight change so we can use it with recursion schemes:\pause{}

  \begin{Verbatim}[commandchars=\\\{\}]
data ITree a s = Leaf a | Internal s s
  \end{Verbatim}

  \pause{}

  Lastly, we need a {\tt Functor} instance for it. This can be derived
  automatically (both in theory {\em and\/} practice).\pause{}

  \begin{Verbatim}[commandchars=\\\{\}]
instance Functor (ITree a) where \pause{}
  map :: (s -> t) -> ITree a s -> ITree a t\pause{}
  map _ (Leaf a) = Leaf a \color{gray}-- nothing to do\pause{}
  map f (Internal l r) = Internal (f l) (f r)
  \end{Verbatim}
\end{frame}

\begin{frame}[fragile,c]
  \frametitle{A suitable coalgebra}
  Our coalgebra will represent one `step' in the dividing process:\pause{}

  \begin{itemize}
    \item If we get given an array of length $1$, we just make a {\tt Leaf}
      storing it\pause{}
    \item Otherwise, we make an {\tt Internal}, and send half of the array into
      each child\pause{}
  \end{itemize}

  \begin{Verbatim}[commandchars=\\\{\}]
\color{gray}-- Vector a is an array of a
breakDown :: Coalgebra (ITree (Vector a)) (Vector a)\pause{} 
\color{gray}-- breakDown :: Vector a -> ITree (Vector a)\pause{}
breakDown v = case (length v) of \pause{}
  1 -> Leaf v\pause{}
  x -> let half = x `div` 2 in
    Internal (slice v 0 half) (slice v half (x - half))
  \end{Verbatim}
\end{frame}

\begin{frame}[fragile,c]
  \frametitle{A suitable algebra}
  Our algebra will represent one `step' in the conquering process:\pause{}

  \begin{itemize}
    \item If we have a {\tt Leaf}, we just return the array as-is\pause{}
    \item If we have an {\tt Internal}, merge the arrays in its left and right
      children\pause{}
  \end{itemize}

  We will assume we have {\tt merge} defined appropriately --- its details don't
  matter for this.\pause{}

  \begin{Verbatim}[commandchars=\\\{\}]
mergeAlg :: Ord a => Algebra (ITree (Vector a)) (Vector a)\pause{}
\color{gray}-- mergeAlg :: Ord a => ITree (Vector a) -> Vector a\pause{}
mergeAlg (Leaf v) = v\pause{}
mergeAlg (Internal v1 v2) = merge v1 v2
  \end{Verbatim}
\end{frame}

\begin{frame}[fragile,c]
  \frametitle{Putting it all together}

  \begin{Verbatim}[commandchars=\\\{\}]
mergeSort :: Ord a => Vector a -> Vector a
mergeSort v\pause{}
  | length v == 0 = v \color{gray}-- no point sorting an empty array\pause{}
  | otherwise = hylo breakDown mergeAlg v
  \end{Verbatim}

  \pause{}
  \bigskip{}

  \alert{Yay!}
\end{frame}

\begin{frame}[fragile, c]
  \frametitle{Reviewing our goals}
  \pause{}
  \begin{itemize}
    \item Generalization of all divide-and-conquer algorithms (and almost all
      intermediate structure work!)\pause{}
    \item Strong correctness guarantees:\pause{}
      \begin{itemize}
        \item Won't miss any items in the teardown\pause{}
        \item No edge-cases for `lop-sided' structures\pause{}
        \item No `funny stuff' --- no side effects, no structural wrecking we
          didn't ask for\pause{}
      \end{itemize}
    \item Efficiency still a problem --- until we finish {\tt ana f} we can't
      start on {\tt cata g}!\pause{}
  \end{itemize}

  Luckily for us, Meijer, Fokkinga and Paterson solved that last one in their
  `Bananas, Lenses, Envelopes and Barbed Wire' paper in 1991:\pause{}

  \begin{Verbatim}[fontsize=\small,commandchars=\\\{\}]
hylo' :: Functor f => Coalgebra f a -> Algebra f b -> a -> b
hylo' f g = f >>> map (hylo' f g) >>> g
  \end{Verbatim}

  \pause{}

  \alert{Result:} We have everything we wanted, and recursion schemes still rule!
\end{frame}

\section{Questions}

\begin{frame}[fragile, c]
  \frametitle{Questions?}
  \begin{center}
  \includegraphics[scale=0.27]{img/questions.pdf}
\end{center}
\end{frame}

\end{document}

