
\documentclass{beamer}
\usetheme{split} % modern form; \usepackage{beamerthemesplit} is deprecated
\usepackage{hyperref}

\newcommand{\Expr}{\text{Expr}}
\newcommand{\EWS}{\text{ExprWithScript}}
\renewcommand{\L}{\ \ | \ \ } % NOTE: shadows the standard \L (Ł); rename if Ł is ever needed
\newcommand{\ra}{\rightarrow \ }
\newcommand{\tS}[1]{^{\text{#1}}}
\newcommand{\ts}[1]{_{\text{#1}}}
\newcommand{\Score}{\text{Score}}
\newcommand{\Rule}{\text{Rule}}
\newcommand{\Tree}{\text{Tree}}
\newcommand{\bounds}{\text{bounds}}
\newcommand{\LHS}{\text{LHS}}
\newcommand{\RHS}{\text{RHS}}
\newcommand{\height}{\text{height}}
\newcommand{\abs}{\text{abs}}
\newcommand{\vecf}{\mathbf{f}}
\newcommand{\vecw}{\mathbf{w}}
\newcommand{\Data}{\text{Data}}
\newcommand{\BigOp}{\text{BigOp}}
\newcommand{\BinOp}{\text{BinaryOp}}
\newcommand{\THRESHOLD}{\text{THRESHOLD}}
\newcommand{\Strokes}{\text{Strokes}}
\newcommand{\Char}{\text{Char}}
\newcommand{\Segmentation}{\text{Segmentation}}
\newcommand{\op}[1]{\text{#1} \ }
\newcommand{\newtag}{\tag{\theequation}\addtocounter{equation}{1}}
\newcommand{\Parse}{\text{Parse}}
\newcommand{\li}{\text{LayoutInfo}}
\newcommand{\xmin}{\text{xmin}}
\newcommand{\xmax}{\text{xmax}}
\newcommand{\ymin}{\text{ymin}}
\newcommand{\ymax}{\text{ymax}}
\newcommand{\mb}{\mathbf}
\newcommand{\bs}{\boldsymbol}


\title{Discriminative parsing for equation recognition}
\author{John Schulman}
\date{\today}

\begin{document}

\frame{\titlepage}

\frame{
\frametitle{Intro}
\begin{itemize}
\item Group project for CS 155: Probabilistic Graphical Models, taught by Andreas Krause
\item Collaborators: Noah Jakimo, Luke Moryl, Eric Paniagua
\item Goal: convert handwritten equations to typeset \LaTeX. Character recognition and equation parsing algorithms written from scratch in Python.
\item Demo...
\end{itemize}
}

\section{Problem description}
\frame{
\frametitle{Not today: segmentation, character recognition}
Say a person draws $x^2+3$ as shown below.

\begin{onlyenv}<1>\begin{figure}[h!]\includegraphics[width=0.8\textwidth]{drawing}\end{figure}\end{onlyenv}
\begin{onlyenv}<2>\begin{figure}[h!]\includegraphics[width=0.8\textwidth]{withboxes}\end{figure}\end{onlyenv}
\begin{onlyenv}<3>\begin{figure}[h!]\includegraphics[width=0.8\textwidth]{withchars}\end{figure}\end{onlyenv}

\vspace{-5em}
Previous stages of the algorithm have done the following:
\begin{itemize}
  \item<2-> segmented the drawing into a bunch of symbols.
  \item<3-> determined candidate characters for each symbol.
\end{itemize}
  
}

\frame{
\frametitle{Input to algorithm}
So the input is a list of pairs (\textsc{Candidates}, \textsc{BoundingBox}), where \textsc{Candidates} is a list of pairs (Char, LogL), and \textsc{BoundingBox} is a tuple $(x_{\min}, x_{\max}, y_{\min}, y_{\max})$.

\begin{tabular}{ll}
\{\\
&  \textcolor{gray}{$\big( [(\text{Char},\text{LogL}), \dots], \ \ \ (x_{\min},x_{\max},y_{\min},y_{\max}) \big)$} \\
&  $\big( [(\mb{x},-2.2), (\bs{+},-3.3)], \ \ \ (-3,-2,1,2) \big)$,\\
&  $\big( [(\mb{2},-1.1), (\mb{r},-4.4), (\bs{\int},-5.5)], \ \ \ (-3,-2,1,2) \big)$,\\
& $\big( [(\bs{+}\dots\big)$,\\
& $\big( [(\mb{2}\dots\big)$\\
\}
\end{tabular}
}

\begin{frame}[fragile]
\frametitle{Output}
We want to infer the ``best'' parse tree, which encodes structure and layout.

For $x^2+3$, the tree is
\begin{verbatim}
(binop
   +
   (sup x 2)
   3
)
\end{verbatim}
\end{frame}

\section{Probabilistic model}
\frame{
\frametitle{Generative model for \LaTeX \ expressions}

Context-free grammar for generating math expressions
\begin{itemize}
\item Nonterminal replacement rules
\begin{align*}
  \Expr \ra &\op{right} \Expr\ \Expr \\
  \L &\op{sub} \EWS_{\Expr} \\
  \L &\op{sup} \EWS^{\Expr} \\
  \L &\op{subsup} \EWS^{\Expr}_{\Expr} \\
  \L &\op{frac} -\ \Expr\ \Expr\ \\
  \cdots
\end{align*}

\item  Terminal replacement rules
\begin{align*}
  \Expr& \ra a,b,c,\dots,1,2,3,\dots \\
  \BigOp& \ra \sum, \prod, \int \\
  \cdots
\end{align*}

\end{itemize}
}

\frame{
\frametitle{Likelihood maximization problem}
\begin{itemize}
\item At each rule application, LayoutInfo is generated.
\item LayoutInfo is defined as the bounding boxes of the sub-expressions.
\end{itemize}

\begin{align*}
P(\Parse,\li)=P(\Parse)P(\li|\Parse)
\end{align*}

\begin{itemize}
\item Goal: find the parse maximizing the likelihood, $\max_{\Parse} P(\li|\Parse)$.
\item Don't have to model $P(\Parse)$
\end{itemize}
}

\frame{
\frametitle{Model for LayoutInfo}
\begin{itemize}
\item Suppose $x^2$ has bounding box $(x_{\min},x_{\max},y_{\min},y_{\max}) = (0,1,0,1)$. 
\item What's the distribution of the bounding boxes on $x$ and $2$?
\item We use the following log-linear model:
\begin{itemize}
   \item Each rule corresponds to a set of binary features $f$. 
   \item Each feature describes some condition on the bounds that we expect to be true. (e.g. for superscript, we expect exponent to be smaller than base)
   \item Features $f$ are independent Bernoulli r.v.s
\begin{align}
f = \begin{cases}
  +1& \text{with probability } e^{+w}/Z \\
  -1& \text{with probability } e^{-w}/Z \end{cases}
\end{align}
where $Z =e^w+e^{-w}$.

\end{itemize}
\end{itemize}
}

\frame{
\frametitle{P(LayoutInfo)}
Thus
\begin{align*}
P(\li_{\Rule} | \Rule) &= \prod_i e^{w_if_i}/Z_i \\
\log P(\li_{\Rule} | \Rule) &= \sum_i (w_if_i - \log Z_i)
\end{align*}

where $f_1,f_2,\dots$ are the set of features that correspond to $\Rule$, and $w_1,w_2,\dots$ are the corresponding weight parameters.
}

\begin{frame}[fragile]
\frametitle{What are the features?}
\begin{itemize}
\item Each feature describes some condition on the bounds that we expect to be true.

\item For the superscript rule,
\begin{verbatim}
SupFeats =
   [LL1,BB1,TT1,R1L2,TT2,Bigger12,RR2]
\end{verbatim}

where
\begin{verbatim}
Bigger12 = "1.7<height1/height2" 
\end{verbatim}

\item Other features describe alignment of the edges of the bounding boxes of the base and exponent.


\end{itemize}

\end{frame}

\frame{
\frametitle{Total likelihood}
\begin{itemize}
\item The LogL for the whole parse is the sum of the LogL of all rules:
\begin{align}
\log P(\text{All}\li|\Parse) = \sum_{\substack{r\in\Rule-\\\text{applications}}}\log P(\li_r|\Rule_r)
\end{align}

\item So how do we efficiently find the most likely parse?
\end{itemize}
}

\section{Algorithm}
\frame{
\frametitle{Parsing algorithm}
Algorithm finds (exact) optimal parse tree
\begin{itemize}
\item Recursively solves problem on subsets of symbols.
\item For each subset $S$ and every nonterminal $N$, finds the optimal parse tree of $S$ with $N$ at the root.
\item Uses memoization
\item $O(n^3)$ or $O(n^4)$, where $n$ is the number of symbols.
\end{itemize}
}

\section{Parameter learning}
\frame{
\frametitle{Likelihood of training dataset}
\begin{itemize}
\item Recall likelihood for a single rule application:
\begin{align*}
P(\li_{\Rule} | \Rule) &= \prod_i e^{w_if_i}/Z_i
\end{align*}
\item Assume a set of training examples
\begin{align*}
(\li, \text{correct parse tree})
\end{align*}
\item The likelihood of the entire training dataset is
\begin{align*}
P(\text{Data}) &= \prod_{e\in\text{Examples}}\prod_{\substack{r\in\Rule-\\\text{applications}}}P(\li^e_r|\Rule_r) \\
&= \prod_{e\in\text{Examples}}\prod_{\substack{r\in\Rule-\\\text{applications}}}\prod_{f_i\in\text{Features}(r)} e^{w_if^{e}_i}/Z_i
\end{align*}
\end{itemize}
}

\frame{
\frametitle{Features are independent}
\begin{itemize}
\item Group terms based on feature functions
\begin{align*}
P(\text{Data}) &= \prod_{f_i} \prod_{n=1}^{N_i}  e^{w_if^{n}_i}/Z_i
\end{align*}
Here $n$ indexes over the $N_i$ occurrences of the rule (corresponding to $f_i$) in the training data.
\item For log-linear model,
\begin{align*}
\bar{f} = E_w f
\end{align*}
at ML solution for $w$, where $\bar{f}$ is the empirical expectation.
\end{itemize}
}

\frame{
\frametitle{Optimal parameter values}
\begin{itemize}
\item Solving for $w$
\begin{align*}
E_wf = \frac{e^w-e^{-w}}{e^w+e^{-w}} = \tanh(w)\\
w_{\text{ML}} = \tanh^{-1} \bar{f}
\end{align*}
\item Training is not yet implemented. We set $w=1$, corresponding to $\bar f = \tanh(1) \approx 0.76$.
\end{itemize}
}

\frame{
\frametitle{The end}
\begin{itemize}
\item Source code: \url{http://code.google.com/p/wyoming/}
\item Thanks:
\begin{itemize}
\item Collaborators: Noah Jakimo, Luke Moryl, Eric Paniagua
\item Andreas Krause, for discussions and advice
\end{itemize}

\item Any questions?
\end{itemize}
}

\end{document}


\begin{psmatrix}
[rowsep=6mm,colsep=6mm]
 & &[name=1] \psframebox{\Expr} \\
 & sup &[name=2] \psframebox{\EWS} &[name=3] \psframebox{\Expr} \\
 &     &[name=4]         $x$       &[name=5]        $2$
\end{psmatrix}
\ncline{1}{2}
\ncline{1}{3}
\ncline{2}{4}
\ncline{3}{5}
}


Outline
----------

The main thing I want to do is tell you the probability model we used for parsing, and how we implement it.


* Intro
  * Goal
  * Show demo video
  * Summary of algorithms


* Character recognition
* Segmentation
* Layout parsing
  * Probability model
  * What is the most likely equation
  * How to efficiently find it.