\section{Introduction}

\subsection{Phrase Based Models}

\begin{frame}
\frametitle{Phrase Based Models}
    \begin{itemize}
        \item Advanced the state of the art
        \item Often employ the noisy channel approach
            % but join prob P(e, f) or log-linear models are also used
        \item Can translate substrings that are common enough
            % to max of 3 words
    \end{itemize}
\end{frame}


% phrase as the basic unit of translation
% moved SOTA forward 
% basic phrase-based model is an instance of the noisy channel approach
% (noisy channel approach explained)

% 3 steps of how basic phrase thingy work  < for slide?

% other models join distribution p(e, f) (instead of P(e|f) as in the 3 steps)
% or made the probs in features of log linear model
% basic reordering remains the same

% phrases longer than 3 words improve performance little (data sparseness?)
% above phrase level, reorder phrase independent of content, or not at all

% desirable to capture larger scope

% manderin-english example, where insufficiency becomes clear

\begin{frame}
\frametitle{Example, Mandarin to English}

\begin{figure}
\centering
\includegraphics[width=8cm]{resources/exampleSentence}
\end{figure}

\end{frame}

\begin{frame}

A phrase-based system (Pharaoh) translated this as follows:

\begin{center}
\small
[Aozhou] [shi] [yu] [Bei Han] [you]

[Banjiao] [de shaoshu guojia zhiyi]

\vspace{0.5cm}
[Australia] [is] [dipl. rels.] [with]

[North Korea] [is] [one of the few countries]
\end{center}

\normalsize

\begin{itemize}
    \item orders `diplomatic \dots\ Korea' and `one \dots\ countries' correctly
    \item does not re-order those groups
\end{itemize}

\end{frame}

\subsection{Hierarchical Phrase Based Model}
\begin{frame}
\frametitle{Hierarchical Phrase Based Model}
A solution to these problems:
    \begin{itemize}
        \item Phrases work well for reordering words
        \item So, also use phrases for reordering phrases
        \item Will not interfere with the strengths of the phrase-based approach
    \end{itemize}
\end{frame}
% proposition: phrases work good for reordering words, also use them for reordering phrases
% (does not interfere with strenght of phrase-based approach)
% : HIERARCHICAL phrases! :D

% example of hierarchical phrases, and how the previous example can now be translated

% formally producionts of synchronous cfg
% difference between formally syntax based and linguistically syntax based

\subsection{Phrase Based Model Problems}

\begin{frame}
\frametitle{Example 1}
This example of a hierarchical phrase might help with the previous problem:
\begin{align}
\langle \text{yu}\ [1]\ \text{you}\ [2],\ \text{have}\ [2]\ \text{with}\ [1] \rangle
\end{align}

\begin{itemize}
    \item $[1]$ and $[2]$ are placeholders for sub-phrases.
    \item Captures that Chinese PPs almost always modify VP on the left in
contrast to English, on the right
\end{itemize}

% because it generalizes over possible prepositional objects and direct objects,
% it acts both as discontinuous phrase pair and a phrase-reordering rule
% this is more powerful than a conventional phrase pair
\end{frame}

\begin{frame}
\frametitle{Examples 2 and 3}
\begin{align}
\langle [1]\ \text{de}\ [2],\ \text{the}\ [2]\ \text{that}\ [1] \rangle
\end{align}
\begin{itemize}
    \item Captures that Chinese relative clauses modify NPs on the left, in
contrast to English on the right
\end{itemize}

\begin{align}
\langle [1]\ \text{zhiyi},\ \text{one of}\ [1] \rangle
\end{align}
\vspace{-0.5cm}
\begin{itemize}
    \item Renders the construction of \emph{zhiyi} in English word order
\end{itemize}

\end{frame}

\subsection{Hierarchical Phrase Based Model Solutions}
\begin{frame}
With these three examples we can translate the sentence correctly:

\begin{center}
\small
[Aozhou] [shi] [[[yu [Bei Han] you

[Banjiao]] de [shaoshu guojia]] zhiyi]

\vspace{0.5cm}
[Australia] [is] [one of [the [ few countries]

that [have [dipl. rels.] with [North Korea]]]]
\end{center}

%with the tiny mistake, in that it omits 'that', but we just added it here

\normalsize

\end{frame}

\begin{frame}
\frametitle{Linguistically Motivated?}

    \begin{itemize}
        \item These hierarchical phrase pairs are formally productions of a
synchronous context-free grammar
        \item Can be seen as a move to syntax-based Machine Translation
        \item However, that is \emph{formally}
        \item Does not rely on any linguistic annotations or assumptions
        \item The structure \emph{can} represent a syntactician's grammar, but often
does not
    \end{itemize}

\end{frame}

\section{The Model}

% weighted synchronous cfg

\subsection{Synchronous CFG Rules}

\begin{frame}
\frametitle{Model}
The model is based on a weighted synchronous CFG.
% in a synchronous CFG the elementary structures are rewrite rules with aligned pairs of RHS

\begin{align}
X \rightarrow \langle \gamma, \alpha, \sim \rangle
\end{align}

where
\begin{itemize}
    \item $X$ is a non-terminal
    \item $\gamma$ and $\alpha$ are both strings
    \item $\sim$ is a one-to-one correspondence between non-terminal occurrences in
$\gamma$ and $\alpha$
\end{itemize}


\end{frame}

% rewriting begins with a pair of linked start symbols
% at each step 2 coindexed non terminals are rewritten using the 2 components of a single rule
%   such that non of hte new symbols is linked to symbols already present

% example

% only use X for non terminal symbols
% except for 'glue rules', which combine sequence of Xs to form an S
% these give the model the option to build only partial translations, then
% combine them serially as in a standard phrase-based model

\begin{frame}
\frametitle{Formalization of the Exemplified Phrase Pairs}

\begin{align}
X \rightarrow&\ \langle \text{yu}\ X_{[1]}\ \text{you}\ X_{[2]},\ \text{have}\ X_{[2]}\ \text{with}\ X_{[1]} \rangle \\
X \rightarrow&\ \langle X_{[1]}\ \text{de}\ X_{[2]},\ \text{the}\ X_{[2]}\ \text{that}\ X_{[1]} \rangle \\
X \rightarrow&\ \langle X_{[1]}\ \text{zhiyi},\ \text{one of}\ X_{[1]} \rangle
\end{align}
\end{frame}

\begin{frame}
\frametitle{`Glue' Rules}

\begin{itemize}
    \item Note that the only symbol for a non-terminal is $X$. The exception is $S$, which
is used for special `glue' rules.
    \item `Glue' rules are used to 
        \begin{itemize}
            \item build partial translations (\ref{eqlin:glue1})
            \item combine phrases serially, as a standard phrase-based model (\ref{eqlin:glue2})
        \end{itemize}
\end{itemize}

\begin{align}
S \rightarrow&\ \langle S_{[1]}\ X_{[2]},\ S_{[1]}\ X_{[2]} \rangle \label{eqlin:glue1}\\
S \rightarrow&\ \langle X_{[1]},\ X_{[1]} \rangle \label{eqlin:glue2}
\end{align}
\end{frame}

\subsection{Log-linear Model}

\begin{frame}
\frametitle{A More General Log-linear Model}
\begin{itemize}
    \item Instead of the standard noisy-channel approach, a more general log-linear model
is used.
    \item The weight of each rule is:
\end{itemize}

\begin{align}
w(X \rightarrow \langle \gamma, \alpha \rangle) = \prod_i \phi_i (X \rightarrow \langle \gamma,
\alpha \rangle)^{\lambda_i}
\end{align}

where $\phi_i$ are features defined on rules.

\end{frame}

\subsection{Features}

\begin{frame}
\frametitle{Features}

Analogous to Pharaoh's default feature set
% TODO: what or who is Pharaoh?!?!

    \begin{itemize}
        \item $P(\gamma | \alpha)$ and $P(\alpha | \gamma)$; the latter is not
found in the noisy-channel model (but has proven useful)
        \item Lexical weights $P_{w}(\gamma | \alpha)$ and $P_{w}(\alpha |
\gamma)$, estimates how well words in $\alpha$ translate in $\gamma$.
% footnote says this feature uses word alignment info, this is discarded in the final grammar
% a weighted average is used if more alignments are possible
        \item Phrase penalty $\exp(1)$, which allows the model to learn a preference
for shorter or longer derivations
    \end{itemize}

\end{frame}

\begin{frame}
\frametitle{Again Exceptions for the Glue Rules}

\begin{align*}
S \rightarrow&\ \langle S_{[1]}\ X_{[2]},\ S_{[1]}\ X_{[2]} \rangle\\
S \rightarrow&\ \langle X_{[1]},\ X_{[1]} \rangle
\end{align*}

\begin{itemize}
    \item First rule has weight 1
    \item Second rule has weight $\exp(\lambda_g)$
\end{itemize}

Here $\lambda_g$ controls the model's preference for hierarchical phrases over serial
combinations.
\end{frame}


\subsection{Weights}
\begin{frame}
\frametitle{Weights}
    \begin{itemize}
        \item $D$ = derivation of grammar
        \item $f(D)$ \& $e(D)$, the French \& English strings generated by $D$
        \item $D$ is represented as a set of triples $\langle r, i, j \rangle$
            \begin{itemize}
                \item $r$ is a rule applied to rewrite a non-terminal that spans $f(D)_{i}^{j}$ on the French side
            \end{itemize}
    \end{itemize}
Additionally, the product of the weights of the rules used is multiplied by:
\begin{align}
W(D) = \prod_{\langle r, i, j \rangle \in D} w(r) \times P_{lm}(e)^{\lambda_{lm}} \times
\exp(-\lambda_{wp}|e|)
\end{align}
where $P_{lm}$ is the language model, and $\exp(-\lambda_{wp}|e|)$ is the word
penalty %(gives control over the length of the English output)

% implemented into the rule weights, but convenient to note seperately

\end{frame}
