\documentclass{article} \usepackage{times} \usepackage{graphicx} \usepackage{subfigure} \usepackage{amsmath} \usepackage{amsfonts} \usepackage{stfloats} \usepackage{url} \usepackage{natbib} \usepackage{algorithm} \usepackage{algorithmic} \usepackage{hyperref} \newcommand{\theHalgorithm}{\arabic{algorithm}} \usepackage[accepted]{arXiv} \graphicspath{ {./figures/} } \newcommand{\obs}{\text{obs}} \newcommand{\mis}{\text{mis}} \newcommand{\qt}[1]{\left<#1\right>} \newcommand{\ql}[1]{\left[#1\right]} \newcommand{\hess}{\mathbf{H}} \newcommand{\jacob}{\mathbf{J}} \newcommand{\hl}{HL} \newcommand{\cost}{\mathcal{L}} \newcommand{\lout}{\mathbf{r}} \newcommand{\louti}{r} \newcommand{\outi}{y} \newcommand{\out}{\mathbf{y}} \newcommand{\gauss}{\mathbf{G_N}} \newcommand{\eye}{\mathbf{I}} \newcommand{\softmax}{\phi} \newcommand{\targ}{\mathbf{t}} \newcommand{\metric}{\mathbf{G}} \newcommand{\sample}{\mathbf{z}} \newcommand{\bmx}[0]{\begin{bmatrix}} \newcommand{\emx}[0]{\end{bmatrix}} \newcommand{\qexp}[1]{\left<#1\right>} \newcommand{\vect}[1]{\mathbf{#1}} \newcommand{\vects}[1]{\boldsymbol{#1}} \newcommand{\matr}[1]{\mathbf{#1}} \newcommand{\var}[0]{\operatorname{Var}} \newcommand{\std}[0]{\operatorname{std}} \newcommand{\cov}[0]{\operatorname{Cov}} \newcommand{\diag}[0]{\operatorname{diag}} \newcommand{\matrs}[1]{\boldsymbol{#1}} \newcommand{\va}[0]{\vect{a}} \newcommand{\vb}[0]{\vect{b}} \newcommand{\vc}[0]{\vect{c}} \newcommand{\ve}[0]{\vect{e}} \newcommand{\vh}[0]{\vect{h}} \newcommand{\vv}[0]{\vect{v}} \newcommand{\vx}[0]{\vect{x}} \newcommand{\vn}[0]{\vect{n}} \newcommand{\vz}[0]{\vect{z}} \newcommand{\vw}[0]{\vect{w}} \newcommand{\vs}[0]{\vect{s}} \newcommand{\vf}[0]{\vect{f}} \newcommand{\vi}[0]{\vect{i}} \newcommand{\vo}[0]{\vect{o}} \newcommand{\vy}[0]{\vect{y}} \newcommand{\vg}[0]{\vect{g}} \newcommand{\vm}[0]{\vect{m}} \newcommand{\vu}[0]{\vect{u}} \newcommand{\vL}[0]{\vect{L}} \newcommand{\vr}[0]{\vect{r}} \newcommand{\mW}[0]{\matr{W}} \newcommand{\mG}[0]{\matr{G}} \newcommand{\mX}[0]{\matr{X}} \newcommand{\mQ}[0]{\matr{Q}} \newcommand{\mU}[0]{\matr{U}} \newcommand{\mV}[0]{\matr{V}} \newcommand{\vE}[0]{\matr{E}} \newcommand{\mA}{\matr{A}} \newcommand{\mD}{\matr{D}} \newcommand{\mS}{\matr{S}} \newcommand{\mI}{\matr{I}} \newcommand{\td}[0]{\text{d}} \newcommand{\TT}[0]{\vects{\theta}} \newcommand{\vsig}[0]{\vects{\sigma}} \newcommand{\valpha}[0]{\vects{\alpha}} \newcommand{\vmu}[0]{\vects{\mu}} \newcommand{\vzero}[0]{\vect{0}} \newcommand{\tf}[0]{\text{m}} \newcommand{\tdf}[0]{\text{dm}} \newcommand{\grad}[0]{\nabla} \newcommand{\alert}[1]{\textcolor{red}{#1}} \newcommand{\N}[0]{\mathcal{N}} \newcommand{\LL}[0]{\mathcal{L}} \newcommand{\HH}[0]{\mathcal{H}} \newcommand{\RR}[0]{\mathbb{R}} \newcommand{\II}[0]{\mathbb{I}} \newcommand{\Scal}[0]{\mathcal{S}} \newcommand{\sigmoid}{\sigma} \newcommand{\E}[0]{\mathbb{E}} \newcommand{\enabla}[0]{\ensuremath{\overset{\raisebox{-0.3ex}[0.5ex][0ex]{\ensuremath{\scriptscriptstyle e}}}{\nabla}}} \newcommand{\enhnabla}[0]{\nabla_{\hspace{-0.5mm}e}\,} \newcommand{\tred}[1]{\textcolor{red}{#1}} \newcommand{\tgreen}[1]{\textcolor{green}{#1}} \newcommand{\tblue}[1]{\textcolor{blue}{#1}} \newcommand{\todo}[1]{{\Large\textcolor{red}{#1}}} \newcommand{\done}[1]{{\Large\textcolor{green}{#1}}} \newcommand{\dd}[1]{\ensuremath{\mbox{d}#1}} \DeclareMathOperator*{\argmax}{\arg \max} \DeclareMathOperator*{\argmin}{\arg \min} \newcommand{\newln}{\\&\quad\quad{}} \newcommand{\Ax}{\mathcal{A}_x} \newcommand{\Ay}{\mathcal{A}_y} \newcommand{\ola}{\overleftarrow} 
\newcommand{\ora}{\overrightarrow}
\newcommand{\ov}{\overline}
\newcommand{\ts}{\rule{0pt}{2.6ex}}
\newcommand{\ms}{\rule{0pt}{0ex}}
\newcommand{\bs}{\rule[-1.2ex]{0pt}{0pt}}
\newcommand{\specialcell}[2][c]{\begin{tabular}[#1]{@{}c@{}}#2\end{tabular}}
\newcommand{\bx}{\textbf{x}}
\newcommand{\by}{\textbf{y}}
\newcommand{\bW}{\textbf{W}}
\icmltitlerunning{Neural Image Caption Generation with Visual Attention}
\begin{document}
\twocolumn[
\icmltitle{Show, Attend and Tell: Neural Image Caption\\Generation with Visual Attention}
\icmlauthor{Kelvin Xu}{kelvin.xu@umontreal.ca}
\icmlauthor{Jimmy Lei Ba}{jimmy@psi.utoronto.ca}
\icmlauthor{Ryan Kiros}{rkiros@cs.toronto.edu}
\icmlauthor{Kyunghyun Cho}{kyunghyun.cho@umontreal.ca}
\icmlauthor{Aaron Courville}{aaron.courville@umontreal.ca}
\icmlauthor{Ruslan Salakhutdinov}{rsalakhu@cs.toronto.edu}
\icmlauthor{Richard S. Zemel}{zemel@cs.toronto.edu}
\icmlauthor{Yoshua Bengio}{find-me@the.web}
\vskip 0.3in
]
\begin{abstract}
Inspired by recent work in machine translation and object detection, we introduce an attention-based model that automatically learns to describe the content of images. We describe how we can train this model in a deterministic manner using standard backpropagation techniques and stochastically by maximizing a variational lower bound. We also show through visualization how the model is able to automatically learn to fix its gaze on salient objects while generating the corresponding words in the output sequence. We validate the use of attention with state-of-the-art performance on three benchmark datasets: Flickr8k, Flickr30k and MS COCO.
\end{abstract}

\section{Introduction}
Automatically generating captions of an image is a task very close to the heart of scene understanding --- one of the primary goals of computer vision. Not only must caption generation models be powerful enough to solve the computer vision challenges of determining which objects are in an image, but they must also be capable of capturing and expressing their relationships in a natural language. For this reason, caption generation has long been viewed as a difficult problem. It is a very important challenge for machine learning algorithms, as it amounts to mimicking the remarkable human ability to compress huge amounts of salient visual information into descriptive language.

Despite the challenging nature of this task, there has been a recent surge of research interest in attacking the image caption generation problem. Aided by advances in training neural networks \citep{Krizhevsky2012} and large classification datasets \citep{Imagenet14}, recent work has significantly improved the quality of caption generation using a combination of convolutional neural networks (convnets) to obtain vectorial representations of images and recurrent neural networks to decode those representations into natural language sentences (see Sec.~\ref{section:background}).

\begin{figure}[tp]
\label{figure:model_diagram}
\centering
\caption{Our model learns a words/image alignment. The visualized attentional maps (3) are explained in Sections \ref{section:model} \& \ref{section:viz}.}
\vspace{3mm}
\includegraphics[width=\columnwidth]{model_diagram.pdf}
\vspace{-6mm}
\end{figure}

\begin{figure*}[!tp]
\label{figure:attention_diagram}
\centering
\caption{Attention over time. As the model generates each word, its attention changes to reflect the relevant parts of the image. ``Soft'' (top row) vs.\ ``hard'' (bottom row) attention. (Note that both models generated the same captions in this example.)
}
\includegraphics[width=6.5in]{runout}
\vspace{-5mm}
\end{figure*}

\begin{figure*}[!tp]
\label{figure:alignments}
\centering
\caption{Examples of attending to the correct object (\textit{white} indicates the attended regions, \textit{underlines} indicate the corresponding word)}
\includegraphics[width=0.87\textwidth]{good.pdf}
\vspace{-5mm}
\end{figure*}

One of the most curious facets of the human visual system is the presence of attention \citep{Rensink2000,Corbetta2002}. Rather than compress an entire image into a static representation, attention allows for salient features to dynamically come to the forefront as needed. This is especially important when there is a lot of clutter in an image. Using representations (such as those from the top layer of a convnet) that distill the information in an image down to the most salient objects is one effective solution that has been widely adopted in previous work. Unfortunately, this has one potential drawback of losing information which could be useful for richer, more descriptive captions. Using lower-level representations can help preserve this information. However, working with these features necessitates a powerful mechanism to steer the model to information important to the task at hand.

In this paper, we describe approaches to caption generation that attempt to incorporate a form of attention with two variants: a ``hard'' attention mechanism and a ``soft'' attention mechanism. We also show how one advantage of including attention is the ability to visualize what the model ``sees''. Encouraged by recent advances in caption generation and inspired by recent success in employing attention in machine translation \citep{Bahdanau2014} and object recognition \citep{Ba2014,Mnih2014}, we investigate models that can attend to salient parts of an image while generating its caption.

The contributions of this paper are the following:
\vspace{-1mm}
\begin{itemize}
\vspace{-2mm}
\item We introduce two attention-based image caption generators under a common framework (Sec.~\ref{section:model}): 1) a ``soft'' deterministic attention mechanism trainable by standard back-propagation methods and 2) a ``hard'' stochastic attention mechanism trainable by maximizing an approximate variational lower bound or equivalently by REINFORCE~\citep{Williams92}.
\vspace{-2mm}
\item We show how we can gain insight and interpret the results of this framework by visualizing ``where'' and ``what'' the attention focused on (see Sec.~\ref{section:viz}).
\vspace{-2mm}
\item Finally, we quantitatively validate the usefulness of attention in caption generation with state-of-the-art performance (Sec.~\ref{section:results}) on three benchmark datasets: Flickr8k \citep{Hodosh2013}, Flickr30k \citep{Young2014} and the MS COCO dataset \citep{Lin2014}.
\end{itemize}

\section{Related Work}
\label{section:background}
In this section we provide relevant background on previous work on image caption generation and attention.

Recently, several methods have been proposed for generating image descriptions. Many of these methods are based on recurrent neural networks and inspired by the successful use of sequence-to-sequence training with neural networks for machine translation~\citep{Cho2014,Bahdanau2014,Sutskever2014}. One major reason image caption generation is well suited to the encoder-decoder framework \citep{Cho2014} of machine translation is that it is analogous to ``translating'' an image to a sentence.
The first approach to using neural networks for caption generation was that of \citet{Kiros2014a}, who proposed a multimodal log-bilinear model that was biased by features from the image. This work was later followed by \citet{Kiros2014b}, whose method was designed to explicitly allow for a natural way of doing both ranking and generation. \citet{Mao2014} took a similar approach to generation but replaced a feed-forward neural language model with a recurrent one. Both \citet{Vinyals2014} and \citet{Donahue2014} use LSTM RNNs for their models. Unlike \citet{Kiros2014a} and \citet{Mao2014}, whose models see the image at each time step of the output word sequence, \citet{Vinyals2014} only show the image to the RNN at the beginning. Along with images, \citet{Donahue2014} also apply LSTMs to videos, allowing their model to generate video descriptions.

All of these works represent images as a single feature vector from the top layer of a pre-trained convolutional network. \citet{Karpathy2014} instead proposed to learn a joint embedding space for ranking and generation; their model learns to score sentence and image similarity as a function of R-CNN object detections combined with outputs of a bidirectional RNN. \citet{Fang2014} proposed a three-step pipeline for generation by incorporating object detections. Their model first learns detectors for several visual concepts based on a multi-instance learning framework. A language model trained on captions was then applied to the detector outputs, followed by rescoring from a joint image-text embedding space. Unlike these models, our proposed attention framework does not explicitly use object detectors but instead learns latent alignments from scratch. This allows our model to go beyond ``objectness'' and learn to attend to abstract concepts.

Prior to the use of neural networks for generating captions, two main approaches were dominant. The first involved generating caption templates which were filled in based on the results of object detections and attribute discovery \citep{Kulkarni2013,Li2011,Yang2011,Mitchell2012,Elliott2013}. The second approach was based on first retrieving similar captioned images from a large database and then modifying these retrieved captions to fit the query \citep{Kuznetsova2012,Kuznetsova2014}. These approaches typically involved an intermediate ``generalization'' step to remove the specifics of a caption that are only relevant to the retrieved image, such as the name of a city. Both of these approaches have since fallen out of favour compared to the now dominant neural network methods.

There has been a long line of previous work incorporating attention into neural networks for vision-related tasks. Some works that share the same spirit as ours include \citet{Larochelle2010,Denil2012,Tang2014}. In particular, however, our work directly extends that of \citet{Bahdanau2014,Mnih2014,Ba2014}.

\section{Image Caption Generation with Attention Mechanism}
\subsection{Model Details}
\label{section:model}
In this section, we describe the two variants of our attention-based model by first describing their common framework. The main difference is the definition of the $\phi$ function, which we describe in detail in Sec.~\ref{sec:det_sto}. We denote vectors with bold font and matrices with capital letters. In our description below, we suppress bias terms for readability.

\subsubsection{Encoder: Convolutional Features}
Our model takes a single raw image and generates a caption $\vy$ encoded as a sequence of 1-of-$K$ encoded words.
\[
\vy = \left\{\vy_1, \ldots, \vy_{C} \right\}, \quad \vy_i \in \RR^{K}
\]
where $K$ is the size of the vocabulary and $C$ is the length of the caption.

We use a convolutional neural network in order to extract a set of feature vectors which we refer to as annotation vectors. The extractor produces $L$ vectors, each of which is a $D$-dimensional representation corresponding to a part of the image.
\begin{align*}
\va = \left\{\va_1, \ldots, \va_L \right\}, \quad \va_i \in \RR^{D}
\end{align*}
In order to obtain a correspondence between the feature vectors and portions of the 2-D image, we extract features from a lower convolutional layer, unlike previous work which instead used a fully connected layer. This allows the decoder to selectively focus on certain parts of an image by selecting a subset of all the feature vectors.

\begin{figure}[tp]
\vskip 0.2in
\begin{center}
\centerline{\includegraphics[width=\columnwidth]{lstm_2.pdf}}
\caption{An LSTM cell; lines with bolded squares imply projections with a learnt weight vector. Each cell learns how to weigh its input components (input gate), while learning how to modulate that contribution to the memory (input modulator). It also learns weights which erase the memory cell (forget gate), and weights which control how this memory should be emitted (output gate).}
\label{figure:conditional_lstm}
\end{center}
\vskip -0.3 in
\end{figure}

\subsubsection{Decoder: Long Short-Term Memory Network}
We use a long short-term memory (LSTM) network~\citep{Hochreiter+Schmidhuber-1997} that produces a caption by generating one word at every time step conditioned on a context vector, the previous hidden state and the previously generated words. Our implementation of the LSTM closely follows the one used in \citet{Zaremba2014} (see Fig.~\ref{figure:conditional_lstm}). Using $T_{s,t}: \RR^{s} \rightarrow \RR^{t}$ to denote a simple affine transformation with parameters that are learned,
\begin{align}
\label{eq:lstm_gates}
\begin{pmatrix}
\vi_t \\
\vf_t \\
\vo_t \\
\vg_t \\
\end{pmatrix}
&=
\begin{pmatrix}
\sigmoid \\
\sigmoid \\
\sigmoid \\
\tanh \\
\end{pmatrix}
T_{D+m+n, n}
\begin{pmatrix}
\vE\vy_{t-1}\\
\vh_{t-1}\\
\hat{\vz}_t\\
\end{pmatrix} \\
\label{eq:lstm_memory}
\vc_t &= \vf_t \odot \vc_{t-1} + \vi_t \odot \vg_t \\
\label{eq:lstm_hidden}
\vh_t &= \vo_t \odot \tanh (\vc_{t}).
\end{align}
Here, $\vi_t$, $\vf_t$, $\vc_t$, $\vo_t$ and $\vh_t$ are the input gate, forget gate, memory, output gate and hidden state of the LSTM, respectively; $m$ and $n$ denote the embedding and LSTM dimensionality, respectively; $\vE\in\RR^{m\times K}$ is an embedding matrix; and $\sigma$ and $\odot$ denote the logistic sigmoid activation and element-wise multiplication, respectively. The vector $\hat{\vz}_t \in \RR^{D}$ is the context vector, capturing the visual information associated with a particular input location, as explained below.

In simple terms, the context vector $\hat{\vz}_t$ (equations~\eqref{eq:lstm_gates}--\eqref{eq:lstm_hidden}) is a dynamic representation of the relevant part of the image input at time $t$. We define a mechanism $\phi$ that computes $\hat{\vz}_t$ from the annotation vectors $\va_i$, $i=1,\ldots,L$, corresponding to the features extracted at different image locations.
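To make the encoder and decoder definitions above concrete, the following is a minimal NumPy sketch; it is not the implementation used for this paper, and all shapes, variable names and random parameter values are illustrative assumptions. It flattens a lower convolutional feature map into $L$ annotation vectors $\va_i \in \RR^{D}$ and performs a single decoder step implementing equations~\eqref{eq:lstm_gates}--\eqref{eq:lstm_hidden}, with the context vector $\hat{\vz}_t$ passed in as a given input since the mechanism $\phi$ that produces it is specified in Sec.~\ref{sec:det_sto}.
\begin{verbatim}
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

# Encoder side: flatten a lower convolutional feature map into
# L annotation vectors a_i in R^D. The 14 x 14 x 512 shape is
# purely illustrative.
Hs, Ws, D = 14, 14, 512
feature_map = np.random.randn(Hs, Ws, D)  # stand-in for features
a = feature_map.reshape(Hs * Ws, D)       # (L, D), L = Hs * Ws

# Decoder side: one LSTM step. Dimensions and the random affine
# map T (bias suppressed, as in the text) are also illustrative.
m, n, K = 100, 200, 1000
rng = np.random.default_rng(0)
E = 0.01 * rng.standard_normal((m, K))              # embedding
T = 0.01 * rng.standard_normal((4 * n, m + n + D))  # T_{D+m+n,n}

def lstm_step(y_prev, h_prev, c_prev, z_hat):
    """One LSTM step given the context vector z_hat."""
    x = np.concatenate([E @ y_prev, h_prev, z_hat])
    pre = T @ x                    # stacked gate pre-activations
    i = sigmoid(pre[0 * n:1 * n])  # input gate
    f = sigmoid(pre[1 * n:2 * n])  # forget gate
    o = sigmoid(pre[2 * n:3 * n])  # output gate
    g = np.tanh(pre[3 * n:4 * n])  # input modulator
    c = f * c_prev + i * g         # memory cell update
    h = o * np.tanh(c)             # new hidden state
    return h, c

# Example step: z_hat here is simply the mean annotation vector,
# standing in only as a placeholder for the output of phi.
h1, c1 = lstm_step(np.eye(K)[0], np.zeros(n), np.zeros(n),
                   a.mean(axis=0))
\end{verbatim}
In the stochastic and deterministic variants described in Sec.~\ref{sec:det_sto}, only the computation of $\hat{\vz}_t$ from the annotation vectors changes; the decoder step sketched above stays the same.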
For each location $i$, the mechanism generates a positive weight $\alpha_i$ which can be interpreted either as the probability that location $i$ is the right place to focus for producing the next word (the ``hard'' but stochastic attention mechanism), or as the relative importance to give to location $i$ in blending the $\va_i$'s together. The weight $\alpha_i$ of each annotation vector $\va_i$ is computed by an \emph{attention model} $f_{\mbox{att}}$ for which we use a multilayer perceptron conditioned on the previous hidden state $\vh_{t-1}$. The soft version of this attention mechanism was introduced by~\citet{Bahdanau2014}. For emphasis, we note that the hidden state varies as the output RNN advances in its output sequence: ``where'' the network looks next depends on the sequence of words that has already been generated.
\begin{align}
e_{ti} &= f_{\mbox{att}} (\va_i, \vh_{t-1}) \\
\label{eq:alpha}
\alpha_{ti} &= \frac{\exp(e_{ti})}{\sum_{k=1}^L \exp(e_{tk})}.
\end{align}
Once the weights (which sum to one) are computed, the context vector $\hat{\vz}_t$ is computed by
\begin{align}
\label{eq:context}
\hat{\vz}_t = \phi\left( \left\{ \va_i \right\}, \left\{ \alpha_i \right\} \right),
\end{align}
where $\phi$ is a function that returns a single vector given the set of annotation vectors and their corresponding weights. The details of the $\phi$ function are discussed in Sec.~\ref{sec:det_sto}.

The initial memory state and hidden state of the LSTM are predicted by an average of the annotation vectors fed through two separate MLPs ($f_{\text{init,c}}$ and $f_{\text{init,h}}$):
\begin{align}
\vc_0 = f_{\text{init,c}} \Big(\frac{1}{L} \sum_i^L \va_i\Big) \nonumber \\
\vh_0 = f_{\text{init,h}} \Big(\frac{1}{L} \sum_i^L \va_i\Big) \nonumber
\end{align}
In this work, we use a deep output layer~\citep{Pascanu2014} to compute the output word probability given the LSTM state, the context vector and the previous word:
\begin{gather}
\label{eq:p-out}
p(\vy_t | \va, \vy_1^{t-1}) \propto \exp(\vL_o(\vE\vy_{t-1} + \vL_h\vh_t+ \vL_z \hat{\vz}_t)),
\end{gather}
where $\vL_o\in\RR^{K\times m}$, $\vL_h\in\RR^{m\times n}$, $\vL_z\in\RR^{m\times D}$, and $\vE$ are learned parameters initialized randomly.

\section{Learning Stochastic ``Hard'' vs Deterministic ``Soft'' Attention}
\label{sec:det_sto}
In this section, we discuss two alternative mechanisms for the attention model $f_{\mbox{att}}$: stochastic attention and deterministic attention.

\begin{figure*}[!tp]
\label{fig:second}
\caption{Examples of mistakes where we can use attention to gain intuition into what the model saw.}
\includegraphics[width=1.02\textwidth]{errors.pdf}
\label{fig:subfigures}
\end{figure*}

\subsection{Stochastic ``Hard'' Attention}
\label{sec:sto_attn}
We use the location variable $s_t$ to represent where the model decides to focus attention when generating the $t^{th}$ word. $s_{t,i}$ is an indicator one-hot variable which is set to 1 if the $i$-th location (out of $L$) is the one used to extract visual features. By treating the attention locations as intermediate latent variables, we can assign a multinoulli distribution parametrized by $\{\alpha_i\}$, and view $\hat{\vz}_t$ as a random variable:
\begin{align}
\label{eq:s_dist}
p(&s_{t,i} = 1 \mid s_{j