|
{ |
|
"paper_id": "I17-1014", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:38:41.814141Z" |
|
}, |
|
"title": "Imagination Improves Multimodal Translation", |
|
"authors": [ |
|
{

"first": "Desmond",

"middle": [],

"last": "Elliott",

"suffix": "",

"affiliation": {

"laboratory": "",

"institution": "",

"location": {}

},

"email": ""

},

{

"first": "\u00c1kos",

"middle": [],

"last": "K\u00e1d\u00e1r",

"suffix": "",

"affiliation": {

"laboratory": "",

"institution": "Tilburg University",

"location": {}

},

"email": ""

}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We decompose multimodal translation into two sub-tasks: learning to translate and learning visually grounded representations. In a multitask learning framework, translations are learned in an attentionbased encoder-decoder, and grounded representations are learned through image representation prediction. Our approach improves translation performance compared to the state of the art on the Multi30K dataset. Furthermore, it is equally effective if we train the image prediction task on the external MS COCO dataset, and we find improvements if we train the translation model on the external News Commentary parallel text.", |
|
"pdf_parse": { |
|
"paper_id": "I17-1014", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We decompose multimodal translation into two sub-tasks: learning to translate and learning visually grounded representations. In a multitask learning framework, translations are learned in an attentionbased encoder-decoder, and grounded representations are learned through image representation prediction. Our approach improves translation performance compared to the state of the art on the Multi30K dataset. Furthermore, it is equally effective if we train the image prediction task on the external MS COCO dataset, and we find improvements if we train the translation model on the external News Commentary parallel text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Multimodal machine translation is the task of translating sentences in context, such as images paired with a parallel text . This is an emerging task in the area of multilingual multimodal natural language processing. Progress on this task may prove useful for translating the captions of the images illustrating online news articles, and for multilingual closed captioning in international television and cinema.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Initial efforts have not convincingly demonstrated that visual context can improve translation quality. In the results of the First Multimodal Translation Shared Task, only three systems outperformed an off-the-shelf text-only phrase-based machine translation model, and the best performing system was equally effective with or without the visual features . There remains an open question about how translation models should take advantage of visual context. We present a multitask learning model that decomposes multimodal translation into learning a translation model and learning visually grounded representations. This decomposition means that our model can be trained over external datasets of parallel text or described images, making it possible to take advantage of existing resources. Figure 1 presents an overview of our model, Imagination, in which source language representations are shared between tasks through the Shared Encoder. The translation decoder is an attention-based neural machine translation model (Bahdanau et al., 2015) , and the image prediction decoder is trained to predict a global feature vector of an image that is associated with a sentence (Chrupa\u0142a et al., 2015, IMAGINET) . This decomposition encourages grounded learning in the shared encoder because the IMAGINET decoder is trained to imagine the image associated with a sentence. It has been shown that grounded representations are qualitatively different from their text-only counterparts (K\u00e1d\u00e1r et al., 2016) and correlate better with human similarity judgements (Chrupa\u0142a et al., 2015) . We assess the success of the grounded learning by evaluating the image prediction model on an image-sentence ranking task to determine if the shared representations are useful for image retrieval (Hodosh et al., 2013) . In contrast with most previous work, our model does not take images as input at translation time, rather it learns grounded representations in the shared encoder.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1024, |
|
"end": 1047, |
|
"text": "(Bahdanau et al., 2015)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1176, |
|
"end": 1209, |
|
"text": "(Chrupa\u0142a et al., 2015, IMAGINET)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1481, |
|
"end": 1501, |
|
"text": "(K\u00e1d\u00e1r et al., 2016)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1556, |
|
"end": 1579, |
|
"text": "(Chrupa\u0142a et al., 2015)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1778, |
|
"end": 1799, |
|
"text": "(Hodosh et al., 2013)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 794, |
|
"end": 800, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We evaluate Imagination on the Multi30K dataset using a combination of in-domain and out-of-domain data. In the indomain experiments, we find that multitasking translation with image prediction is competitive with the state of the art. Our model achieves 55.8 Meteor as a single model trained on multimodal in-domain data, and 57.6 Meteor as an ensemble.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In the experiments with out-of-domain resources, we find that the improvement in translation quality holds when training the IMAGINET decoder on the MS COCO dataset of described images (Chen et al., 2015) . Furthermore, if we significantly improve our text-only baseline using out-of-domain parallel text from the News Commentary corpus (Tiedemann, 2012) , we still find improvements in translation quality from the auxiliary image prediction task. Finally, we report a state-of-the-art result of 59.3 Meteor on the Multi30K corpus when ensembling models trained on in-and out-of-domain resources.", |
|
"cite_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 204, |
|
"text": "(Chen et al., 2015)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 354, |
|
"text": "(Tiedemann, 2012)", |
|
"ref_id": "BIBREF51" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The main contributions of this paper are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We show how to apply multitask learning to multimodal translation. This makes it possible to train models for this task using external resources alongside the expensive triplealigned source-target-image data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We decompose multimodal translation into two tasks: learning to translate and learning grounded representations. We show that each task can be trained on large-scale external resources, e.g. parallel news text or images described in a single language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We present a model that achieves state of the art results without using images as an input. Instead, our model learns visually grounded source language representations using an auxiliary image prediction objective. Our model does not need any additional parameters to translate unseen sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Multimodal translation is the task of producing target language translation y, given the source language sentence x and additional context, such as an image v . Let x be a source language sentence consisting of N tokens: x 1 , x 2 , . . ., x n and let y be a target language sentence consisting of M tokens: y 1 , y 2 , . . ., y m . The training data consists of tuples D \u2208 (x, y, v) , where x is a description of image v, and y is a translation of x.", |
|
"cite_spans": [ |
|
{ |
|
"start": 374, |
|
"end": 383, |
|
"text": "(x, y, v)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Multimodal translation has previously been framed as minimising the negative log-likelihood of a translation model that is additionally conditioned on the image, i.e. J(\u03b8) = \u2212 j log p(y j |y <j , x, v). Here, we decompose the problem into learning to translate and learning visually grounded representations. The decomposition is based on sharing parameters \u03b8 between these two tasks, and learning task-specific parameters \u03c6. We learn the parameters in a multitask model with shared parameters in the source language encoder. The translation model has taskspecific parameters \u03c6 t in the attention-based decoder, which are optimized through the translation loss J T (\u03b8, \u03c6 t ). Grounded representations are learned through an image prediction model with task-specific parameters \u03c6 g in the imageprediction decoder by minimizing J G (\u03b8, \u03c6 g ). The joint objective is given by mixing the translation and image prediction tasks with the parameter w:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "J(\u03b8, \u03c6) = wJ T (\u03b8, \u03c6 t ) + (1 \u2212 w)J G (\u03b8, \u03c6 g ) (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our decomposition of the problem makes it straightforward to optimise this objective without paired tuples, e.g. where we have an external dataset of described images D image \u2208 (x, v) or an external parallel corpus D text \u2208 (x, y).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We train our multitask model following the approach of Luong et al. (2016) . We define a primary task and an auxiliary task, and a set of parameters \u03b8 to be shared between the tasks. A minibatch of updates is performed for the primary task with probability w, and for the auxiliary task with 1\u2212w. The primary task is trained until convergence and weight w determines the frequency of parameter updates for the auxiliary task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 74, |
|
"text": "Luong et al. (2016)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2" |
|
}, |
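
{

"text": "To make the training schedule concrete, the following fragment is a minimal sketch (hypothetical Python pseudocode, not our released implementation): at each step a minibatch update is drawn for the translation task with probability w and for the image prediction task with probability 1 \u2212 w, and both updates pass through the shared encoder parameters \u03b8.\n\nimport random\n\ndef train_multitask(translation_batches, imaginet_batches, translation_step, imaginet_step, w=0.5, max_updates=100000):\n    # translation_step is assumed to update the shared encoder and translation decoder (theta, phi_t);\n    # imaginet_step is assumed to update the shared encoder and image prediction decoder (theta, phi_g).\n    for step in range(max_updates):\n        if random.random() < w:\n            translation_step(next(translation_batches))  # primary task minibatch\n        else:\n            imaginet_step(next(imaginet_batches))  # auxiliary task minibatch",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Problem Formulation",

"sec_num": "2"

},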
|
{ |
|
"text": "The encoder network of our model learns a representation of a sequence of N tokens x 1...n in the source language with a bidirectional recurrent neural network (Schuster and Paliwal, 1997) . This representation is shared between the different tasks. Each token is represented by a one-hot vector x i , which is mapped into an embedding e i through a learned matrix E:", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 188, |
|
"text": "(Schuster and Paliwal, 1997)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Shared Encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "e i = x i \u2022 E", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Shared Encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "A sentence is processed by a pair of recurrent neural networks, where one captures the sequence left-to-right (forward), and the other captures the sequence right-to-left (backward). The initial state of the encoder h \u22121 is a learned parameter:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Shared Encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2212 \u2192 h i = \u2212\u2212\u2192 RNN( \u2212\u2212\u2192 h i\u22121 , e i ) (3) \u2190 \u2212 h i = \u2190\u2212\u2212 RNN( \u2190\u2212\u2212 h i\u22121 , e i )", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Shared Encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Each token in the source language input sequence is represented by a concatenation of the forward and backward hidden state vectors:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Shared Encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h i = [ \u2212 \u2192 h i ; \u2190 \u2212 h i ]", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Shared Encoder", |
|
"sec_num": "3.1" |
|
}, |
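
{

"text": "As an illustration of the shared encoder (Eqns. 2-5), the following is a simplified PyTorch-style sketch; the dimensions follow Section 5.1 and the class is hypothetical rather than our actual implementation.\n\nimport torch\nimport torch.nn as nn\n\nclass SharedEncoder(nn.Module):\n    def __init__(self, vocab_size, emb_dim=620, hidden_dim=1000):\n        super().__init__()\n        self.embed = nn.Embedding(vocab_size, emb_dim)  # learned embedding matrix E\n        self.birnn = nn.GRU(emb_dim, hidden_dim, bidirectional=True, batch_first=True)\n\n    def forward(self, tokens):\n        # tokens: LongTensor of token indices x_1..x_N with shape (batch, N)\n        e = self.embed(tokens)  # e_i = x_i * E\n        h, _ = self.birnn(e)  # h_i = [forward h_i ; backward h_i], shape (batch, N, 2 * hidden_dim)\n        return h",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Shared Encoder",

"sec_num": "3.1"

},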
|
{ |
|
"text": "The translation model decoder is an attentionbased recurrent neural network (Bahdanau et al., 2015) . Tokens in the decoder are represented by a one-hot vector y j , which is mapped into an embedding e j through a learned matrix E y :", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 99, |
|
"text": "(Bahdanau et al., 2015)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Machine Translation Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "e j = y j \u2022 E y", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Neural Machine Translation Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The inputs to the decoder are the previously predicted token y j\u22121 , the previous decoder state d j\u22121 , and a timestep-dependent context vector c j calculated over the encoder hidden states:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Machine Translation Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "d j = RNN(d j\u22121 , y j\u22121 , e j )", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Neural Machine Translation Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The initial state of the decoder d -1 is a nonlinear transform of the mean of the encoder states, where W init is a learned parameter:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Machine Translation Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "d -1 = tanh(W init \u2022 1 N N i h i )", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Neural Machine Translation Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The context vector c j is a weighted sum over the encoder hidden states, where N denotes the length of the source sentence:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Machine Translation Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "c j = N i=1 \u03b1 ji h i (9)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Machine Translation Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The \u03b1 ji values are the proportion of which the encoder hidden state vectors h 1...n contribute to the decoder hidden state when producing the jth token in the translation. They are computed by a feed-forward neural network, where v a , W a and U a are learned parameters:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Machine Translation Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b1 ji = exp(e ji ) N l=1 exp(e li )", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Neural Machine Translation Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "e ji = v a \u2022 tanh(W a \u2022 d j\u22121 + U a \u2022 h i ) (11)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Machine Translation Decoder", |
|
"sec_num": "3.2" |
|
}, |
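
{

"text": "For concreteness, the attention computation in Equations 9-11 can be sketched as follows (hypothetical NumPy code; the variable names mirror the equations and are not taken from our implementation):\n\nimport numpy as np\n\ndef attention_context(d_prev, H, W_a, U_a, v_a):\n    # d_prev: previous decoder state d_{j-1}, shape (dim_d,)\n    # H: encoder states h_1..h_N, shape (N, dim_h)\n    scores = np.tanh(d_prev @ W_a + H @ U_a) @ v_a  # Eqn. 11: unnormalised scores e_ji\n    alpha = np.exp(scores - scores.max())\n    alpha = alpha / alpha.sum()  # Eqn. 10: softmax over source positions gives alpha_ji\n    c_j = alpha @ H  # Eqn. 9: context vector as a weighted sum of encoder states\n    return c_j, alpha",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Neural Machine Translation Decoder",

"sec_num": "3.2"

},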
|
{ |
|
"text": "From the hidden state d j the network predicts the conditional distribution of the next token y j , given a target language embedding e j\u22121 of the previous token, the current hidden state d j , and the calculated context vector c j . Note that at training time, y j\u22121 is the true observed token; whereas for unseen data we use the inferred token\u0177 j\u22121 sampled from the output of the softmax:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Machine Translation Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(y j |y <j , c) = softmax(tanh(e j\u22121 + d j + c j ))", |
|
"eq_num": "(12)" |
|
} |
|
], |
|
"section": "Neural Machine Translation Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The translation model is trained to minimise the negative log likelihood of predicting the target language output:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Machine Translation Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "J N LL (\u03b8, \u03c6 t ) = \u2212 j log p(y j |y <j , x) (13)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Machine Translation Decoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The image prediction decoder is trained to predict the visual feature vector of the image associated with a sentence (Chrupa\u0142a et al., 2015) . It encourages the shared encoder to learn grounded representations for the source language.", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 140, |
|
"text": "(Chrupa\u0142a et al., 2015)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Imaginet Decoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "A source language sentence is encoded using the Shared Encoder, as described in Section 3.1. Then we transform the shared encoder representation into a single vector by taking the mean pool over the hidden state annotations, the same way we initialise the hidden state of the translation decoder (Eqn. 8). This sentence representation is the input to a feed-forward neural network that predicts the visual feature vectorv associated with a ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Imaginet Decoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "v = tanh(W vis \u2022 1 N N i h i )", |
|
"eq_num": "(14)" |
|
} |
|
], |
|
"section": "Imaginet Decoder", |
|
"sec_num": "3.3" |
|
}, |
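
{

"text": "Equation 14 amounts to a mean pool over the encoder states followed by a learned projection; a minimal sketch (hypothetical NumPy code, with W_vis of shape 2048 x 2000 as in Section 5.1):\n\nimport numpy as np\n\ndef predict_image_vector(H, W_vis):\n    # H: encoder states h_1..h_N for one sentence, shape (N, 2000)\n    # W_vis: learned projection matrix, shape (2048, 2000)\n    sentence = H.mean(axis=0)  # mean pool over the token annotations\n    return np.tanh(W_vis @ sentence)  # predicted visual feature vector v_hat, shape (2048,)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Imaginet Decoder",

"sec_num": "3.3"

},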
|
{ |
|
"text": "This decoder is trained to predict the true image vector v with a margin-based objective, parameterised by the minimum margin \u03b1, and the cosine distance d(\u2022, \u2022). A margin-based objective has previously been used in grounded representation learning (Vendrov et al., 2016; Chrupa\u0142a et al., 2017) . The contrastive examples v are drawn from the other instances in a minibatch:", |
|
"cite_spans": [ |
|
{ |
|
"start": 248, |
|
"end": 270, |
|
"text": "(Vendrov et al., 2016;", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 293, |
|
"text": "Chrupa\u0142a et al., 2017)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Imaginet Decoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "J M AR (\u03b8, \u03c6 t ) = v =v max{0, \u03b1 \u2212 d(v, v) + d(v, v )}", |
|
"eq_num": "(15)" |
|
} |
|
], |
|
"section": "Imaginet Decoder", |
|
"sec_num": "3.3" |
|
}, |
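
{

"text": "A minimal sketch of the margin-based objective in Equation 15 (hypothetical NumPy code, not our training implementation): here d(., .) is computed with cosine similarity, so the hinge pushes the predicted vector to be closer to its paired image than to the contrastive images drawn from the same minibatch.\n\nimport numpy as np\n\ndef imaginet_margin_loss(V_hat, V, alpha=0.1):\n    # V_hat: predicted image vectors, shape (B, 2048); V: true image vectors, shape (B, 2048)\n    V_hat = V_hat / np.linalg.norm(V_hat, axis=1, keepdims=True)\n    V = V / np.linalg.norm(V, axis=1, keepdims=True)\n    sim = V_hat @ V.T  # sim[i, k]: similarity between prediction i and image k\n    loss = 0.0\n    for i in range(sim.shape[0]):\n        for k in range(sim.shape[0]):\n            if k != i:  # contrastive examples are the other images in the minibatch\n                loss += max(0.0, alpha - sim[i, i] + sim[i, k])\n    return loss",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Imaginet Decoder",

"sec_num": "3.3"

},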
|
{ |
|
"text": "We evaluate our model using the benchmark Multi30K dataset , which is the largest collection of images paired with sentences in multiple languages. This dataset contains 31,014 images paired with an English language sentence and a German language translation: 29,000 instances are reserved for training, 1,014 for development, and 1,000 for evaluation. 1 The English and German sentences are preprocessed by normalising the punctuation, lowercasing and tokenizing the text using the Moses toolkit. We additionally decompound the German text using Zmorge (Sennrich and Kunz, 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 353, |
|
"end": 354, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 554, |
|
"end": 579, |
|
"text": "(Sennrich and Kunz, 2014)", |
|
"ref_id": "BIBREF45" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "This results in vocabulary sizes of 10,214 types for English and 16,022 for German.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We also use two external datasets to evaluate our model: the MS COCO dataset of English described images (Chen et al., 2015) , and the English-German News Commentary parallel corpus (Tiedemann, 2012) . When we perform experiments with the News Commentary corpus, we first calculate a 17,597 sub-word vocabulary using SentencePiece (Schuster and Nakajima, 2012) over the concatentation of the Multi30K and News Commentary datasets. This gives us a shared vocabulary for the external data that reduces the number of out-of-vocabulary tokens.", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 124, |
|
"text": "(Chen et al., 2015)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 182, |
|
"end": 199, |
|
"text": "(Tiedemann, 2012)", |
|
"ref_id": "BIBREF51" |
|
}, |
|
{ |
|
"start": 331, |
|
"end": 360, |
|
"text": "(Schuster and Nakajima, 2012)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Images are represented by 2048D vectors extracted from the 'pool5/7x7 s1' layer of the GoogLeNet v3 CNN (Szegedy et al., 2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 126, |
|
"text": "(Szegedy et al., 2015)", |
|
"ref_id": "BIBREF50" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We evaluate our multitasking approach with inand out-of-domain resources. We start by reporting results of models trained using only the Multi30K dataset. We also report the results of training the IMAGINET decoder with the COCO dataset. Finally, we report results on incorporating the external News Commentary parallel text into our model. Throughout, we report performance of the En\u2192De translation using Meteor (Denkowski and Lavie, 2014) and BLEU (Papineni et al., 2002) against lowercased tokenized references.", |
|
"cite_spans": [ |
|
{ |
|
"start": 413, |
|
"end": 440, |
|
"text": "(Denkowski and Lavie, 2014)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 450, |
|
"end": 473, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The encoder is a 1000D Gated Recurrent Unit bidirectional recurrent neural network (Cho et al., 2014, GRU) with 620D embeddings. We share all of the encoder parameters between the primary and auxiliary task. The translation decoder is a 1000D GRU recurrent neural network, with a 2000D context vector over the encoder states, and 620D word embeddings (Sennrich et al., 2017) . The Imaginet decoder is a single-layer feed-forward network, where we learn the parameters W vis \u2208 R 2048x2000 to predict the true image vector with \u03b1 = 0.1 for the Imaginet objective (Equation 15). The models are trained using the Adam optimiser with the default hyperparameters (Kingma and Ba, 2015) in minibatches of 80 instances. The translation task is defined as the primary task and convergence is reached when BLEU has not increased for five epochs on the validation data. Gradients are clipped when their norm ex- Table 2 : En\u2192De translation results on the Multi30K dataset. Our Imagination model is competitive with the state of the art when it is trained on in-domain data. We report the mean and standard deviation of three random initialisations. ceeds 1.0. Dropout is set to 0.2 for the embeddings and the recurrent connections in both tasks (Gal and Ghahramani, 2016) . Translations are decoded using beam search with 12 hypotheses.", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 106, |
|
"text": "(Cho et al., 2014, GRU)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 351, |
|
"end": 374, |
|
"text": "(Sennrich et al., 2017)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 1233, |
|
"end": 1259, |
|
"text": "(Gal and Ghahramani, 2016)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 900, |
|
"end": 907, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hyperparameters", |
|
"sec_num": "5.1" |
|
}, |
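
{

"text": "Summarised as a configuration sketch (the values restate the description above; the key names are illustrative only):\n\nCONFIG = {\n    \"encoder\": {\"type\": \"bidirectional GRU\", \"hidden_dim\": 1000, \"emb_dim\": 620},\n    \"translation_decoder\": {\"type\": \"GRU\", \"hidden_dim\": 1000, \"context_dim\": 2000, \"emb_dim\": 620},\n    \"imaginet_decoder\": {\"type\": \"feed-forward\", \"output_dim\": 2048, \"margin_alpha\": 0.1},\n    \"optimiser\": {\"name\": \"adam\", \"minibatch_size\": 80, \"grad_clip_norm\": 1.0},\n    \"regularisation\": {\"dropout_embeddings\": 0.2, \"dropout_recurrent\": 0.2},\n    \"early_stopping\": {\"metric\": \"BLEU\", \"patience_epochs\": 5},\n    \"decoding\": {\"beam_size\": 12},\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Hyperparameters",

"sec_num": "5.1"

},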
|
{ |
|
"text": "We start by presenting the results of our multitask model trained using only the Multi30K dataset. We compare against state-of-the-art approaches and text-only baselines. Moses is the phrase-based machine translation model (Koehn et al., 2007) reported in . NMT is a text-only neural machine translation model. Calixto et al. 2016is most comparable to our approach: it is a multimodal variational NMT model that infers latent variables to represent the source language semantics from the image and linguistic data. Table 2 shows the results of this experiment. We can see that the combination of the attention-based translation model and the image prediction model is a 1.8 Meteor point improvement over the NMT baseline, but it is 1.1 Meteor points worse than the strong Moses baseline. Our approach is competitive with previous approaches that use visual features as inputs to the decoder and the targetside reranking model. It also competitive with Table 4 : Translation results with out-of-domain parallel text and described images. We find further improvements when we multitask with the News Commentary (NC) and COCO datasets. Toyama et al. (2016) , which also only uses images for training. These results confirm that our multitasking approach uses the image prediction task to improve the encoder of the translation model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 223, |
|
"end": 243, |
|
"text": "(Koehn et al., 2007)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 1133, |
|
"end": 1153, |
|
"text": "Toyama et al. (2016)", |
|
"ref_id": "BIBREF52" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 515, |
|
"end": 522, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 952, |
|
"end": 959, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "In-domain experiments", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Recall from Section 2 that we are interested in scenarios where x, y, and v are drawn from different sources. We now experiment with separating the translation data from the described image data using D image : MS COCO dataset of 83K described images 2 and D text : Multi30K parallel text. Table 3 shows the results of this experiment. We find that there is no significant difference between training the IMAGINET decoder on in-domain (Multi30K) or out-of-domain data (COCO). This result confirms that we can separate the parallel text from the described images.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 290, |
|
"end": 297, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "External described image data", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We now experiment with training our model on a combination of the Multi30K and the News Commentary English-German data. In these experiments, we concatenate the Multi30K and News Commentary datasets into a single D text training dataset, similar to Freitag and Al-Onaizan (2016) . We compare our model against Calixto et al. 2017, who pre-train their model on the WMT'15 English-German parallel text and backtranslate (Sennrich et al., 2016) additional sentences from the bilingual independent descriptions in the Multi30K dataset (Footnote 2). Table 4 presents the results. The text-only NMT model using sub-words is 1.2 Meteor points lower than decompounding the German text. Nevertheless, the model trained over a concatenation of the parallel texts is a 2.7 Meteor point improvement over this baseline (+ NC) and matches the performance of our Multitasking model that uses only in-domain data (Section 5.2). We do not see an additive improvement for the multitasking model with the concatenated parallel text and the indomain data (+ Imagination) using a training objective interpolation of w = 0.89 (the ratio of the training dataset sizes). This may be because we are essentially learning a translation model and the updates from the IMAGINET decoder are forgotten. Therefore, we experiment with multitasking the concatenated parallel text and the COCO dataset (w = 0.5). We find that balancing the datasets improves over the concatenated text model by 0.4 Meteor (+ Imagination (COCO)). Our multitasking approach improves upon Calixto et al. by 0.3 Meteor points. Our model can be trained in 48 hours using 240K parallel sentences and 414K described images from out-of-domain datasets. Furthermore, recall that our model does not use images as an input for translating unseen data, which results in 6.2% fewer parameters compared to using the 2048D Inception-V3 visual features to initialise the hidden state of the decoder. Table 5 presents the results of ensembling different randomly initialised models. We achieve a start-of-the-art result of 57.6 Meteor for a model trained on only in-domain data. The improvements are more pronounced for the models trained using sub-words and out-of-domain data. An ensemble of baselines trained on sub-words is initially worse than an ensemble trained on Zmorge decompounded words. However, we always see an improvement from ensembling models trained on in-and out-of-domain data. Our best ensemble is trained on Multi30K parallel text, the News Commentary parallel text, and the COCO descriptions to set a new state-of-the-art result of 59.3 Meteor.", |
|
"cite_spans": [ |
|
{ |
|
"start": 249, |
|
"end": 278, |
|
"text": "Freitag and Al-Onaizan (2016)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 418, |
|
"end": 441, |
|
"text": "(Sennrich et al., 2016)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 1534, |
|
"end": 1555, |
|
"text": "Calixto et al. by 0.3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 545, |
|
"end": 552, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1932, |
|
"end": 1939, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "External parallel text data", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "We also evaluate our approach against 16 submissions to the WMT Shared Task on Multimodal Translation and Multilingual Image Description (Elliott et al., 2017) . This shared task features a new evaluation dataset: Multi30K Test 2017 (Elliott et al., 2017) , which contains 1,000 new evaluation images. The shared task submissions are evaluated with Meteor and human direct assessment (Graham et al., 2017) . We submitted two systems, based on whether they used only the Multi30K dataset (constrained) or used additional external resources (unconstrained). Our constrained submission is an ensemble of three Imagination models trained over only the Multi30K training data. This achieves a Meteor score of 51.2, and a joint 3rd place ranking according to human assessment. Our unconstrained submission is an ensemble of three Imagination models trained with the Multi30K, News Commentary, and MS COCO datasets. It achieves a Meteor score of Source: two children on their stomachs lay on the ground under a pipe NMT: zwei kinder auf ihren gesichtern liegen unter dem boden auf dem boden Ours: zwei kinder liegen b\u00e4uchlings auf dem boden unter einer schaukel Source: small dog in costume stands on hind legs to reach dangling flowers NMT: ein kleiner hund steht auf dem hinterbeinen und l\u00e4uft , nach links von blumen zu sehen Ours: ein kleiner hund in einem kost\u00fcm steht auf den hinterbeinen , um die blumen zu erreichen Source: a bird flies across the water NMT: ein vogel fliegt\u00fcber das wasser Ours: ein vogel fliegt durch das wasser Table 6 : Examples where our model improves or worsens the translation compared to the NMT baseline. Top: NMT translates the wrong body part; both models skip \"pipe\". Middle: NMT incorrectly translates the verb and misses several nouns. Bottom: Our model incorrectly translates the preposition.", |
|
"cite_spans": [ |
|
{ |
|
"start": 137, |
|
"end": 159, |
|
"text": "(Elliott et al., 2017)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 233, |
|
"end": 255, |
|
"text": "(Elliott et al., 2017)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 384, |
|
"end": 405, |
|
"text": "(Graham et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1532, |
|
"end": 1539, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Multi30K 2017 results", |
|
"sec_num": "5.6" |
|
}, |
|
{ |
|
"text": "53.5, and 2nd place in the human assessment. Table 6 shows examples of where the multitasking model improves or worsens translation performance compared to the baseline model 3 . The first example shows that the baseline model makes a significant error in translating the pose of the children, translating \"on their stomachs\" as \"on their faces\"). The middle example demonstrates that the baseline model translates the dog as walking (\"l\u00e4uft\") and then makes grammatical and sense errors after the clause marker. Both models neglect to translate the word \"dangling\", which is a low-frequency word in the training data. There are instances where the baseline produces better translations than the multitask model: In the bottom example, our model translates a bird flying through the water (\"durch\") instead of \"over\" the water.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 45, |
|
"end": 52, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Multi30K 2017 results", |
|
"sec_num": "5.6" |
|
}, |
|
{ |
|
"text": "6.1 Does the model learn grounded representations?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "A natural question to ask if whether the multitask model is actually learning representations that are relevant for the images. We answer this question by evaluating the Imaginet decoder in an imagesentence ranking task. Here the input is a source language sentence, from which we predict its im- 3 We used MT-ComparEval (Klejch et al., 2015) age vectorv. The predicted vectorv can be compared against the true image vectors v in the evaluation data using the cosine distance to produce a ranked order of the images. Our model returns a median rank of 11.0 for the true image compared to the predicted image vector. Figure 2 shows examples of the nearest neighbours of the images predicted by our multitask model. We can see that the combination of the multitask source language representations and IMAGINET decoder leads to the prediction of relevant images. This confirms that the shared encoder is indeed learning visually grounded representations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 297, |
|
"end": 298, |
|
"text": "3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 321, |
|
"end": 342, |
|
"text": "(Klejch et al., 2015)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 616, |
|
"end": 624, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
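
{

"text": "The ranking evaluation can be sketched as follows (hypothetical NumPy code): each predicted vector is compared against every true image vector by cosine distance, and we report the median rank of the paired image.\n\nimport numpy as np\n\ndef median_rank(V_hat, V):\n    # V_hat: predicted image vectors for the evaluation sentences, shape (M, 2048)\n    # V: true image vectors for the same instances, shape (M, 2048)\n    V_hat = V_hat / np.linalg.norm(V_hat, axis=1, keepdims=True)\n    V = V / np.linalg.norm(V, axis=1, keepdims=True)\n    distances = 1.0 - V_hat @ V.T  # cosine distance between every prediction and every image\n    ranks = []\n    for i in range(distances.shape[0]):\n        order = np.argsort(distances[i])  # images sorted from nearest to farthest\n        ranks.append(int(np.where(order == i)[0][0]) + 1)  # 1-based rank of the paired image\n    return float(np.median(ranks))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Discussion",

"sec_num": "6"

},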
|
{ |
|
"text": "We now study the effect of varying the Convolutional Neural Network used to extract the visual features used in the Imaginet decoder. It has previously been shown that the choice of visual features can affect the performance of vision and language models (Jabri et al., 2016; Kiela et al., 2016) . We compare the effect of training the IMAGINET decoder to predict different types of image features, namely: 4096D features extracted from the 'fc7'' layer of the VGG-19 model (Simonyan and Zisserman, 2015) , 2048D features extracted from the 'pool5/7x7 s1' layer of InceptionNet V3 (Szegedy et al., 2015) , and 2048D features extracted from 'avg pool' layer of ResNet-50 (He et al., 2016) . Table 7 shows the results of this experiment. There is a clear difference between predicting the 2048D (a) Nearest neighbours for \"a native woman is working on a craft project .\"", |
|
"cite_spans": [ |
|
{ |
|
"start": 255, |
|
"end": 275, |
|
"text": "(Jabri et al., 2016;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 276, |
|
"end": 295, |
|
"text": "Kiela et al., 2016)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 474, |
|
"end": 504, |
|
"text": "(Simonyan and Zisserman, 2015)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 581, |
|
"end": 603, |
|
"text": "(Szegedy et al., 2015)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 670, |
|
"end": 687, |
|
"text": "(He et al., 2016)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 690, |
|
"end": 697, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The effect of visual feature vectors", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "(b) Nearest neighbours for \"there is a cafe on the street corner with an oval painting on the side of the building .\" Figure 2 : We can interpret the IMAGINET Decoder by visualising the predictions made by our model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 126, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The effect of visual feature vectors", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Inception-V3 56.0 \u00b1 0.1 11.0 \u00b1 0.0", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Meteor Median Rank", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Resnet-50 54.7 \u00b1 0.4 11.7 \u00b1 0.5 VGG-19 53.6 \u00b1 1.8 13.0 \u00b1 0.0 Table 7 : The type of visual features predicted by the IMAGINET Decoder has a strong impact on the Multitask model performance.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 68, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Meteor Median Rank", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "vectors (Inception-V3 and ResNet-50) compared to the 4096D vector from VGG-19). This difference is reflected in both the translation Meteor score and the Median rank of the images in the validation dataset. This is likely because it is easier to learn the parameters of the image prediction model that has fewer parameters (8.192 million for VGG-19 vs. 4.096 million for Inception-V3 and ResNet-50). However, it is not clear why there is such a pronounced difference between the Inception-V3 and ResNet-50 models 4 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Meteor Median Rank", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Initial work on multimodal translation used semantic or spatially-preserving image features as inputs to a translation model. Semantic image features are typically extracted from the final layer of a pre-trained object recognition CNN, e.g. 'pool5/7x7 s1' in GoogLeNet (Szegedy et al., 2015) . This type of vector has been used as input to the encoder (Elliott et al., 2015; Huang et al., 2016) , the decoder (Libovick\u00fd et al., 2016) , or as features in a phrase-based translation model (Shah et al., 2016; Hitschler et al., 2016) . Spatially-preserving image features are extracted from deeper inside a CNN, where the position of a feature is related to its position in the image. These features have been used in \"double-attention models\", which calculate independent context vectors for the source language and a convolutional image features (Calixto et al., 2016; Caglayan et al., 2016; . We use an attentionbased translation model but our multitask model does not use images for translation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 269, |
|
"end": 291, |
|
"text": "(Szegedy et al., 2015)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 374, |
|
"text": "(Elliott et al., 2015;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 375, |
|
"end": 394, |
|
"text": "Huang et al., 2016)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 409, |
|
"end": 433, |
|
"text": "(Libovick\u00fd et al., 2016)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 487, |
|
"end": 506, |
|
"text": "(Shah et al., 2016;", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 507, |
|
"end": 530, |
|
"text": "Hitschler et al., 2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 845, |
|
"end": 867, |
|
"text": "(Calixto et al., 2016;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 868, |
|
"end": 890, |
|
"text": "Caglayan et al., 2016;", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "More related to our work is an extension of Variational Neural Machine Translation to infer latent variables to explicitly model the semantics of source sentences from visual and linguistic information (Toyama et al., 2016) . They report improvements on the Multi30K data set but their model needs additional parameters in the \"neural inferrer\" modules. In our model, the grounded semantics are represented implicitly in the shared encoder. They assume Source-Target-Image training data, whereas our approach achieves equally good results if we train on separate Source-Image and Source-Target datasets. Saha et al. (2016) study cross-lingual image description where the task is to generate a sentence in language L 1 given the image, using only Image-L 2 and L 1 -L 2 training corpora. They propose a Correlational Encoder-Decoder to model the Image-L 2 and L 1 -L 2 data, which learns correlated representations for paired Image-L 2 data and decodes L 1 from the joint representation. Similar to our work, the encoder is trained by minimizing two loss functions: the Image-L 2 correlation loss, and the L 1 decoding cross-entropy loss. Nakayama and Nishida (2017) consider a zero-resource problem, where the task is to translate from L 1 to L 2 with only Image-L 1 and Image-L 2 corpora. Their model embeds the image, L 1 , and L 2 in a joint multimodal space learned by minimizing a multi-task ranking loss between both pairs of examples. In this paper, we focus on enriching source language representations with visual information instead of zeroresource learning.", |
|
"cite_spans": [ |
|
{ |
|
"start": 202, |
|
"end": 223, |
|
"text": "(Toyama et al., 2016)", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 1138, |
|
"end": 1165, |
|
"text": "Nakayama and Nishida (2017)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Multitask Learning improves the generalisability of a model by requiring it to be useful for more than one task (Caruana, 1997) . This approach has recently been used to improve the performance of sentence compression using eye gaze as an auxiliary task (Klerke et al., 2016) , and to improve shallow parsing accuracy through the auxiliary task of predicting keystrokes in an out-of-domain corpus (Plank, 2016) . More recently, Bingel and S\u00f8gaard (2017) analysed the beneficial relationships between primary and auxiliary sequential prediction tasks. In the translation literature, multitask learning has been used to learn a one-to-many languages translation model (Dong et al., 2015) , a multi-lingual translation model with a single attention mechanism shared across multiple languages (Firat et al., 2016) , and in multitask sequence-tosequence learning without an attention-based decoder (Luong et al., 2016) . We explore the benefits of grounded learning in the specific case of multimodal translation. We combine sequence prediction with continuous (image) vector prediction, compared to previous work which multitasks different sequence prediction tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 127, |
|
"text": "(Caruana, 1997)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 254, |
|
"end": 275, |
|
"text": "(Klerke et al., 2016)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 397, |
|
"end": 410, |
|
"text": "(Plank, 2016)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 428, |
|
"end": 453, |
|
"text": "Bingel and S\u00f8gaard (2017)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 666, |
|
"end": 685, |
|
"text": "(Dong et al., 2015)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 789, |
|
"end": 809, |
|
"text": "(Firat et al., 2016)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 893, |
|
"end": 913, |
|
"text": "(Luong et al., 2016)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Visual representation prediction has been studied using static images or videos. Lin and Parikh (2015) use a conditional random field to imagine the composition of a clip-art scene for visual paraphrasing and fill-in-the-blank tasks. Chrupa\u0142a et al. (2015) predict the image vector associated with a sentence using an L2 loss; they found this improves multi-modal word similarity compared to text-only baselines. Gelderloos and Chrupa\u0142a (2016) predict the image vector associated with a sequence of phonemes using a max-margin loss, similar to our image prediction objective. Collell et al. (2017) learn to predict the visual feature vector associated with a word for word similarity and relatedness tasks. As a video reconstruction problem, Srivastava et al. (2015) propose an LSTM Autoencoder to predict video frames as a reconstruction task or as a future prediction task. Pasunuru and Bansal (2017) propose a multitask model for video description that combines unsupervised video reconstruction, lexical entailment, and video description. They find improvements from using out-of-domain resources for entailment and video prediction, similar to the improvements we find from using out-of-domain parallel text and described images.", |
|
"cite_spans": [ |
|
{ |
|
"start": 234, |
|
"end": 256, |
|
"text": "Chrupa\u0142a et al. (2015)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 413, |
|
"end": 443, |
|
"text": "Gelderloos and Chrupa\u0142a (2016)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 576, |
|
"end": 597, |
|
"text": "Collell et al. (2017)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 742, |
|
"end": 766, |
|
"text": "Srivastava et al. (2015)", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 876, |
|
"end": 902, |
|
"text": "Pasunuru and Bansal (2017)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We decompose multimodal translation into two sub-problems: learning to translate and learning visually grounded representations. In a multitask learning framework, we show how these subproblems can be addressed by sharing an encoder between a translation model and an image prediction model 5 . Our approach achieves state-of-theart results on the Multi30K dataset without using images for translation. We show that training on separate parallel text and described image datasets does not hurt performance, encouraging future research on multitasking with diverse sources of data. Furthermore, we still find improvements from image prediction when we improve our textonly baseline with the out-of-domain parallel text. Future work includes adapting our decomposition to other NLP tasks that may benefit from out-ofdomain resources, such as semantic role labelling, dependency parsing, and question-answering; exploring methods for inputting the (predicted) image into the translation model; experimenting with different image prediction architectures; multitasking different translation languages into a single shared encoder; and multitasking in both the encoder and decoder(s).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "The Multi30K dataset also contains 155K independently collected descriptions in German and English. In order to make our experiments more comparable with previous work, we do not make use of this data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Due to differences in the vocabularies of the respective datasets, we do not train on examples where more than 10% of the tokens are out-of-vocabulary in the Multi30K dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We used pre-trained CNNs (https://github.com/ fchollet/deep-learning-models), which claim equal ILSVRC object recognition performance for both models: 7.8% top-5 error with a single-model and single-crop.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Code: http://github.com/elliottd/imagination", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We are grateful to the anonymous reviewers for their feedback. We thank Joost Bastings for sharing his multitasking Nematus model, Wilker Aziz for discussions about formulating the problem, Stella Frank for finding and explaining the qualitative examples to us, and Afra Alishahi, Grzegorz Chrupa\u0142a, and Philip Schulz for feedback on earlier drafts of the paper. DE acknowledges the support of an Amazon Research Award, NWO Vici grant nr. 277-89-002 awarded to K. Sima'an, and a hardware grant from the NVIDIA Corporation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In International Con- ference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Identifying beneficial task relations for multi-task learning in deep neural networks", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Bingel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "164--169", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Bingel and A. S\u00f8gaard. 2017. Identifying beneficial task relations for multi-task learning in deep neural networks. In Proceedings of the 15th Conference of the European Chapter of the Association for Com- putational Linguistics, pages 164-169.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Multimodal attention for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Ozan", |
|
"middle": [], |
|
"last": "Caglayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lo\u00efc", |
|
"middle": [], |
|
"last": "Barrault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fethi", |
|
"middle": [], |
|
"last": "Bougares", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ozan Caglayan, Lo\u00efc Barrault, and Fethi Bougares. 2016. Multimodal attention for neural machine translation. CoRR, abs/1609.03976.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "DCU-UvA Multimodal MT System Report", |
|
"authors": [ |
|
{ |
|
"first": "Iacer", |
|
"middle": [], |
|
"last": "Calixto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Desmond", |
|
"middle": [], |
|
"last": "Elliott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stella", |
|
"middle": [], |
|
"last": "Frank", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the First Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "634--638", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iacer Calixto, Desmond Elliott, and Stella Frank. 2016. DCU-UvA Multimodal MT System Report. In Pro- ceedings of the First Conference on Machine Trans- lation, pages 634-638.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Incorporating global visual features into attention-based neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Iacer", |
|
"middle": [], |
|
"last": "Calixto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1003--1014", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iacer Calixto and Qun Liu. 2017. Incorporating global visual features into attention-based neural machine translation. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Pro- cessing, pages 1003-1014.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Doubly-Attentive Decoder for Multi-modal Neural Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Iacer", |
|
"middle": [], |
|
"last": "Calixto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Campbell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1913--1924", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iacer Calixto, Qun Liu, and Nick Campbell. 2017. Doubly-Attentive Decoder for Multi-modal Neural Machine Translation. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1913- 1924.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Multitask learning", |
|
"authors": [ |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Caruana", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Machine Learning", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "41--75", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rich Caruana. 1997. Multitask learning. Machine Learning, 28(1):41-75.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Microsoft COCO captions: Data collection and evaluation server", |
|
"authors": [ |
|
{ |
|
"first": "Xinlei", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsung-Yi", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramakrishna", |
|
"middle": [], |
|
"last": "Vedantam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saurabh", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Doll\u00e1r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"Lawrence" |
|
], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xinlei Chen, Hao Fang, Tsung-Yi Lin, Ramakr- ishna Vedantam, Saurabh Gupta, Piotr Doll\u00e1r, and C. Lawrence Zitnick. 2015. Microsoft COCO cap- tions: Data collection and evaluation server. CoRR, abs/1504.00325.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Learning phrase representations using RNN encoder-decoder for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Van Merri\u00ebnboer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Gulcehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Bougares", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1724--1734", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. Cho, B. Van Merri\u00ebnboer, C. Gulcehre, D. Bah- danau, F. Bougares, H. Schwenk, and Y. Bengio. 2014. Learning phrase representations using RNN encoder-decoder for statistical machine translation. pages 1724-1734.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Representations of language in a model of visually grounded speech signal", |
|
"authors": [ |
|
{ |
|
"first": "Grzegorz", |
|
"middle": [], |
|
"last": "Chrupa\u0142a", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lieke", |
|
"middle": [], |
|
"last": "Gelderloos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Afra", |
|
"middle": [], |
|
"last": "Alishahi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "613--622", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Grzegorz Chrupa\u0142a, Lieke Gelderloos, and Afra Al- ishahi. 2017. Representations of language in a model of visually grounded speech signal. In Pro- ceedings of the 55th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), pages 613-622.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Learning language through pictures", |
|
"authors": [ |
|
{ |
|
"first": "Grzegorz", |
|
"middle": [], |
|
"last": "Chrupa\u0142a", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c1kos", |
|
"middle": [], |
|
"last": "K\u00e1d\u00e1r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Afra", |
|
"middle": [], |
|
"last": "Alishahi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "112--118", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Grzegorz Chrupa\u0142a,\u00c1kos K\u00e1d\u00e1r, and Afra Alishahi. 2015. Learning language through pictures. In Pro- ceedings of the 53rd Annual Meeting of the Associ- ation for Computational Linguistics and the 7th In- ternational Joint Conference on Natural Language Processing, pages 112-118.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Imagined visual representations as multimodal embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Guillem", |
|
"middle": [], |
|
"last": "Collell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teddy", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Francine", |
|
"middle": [], |
|
"last": "Moens", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence (AAAI-17)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4378--4384", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillem Collell, Teddy Zhang, and Marie-Francine Moens. 2017. Imagined visual representations as multimodal embeddings. In Proceedings of the Thirty-First AAAI Conference on Artificial Intelli- gence (AAAI-17), pages 4378-4384.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Meteor universal: Language specific translation evaluation for any target language", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Denkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the EACL 2014 Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Denkowski and Alon Lavie. 2014. Meteor universal: Language specific translation evaluation for any target language. In Proceedings of the EACL 2014 Workshop on Statistical Machine Translation.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Multi-task learning for multiple language translation", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1723--1732", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Dong, H. Wu, W. He, D. Yu, and H. Wang. 2015. Multi-task learning for multiple language transla- tion. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing, pages 1723-1732.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Findings of the second shared task on multimodal machine translation and multilingual image description", |
|
"authors": [ |
|
{ |
|
"first": "Desmond", |
|
"middle": [], |
|
"last": "Elliott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stella", |
|
"middle": [], |
|
"last": "Frank", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lo\u00efc", |
|
"middle": [], |
|
"last": "Barrault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fethi", |
|
"middle": [], |
|
"last": "Bougares", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Second Conference on Machine Translation", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "215--233", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Desmond Elliott, Stella Frank, Lo\u00efc Barrault, Fethi Bougares, and Lucia Specia. 2017. Findings of the second shared task on multimodal machine transla- tion and multilingual image description. In Proceed- ings of the Second Conference on Machine Trans- lation, Volume 2: Shared Task Papers, pages 215- 233, Copenhagen, Denmark. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Multi-language image description with neural sequence models", |
|
"authors": [ |
|
{ |
|
"first": "Desmond", |
|
"middle": [], |
|
"last": "Elliott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stella", |
|
"middle": [], |
|
"last": "Frank", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Hasler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Desmond Elliott, Stella Frank, and Eva Hasler. 2015. Multi-language image description with neural se- quence models. CoRR, abs/1510.04709.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Multi30K: Multilingual English-German Image Descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Desmond", |
|
"middle": [], |
|
"last": "Elliott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stella", |
|
"middle": [], |
|
"last": "Frank", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Khalil", |
|
"middle": [], |
|
"last": "Sima'an", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 5th Workshop on Vision and Language", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Desmond Elliott, Stella Frank, Khalil. Sima'an, and Lucia Specia. 2016. Multi30K: Multilingual English-German Image Descriptions. In Proceed- ings of the 5th Workshop on Vision and Language.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Multi-way, multilingual neural machine translation with a shared attention mechanism", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Firat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "866--875", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "O. Firat, K. Cho, and Y. Bengio. 2016. Multi-way, mul- tilingual neural machine translation with a shared attention mechanism. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 866-875.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Fast domain adaptation for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Freitag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaser", |
|
"middle": [], |
|
"last": "Al-Onaizan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Markus Freitag and Yaser Al-Onaizan. 2016. Fast domain adaptation for neural machine translation. CoRR, abs/1612.06897.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A theoretically grounded application of dropout in recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Yarin", |
|
"middle": [], |
|
"last": "Gal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zoubin", |
|
"middle": [], |
|
"last": "Ghahramani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "29", |
|
"issue": "", |
|
"pages": "1019--1027", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yarin Gal and Zoubin Ghahramani. 2016. A theoret- ically grounded application of dropout in recurrent neural networks. In Advances in Neural Information Processing Systems 29, pages 1019-1027.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "From phonemes to images: levels of representation in a recurrent neural model of visually-grounded language learning", |
|
"authors": [ |
|
{ |
|
"first": "Lieke", |
|
"middle": [], |
|
"last": "Gelderloos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Grzegorz", |
|
"middle": [], |
|
"last": "Chrupa\u0142a", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1309--1319", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lieke Gelderloos and Grzegorz Chrupa\u0142a. 2016. From phonemes to images: levels of representation in a recurrent neural model of visually-grounded lan- guage learning. In Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics, pages 1309-1319.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Can machine translation systems be evaluated by the crowd alone", |
|
"authors": [ |
|
{ |
|
"first": "Yvette", |
|
"middle": [], |
|
"last": "Graham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alistair", |
|
"middle": [], |
|
"last": "Moffat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Zobel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Natural Language Engineering", |
|
"volume": "23", |
|
"issue": "1", |
|
"pages": "3--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yvette Graham, Timothy Baldwin, Alistair Moffat, and Justin Zobel. 2017. Can machine translation sys- tems be evaluated by the crowd alone. Natural Lan- guage Engineering, 23(1):3-30.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Deep residual learning for image recognition", |
|
"authors": [ |
|
{ |
|
"first": "Kaiming", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiangyu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaoqing", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "770--778", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recog- nition. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 770-778.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Multimodal Pivots for Image Caption Translation", |
|
"authors": [ |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Hitschler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shigehiko", |
|
"middle": [], |
|
"last": "Schamoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Riezler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2399--2409", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julian Hitschler, Shigehiko Schamoni, and Stefan Rie- zler. 2016. Multimodal Pivots for Image Caption Translation. In Proceedings of the 54th Annual Meeting of the Association for Computational Lin- guistics, pages 2399-2409.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Framing image description as a ranking task: Data, models and evaluation metrics", |
|
"authors": [ |
|
{ |
|
"first": "Micah", |
|
"middle": [], |
|
"last": "Hodosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Hockenmaier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "47", |
|
"issue": "", |
|
"pages": "853--899", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Micah Hodosh, Peter Young, and Julia Hockenmaier. 2013. Framing image description as a ranking task: Data, models and evaluation metrics. Journal of Ar- tificial Intelligence Research, 47:853-899.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Attention-based multimodal neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Po-Yao", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frederick", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sz-Rung", |
|
"middle": [], |
|
"last": "Shiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Oh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the First Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "639--645", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Po-Yao Huang, Frederick Liu, Sz-Rung Shiang, Jean Oh, and Chris Dyer. 2016. Attention-based multi- modal neural machine translation. In Proceedings of the First Conference on Machine Translation, pages 639-645.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Revisiting visual question answering baselines", |
|
"authors": [ |
|
{ |
|
"first": "Allan", |
|
"middle": [], |
|
"last": "Jabri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurens", |
|
"middle": [], |
|
"last": "Van Der Maaten", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "European conference on computer vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "727--739", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Allan Jabri, Armand Joulin, and Laurens van der Maaten. 2016. Revisiting visual question answer- ing baselines. In European conference on computer vision, pages 727-739.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Representation of linguistic form and function in recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Akos", |
|
"middle": [], |
|
"last": "K\u00e1d\u00e1r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Grzegorz", |
|
"middle": [], |
|
"last": "Chrupa\u0142a", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Afra", |
|
"middle": [], |
|
"last": "Alishahi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1602.08952" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Akos K\u00e1d\u00e1r, Grzegorz Chrupa\u0142a, and Afra Alishahi. 2016. Representation of linguistic form and func- tion in recurrent neural networks. arXiv preprint arXiv:1602.08952.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Comparing Data Sources and Architectures for Deep Visual Representation Learning in Semantics", |
|
"authors": [ |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anita", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Ver\u0151", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP-16)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "447--456", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Douwe Kiela, Anita L. Ver\u0151, and Stephen Clark. 2016. Comparing Data Sources and Architectures for Deep Visual Representation Learning in Semantics. In Proceedings of the Conference on Empirical Meth- ods in Natural Language Processing (EMNLP-16), pages 447-456.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Adam: A method for stochastic optimization. International Conference on Learning Representations", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Mt-compareval: Graphical evaluation interface for machine translation development", |
|
"authors": [ |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Klejch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eleftherios", |
|
"middle": [], |
|
"last": "Avramidis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aljoscha", |
|
"middle": [], |
|
"last": "Burchardt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Popel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "The Prague Bulletin of Mathematical Linguistics", |
|
"volume": "104", |
|
"issue": "1", |
|
"pages": "63--74", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ond\u0159ej Klejch, Eleftherios Avramidis, Aljoscha Bur- chardt, and Martin Popel. 2015. Mt-compareval: Graphical evaluation interface for machine transla- tion development. The Prague Bulletin of Mathe- matical Linguistics, 104(1):63-74.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Improving sentence compression by learning to predict gaze", |
|
"authors": [ |
|
{ |
|
"first": "Sigrid", |
|
"middle": [], |
|
"last": "Klerke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1528--1533", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sigrid Klerke, Yoav Goldberg, and Anders S\u00f8gaard. 2016. Improving sentence compression by learning to predict gaze. In Proceedings of the 2016 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, pages 1528-1533.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Moses: Open source toolkit for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Bertoldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brooke", |
|
"middle": [], |
|
"last": "Cowan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wade", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Moran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zens", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual meeting of Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, et al. 2007. Moses: Open source toolkit for statistical machine translation. In Pro- ceedings of the 45th Annual meeting of Association for Computational Linguistics, pages 177-180.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Cuni system for wmt16 automatic post-editing and multimodal translation tasks", |
|
"authors": [ |
|
{ |
|
"first": "Jind\u0159ich", |
|
"middle": [], |
|
"last": "Libovick\u00fd", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jind\u0159ich", |
|
"middle": [], |
|
"last": "Helcl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marek", |
|
"middle": [], |
|
"last": "Tlust\u00fd", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Pecina", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the First Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "646--654", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jind\u0159ich Libovick\u00fd, Jind\u0159ich Helcl, Marek Tlust\u00fd, Ond\u0159ej Bojar, and Pavel Pecina. 2016. Cuni system for wmt16 automatic post-editing and multimodal translation tasks. In Proceedings of the First Con- ference on Machine Translation, pages 646-654.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Don't just listen, use your imagination: Leveraging visual common sense for non-visual tasks", |
|
"authors": [ |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2984--2993", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiao Lin and Devi Parikh. 2015. Don't just listen, use your imagination: Leveraging visual common sense for non-visual tasks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recog- nition, pages 2984-2993.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Multi-task sequence to sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minh-Thang Luong, Quoc V. Le, Ilya Sutskever, Oriol Vinyals, and Lukasz Kaiser. 2016. Multi-task se- quence to sequence learning. In ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Zeroresource machine translation by multimodal encoder-decoder network with multimedia pivot. Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Hideki", |
|
"middle": [], |
|
"last": "Nakayama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noriki", |
|
"middle": [], |
|
"last": "Nishida", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "49--64", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hideki Nakayama and Noriki Nishida. 2017. Zero- resource machine translation by multimodal encoder-decoder network with multimedia pivot. Machine Translation, 31(1-2):49-64.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Bleu: A method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting on Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: A method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting on Association for Computa- tional Linguistics, pages 311-318.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Multi-Task Video Captioning with Video and Entailment Generation", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Pasunuru", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1273--1283", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Pasunuru and M. Bansal. 2017. Multi-Task Video Captioning with Video and Entailment Generation. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1273-1283.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Keystroke dynamics as signal for shallow syntactic parsing", |
|
"authors": [ |
|
{ |
|
"first": "Barbara", |
|
"middle": [], |
|
"last": "Plank", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "26th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "609--619", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barbara Plank. 2016. Keystroke dynamics as sig- nal for shallow syntactic parsing. In 26th Inter- national Conference on Computational Linguistics, pages 609-619.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "A correlational encoder decoder architecture for pivot based sequence generation", |
|
"authors": [ |
|
{ |
|
"first": "Amrita", |
|
"middle": [], |
|
"last": "Saha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitesh", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Khapra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarath", |
|
"middle": [], |
|
"last": "Chandar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Janarthanan", |
|
"middle": [], |
|
"last": "Rajendran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "26th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "109--118", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amrita Saha, Mitesh M. Khapra, Sarath Chandar, Ja- narthanan Rajendran, and Kyunghyun Cho. 2016. A correlational encoder decoder architecture for pivot based sequence generation. In 26th International Conference on Computational Linguistics: Techni- cal Papers, pages 109-118.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Japanese and korean voice search", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaisuke", |
|
"middle": [], |
|
"last": "Nakajima", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "2012 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5149--5152", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Schuster and Kaisuke Nakajima. 2012. Japanese and korean voice search. In 2012 IEEE Interna- tional Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5149-5152.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Bidirectional recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kuldip", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Paliwal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "IEEE Transactions on Signal Processing", |
|
"volume": "45", |
|
"issue": "11", |
|
"pages": "2673--2681", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Schuster and Kuldip K Paliwal. 1997. Bidirec- tional recurrent neural networks. IEEE Transactions on Signal Processing, 45(11):2673-2681.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Nematus: a Toolkit for Neural Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Firat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Hitschler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "L\u00e4ubli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Valerio Miceli Barone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Mokry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "N\u0203dejde", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "65--68", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Sennrich, O. Firat, K. Cho, A. Birch, B. Haddow, J. Hitschler, M. Junczys-Dowmunt, S. L\u00e4ubli, A. Va- lerio Miceli Barone, J. Mokry, and M. N\u0203dejde. 2017. Nematus: a Toolkit for Neural Machine Translation. pages 65-68.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Improving neural machine translation models with monolingual data", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "86--96", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Improving neural machine translation mod- els with monolingual data. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics, pages 86-96.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Zmorge: A german morphological lexicon extracted from wiktionary", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beat", |
|
"middle": [], |
|
"last": "Kunz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1063--1067", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich and Beat Kunz. 2014. Zmorge: A german morphological lexicon extracted from wik- tionary. In Language Resources and Evaluation Conference, pages 1063-1067.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Shef-multimodal: Grounding machine translation on images", |
|
"authors": [ |
|
{ |
|
"first": "Kashif", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josiah", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the First Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "660--665", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kashif Shah, Josiah Wang, and Lucia Specia. 2016. Shef-multimodal: Grounding machine translation on images. In Proceedings of the First Conference on Machine Translation, pages 660-665.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Very deep convolutional networks for large-scale image recognition", |
|
"authors": [ |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Simonyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Zisserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karen Simonyan and Andrew Zisserman. 2015. Very deep convolutional networks for large-scale image recognition. In Proceedings of the International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "A shared task on multimodal machine translation and crosslingual image description", |
|
"authors": [ |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stella", |
|
"middle": [], |
|
"last": "Frank", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Khalil", |
|
"middle": [], |
|
"last": "Sima'an", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Desmond", |
|
"middle": [], |
|
"last": "Elliott", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the First Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "543--553", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lucia Specia, Stella Frank, Khalil Sima'an, and Desmond Elliott. 2016. A shared task on multi- modal machine translation and crosslingual image description. In Proceedings of the First Conference on Machine Translation, pages 543-553.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Unsupervised learning of video representations using LSTMs", |
|
"authors": [ |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elman", |
|
"middle": [], |
|
"last": "Mansimov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhudinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "843--852", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nitish Srivastava, Elman Mansimov, and Ruslan Salakhudinov. 2015. Unsupervised learning of video representations using LSTMs. In Interna- tional Conference on Machine Learning, pages 843- 852.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Rethinking the inception architecture for computer vision", |
|
"authors": [ |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Szegedy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Vanhoucke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Ioffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathon", |
|
"middle": [], |
|
"last": "Shlens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zbigniew", |
|
"middle": [], |
|
"last": "Wojna", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, and Zbigniew Wojna. 2015. Re- thinking the inception architecture for computer vi- sion. CoRR, abs/1512.00567.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Parallel data, tools and interfaces in opus", |
|
"authors": [ |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Eight International Conference on Language Resources and Evaluation (LREC'12)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J\u00f6rg Tiedemann. 2012. Parallel data, tools and inter- faces in opus. In Eight International Conference on Language Resources and Evaluation (LREC'12).", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Neural machine translation with latent semantic of image and text", |
|
"authors": [ |
|
{ |
|
"first": "Joji", |
|
"middle": [], |
|
"last": "Toyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masanori", |
|
"middle": [], |
|
"last": "Misono", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masahiro", |
|
"middle": [], |
|
"last": "Suzuki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kotaro", |
|
"middle": [], |
|
"last": "Nakayama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yutaka", |
|
"middle": [], |
|
"last": "Matsuo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joji Toyama, Masanori Misono, Masahiro Suzuki, Ko- taro Nakayama, and Yutaka Matsuo. 2016. Neural machine translation with latent semantic of image and text. CoRR, abs/1611.08459.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "Order-embeddings of images and language", |
|
"authors": [ |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vendrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanja", |
|
"middle": [], |
|
"last": "Fidler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raquel", |
|
"middle": [], |
|
"last": "Urtasun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ivan Vendrov, Ryan Kiros, Sanja Fidler, and Raquel Urtasun. 2016. Order-embeddings of images and language. ICLR.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "The Imagination model learns visuallygrounded representations by sharing the encoder network between the Translation Decoder with image prediction in the IMAGINET Decoder.", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"text": "\u00b1 0.4 36.8 \u00b1 0.8 Imagination (COCO) 55.6 \u00b1 0.5 36.4 \u00b1 1.2", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>Meteor</td><td>BLEU</td></tr><tr><td>Imagination</td><td>55.8</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"text": "Translation results when using out-ofdomain described images. Our approach is still effective when the image prediction model is trained over the COCO dataset.", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Meteor</td><td>BLEU</td></tr></table>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |