Columns: id (string, lengths 1 to 4); tokens (sequence); ner_tags (sequence)
200
[ "Using", "such", "a", "framework,", "UnifiedQA", "(Khashabi", "et", "al.,", "2020)", "integrates", "20", "QA", "datasets", "into", "a", "unified", "format", "for", "training,", "and", "achieves", "state-of-the-art", "results", "on", "multiple", "MCQA", "datasets." ]
[ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0 ]
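Within each record, tokens and ner_tags are parallel sequences of equal length, so tag i labels token i. Below is a minimal sketch for reading record 200 shown above; the variable names are only illustrative, tag id 0 is assumed to mean "no entity", and the mapping from the other integer tag ids (1, 11, 12, 13, ...) to label names is defined elsewhere in the dataset card and not reproduced here.

```python
# Minimal sketch: token/tag alignment for record 200 (assumptions noted above).
tokens = ["Using", "such", "a", "framework,", "UnifiedQA", "(Khashabi", "et",
          "al.,", "2020)", "integrates", "20", "QA", "datasets", "into", "a",
          "unified", "format", "for", "training,", "and", "achieves",
          "state-of-the-art", "results", "on", "multiple", "MCQA", "datasets."]
ner_tags = [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0]

# The two sequences are parallel: tag i labels token i.
assert len(tokens) == len(ner_tags)

# Collect the tokens that carry a non-zero tag (0 is assumed to be "no entity").
tagged = [(tok, tag) for tok, tag in zip(tokens, ner_tags) if tag != 0]
print(tagged)  # [('UnifiedQA', 1), ('MCQA', 11)]
```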
201
[ "One", "benefit", "is", "that", "extensive", "training", "data", "can", "be", "shared", "across", "different", "tasks." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
202
[ "As", "illustrated", "in", "Figure", "2a,", "adopting", "this", "paradigm", "for", "MCQA,", "the", "question", "Q", "and", "all", "the", "options", "O1,", "O2,", "O3,", "O4}", "are", "spliced", "into", "a", "text", "as", "input,", "and", "the", "correct", "answer", "O1", "is", "used", "as", "the", "generation", "target." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
203
[ "Recently,", "the", "text-to-text", "paradigm", "has", "achieved", "breakthrough", "results", "on", "many", "NLP", "tasks", "(Raffel", "et", "al.,", "2020;", "Lewis", "et", "al.,", "2020)." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
204
[ "2.1", "Text-to-Text", "Paradigm", "for", "MCQA" ]
[ 0, 0, 0, 0, 11 ]
205
[ "2", "Related", "Work" ]
[ 0, 0, 0 ]
206
[ "Code", "Our", "code", "is", "available", "on", "GitHub1", "under", "the", "Apache", "Licence", "2.0." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
207
[ "Outline", "We", "discuss", "related", "work", "in", "Section", "2,", "introduce", "GenMC", "in", "Section", "3,", "describe", "the", "experimental", "setup", "in", "Section", "4,", "report", "the", "results", "in", "Section", "5,", "and", "conclude", "in", "Section", "6." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
208
[ "It", "significantly", "outperforms", "comparable", "models,", "in", "particular,", "text-to-text", "models,", "on", "five", "MCQA", "datasets." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0 ]
209
[ "We", "refer", "to", "this", "generation-enhanced", "MCQA", "model", "as", "GenMC." ]
[ 0, 0, 0, 0, 0, 11, 0, 0, 1 ]
210
[ "The", "clue", "representation", "is", "then", "leveraged", "by", "an", "encoder-based", "model", "to", "read", "the", "options", "and", "make", "prediction." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
211
[ "With", "this", "idea,", "we", "propose", "to", "employ", "a", "pretrained", "encoder-decoder", "model", "to", "generate", "a", "clue", "from", "the", "question", "by", "exploiting", "its", "underlying", "knowledge,", "without", "seeing", "and", "being", "strictly", "confined", "to", "the", "options", "as", "in", "the", "text-to-text", "framework." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
212
[ "Our", "Contribution" ]
[ 0, 0 ]
213
[ "One", "research", "question", "is", "how", "to", "apply", "pre-trained", "encoder-decoder", "models", "in", "a", "more", "natural", "way", "to", "MCQA,", "in", "particular,", "to", "exploit", "their", "NLG", "capabilities." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0 ]
214
[ "Indeed,", "Liu", "et", "al.", "(2021)", "have", "found", "that", "in", "classification", "and", "regression", "tasks,", "the", "decoder", "layer", "is", "often", "under-utilized." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
215
[ "However,", "this", "is", "inconsistent", "with", "how", "encoder-decoder", "models", "are", "pre-trained", "so", "that", "their", "underlying", "knowledge", "may", "not", "be", "sufficiently", "exploited." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
216
[ "Research", "Question", "To", "fit", "MCQA,", "existing", "implementations", "of", "the", "text-to-text", "framework", "take", "all", "the", "options", "as", "input", "and", "are", "trained", "to", "generate", "one", "of", "the", "options,", "i.e.,", "to", "copy", "some", "tokens", "from", "the", "input." ]
[ 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
217
[ "This", "is", "enabled", "by", "the", "text-to-text", "framework,", "which", "transforms", "data", "in", "different", "tasks", "into", "a", "unified", "text-to-text", "format", "so", "that", "knowledge", "spanning", "many", "and", "various", "tasks", "can", "be", "learned,", "aggregated,", "and", "used", "by", "a", "single", "model." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
218
[ "However,", "encoder-decoder", "models", "can", "also", "be", "applied", "to", "MCQA", "(Khashabi", "et", "al.,", "2020;", "Zhou", "et", "al.,", "2021)." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0 ]
219
[ "T5", "(Raffel", "et", "al.,", "2020)", "and", "BART", "(Lewis", "et", "al.,", "2020)", "are", "encoder-decoder", "models,", "being", "more", "suitable", "for", "natural", "language", "generation", "(NLG)", "tasks." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
220
[ "and", "its", "variants", "such", "as", "RoBERTa", "(Liu", "et", "al.,", "2019)", "and", "ALBERT", "(Lan", "et", "al.,", "2020)", "are", "encoder-only", "models,", "being", "more", "suitable", "for", "natural", "language", "understanding", "(NLU)", "tasks", "including", "MCQA", "and", "other", "classification", "and", "regression", "tasks." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 12, 11, 0, 0, 11, 0, 0, 0, 0, 0, 0 ]
221
[ "BERT", "(Devlin", "et", "al.,", "2019)" ]
[ 0, 0, 0, 0, 0 ]
222
[ "Basically", "there", "are", "two", "types", "of", "PLMs", "that", "are", "suitable", "for", "different", "tasks." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
223
[ "MCQA", "has", "made", "great", "progress", "with", "the", "development", "of", "pre-trained", "language", "models", "(PLMs)." ]
[ 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
224
[ "Mihaylov", "et", "al.,", "2018)", "and", "scientific", "knowledge", "(Clark", "et", "al.,", "2018;", "Khot", "et", "al.,", "2020;", "Huang", "et", "al.,", "2019;", "Li", "et", "al.,", "2021),", "and", "have", "reasoning", "skills", "such", "as", "multi-hop", "reasoning", "(Khot", "et", "al.,", "2019)", "and", "logical", "reasoning", "(Yu", "et", "al.,", "2020;", "Liu", "et", "al.,", "2020b;", "Li", "et", "al.,", "2022)." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
225
[ "This", "long-standing", "challenge", "in", "natural", "language", "processing", "(NLP)", "requires", "machines", "to", "have", "a", "wealth", "of", "knowledge,", "such", "as", "commonsense", "knowledge", "(Talmor", "et", "al.,", "2019;" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
226
[ "Multiple-choice", "question", "answering", "(MCQA)", "aims", "at", "selecting", "the", "correct", "answer", "from", "a", "set", "of", "options", "given", "a", "question." ]
[ 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
227
[ "Introduction" ]
[ 0 ]
228
[ "It", "outperforms", "textto-text", "models", "on", "multiple", "MCQA", "datasets." ]
[ 0, 0, 0, 0, 0, 0, 11, 0 ]
229
[ "It", "generates", "a", "clue", "from", "the", "question", "and", "then", "leverages", "the", "clue", "to", "enhance", "a", "reader", "for", "MCQA." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11 ]
230
[ "To", "exploit", "the", "generation", "capability", "and", "underlying", "knowledge", "of", "a", "pre-trained", "encoder-decoder", "model,", "in", "this", "paper,", "we", "propose", "a", "generation-enhanced", "MCQA", "model", "named", "GenMC." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 1 ]
231
[ "However,", "a", "side", "effect", "of", "twisting", "a", "generation", "target", "to", "fit", "the", "classification", "nature", "of", "MCQA", "is", "the", "underutilization", "of", "the", "decoder", "and", "the", "knowledge", "that", "can", "be", "decoded." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
232
[ "By", "unifying", "data", "in", "different", "tasks", "into", "a", "single", "text-to-text", "format,", "it", "trains", "a", "generative", "encoder-decoder", "model", "which", "is", "both", "powerful", "and", "universal." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
233
[ "A", "trending", "paradigm", "for", "multiple-choice", "question", "answering", "(MCQA)", "is", "using", "a", "text-to-text", "framework." ]
[ 0, 0, 0, 0, 11, 12, 12, 11, 0, 0, 0, 0, 0 ]
234
[ "Abstract" ]
[ 0 ]
235
[ "Clues", "Before", "Answers:", "Generation-Enhanced", "Multiple-Choice", "QA" ]
[ 0, 0, 0, 0, 0, 0 ]
236
[ "Figure", "6", "and", "Figure", "7", "display", "the", "10", "first", "dialog", "samples", "produced", "at", "test", "time", "on", "CLEVR,", "while", "figures", "8,", "9,", "and", "10", "display", "the", "15", "first", "dialog", "samples", "produced", "at", "test", "time", "on", "VQAv2." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13 ]
237
[ "D", "Additional", "VQG", "Samples" ]
[ 0, 0, 11, 0 ]
238
[ "(b)", "Language", "Grounding", "pairwise", "comparison" ]
[ 0, 0, 0, 0, 0 ]
239
[ "(a)", "Language", "Quality", "pairwise", "comparison" ]
[ 0, 0, 0, 0, 0 ]
240
[ "Figure", "5", "displays", "one", "pairwise", "comparison", "example", "for", "the", "three", "sections,", "and", "a", "full", "form", "example", "is", "available", "at", "the", "following", "url:", "https://forms.gle/kkL38x31wF7A9YKx5." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
241
[ "The", "evaluation", "of", "syntax", "errors", "was", "made", "within", "the", "diversity", "section:", "for", "each", "questions", "pair,", "we", "asked", "participants", "to", "tick", "the", "questions", "if", "they", "are", "grammatically", "incorrect." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
242
[ "Each", "pairwise", "comparison", "is", "sampled", "uniformly", "over", "the", "50", "first", "question", "samples", "generated", "by", "the", "algorithms", "at", "test", "time." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
243
[ "Given", "the", "five", "evaluated", "models,", "there", "are", "ten", "different", "model", "pairs:", "each", "section", "of", "the", "form", "contains", "10", "pairwise", "comparison", "covering", "all", "the", "possible", "model", "pairs", "for", "the", "criteria." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
244
[ "For", "the", "Human", "Evaluation", "study,", "we", "designed", "one", "form", "per", "participant,", "with", "three", "sections", "evaluating", "respectively", "the", "language", "quality,", "language", "grounding", "and", "diversity", "criteria." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
245
[ "C", "Human", "Evaluation", "details" ]
[ 0, 0, 0, 0 ]
246
[ "Additionally,", "on-policy", "versus", "off-policy", "scores", "split", "per", "sampling", "procedure", "are", "displayed", "in", "table", "12:", "unsurprisingly,", "greedy", "decoding", "for", "TrufLLoff", "outperforms", "the", "two", "sampling-based", "methods,", "that", "are", "more", "penalized", "by", "the", "imperfect", "generalization", "of", "the", "optimized", "policy", "over", "the", "full", "vocabulary." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
247
[ "Note", "that", "for", "VQAv2,", "the", "poor", "performances", "of", "TrufLLoff,KL", "on", "the", "external", "LM", "is", "mainly", "due", "to", "numerical", "instability", "challenges", "when", "using", "GPT-2", "as", "the", "target", "policy", "of", "the", "KL", "regularization", "term." ]
[ 0, 0, 0, 13, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
248
[ "Yet,", "keeping", "truncation", "at", "test", "time", "remains", "crucial", "with", "large", "vocabulary." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
249
[ "In", "such", "a", "setting,", "it", "hence", "improves", "the", "global", "scores", "of", "the", "off-policy", "version", "of", "TrufLL,", "and", "enables", "a", "much", "better", "generalization", "at", "test", "time", "of", "the", "global", "policy", "over", "the", "full", "vocabulary." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
250
[ "Interestingly,", "while", "on", "CLEVR,", "TrufLLoff,KL", "trades", "off", "task", "performance", "for", "language", "quality", "when", "compared", "to", "TrufLLoff,", "on", "VQAv2,", "it", "mainly", "provides", "a", "better", "learning", "signal", "for", "the", "complete", "(large)", "vocabulary." ]
[ 0, 0, 0, 13, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
251
[ "Indeed,", "on", "the", "off-policy", "setting", "for", "such", "a", "task,", "the", "exploding", "values", "for", "e-ppl", "suggest", "that", "the", "optimized", "language", "agent", "samples", "incoherent", "words", "taken", "outside", "the", "truncated", "action", "space,", "as", "corroborated", "by", "the", "low", "values", "of", "the", "sumVA", "ratio." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0 ]
252
[ "The", "full", "results", "emphasize", "the", "challenges", "of", "the", "approach", "for", "the", "large", "vocabulary", "of", "VQAv2." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13 ]
253
[ "Table", "11", "displays", "the", "full", "results", "of", "on-policy", "versus", "off-policy", "scores", "for", "TrufLL", "(Task-LM)", "and", "TrufLL", "(Ext-LM)", "on", "the", "two", "tasks." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 1, 2, 0, 0, 0, 0 ]
254
[ "Intuitively,", "it", "encourages", "the", "policy", "to", "stay", "close", "to", "the", "language", "model’s", "distribution,", "with", "a", "distribution", "support", "attributing", "negligible", "probabilities", "to", "words", "outside", "the", "truncated", "action", "space." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
255
[ "Wu", "et", "al.,", "2019),", "and", "refer", "to", "it", "as", "TrufLLoff,KL." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ]
256
[ "To", "ease", "off-policy", "learning,", "we", "propose", "to", "add", "a", "KLregularization", "term", "in", "the", "RL", "loss", "(Jaques", "et", "al.,", "2017,", "2019;" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
257
[ "On-policy", "TrufLL", "versus", "off-policy", "TrufLL." ]
[ 1, 2, 0, 1, 2 ]
258
[ "This", "suggests", "that", "on", "a", "large", "vocabulary", "task,", "the", "language", "distribution", "learned", "by", "the", "SL", "pretrained", "policy", "is", "significantly", "different", "from", "the", "one", "learned", "with", "TrufLL." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ]
259
[ "In", "table", "10,", "while", "on", "CLEVR,", "TrufLLpretrain", "marginally", "improves", "the", "results", "of", "the", "pretrain+RL", "fine-tune", "baseline,", "the", "combination", "of", "TrufLL", "with", "a", "pre-training", "phase", "leads", "to", "performance", "degradation", "on", "VQAv2." ]
[ 0, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13 ]
260
[ "Therefore,", "when", "using", "the", "task-related", "dataset,", "we", "evaluate", "TrufLL", "from", "a", "pretrained", "policy,", "and", "we", "refer", "to", "it", "as", "TrufLLpretrain." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ]
261
[ "Although", "TrufLL", "aims", "at", "providing", "a", "robust", "method", "to", "learn", "a", "language", "model", "(almost)", "from", "scratch,", "we", "investigate", "whether", "such", "algorithm", "can", "be", "complementary", "to", "RL", "algorithms", "with", "a", "pre-training", "phase." ]
[ 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
262
[ "TrufLL", "with", "a", "pre-training", "phase." ]
[ 1, 0, 0, 0, 0 ]
263
[ "B.3", "Additional", "discussion" ]
[ 0, 0, 0 ]
264
[ "This", "suggests", "that", "the", "KL", "regularization", "term,", "while", "encouraging", "the", "policy", "distribution", "to", "resemble", "the", "language", "model", "distribution,", "fails", "to", "capture", "the", "task", "pragmatics,", "which", "requires", "generating", "a", "language", "that", "is", "visually", "grounded." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
265
[ "On", "the", "other", "hand,", "the", "scratch+KL", "baselines", "stay", "stuck", "to", "a", "low", "training", "return." ]
[ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 ]
266
[ "The", "training", "curves", "of", "TrufLL", "present", "a", "steady", "increase", "in", "the", "return", "until", "reaching", "convergence,", "confirming", "that", "our", "approach,", "by", "guiding", "the", "exploration", "of", "the", "action", "space,", "provides", "a", "sufficient", "learning", "signal." ]
[ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
267
[ "As", "expected,", "the", "pretrain+RL", "fine-tune", "baseline", "return", "does", "not", "evolve", "much,", "confirming", "that", "the", "policy", "distribution", "almost", "does", "not", "shift", "through", "the", "fine-tuning", "phase." ]
[ 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
268
[ "Finally,", "Figure", "4", "displays", "the", "evolution", "of", "the", "training", "return", "for", "TrufLL", "and", "the", "baselines." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 ]
269
[ "The", "former", "displays", "the", "best", "performance/language", "scores", "trade-off", "for", "the", "schedule", "\"τ:", "3", ">", "1.", "&", "Tu=5,000\",", "while", "the", "latter", "has", "the", "best", "metrics", "trade-off", "for", "\"τ:", "1.5", ">", "1.", "&", "Tu=5,000\"." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 6, 6, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 6, 6, 0, 3 ]
270
[ "(Ext-LM)", "benefit", "slightly", "from", "truncation", "with", "a", "temperature", "schedule", "compared", "to", "a", "vanilla", "truncation." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
271
[ "In", "Table", "9,", "both", "TrufLL", "(Task-LM)", "and", "TrufLL" ]
[ 0, 0, 0, 0, 1, 2, 0, 1 ]
272
[ "While", "temperature", "scaling", "(Bahdanau", "et", "al.,", "2015)", "is", "usually", "used", "at", "test", "time", "to", "control", "the", "smoothness", "of", "the", "language", "model", "distribution,", "temperature", "schedules", "during", "training", "of", "language", "models", "have", "been", "used", "in", "w<t)", "distribution", "is", "several", "settings", "(Jang", "et", "al.,", "2016;" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
273
[ "When", "scaling", "up", "to", "the", "15k", "words", "of", "the", "VQAv2", "task,", "we", "also", "dynamically", "decrease", "the", "truncation", "size", "through", "training,", "by", "applying", "a", "decreasing", "temperature", "schedule", "on", "the", "language", "model." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
274
[ "Temperature", "scheduling:", "On", "the", "CLEVR", "task,", "we", "observed", "that", "dynamic", "truncations", "outperform", "static", "ones", "such", "as", "top(k):", "indeed,", "they", "better", "take", "into", "account", "the", "inherent", "variability", "of", "the", "language", "structure", "at", "the", "sentence-level." ]
[ 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
275
[ "Table", "6:", "CLEVR", "task:", "Ablation", "on", "the", "truncation", "functions", "with", "parameters", "sweep." ]
[ 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
276
[ "This", "illustrates", "that", "using", "a", "language", "similarity", "score", "as", "a", "reward", "signal", "is", "much", "less", "interesting", "than", "a", "reward", "based", "on", "a", "task", "completion", "score." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
277
[ "While", "on", "such", "a", "task", "TrufLL", "still", "exhibits", "promising", "language", "scores,", "the", "n-grams", "metrics", "remain", "lower", "than", "the", "pretrained", "baselines." ]
[ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
278
[ "Finally,", "Table", "8", "reports", "CLEVR", "metrics", "when", "using", "the", "BLEU", "score", "as", "the", "reward." ]
[ 0, 0, 0, 0, 13, 0, 0, 0, 0, 7, 0, 0, 0, 0 ]
279
[ "Such", "an", "ablation", "presents", "a", "similar", "pattern", "than", "VQAv2", "results", "described", "in", "section", "5.2." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0 ]
280
[ "Table", "6", "displays", "the", "complete", "ablation", "on", "the", "truncation", "functions", "with", "parameters", "sweep." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
281
[ "B.1", "CLEVR" ]
[ 0, 13 ]
282
[ "B", "Additional", "experiments" ]
[ 0, 0, 0 ]
283
[ "In", "this", "section,", "we", "detail", "the", "reward", "function", "used", "for", "the", "VQAv2", "task." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0 ]
284
[ "A.5", "Reward", "formula", "for", "VQAv2" ]
[ 0, 0, 0, 0, 13 ]
285
[ "None,1,5,10,100", ",", "bs", "}" ]
[ 5, 0, 3, 0 ]
286
[ "0.01,", "0.02,", "0.05,", "0.1", ",", "ϵ", "}", "3,10−", ",", "gradclip", "}" ]
[ 5, 5, 5, 5, 0, 3, 0, 0, 0, 3, 0 ]
287
[ "following", "values", "were", "tested:", "β", "3,5" ]
[ 0, 0, 0, 0, 3, 5 ]
288
[ "We", "kept", "the", "network", "size", "giving", "the", "best", "performances,", "i.e.", "policy", "network", "of", "256", "units", "and", "128", "word", "embedding", "dimension." ]
[ 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 5, 3, 0, 5, 3, 4, 4 ]
289
[ "}", "Additionally,", "we", "also", "tested", "for", "VQAv2", "policy", "networks", "with", "64,", "256", "and", "1024", "units,", "with", "respectively", "32,", "128", "and", "512", "word", "embedding", "dimensions." ]
[ 0, 0, 0, 0, 0, 0, 13, 0, 0, 0, 5, 5, 0, 5, 3, 0, 0, 5, 5, 0, 5, 3, 4, 4 ]
290
[ "The", "0.01,", "0.02,", "0.05,", "0.1,", "0.5,", "0.9", "lr", ",", "}", "10−", "32,64,128", "." ]
[ 0, 5, 5, 5, 5, 5, 5, 3, 0, 0, 0, 5, 0 ]
291
[ "Such", "hyper-parameters", "were", "selected,", "after", "conducting", "an", "extensive", "hyper-parameter", "search." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
292
[ "5)", "for", "CLEVR", "and", "VQAv2." ]
[ 5, 0, 13, 0, 13 ]
293
[ "Finally,", "for", "the", "RL", "from", "scratch", "baselines,", "we", "perform", "gradient", "clipping", "(gladclip)", "of", "1", "(resp." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 3, 0, 5, 0 ]
294
[ "We", "use", "a", "batch", "size", "(bs)", "on", "CLEVR", "(resp.", "VQAv2),", "and", "5", "of", "128", "for", "all", "models", "except", "the", "ones", "with", "KL", "regularization,", "for", "which", "we", "use", "a", "batch", "size", "of", "64." ]
[ 0, 0, 0, 3, 4, 3, 0, 13, 0, 13, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 5 ]
295
[ "We", "use", "Adam", "optimizer", "(Kingma", "and", "Ba,", "2014)", "with", "a", "learning", "rate", "6)", "for", "RL", "algorithms", "with", "a", "pre-training", "phase", "(lr)", "of", "10−", "4", "for", "models", "including", "a", "KL", "regularization", "term." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 5, 6, 0, 0, 0, 0, 0, 0, 0 ]
296
[ "VQAv2)." ]
[ 13 ]
297
[ "We", "optimize", "the", "full", "loss", "L=LP", "P", "O", "+αLV", "F", "+βLE", "with", "α=0.5,", "β", "=0.01", "and", "a", "PPO", "clipping", "ratio", "ϵ=0.02", "(resp.", "0.01)", "for", "CLEVR", "(resp." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 5, 0, 0, 0, 0, 0, 3, 0, 5, 0, 13, 0 ]
298
[ "3", "for", "TrufLL", "and", "the", "scratch", "baseline,", "10−" ]
[ 0, 0, 1, 0, 0, 0, 0, 0 ]
299
[ "For", "VQAv2,", "the", "image", "representation", "is", "the", "average", "of", "200", "bounding", "box", "features", "of", "dimension", "1048,", "extracted", "from", "a", "faster", "R-CNN", "(Ren", "et", "al.,", "2015)." ]
[ 0, 13, 0, 0, 0, 0, 0, 0, 0, 5, 3, 4, 4, 0, 3, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
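If this flat listing itself needs to be regrouped into records, the three-line-per-record layout (id, tokens list, ner_tags list) can be parsed directly. The sketch below assumes the listing is saved as plain text with the column-header line removed; the file name and function name are only illustrative.

```python
# Sketch: regroup the flat listing into records.
# Assumption: the input file contains only the record lines, three non-empty
# lines per record (id, tokens list, ner_tags list), with the header removed.
import ast

def read_records(path):
    with open(path, encoding="utf-8") as fh:
        lines = [ln.strip() for ln in fh if ln.strip()]
    records = []
    for i in range(0, len(lines), 3):
        tokens = ast.literal_eval(lines[i + 1])   # list of token strings
        tags = ast.literal_eval(lines[i + 2])     # parallel list of tag ids
        assert len(tokens) == len(tags), f"misaligned record {lines[i]}"
        records.append({"id": lines[i], "tokens": tokens, "ner_tags": tags})
    return records

# Example (hypothetical file name):
# records = read_records("listing.txt")
```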