
Dataset Card for naacl2022

Dataset Summary

This is a named entity recognition (NER) dataset annotated for the scientific entity recognition task, created as a project for the CMU 11-711 course.

Supported Tasks and Leaderboards

Named entity recognition (NER) over scientific text, covering method, hyperparameter, metric, task, and dataset entities.

Languages

English

Dataset Structure

Data Instances

A sample from the dataset: {'id': '0', 'tokens': ['We', 'sample', '50', 'negative', 'cases', 'from', 'T5LARGE', '+', 'GenMC', 'for', 'each', 'dataset'], 'ner_tags': ['O', 'O', 'O', 'O', 'O', 'O', 'B-MethodName', 'O', 'B-MethodName', 'O', 'O', 'O']}
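
Below is a minimal sketch of loading and inspecting the dataset with the Hugging Face `datasets` library. The Hub path `user/naacl2022` is a placeholder assumption, not the dataset's confirmed repository name.

```python
# Minimal sketch: load the dataset and inspect one instance.
# "user/naacl2022" is a placeholder path (assumption); substitute
# the actual Hub repository name.
from datasets import load_dataset

ds = load_dataset("user/naacl2022")

sample = ds["train"][0]
print(sample["id"])        # sample index, e.g. '0'
print(sample["tokens"])    # the token sequence
print(sample["ner_tags"])  # one NER label per token
```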

Data Fields

Each instance has three fields: id, tokens, and ner_tags.

  • id: a string feature giving the sample index.
  • tokens: a list of string features giving the token sequence.
  • ner_tags: a list of classification labels for each token in the sequence, with possible values: O (0), B-MethodName (1), I-MethodName (2), B-HyperparameterName (3), I-HyperparameterName (4), B-HyperparameterValue (5), I-HyperparameterValue (6), B-MetricName (7), I-MetricName (8), B-MetricValue (9), I-MetricValue (10), B-TaskName (11), I-TaskName (12), B-DatasetName (13), I-DatasetName (14). A label-decoding sketch follows this list.
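
The mapping between integer label ids and tag strings follows directly from the list above. Here is a short sketch (illustrative, not shipped with the dataset) that decodes ner_tags into BIO tag strings, using the sample instance from the Data Instances section:

```python
# Label inventory copied from the Data Fields list above; index = label id.
LABELS = [
    "O",
    "B-MethodName", "I-MethodName",
    "B-HyperparameterName", "I-HyperparameterName",
    "B-HyperparameterValue", "I-HyperparameterValue",
    "B-MetricName", "I-MetricName",
    "B-MetricValue", "I-MetricValue",
    "B-TaskName", "I-TaskName",
    "B-DatasetName", "I-DatasetName",
]

def decode_tags(tag_ids):
    """Map integer label ids to their BIO tag strings."""
    return [LABELS[i] for i in tag_ids]

# The sample instance from the Data Instances section above.
tokens = ["We", "sample", "50", "negative", "cases", "from",
          "T5LARGE", "+", "GenMC", "for", "each", "dataset"]
tag_ids = [0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0]
for token, tag in zip(tokens, decode_tags(tag_ids)):
    print(f"{token}\t{tag}")  # e.g. "T5LARGE  B-MethodName"
```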

Data Splits

The data is split into three files: train.txt, dev.txt, and test.txt.
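
The card does not specify the on-disk format of these files; the sketch below assumes a CoNLL-style layout (one token and its tag per line, separated by a tab, with blank lines between sentences), which is an assumption rather than documented behavior.

```python
# Hedged sketch: read one split file, ASSUMING a CoNLL-style
# "token<TAB>tag" per line with blank lines separating sentences.
def read_split(path):
    sentences, tokens, tags = [], [], []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            if not line:  # blank line = sentence boundary
                if tokens:
                    sentences.append((tokens, tags))
                    tokens, tags = [], []
                continue
            token, tag = line.split("\t")
            tokens.append(token)
            tags.append(tag)
    if tokens:  # flush a final sentence with no trailing blank line
        sentences.append((tokens, tags))
    return sentences

train_sentences = read_split("train.txt")
print(len(train_sentences), "training sentences")
```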

Dataset Creation

Curation Rationale

[More Information Needed]

Source Data

Initial Data Collection and Normalization

[More Information Needed]

Who are the source language producers?

[More Information Needed]

Annotations

Annotation process

The data was annotated using Label Studio; the papers were collected from TACL and ACL 2022.

Who are the annotators?

Xiaoyue Cui and Haotian Teng annotated the dataset.

Personal and Sensitive Information

[More Information Needed]

Considerations for Using the Data

Social Impact of Dataset

[More Information Needed]

Discussion of Biases

[More Information Needed]

Other Known Limitations

[More Information Needed]

Additional Information

Dataset Curators

[More Information Needed]

Licensing Information

[More Information Needed]

Citation Information

[More Information Needed]

Contributions

Thanks to @xcui297 and @haotianteng for adding this dataset.
