id (string, 1-4 chars)
tokens (sequence of strings)
ner_tags (sequence of ints, one per token)
100
[ "We", "used", "PyTorch", "1.7." ]
[ 0, 0, 0, 0 ]
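Each record pairs a tokens sequence with an equally long ner_tags sequence. A minimal sketch (plain Python; the record literal simply mirrors record 100 above) for checking that alignment when loading the dump:

```python
# Sketch: verify tokens/ner_tags alignment before using the records.
records = [
    {"id": "100",
     "tokens": ["We", "used", "PyTorch", "1.7."],
     "ner_tags": [0, 0, 0, 0]},
]

for r in records:
    # Every token must carry exactly one tag.
    assert len(r["tokens"]) == len(r["ner_tags"]), f"misaligned id {r['id']}"
```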
101
[ "External", "Knowledge", "For", "all", "these", "datasets,", "our", "experiments", "did", "not", "rely", "on", "any", "provided", "documents", "or", "external", "corpora;", "a", "question", "was", "solely" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
102
[ "The", "dataset", "statistics", "are", "shown", "in", "Table", "1." ]
[ 0, 0, 0, 0, 0, 0, 0, 0 ]
103
[ "For", "CSQA", "and", "QASC,", "since", "the", "correct", "answers", "in", "the", "official", "test", "set", "are", "not", "public,", "we", "took", "their", "official", "dev", "set", "as", "our", "test", "set", "for", "experiments", "and", "randomly", "held", "out", "an", "in-house", "dev", "set", "from", "the", "training", "set." ]
[ 0, 13, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
104
[ "Train-Dev-Test", "Split", "For", "OBQA,", "ARC-Easy,", "and", "ARC-Challenge", "we", "used", "their", "official", "train,", "dev,", "and", "test", "sets." ]
[ 5, 6, 0, 13, 13, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
105
[ "QASC", "(Khot", "et", "al.,", "2020)", "is", "collected", "from", "elementary", "and", "middle", "school", "level", "science", "with", "8", "options", "for", "each", "question." ]
[ 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
106
[ "ARC-Easy", "and", "ARC-Challenge,", "denoting", "two", "disjointed", "subsets", "of", "ARC", "(Clark", "et", "al.,", "2018),", "contain", "natural", "grade-school", "science", "questions", "with", "4", "options,", "where", "ARC-Challenge", "comprises", "difficult", "questions", "which", "require", "more", "advanced", "reasoning." ]
[ 13, 0, 13, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0 ]
107
[ "Each", "question", "is", "given", "with", "5", "options", "in", "CSQA", "and", "4", "options", "in", "OBQA." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 13 ]
108
[ "Datasets", "CSQA", "(Talmor", "et", "al.,", "2019)", "and", "OBQA", "(Mihaylov", "et", "al.,", "2018)", "are", "two", "commonsense", "MCQA", "datasets", "created", "by", "crowd", "workers", "based", "on", "commonsense", "facts." ]
[ 0, 13, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
109
[ "The", "former", "requires", "commonsense", "knowledge", "and", "reasoning,", "and", "the", "latter", "requires", "inference", "over", "scientific", "facts." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
110
[ "We", "conducted", "experiments", "on", "five", "popular", "MCQA", "datasets", "spanning", "from", "commonsense", "questions", "to", "scientific", "questions." ]
[ 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0 ]
111
[ "4.1", "Data" ]
[ 0, 0 ]
112
[ "4", "Experimental", "Setup" ]
[ 0, 0, 0 ]
113
[ "For", "each", "model,", "we", "experimented", "with", "its", "BASE", "and", "LARGE", "versions." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
114
[ "We", "used", "two", "popular", "encoder-decoder", "models", "as", "a", "basis,", "BART", "(Lewis", "et", "al.,", "2020)", "and", "T5", "(Raffel", "et", "al.,", "2020)." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
115
[ "4.2", "Implementation", "Details" ]
[ 0, 0, 0 ]
116
[ "It", "means", "that", "pre-trained", "models", "were", "used", "as", "the", "primary", "source", "of", "knowledge", "in", "the", "experiments." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
118
[ "Such", "usage", "is", "more", "natural", "than", "the", "text-totext", "paradigm", "(Khashabi", "et", "al.,", "2020;", "Zhou", "et", "al.,", "2021),", "thus", "having", "the", "potential", "to", "outperform." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
119
[ "We", "use", "Ot", "as", "a", "text", "to", "supervise", "our", "clue", "generator,", "and", "as", "an", "index", "(i.e.,", "classification", "label)", "to", "supervise", "our", "enhanced", "reader." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
120
[ "The", "above", "training", "objective", "exploits", "the", "double", "properties", "of", "the", "correct", "answer", "Ot", "in", "MCQA:", "as", "a", "text", "and", "as", "an", "index." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0 ]
121
[ "For", "CSQA", "and", "QASC,", "their", "official", "dev", "sets", "are", "used", "as", "our", "test", "sets,", "and", "our", "dev", "sets", "are", "in-house", "split", "from", "their", "official", "training", "sets." ]
[ 0, 13, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
123
[ "Note", "that", "we", "update", "the", "encoder", "using", "the", "joint", "loss", "L,", "without", "allowing", "L_READ", "to", "be", "backpropagated", "to", "the", "decoder", "part,", "to", "reduce", "the", "memory", "consumption." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
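One way to realize such gradient blocking, offered as a hedged sketch rather than the authors' confirmed implementation, is to detach the decoder-side clue states before the reader consumes them, so only the generation loss reaches the decoder:

```python
import torch

# Hypothetical decoder hidden states for the clue: (batch, |C|, d).
H_C = torch.randn(2, 5, 8, requires_grad=True)

# The reader works on a detached copy, so L_READ computed downstream
# cannot backpropagate into the decoder; L_GEN, computed directly on
# decoder outputs, still updates it.
H_C_for_reader = H_C.detach()
```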
125
[ "Reader", "Loss", "For", "L_READ,", "we", "simply", "calculate", "a", "cross-entropy", "loss", "given", "the", "correct", "answer", "Ot", "∈", "O", "as", "follows:" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
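In PyTorch terms, this reader loss is an ordinary cross-entropy over the option scores with the index of Ot as the target; a minimal sketch with illustrative shapes (not taken from the dump):

```python
import torch
import torch.nn.functional as F

scores = torch.tensor([[1.2, -0.3, 0.4, 0.1]])  # s_1..s_n for n = 4 options
t = torch.tensor([0])                           # index of the correct answer O_t

# L_READ = -log softmax(s)_t
loss_read = F.cross_entropy(scores, t)
```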
126
[ "where", "pOt", "denotes", "the", "probability", "distribution", "j", "over", "the", "decoding", "vocabulary", "at", "the", "j-th", "step,", "and", "pOt", "j,aj", "is", "the", "probability", "of", "token", "aj." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
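The corresponding generator loss sums the negative log-probability of each gold answer token aj under pOt_j; a hedged sketch, with random logits standing in for the decoder outputs:

```python
import torch
import torch.nn.functional as F

V = 100                                   # assumed vocabulary size
logits = torch.randn(1, 3, V)             # p^{Ot}_j for |Ot| = 3 steps
answer_ids = torch.randint(0, V, (1, 3))  # gold tokens a_1..a_|Ot|

# L_GEN: token-level cross-entropy, averaged over the |Ot| steps.
loss_gen = F.cross_entropy(logits.transpose(1, 2), answer_ids)
```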
127
[ "We", "jointly", "train", "the", "clue", "generator", "and", "the", "enhanced", "reader", "in", "an", "end-to-end", "fashion", "with", "a", "combined", "loss:" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
128
[ "3.3", "Training", "Objective" ]
[ 0, 0, 0 ]
129
[ "We", "select", "the", "option", "with", "the", "highest", "score", "as", "the", "predicted", "answer,", "denoted", "as", "Op." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
130
[ "3A", "delimiter", "\"", "n\"", "is", "inserted", "between", "Q", "and", "each", "Oi." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
132
[ "2For", "efficiency,", "we", "decode", "the", "clue", "greedily", "without", "performing", "beam", "search." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
133
[ "Specifically,", "we", "first", "concatenate", "Q", "and", "each", "Oi", "independently3", "and", "feed", "the", "concatenated", "input", "into", "the", "pre-trained", "encoder", "(which", "is", "shared", "with", "our", "clue", "generator)", "to", "obtain", "Oi’s", "contextualized" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
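A sketch of this pairing step using HuggingFace transformers with T5 as the shared encoder; the checkpoint, the sample question, and the options are placeholders, and only the "\n" delimiter follows footnote 3 (record 130):

```python
from transformers import AutoTokenizer, T5EncoderModel

tok = AutoTokenizer.from_pretrained("t5-base")
encoder = T5EncoderModel.from_pretrained("t5-base")

Q = "placeholder question"
options = ["option one", "option two", "option three"]

# One "Q \n O_i" sequence per option, encoded independently.
batch = tok([f"{Q}\n{o}" for o in options], return_tensors="pt", padding=True)
H_QO = encoder(**batch).last_hidden_state  # contextualized tokens per option
```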
134
[ "By", "contrast,", "we", "use", "the", "previously", "generated", "clue", "representation", "to", "enhance", "our", "reader", "for", "a", "deeper", "understanding", "of", "each", "question-option", "pair." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
135
[ "Previous", "works", "often", "directly", "model", "the", "relevance", "O", "to", "Q", "via", "joint", "encoding", "using", "a", "preof", "each", "Oi", "∈", "trained", "encoder,", "which", "largely", "performs", "superficial", "lexical", "reasoning", "(Zellers", "et", "al.,", "2019)." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
136
[ "HQC", "carries", "the", "where", "[", "information", "of", "C", "which", "can", "be", "helpful", "to", "better", "understand", "and", "answer", "Q." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
137
[ "To", "encourage", "the", "tokens", "in", "C", "to", "thoroughly", "interact", "with", "each", "other", "and", "with", "Q,", "we", "strengthen", "the", "clue", "representation", "by", "passing", "it", "to", "a", "transformer", "layer", "(Vaswani", "et", "al.,", "2017)", "and", "obtain", "HQC:" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
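A minimal sketch of this strengthening step, assuming (the records do not pin this down) that the layer runs over the concatenation of the question and clue representations:

```python
import torch
import torch.nn as nn

d = 8
layer = nn.TransformerEncoderLayer(d_model=d, nhead=2, batch_first=True)

H_Q = torch.randn(2, 6, d)  # hypothetical question representation
H_C = torch.randn(2, 4, d)  # hypothetical clue representation

# Self-attention over [H_Q; H_C] lets clue tokens attend to each other
# and to the question, yielding H_QC.
H_QC = layer(torch.cat([H_Q, H_C], dim=1))
```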
138
[ "where", "Decoder", "(", "1,", "·", "−", "the", "representation", "for", "the", "decoding", "history", "HC", "<j,", "and", "HQ", "as", "input,", "and", "outputs", "the", "hidden", "state", "HC", "j", "together", "with", "the", "probability", "distribution", "pC", "j", "over", "the", "decoding", "vocabulary", "at", "the", "j-th", "step." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
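Combined with footnote 2 (greedy decoding, no beam search; record 132), this step function suggests a loop of the following shape; the decoder interface here is hypothetical, only its inputs and outputs follow the text:

```python
import torch

def greedy_decode(decoder_step, H_Q, bos_id, eos_id, max_len=16):
    """At step j, decoder_step consumes the history c_1..c_{j-1} and H^Q,
    and returns the hidden state H^C_j plus the distribution p^C_j."""
    ids, states = [bos_id], []
    for _ in range(max_len):
        H_j, p_j = decoder_step(ids, H_Q)  # hypothetical interface
        states.append(H_j)
        next_id = int(torch.argmax(p_j, dim=-1))
        ids.append(next_id)
        if next_id == eos_id:
            break
    return ids[1:], torch.stack(states)    # clue ids c_j and states H^C
```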
140
[ "To", "obtain", "the", "final", "score", "si", "for", "each", "Oi,", "we", "concatenate", "the", "dual", "matching", "features", "fQO_i", "and", "fQC_i", "and", "feed", "them", "into", "a", "two-layer", "multi-layer", "perceptron", "(MLP):" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
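A sketch of that scoring head (hidden width and activation are assumptions), including the argmax selection described in record 129:

```python
import torch
import torch.nn as nn

d, n = 8, 4  # feature dimension and number of options (illustrative)
mlp = nn.Sequential(nn.Linear(2 * d, d), nn.ReLU(), nn.Linear(d, 1))

f_QO = torch.randn(n, d)  # per-option matching features
f_QC = torch.randn(n, d)

s = mlp(torch.cat([f_QO, f_QC], dim=-1)).squeeze(-1)  # scores s_1..s_n
pred = int(s.argmax())  # option with the highest score (record 129)
```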
142
[ "Then", "we", "perform", "max-pooling", "to", "aggregate", "the", "matching", "features:" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
143
[ "Specifically,", "inspired", "by", "Huang", "et", "al.", "(2021),", "we", "first", "use", "dual-attention", "(Liu", "et", "al.,", "2020a)", "to", "fuse", "information", "from", "HQO", "to", "HQC", "and", "i", "from", "HQC", "to", "HQO", "." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
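The exact dual-attention of Liu et al. (2020a) is not spelled out in these records; a generic bidirectional-attention sketch, followed by the max-pooling of record 142, might look like:

```python
import torch

def dual_attention(H_a, H_b):
    # Affinity-based fusion in both directions (generic form).
    A = torch.softmax(H_a @ H_b.T, dim=-1)  # (L_a, L_b)
    B = torch.softmax(H_b @ H_a.T, dim=-1)  # (L_b, L_a)
    return A @ H_b, B @ H_a

H_QO = torch.randn(7, 8)  # hypothetical (length, d) representations
H_QC = torch.randn(5, 8)
M_QO, M_QC = dual_attention(H_QO, H_QC)

# Max-pool over tokens to aggregate the matching features (record 142).
f_QO = M_QO.max(dim=0).values
f_QC = M_QC.max(dim=0).values
```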
144
[ "Next,", "based", "on", "the", "clue", "representation", "HQC,", "our", "model", "intensively", "reads", "each", "question-option", "pair", "and", "obtains", "the", "matching", "signal", "between", "the", "clue", "and", "the", "option." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
145
[ "HC", "j", ",", "denoting", "the", "C,", "is", "comrepresentation", "of", "the", "j-th", "token", "cj", "∈", "puted", "as", "follows:" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
146
[ "Specifically,", "we", "obtain", "the", "question", "represenRd", "tation", "HQ", "|", "and", "the", "clue", "representation", "HC", "Rd", "|", "from", "the", "last", "layer", "of", "the", "encoder", "and", "of", "the", "decoder,", "respectively,", "where", "d", "denotes", "the", "representation", "dimension." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
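With HuggingFace transformers (an assumption; the records only confirm PyTorch), these two representations can be read off a seq2seq model as follows; the checkpoint and inputs are placeholders:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tok = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")

q = tok("placeholder question", return_tensors="pt")
c = tok("placeholder clue", return_tensors="pt")

out = model(input_ids=q.input_ids, decoder_input_ids=c.input_ids,
            output_hidden_states=True)
H_Q = out.encoder_last_hidden_state  # (1, |Q|, d): last encoder layer
H_C = out.decoder_hidden_states[-1]  # (1, |C|, d): last decoder layer
```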
148
[ "3.1", "Clue", "Generator", "The", "clue", "generator", "takes", "the", "question", "Q", "as", "input", "and", "autoregressively", "outputs", "a", "clue", "C", "=", "c1,", "...,", "c|C|", "using", "a", "pre-trained", "encoder-decoder", "model.2", "Note", "that", "not", "the", "clue", "text", "C", "but", "its", "representation", "HC", "will", "be", "used", "in", "our", "model,", "although", "one", "could", "output", "C", "as", "evidence", "for", "explainability." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
149
[ "Then", "the", "enhanced", "reader", "(Section", "3.2)", "uses", "the", "generated", "clue", "to", "augment", "question-option", "understanding." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
150
[ "The", "clue", "generator", "(Section", "3.1)", "first", "generates", "a", "clue", "representation", "only", "given", "Q." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
151
[ "The", "overall", "architecture", "of", "GenMC", "is", "shown", "in" ]
[ 0, 0, 0, 0, 1, 0, 0, 0 ]
152
[ "Our", "model", "design", "mimics", "how", "humans", "solve", "an", "MCQA", "task,", "i.e.,", "after", "reading", "a", "question,", "humans", "may", "firstly", "associate", "it", "with", "some", "of", "their", "background", "knowledge", "(i.e.,", "looking", "for", "clues)", "that", "helps", "them", "to", "later", "identify", "the", "correct", "answer." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
153
[ "Then", "GenMC", "employs", "the", "generated", "clue", "representation", "as", "intermediate", "knowledge", "connecting", "the", "question", "and", "the", "correct", "answer", "to", "interact", "with", "and", "enhance", "a", "reader", "for", "solving", "MCQA." ]
[ 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11 ]
154
[ "Building", "on", "a", "pre-trained", "encoderdecoder", "model,", "GenMC", "firstly", "generates", "a", "clue", "which", "is", "indicative", "of", "the", "correct", "answer,", "thereby", "exploiting", "the", "NLG", "capability", "and", "underlying", "knowledge", "of", "the", "pre-trained", "encoder-decoder", "model." ]
[ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
155
[ "Our", "proposed", "model", "GenMC", "overcomes", "these", "limitations." ]
[ 0, 0, 0, 1, 0, 0, 0 ]
156
[ "Moreover,", "a", "simple", "joint", "encoding", "of", "Q", "and", "each", "Oi", "can", "only", "enable", "lexical-level", "reasoning", "(Zellers", "et", "al.,", "2019)", "which", "is", "insufficient", "for", "MCQA", "tasks." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0 ]
157
[ "However,", "previous", "works", "directly", "use", "the", "decoder", "to", "generate", "an", "option", "in", "O,", "i.e.,", "using", "the", "decoder", "as", "a", "classifier,", "which", "may", "have", "under-exploited", "the", "model’s", "NLG", "capability", "(Liu", "et", "al.,", "2021)." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
158
[ "We", "follow", "the", "trend", "of", "building", "on", "a", "pretrained", "encoder-decoder", "model", "and", "use", "the", "encoder", "to", "jointly", "encode", "Q", "and", "each", "Oi." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
159
[ "The", "key", "to", "finding", "the", "correct", "answer", "is", "to", "capture", "and", "deeply", "understand", "the", "connection", "between", "Q", "and", "each", "Oi", "∈", "O,", "which", "oftentimes", "is", "beyond", "the", "lexical", "level", "and", "requires", "a", "non-trivial", "entailment", "process." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
160
[ "In", "MCQA,", "a", "question", "Q", "is", "given", "together", "with", "a", "set", "of", "n", "options", "O", "=", "with", "exactly", "one", "option", "being", "the", "correct", "answer." ]
[ 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
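This task definition maps onto a simple container; a hypothetical sketch (field names are illustrative, not from the records):

```python
from dataclasses import dataclass
from typing import List

@dataclass
class MCQAInstance:
    question: str        # Q
    options: List[str]   # O = {O_1, ..., O_n}
    answer_index: int    # t, with O_t the correct answer
```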
161
[ "Such", "token-level", "interaction", "can", "lead", "to", "significant", "losses", "in", "accuracy", "as", "we", "will", "see", "in", "our", "experiments,", "where", "our", "representation-level", "interaction", "exhibits", "better", "performance." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
162
[ "However,", "the", "generative", "model", "and", "the", "reading", "model", "are", "separate", "steps", "in", "a", "pipeline", "and", "are", "connected", "only", "via", "the", "evidence", "text." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
163
[ "It", "first", "uses", "a", "generative", "model", "to", "generate", "evidence,", "and", "then", "uses", "a", "reading", "model", "to", "incorporate", "the", "evidence", "and", "predict", "the", "answer,", "both", "using", "answer", "supervision." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
164
[ "CEGI", "(Liu", "et", "al.,", "2020c)", "is", "probably", "the", "most", "similar", "work", "to", "ours." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
165
[ "Although", "it", "somewhat", "improves", "the", "explainability", "of", "MCQA,", "in", "terms", "of", "accuracy", "of", "MCQA", "there", "is", "little", "advancement." ]
[ 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0 ]
166
[ "Latcinnik", "and", "Berant", "(2020)", "propose", "a", "joint", "generator-classifier", "model", "where", "the", "generator", "produces", "a", "human-readable", "textual", "hypothesis." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
167
[ "However,", "CAGE", "relies", "on", "explanations", "annotated", "by", "humans,", "which", "are", "not", "available", "in", "many", "real", "scenarios", "and", "datasets." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
168
[ "Rajani", "et", "al.", "(2019)", "propose", "CAGE", "as", "a", "framework", "for", "generating", "explanations", "for", "commonsense", "QA." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
169
[ "There", "is", "also", "research", "on", "MCQA", "trying", "to", "exporting", "knowledge", "from", "PLMs", "before", "answering." ]
[ 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0 ]
170
[ "Exploiting", "the", "knowledge", "in", "PLMs", "for", "QA", "tasks", "has", "come", "into", "play", "in", "many", "forms", "including", "question", "expansion", "(Mao", "et", "al.,", "2021)", "and", "question", "generation", "(Shwartz", "et", "al.,", "2020)." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
171
[ "Recently,", "PLMs", "have", "been", "used", "as", "knowledge", "bases", "(Petroni", "et", "al.,", "2019),", "and", "the", "knowledge", "in", "parameters", "can", "be", "exported", "via", "methods", "such", "as", "Prompt", "(Jiang", "et", "al.,", "2020;", "Shin", "et", "al.,", "2020)." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
172
[ "3", "GenMC", "Model" ]
[ 0, 1, 0 ]
173
[ "2.3", "Knowledge", "in", "PLMs" ]
[ 0, 0, 0, 0 ]
176
[ "The", "whole", "model", "is", "trained", "in", "an", "end-to-end", "manner", "with", "both", "the", "generation", "loss", "L_GEN", "and", "the", "classification", "loss", "L_READ." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
177
[ "The", "enhanced", "reader", "then", "relies", "on", "the", "generated", "clue", "representation", "to", "better", "attend", "to", "options", "from", "O", "and", "makes", "the", "final", "prediction." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
178
[ "To", "make", "the", "prediction", "Op", "∈", "O,", "the", "clue", "generator", "first", "takes", "Q", "as", "input", "and", "outputs", "a", "clue", "representation", "HQC", "which", "is", "indicative", "of", "the", "correct", "answer." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
181
[ "By", "contrast,", "we", "aim", "at", "exporting", "clues", "from", "pre-trained", "models", "without", "resorting", "to", "extra", "sources." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
182
[ "There,", "evidence", "is", "derived", "from", "the", "given", "passage", "or", "retrieved", "from", "external", "corpora." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
183
[ "Other", "efforts", "mimic", "human", "behavior", "of", "reading", "evidence", "and", "answering", "questions", "(Ran", "et", "al.,", "2019;", "Tang", "et", "al.,", "2019;", "Sun", "et", "al.,", "2019)." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
184
[ "Building", "on", "this,", "some", "works", "study", "how", "to", "design", "better", "attention-based", "models", "to", "identify", "evidence", "(Chen", "et", "al.,", "2019;", "Zhang", "et", "al.,", "2020;", "Zhu", "et", "al.,", "2020)." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
185
[ "As", "illustrated", "in", "Figure", "2b,", "in", "this", "paradigm,", "the", "question", "Q", "and", "each", "option", "in", "are", "interacted", "to", "calculate", "a", "score,", "and", "the", "option", "with", "the", "highest", "score", "is", "chosen", "as", "the", "answer." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
187
[ "Benefiting", "from", "the", "powerful", "NLU", "capabilities", "of", "BERT-style", "PLMs", "(Devlin", "et", "al.,", "2019;", "Liu", "et", "al.,", "2019;", "Lan", "et", "al.,", "2020),", "the", "encoder-only", "paradigm", "has", "been", "popular", "for", "MCQA." ]
[ 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11 ]
189
[ "2.2", "Encoder-Only", "Paradigm", "for", "MCQA" ]
[ 0, 0, 0, 0, 11 ]
190
[ "They", "are", "orthogonal", "to", "our", "work", "as", "we", "leverage", "existing", "pre-trained", "encoder-decoder", "models", "instead", "of", "pre-training", "new", "models", "at", "an", "additional", "cost." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
191
[ "Some", "other", "works", "propose", "new", "pre-trained", "models", "for", "unified", "generation", "and", "classification", "tasks", "by", "designing", "universal", "encoders", "and", "task-specific", "decoders", "(Shao", "et", "al.,", "2021;", "Sun", "et", "al.,", "2021)." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
192
[ "By", "contrast,", "we", "address", "this", "issue", "from", "a", "different", "perspective", "of", "how", "to", "exploit", "the", "NLG", "capability", "of", "pre-trained", "encoder-decoder", "models", "for", "MCQA", "to", "improve", "accuracy." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0 ]
193
[ "Therefore,", "they", "propose", "a", "method", "to", "reduce", "the", "number", "of", "T5", "parameters", "to", "improve", "efficiency", "without", "reducing", "accuracy." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
194
[ "Liu", "et", "al.", "(2021)", "point", "out", "that", "the", "decoder", "layers", "of", "T5", "are", "under-utilized", "when", "finetuning", "on", "classification", "and", "regression", "tasks." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
195
[ "However,", "it", "might", "be", "debatable", "whether", "it", "is", "appropriate", "to", "train", "a", "classification", "task", "via", "a", "generation", "target." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
197
[ "1https://github.com/nju-websoft/GenMC" ]
[ 0 ]
199
[ "Similarly,", "CALM", "(Zhou", "et", "al.,", "2021)", "learns", "concept-centric", "knowledge", "from", "text", "for", "commonsense", "QA." ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]