|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:09:37.184516Z" |
|
}, |
|
"title": "What Happens To BERT Embeddings During Fine-tuning?", |
|
"authors": [ |
|
{ |
|
"first": "Amil", |
|
"middle": [], |
|
"last": "Merchant", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Brown University", |
|
"location": {} |
|
}, |
|
"email": "amilmerchant@google.com" |
|
}, |
|
{ |
|
"first": "Elahe", |
|
"middle": [], |
|
"last": "Rahimtoroghi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Brown University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Brown University", |
|
"location": {} |
|
}, |
|
"email": "epavlick@google.com" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Tenney", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Brown University", |
|
"location": {} |
|
}, |
|
"email": "iftenney@google.com" |
|
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "While much recent work has examined how linguistic information is encoded in pretrained sentence representations, comparatively little is understood about how these models change when adapted to solve downstream tasks. Using a suite of analysis techniques-supervised probing, unsupervised similarity analysis, and layer-based ablations-we investigate how fine-tuning affects the representations of the BERT model. We find that while fine-tuning necessarily makes some significant changes, there is no catastrophic forgetting of linguistic phenomena. We instead find that fine-tuning is a conservative process that primarily affects the top layers of BERT, albeit with noteworthy variation across tasks. In particular, dependency parsing reconfigures most of the model, whereas SQuAD and MNLI involve much shallower processing. Finally, we also find that fine-tuning has a weaker effect on representations of out-of-domain sentences, suggesting room for improvement in model generalization.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "While much recent work has examined how linguistic information is encoded in pretrained sentence representations, comparatively little is understood about how these models change when adapted to solve downstream tasks. Using a suite of analysis techniques-supervised probing, unsupervised similarity analysis, and layer-based ablations-we investigate how fine-tuning affects the representations of the BERT model. We find that while fine-tuning necessarily makes some significant changes, there is no catastrophic forgetting of linguistic phenomena. We instead find that fine-tuning is a conservative process that primarily affects the top layers of BERT, albeit with noteworthy variation across tasks. In particular, dependency parsing reconfigures most of the model, whereas SQuAD and MNLI involve much shallower processing. Finally, we also find that fine-tuning has a weaker effect on representations of out-of-domain sentences, suggesting room for improvement in model generalization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Unsupervised pre-training of deep language models has led to significant advances on many NLP tasks, with the popular BERT model (Devlin et al., 2019) and successors (e.g. Lan et al., 2019; Raffel et al., 2020) dominating the GLUE leaderboard and other benchmarks over the past year. Many recent works have attempted to better understand these models and explain what makes them so powerful. Particularly, behavioral studies (e.g. Marvin and Linzen, 2018; Goldberg, 2019) , diagnostic probing classifiers (e.g. Veldhoen et al., 2016; Belinkov et al., 2017; Hupkes et al., 2018) , and unsupervised techniques (e.g. Saphra and Lopez, 2019; Voita et al., 2019a) have shed light on the representations from the pretrained models and have shown that they encode a wide variety of linguistic phenomena (Tenney et al., 2019b; .", |
|
"cite_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 150, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 172, |
|
"end": 189, |
|
"text": "Lan et al., 2019;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 210, |
|
"text": "Raffel et al., 2020)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 431, |
|
"end": 455, |
|
"text": "Marvin and Linzen, 2018;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 456, |
|
"end": 471, |
|
"text": "Goldberg, 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 511, |
|
"end": 533, |
|
"text": "Veldhoen et al., 2016;", |
|
"ref_id": "BIBREF54" |
|
}, |
|
{ |
|
"start": 534, |
|
"end": 556, |
|
"text": "Belinkov et al., 2017;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 557, |
|
"end": 577, |
|
"text": "Hupkes et al., 2018)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 614, |
|
"end": 637, |
|
"text": "Saphra and Lopez, 2019;", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 638, |
|
"end": 658, |
|
"text": "Voita et al., 2019a)", |
|
"ref_id": "BIBREF56" |
|
}, |
|
{ |
|
"start": 796, |
|
"end": 818, |
|
"text": "(Tenney et al., 2019b;", |
|
"ref_id": "BIBREF53" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "However, in the standard recipe for models such as BERT (Devlin et al., 2019) , after initializing with pre-trained weights, they are then trained for a few epochs on a supervised dataset. Considerably less is understood about what happens during this fine-tuning stage. Current understanding is based largely on the models' performance. While fine-tuned Transformers achieve state-ofthe-art accuracy, they also can end up learning shallow heuristics Gururangan et al., 2018; Poliak et al., 2018) , suggesting a disconnect between the richness of features learned from pre-training and those used by finetuned models. Thus, in this work, we seek to understand how the internals of the model-the representation space-change when fine-tuned for downstream tasks. We focus on three widelyused NLP tasks: dependency parsing, natural language inference (MNLI), and reading comprehension (SQuAD), and ask:", |
|
"cite_spans": [ |
|
{ |
|
"start": 56, |
|
"end": 77, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 451, |
|
"end": 475, |
|
"text": "Gururangan et al., 2018;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 476, |
|
"end": 496, |
|
"text": "Poliak et al., 2018)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 What happens to the encoding of linguistic features such as syntactic and semantic roles? Are these preserved, reinforced, or forgotten as the encoder learns a new task? Do different tasks change how shallowly this information is encoded? (Section 4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Where in the model are changes made? Are parameter updates concentrated in a small number of layers or are there changes throughout the model? (Section 5)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Do these changes generalize or does the new behavior only apply to the specific domain on which fine-tuning occurred? (Section 6) We approach these questions with three complementary analysis techniques. Supervised probing classifiers (Tenney et al., 2019b; Hewitt and Manning, 2019; Voita and Titov, 2020) provide a means of explicitly testing for the presence of pre-specified linguistic phenomena, while Representational Similarity Analysis (RSA; Kriegeskorte et al., 2008) gives a task-agnostic measurement of the change in model activations. Finally, we corroborate our results with two types of layerbased ablations-truncation and partial freezingand measure their effect on end-task performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 131, |
|
"text": "(Section 6)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 237, |
|
"end": 259, |
|
"text": "(Tenney et al., 2019b;", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 260, |
|
"end": 285, |
|
"text": "Hewitt and Manning, 2019;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 286, |
|
"end": 308, |
|
"text": "Voita and Titov, 2020)", |
|
"ref_id": "BIBREF58" |
|
}, |
|
{ |
|
"start": 452, |
|
"end": 478, |
|
"text": "Kriegeskorte et al., 2008)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Taken together, we conclude that fine-tuning involves primarily shallow model changes, evidenced by three specific observations. First, linguistic features are not lost during fine-tuning but tasks can differ in how they either surface or obfuscate different phenomena. Second, fine-tuning tends to affect only the top few layers of BERT, albeit with variation across tasks: SQuAD and MNLI have a relatively shallow effect, while dependency parsing involves deeper changes to the encoder. We confirm this by partial-freezing experiments which test how many layers need to change to do well on each task and relate this to an estimate of task difficulty (with respect to the pre-training regime) via layer ablations. Finally, we observe that fine-tuning induces large changes on in-domain examples, yet on out-of-domain sentences, the representations more closely resemble those of the pre-trained model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Base model Many recent papers have focused on understanding sentence encoders such as ELMo (Peters et al., 2018a) and BERT (Devlin et al., 2019) , focusing primarily on the \"innate\" abilities of the pre-trained (\"Base\") models. For example, analyses of attention weights have shown interpretable patterns (Coenen et al., 2019; Vig and Belinkov, 2019; Voita et al., 2019b; Hoover et al., 2019) and found strong correlations to syntax (Clark et al., 2019) . Kovaleva et al. (2019) also saw that fine-tuning mainly changes the attention of the last few layers, consistent with our findings in Section 5.1. However, other studies have cast doubt on what conclusions can be drawn from attention patterns (Jain and Wallace, 2019; Serrano and Smith, 2019; Brunner et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 113, |
|
"text": "(Peters et al., 2018a)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 118, |
|
"end": 144, |
|
"text": "BERT (Devlin et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 305, |
|
"end": 326, |
|
"text": "(Coenen et al., 2019;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 327, |
|
"end": 350, |
|
"text": "Vig and Belinkov, 2019;", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 351, |
|
"end": 371, |
|
"text": "Voita et al., 2019b;", |
|
"ref_id": "BIBREF57" |
|
}, |
|
{ |
|
"start": 372, |
|
"end": 392, |
|
"text": "Hoover et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 433, |
|
"end": 453, |
|
"text": "(Clark et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 456, |
|
"end": 478, |
|
"text": "Kovaleva et al. (2019)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 699, |
|
"end": 723, |
|
"text": "(Jain and Wallace, 2019;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 724, |
|
"end": 748, |
|
"text": "Serrano and Smith, 2019;", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 749, |
|
"end": 770, |
|
"text": "Brunner et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "More generally, supervised probing models and diagnostic classifiers make few assumptions be-yond the existence of model activations and can test for the presence of a wide variety of phenomena. Tenney et al. (2019b) ; ; Peters et al. (2018b) introduced task suites that probe for high-level linguistic phenomena such as partof-speech, entity types, and coreference, while Tenney et al. (2019a) showed that these phenomena are represented in a hierarchical order within the layers of BERT. Hewitt and Manning (2019) used a geometrically-motivated probe to explore syntactic structures, and Voita and Titov (2020) and Pimentel et al. (2020) designed informationtheoretic techniques that can measure the model and data complexity. 1 While probing models depend on labelled data, parallel work has studied the same encoders using unsupervised techniques. Voita et al. (2019a) used a form of canonical correlation analysis (PW-CCA; Morcos et al., 2018) to study the layerwise evolution of representations, while Saphra and Lopez (2019) explored how these representations evolve during training. Abnar et al. (2019) used Representational Similarity Analysis (RSA; Laakso and Cottrell, 2000; Kriegeskorte et al., 2008) to study the effect of context on encoder representations, while Chrupa\u0142a and Alishahi (2019) correlated them with syntax.", |
|
"cite_spans": [ |
|
{ |
|
"start": 195, |
|
"end": 216, |
|
"text": "Tenney et al. (2019b)", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 221, |
|
"end": 242, |
|
"text": "Peters et al. (2018b)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 394, |
|
"text": "Tenney et al. (2019a)", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 490, |
|
"end": 515, |
|
"text": "Hewitt and Manning (2019)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 590, |
|
"end": 612, |
|
"text": "Voita and Titov (2020)", |
|
"ref_id": "BIBREF58" |
|
}, |
|
{ |
|
"start": 617, |
|
"end": 639, |
|
"text": "Pimentel et al. (2020)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 729, |
|
"end": 730, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 852, |
|
"end": 872, |
|
"text": "Voita et al. (2019a)", |
|
"ref_id": "BIBREF56" |
|
}, |
|
{ |
|
"start": 928, |
|
"end": 948, |
|
"text": "Morcos et al., 2018)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1091, |
|
"end": 1110, |
|
"text": "Abnar et al. (2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1159, |
|
"end": 1185, |
|
"text": "Laakso and Cottrell, 2000;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1186, |
|
"end": 1212, |
|
"text": "Kriegeskorte et al., 2008)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1278, |
|
"end": 1306, |
|
"text": "Chrupa\u0142a and Alishahi (2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Fine-tuning Comparatively few analyses have focused on understanding the fine-tuning process. Initial studies of fine-tuned encoders have shown state-of-the-art performance on benchmark suites such as GLUE and surprising sample efficiency (Peters et al., 2018a) . However, behavioral studies with challenge sets Poliak et al., 2018; Ettinger et al., 2018; Kim et al., 2018) have shown limited ability to generalize to out-of-domain data and across syntactic perturbations. van Aken et al. 2019focused on question-answering models with taskspecific probes. analyzed the effects of fine-tuning with respect to the performance of diagnostic classifiers. Gauthier and Levy (2019) studied fine-tuning via RSA, finding a significant divergence between the representations of models fine-tuned on different tasks. Concurrent work by Tamkin et al. (2020) investigated the transferability of pre-trained language models and performed an number of layer ablations. Consistent with our observations in Section 5.2, they find differences in which layers are important for finetuning different tasks. However, none of the prior provides a comprehensive analysis of what happens to the internal representations of the BERT model. In our work, we find that by comparing the Base to the fine-tuned models either via probing, RSA, and layer ablations provides novel insights about this additional phase of training.", |
|
"cite_spans": [ |
|
{ |
|
"start": 239, |
|
"end": 261, |
|
"text": "(Peters et al., 2018a)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 312, |
|
"end": 332, |
|
"text": "Poliak et al., 2018;", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 333, |
|
"end": 355, |
|
"text": "Ettinger et al., 2018;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 356, |
|
"end": 373, |
|
"text": "Kim et al., 2018)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 826, |
|
"end": 846, |
|
"text": "Tamkin et al. (2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "BERT We focus on the popular BERT model (Devlin et al., 2019) , focusing on the 12-layer base uncased variant. 2 We denote the pretrained model as Base and refer to fine-tuned versions by the name of the task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 40, |
|
"end": 61, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 111, |
|
"end": 112, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "MNLI A common benchmark for natural language understanding, the MNLI dataset (Williams et al., 2018) contains over 433K sentence pairs annotated with textual entailment information. We fine-tune BERT using the architecture and parameters of Devlin et al. (2019) , using a softmax layer on [CLS] representation to predict the output label. Across three trials, the evaluation accuracy of our BERT Base model is 83.3 \u00b1 0.1, slightly lower but comparable to the published score of 84.6. SQuAD The SQuADv1.1 dataset (Rajpurkar et al., 2016) contains over 100K crowd-sourced question-answer pairs, created from a set of Wikipedia articles. We fine-tune BERT using the architecture and parameters of Devlin et al. (2019) , which uses two independent softmax layers to predict the start and end tokens of the answer span. Our average F1 score is 89.2 \u00b1 0.2, slightly higher than the published 88.5.", |
|
"cite_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 100, |
|
"text": "(Williams et al., 2018)", |
|
"ref_id": "BIBREF61" |
|
}, |
|
{ |
|
"start": 241, |
|
"end": 261, |
|
"text": "Devlin et al. (2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 289, |
|
"end": 294, |
|
"text": "[CLS]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 512, |
|
"end": 536, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 694, |
|
"end": 714, |
|
"text": "Devlin et al. (2019)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "3" |
|
}, |
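The two output heads referenced above (a softmax over the [CLS] vector for MNLI, and independent start/end softmaxes over token positions for SQuAD) can be sketched as follows. This is a minimal PyTorch illustration of the setup described by Devlin et al. (2019), not the authors' code (footnote 2 notes the original TensorFlow implementation was used); the class and variable names are our own.

```python
import torch
import torch.nn as nn


class ClsClassificationHead(nn.Module):
    """Softmax classifier over the [CLS] representation (e.g. 3 MNLI labels)."""

    def __init__(self, hidden_size: int = 768, num_labels: int = 3):
        super().__init__()
        self.classifier = nn.Linear(hidden_size, num_labels)

    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
        # sequence_output: [batch, seq_len, hidden]; [CLS] is the first token.
        cls_vec = sequence_output[:, 0]
        return torch.log_softmax(self.classifier(cls_vec), dim=-1)


class SpanPredictionHead(nn.Module):
    """Two independent distributions over token positions for the answer span."""

    def __init__(self, hidden_size: int = 768):
        super().__init__()
        # One linear layer producing a start logit and an end logit per token.
        self.span_logits = nn.Linear(hidden_size, 2)

    def forward(self, sequence_output: torch.Tensor):
        start_logits, end_logits = self.span_logits(sequence_output).split(1, dim=-1)
        # A softmax over the sequence dimension gives start/end distributions.
        return start_logits.squeeze(-1), end_logits.squeeze(-1)
```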
|
{ |
|
"text": "We also introduce a BERT model fine-tuned on dependency parsing (Dep). We include this task to present a contrasting perspective from the prior two datasets, since prior research has suggested that much of the information needed to solve dependency parsing is already present after pre-training (Hewitt and Manning, 2019; Goldberg, 2019; Tenney et al., 2019b) . Our model is trained on data from the CoNLL 2017 Shared Task (Zeman et al., 2017) and uses the features of BERT as input to a biaffine classifier, similar to Dozat and Manning (2017) . The model uses a learning rate of 3\u21e510 5 with a 10% warm-up portion, uses an Adam optimizer (Kingma and Ba, 2014) , and is trained for 20 epochs. The Labeled Attachment Score (LAS) on the development set is 96.3\u00b10.1 for our model 3", |
|
"cite_spans": [ |
|
{ |
|
"start": 295, |
|
"end": 321, |
|
"text": "(Hewitt and Manning, 2019;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 322, |
|
"end": 337, |
|
"text": "Goldberg, 2019;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 359, |
|
"text": "Tenney et al., 2019b)", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 443, |
|
"text": "(Zeman et al., 2017)", |
|
"ref_id": "BIBREF63" |
|
}, |
|
{ |
|
"start": 520, |
|
"end": 544, |
|
"text": "Dozat and Manning (2017)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 639, |
|
"end": 660, |
|
"text": "(Kingma and Ba, 2014)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dependency Parsing", |
|
"sec_num": null |
|
}, |
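Below is a simplified sketch of a biaffine arc scorer in the spirit of Dozat and Manning (2017), which consumes BERT features and scores every dependent-head pair. It is an illustration under our own assumptions (dimensions, MLP sizes, and omission of the label scorer are our choices), not the exact classifier trained for the Dep model.

```python
import torch
import torch.nn as nn


class BiaffineArcScorer(nn.Module):
    """Score s[b, i, j]: how plausible it is that token j is the head of token i."""

    def __init__(self, hidden_size: int = 768, arc_dim: int = 512):
        super().__init__()
        self.dep_mlp = nn.Sequential(nn.Linear(hidden_size, arc_dim), nn.ReLU())
        self.head_mlp = nn.Sequential(nn.Linear(hidden_size, arc_dim), nn.ReLU())
        self.W = nn.Parameter(torch.zeros(arc_dim, arc_dim))
        self.bias = nn.Parameter(torch.zeros(arc_dim))

    def forward(self, encoder_states: torch.Tensor) -> torch.Tensor:
        # encoder_states: [batch, seq_len, hidden] taken from BERT.
        dep = self.dep_mlp(encoder_states)    # [batch, seq, arc_dim]
        head = self.head_mlp(encoder_states)  # [batch, seq, arc_dim]
        # Biaffine term plus a linear bias term for each candidate head.
        scores = dep @ self.W @ head.transpose(1, 2)       # [batch, seq, seq]
        scores = scores + (head @ self.bias).unsqueeze(1)  # broadcast over dependents
        return scores  # softmax over the last dim gives a head distribution per token
```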
|
{ |
|
"text": "Equipped with the models trained on these downstream tasks, we ask how the representation of linguistic features compare to those in the pretrained model? Recent studies have shown that these robust features are not necessarily used to inform predictions on downstream tasks, with models appearing to use dataset heuristics such as lexical overlap or word priors (Poliak et al., 2018) , but it is an open question whether this is because these features are forgotten entirely or simply are not always used. We explore this with supervised probing techniques, using edge probing (Tenney et al., 2019b) and structural probes (Hewitt and Manning, 2019) to explore how well linguistic information can be recovered from the fine-tuned model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 363, |
|
"end": 384, |
|
"text": "(Poliak et al., 2018)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 578, |
|
"end": 600, |
|
"text": "(Tenney et al., 2019b)", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 623, |
|
"end": 649, |
|
"text": "(Hewitt and Manning, 2019)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "What happens to linguistic features?", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Edge Probing Edge probing aims to measure how contextual representations encode various linguistic phenomena, including part-of-speech, entity typing, and coreference. We use the tasks and parameters of Tenney et al. (2019b) , which uses a two-layer MLP to predict edge and span labels from frozen encoder representations. 4 As we are interested in whether the linguistic knowledge is retained by the model overall, we utilize the mix version of the edge probes, which takes as input a learned scalar mixing of the representations from every layer. 5 After training, we report the microaveraged F1 scores on a held-out test set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 203, |
|
"end": 224, |
|
"text": "Tenney et al. (2019b)", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 323, |
|
"end": 324, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "What happens to linguistic features?", |
|
"sec_num": "4" |
|
}, |
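The "mix" edge probes consume a learned scalar mixture of the representations from every layer. The sketch below shows one common formulation of such a scalar mix (softmax-normalized per-layer weights plus a global scale); the exact probe architecture and hyperparameters follow Tenney et al. (2019b) and are not reproduced here, so treat this as an assumed, illustrative version.

```python
import torch
import torch.nn as nn


class ScalarMix(nn.Module):
    """Learned softmax-weighted sum over the hidden states of all layers."""

    def __init__(self, num_layers: int = 13):  # 12 transformer layers + embeddings
        super().__init__()
        self.layer_weights = nn.Parameter(torch.zeros(num_layers))
        self.gamma = nn.Parameter(torch.ones(1))

    def forward(self, layer_states):
        # layer_states: list of [batch, seq_len, hidden] tensors, one per frozen layer.
        weights = torch.softmax(self.layer_weights, dim=0)
        mixed = sum(w * h for w, h in zip(weights, layer_states))
        return self.gamma * mixed  # this mixture feeds the two-layer MLP probe
```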
|
{ |
|
"text": "Structural Probe Complementary to the edge probes, the structural probes of Hewitt and Manning (2019) analyze how well representations encode syntactic structure. Specifically, the probe identifies whether the squared L2 distance of representations under some linear transformation en- Table 1 : Comparison of F1 performance on the edge probing tasks before and after fine-tuning. The BERT Base performance is consistent with (Tenney et al., 2019b) , and the results show that the fine-tuned models retain most of the linguistic concepts discovered during unsupervised pre-training. We report single numbers for clarity, but note that variation across runs is \u00b10.5 between probing runs, \u00b10.7 between fine-tuning runs from the same checkpoint, and \u00b11.0 point between different pre-training runs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 101, |
|
"text": "Hewitt and Manning (2019)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 426, |
|
"end": 448, |
|
"text": "(Tenney et al., 2019b)", |
|
"ref_id": "BIBREF53" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 286, |
|
"end": 293, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "What happens to linguistic features?", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "codes the dependency parse. The two versions of the structural probe either attempt to predict the tree depth for each word (distance from the root node) or pairwise distances for all words in the parse tree. For both, we measure the Spearman correlation between predicted and true values 6", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "What happens to linguistic features?", |
|
"sec_num": "4" |
|
}, |
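For reference, the structural probes of Hewitt and Manning (2019) learn a linear map B and predict parse-tree distances and depths from squared L2 norms in the transformed space. Writing h_i for the representation of word i, the regressed quantities are (a paraphrase of the original formulation, in our notation):

```latex
% Pairwise distance probe: squared L2 distance under the learned map B
d_B(h_i, h_j)^2 = \bigl( B (h_i - h_j) \bigr)^{\top} \bigl( B (h_i - h_j) \bigr)
% Depth probe: squared norm under B, regressed against tree depth
\lVert h_i \rVert_B^2 = (B h_i)^{\top} (B h_i)
```

Probe quality is then reported as the Spearman correlation between these predicted values and the gold tree distances or depths, as described above.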
|
{ |
|
"text": "The results from both probing tasks demonstrate that the linguistic features from pre-training are preserved in the fine-tuned models. This is first seen in the edge probing metrics presented in Table 1. For the sake of comparison, we provide baseline results on the output of the embedding layer (Lexical) and a randomly initialized BERT architecture (Randomized). These baselines are important as inspection-based analysis can often discover patterns that are not obviously present due to the high capacity of auxiliary classifiers. For example, Zhang and Bowman (2018); Hewitt and Liang (2019) found that expressive-enough probing methods can perform surprisingly well even when trained on randomized encoders. Across the edge probing suite, we see only small changes in F1 score from the fine-tuned models compared to BERT base. In most cases, we observe a drop in performance of 0.5-2%, with some variation: MNLI and SQuAD lead to drops of 1.5-3% on syntactic tasks-constituents, and POS, dependencies, and SRL, respectively-while the dependency parsing model leads to signifi-cantly improved syntactic performance (+4% on constituent labeling) while dropping performance on the more semantically-oriented coreference, SPR, and relation classification tasks. We hypothesize that these changes relate to the similarity between tasks: a task like constituent labels help improve dependency parsing, and is thus strengthened, whereas higher level semantic tasks such as SPR contribute less directly and such information may be lost during fine-tuning. Nonetheless, in most cases these effects are small: they are comparable to the variation between randomly-seeded fine-tuning runs (\u00b10.7), and much smaller than the difference between the full model and the Lexical or Randomized baselines, suggesting that most linguistic information from BERT is still available within the model after fine-tuning.", |
|
"cite_spans": [ |
|
{ |
|
"start": 573, |
|
"end": 596, |
|
"text": "Hewitt and Liang (2019)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Next, we turn to the structural probe, with results seen in Figure 1 . First, the dependency parsing fine-tuned model shows improvements in the Spearman correlation, as early as layer 5. Since the structural probes are designed and trained to look for syntax, this result suggests that the finetuning improves the model's internal representation of such information. This makes intuitive sense as the fine-tuning task is aligned with the probing task. On the MNLI and SQuAD finetuned models, we observe minimal changes in performance, with small drops within the final layer. This artifact likely emerges from the fine-tuning setup where the last layer is only needed for classification or span prediction and therefore is unlikely to also retain all the linguistic information. 7 Figure 1 : Comparison of the structural probe performance on BERT models before and after fine-tuning. The stability of the Spearman correlations between both the depths and distance probes suggest that the embeddings still retain significant information about the syntax of inputted sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 68, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 781, |
|
"end": 789, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "This result suggests that the actual magnitude of change within the \"syntactic subspace\" is quite small. This is consistent with observations by Gauthier and Levy (2019) and suggests that information about syntactic structure is well-preserved in models on downstream tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 145, |
|
"end": 169, |
|
"text": "Gauthier and Levy (2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "One caveat of the experimentation above is that it uses complex diagnostic classifiers and only reports final model performance. Instead, what if the linguistic features were simply becoming more difficult to extract from the representations? Then, they could be not as readily \"available\" after finetuning. We explored this hypothesis using Minimum Description Length probes (Voita and Titov, 2020) , with the results presented in Appendix B. We found minimal differences across most tasks, where the only significant result was that finetuning on dependency parsing made the corresponding edge probing task easier to learn as a function of the number of examples.", |
|
"cite_spans": [ |
|
{ |
|
"start": 376, |
|
"end": 399, |
|
"text": "(Voita and Titov, 2020)", |
|
"ref_id": "BIBREF58" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Overall, our results suggest that linguistic features are still available, and that the fine-tuning process does not lead to catastrophic forgetting. Nonetheless, behavioral analyses have shown that finetuned models can still fail to leverage even simple syntactic knowledge in their predictions (McCoy et al., 2019b,a; Min et al., 2020) , and may instead rely on annotation artifacts (Gururangan et al., 2018) or pattern matching (Jia and Liang, 2017) . This suggests that the changes from fine-tuning are conservative: rich features are still present even if the model ends up finding a naive, simple solution.", |
|
"cite_spans": [ |
|
{ |
|
"start": 296, |
|
"end": 319, |
|
"text": "(McCoy et al., 2019b,a;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 320, |
|
"end": 337, |
|
"text": "Min et al., 2020)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 431, |
|
"end": 452, |
|
"text": "(Jia and Liang, 2017)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The supervised probes from the previous section are highly targeted: as trained models, they are sensitive to particular linguistic phenomena, but they also can learn to ignore everything else. If the supervised probe is closely related to the finetuning task-such as for syntactic probes and dependency parsing-we observe significant changes in performance, but otherwise we see little effect. Nonetheless, we know that something must be changing during fine-tuning-at minimum because, as shown in , performance degrades significantly if the encoder is completely frozen. To explore this change, we turn to an unsupervised technique, Representational Similarity Analysis (RSA; Laakso and Cottrell, 2000) , which is sensitive to the global structure of the embedding space, and corroborate our findings with layer-based ablations. While these techniques are not targeted to specific linguistic phenomena, they do provide a powerful exploratory tool that can illuminate which parts of the model change and how they vary across datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 678, |
|
"end": 704, |
|
"text": "Laakso and Cottrell, 2000)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Where do the representations change?", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "RSA is a technique for measuring the similarity between two different representation spaces for a given set of stimuli. Originally developed for neuroscience (Kriegeskorte et al., 2008) , it has become increasingly used to analyze similarity between neural network activations (Abnar et al., 2019; Chrupa\u0142a and Alishahi, 2019) . The method works by using a common set of n examples, used to create two sets of representations. For each set, a kernel is used to define a pairwise similarity matrix in R n\u21e5n . The final similarity score between the two representation spaces is calculated as the Pearson correlation between the flattened upper triangulars of the two similarity matrices.", |
|
"cite_spans": [ |
|
{ |
|
"start": 158, |
|
"end": 185, |
|
"text": "(Kriegeskorte et al., 2008)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 277, |
|
"end": 297, |
|
"text": "(Abnar et al., 2019;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 298, |
|
"end": 326, |
|
"text": "Chrupa\u0142a and Alishahi, 2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representational Similarity Analysis", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "In our application, we pass ordinary sentences (Wikipedia), sentence-pairs (MNLI), or questionanswer pairs (SQuAD) as inputs to the BERT model, and select a random sample (n = 5000) of tokens as stimuli. This input is consistent with the masked language model pre-training, various finetuning tasks, and diagnostic classifiers in analyzing the contextual representations for every token. We extract the activations of corresponding layers from the two models to compare (e.g. Base vs. a fine-tuned model). Following previous applications of RSA to text representations (Abnar et al., 2019; Chrupa\u0142a and Alishahi, 2019) , we adopt the cosine similarity kernel.", |
|
"cite_spans": [ |
|
{ |
|
"start": 569, |
|
"end": 589, |
|
"text": "(Abnar et al., 2019;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 590, |
|
"end": 618, |
|
"text": "Chrupa\u0142a and Alishahi, 2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representational Similarity Analysis", |
|
"sec_num": "5.1" |
|
}, |
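A minimal sketch of the RSA computation described in this subsection, assuming two activation matrices extracted for the same n token stimuli (one from BERT Base, one from a fine-tuned model) at a given layer. The cosine kernel and the Pearson correlation over flattened upper triangulars follow the description above; the function name and array layout are our own.

```python
import numpy as np


def rsa_similarity(acts_a: np.ndarray, acts_b: np.ndarray) -> float:
    """RSA score between two representation spaces for the same n stimuli.

    acts_a, acts_b: arrays of shape (n, d_a) and (n, d_b), e.g. layer-k activations
    of the same 5000 sampled tokens from BERT Base and from a fine-tuned model.
    """
    def cosine_similarity_matrix(acts: np.ndarray) -> np.ndarray:
        normed = acts / np.linalg.norm(acts, axis=1, keepdims=True)
        return normed @ normed.T  # (n, n) pairwise cosine similarities

    sim_a = cosine_similarity_matrix(acts_a)
    sim_b = cosine_similarity_matrix(acts_b)
    upper = np.triu_indices(sim_a.shape[0], k=1)  # flattened upper triangulars
    return float(np.corrcoef(sim_a[upper], sim_b[upper])[0, 1])  # Pearson r
```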
|
{ |
|
"text": "While RSA does not require learning any parameters and is thus resistant to overfitting (Abdou et al., 2019) , the metric can be sensitive to spurious signals in the representations that may not be relevant to model behavior. 8 To mitigate this, we repeat the BERT pre-training procedure (as described in Section 3 of Devlin et al., 2019) from scratch three times. For each pre-trained checkpoints, we fine-tune on the three downstream task and report the average for these independent runs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 108, |
|
"text": "(Abdou et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representational Similarity Analysis", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Results Figure 2 shows the results of our RSA analysis comparing the three task models, Dep, MNLI, and SQuAD, to BERT Base at each layer. Note that in these figures, lower values imply greater change relative to the pre-trained model. Across all tasks, we observe that changes generally arise in the top layers of the network, with little change observed in the layers closest to the in-put. To first order, this may be a result of optimization: vanishing gradients result in the most change in the layers closest to the loss. Yet we interestingly do observe significant differences between tasks. For dependency parsing, we observe the deepest changes, departing from the Base model as early as layers 4 and 5. This result likely arises as syntactic understanding of input is maximized in the early layers of the model, as measured by the edge probes of (Tenney et al., 2019a) and presented structural probes. Performing optimally on this task would require surfacing this information in all subsequent layers, leading to these changes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 855, |
|
"end": 877, |
|
"text": "(Tenney et al., 2019a)", |
|
"ref_id": "BIBREF52" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 16, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Representational Similarity Analysis", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Except for the last layer which is particularly sensitive to the form of the output (span-based for dependencies and SQuAD, or using the [CLS] token for MNLI), we see that MNLI involves the smallest changes to the model: the second-to-last attention layer still shows a very high similarity score of 0.84 \u00b1 0.02 compared to the representations of the pre-trained encoder. The SQuAD model shows a slightly steeper change, behaving similarly to the Base model through layer 7 but dropping off afterwards -suggesting that finetuning on this task involves a deeper, yet still relatively shallow reconfiguration of the encoder. SQuAD likely shows deeper processing as choosing an answer span still requires satisfying a number of syntactic constraints and requires evolution across more than just two layers (van Aken et al., 2019), but overall, we see that for these benchmark tasks, fine-tuning is conservative and only changes a fraction of the model's representations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representational Similarity Analysis", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "As an unsupervised, metric-based technique, RSA tells us about broad changes in the representation space, but does not in itself say if these changes are important for the model's behavior-i.e. for the processing necessary to solve the downstream task. To measure our observations in terms of task performance, we turn to two layer ablation studies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Layer Ablations", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Partial Freezing can be thought of as a test for how many layers need to change for a downstream task. We freeze the bottom k layers (and the embeddings)-treating them as features-but allow the rest to adapt. Effectively, this clamps the first k layers to have RSA similarity of 1 with the Base model. Also, we perform model truncation as a rough estimate of difficulty for each task, and as an attempt to de-couple the results of partial Figure 3 : Effects of freezing an increasing number of layers during fine-tuning on performance (we report the evaluation accuracy for MNLI, F1 score for SQuAD, and LAS for Dep). The point at -1 corresponds to no frozen components. The graph shows that only a few unfrozen layers are needed to improve task performance, supporting the shallow processing conclusion.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 439, |
|
"end": 447, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Layer Ablations", |
|
"sec_num": "5.2" |
|
}, |
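Partial freezing amounts to disabling gradient updates for the embeddings and the bottom k transformer layers, while the remaining layers and the task head stay trainable. The sketch below uses the Hugging Face transformers BertModel layout (model.embeddings, model.encoder.layer) purely as an illustration; the experiments here used the original TensorFlow BERT implementation, so this is an assumed, not actual, setup.

```python
from transformers import BertModel


def freeze_bottom_layers(model: BertModel, k: int) -> BertModel:
    """Disable gradient updates for the embeddings and the lowest k layers."""
    for param in model.embeddings.parameters():
        param.requires_grad = False
    for layer in model.encoder.layer[:k]:
        for param in layer.parameters():
            param.requires_grad = False
    return model


# Example: keep only the top 3 of 12 layers trainable during fine-tuning.
# model = freeze_bottom_layers(BertModel.from_pretrained("bert-base-uncased"), k=9)
```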
|
{ |
|
"text": "freezing from helpful features that may be available in top layers of BERT Base (Tenney et al., 2019a) . The patterns we observe corroborate the findings of our RSA analysis. On MNLI, we find that performance does not drop significantly unless the last two layers are frozen, while the truncated models are able to achieve comparable performance with only three attention layers. This suggests that while natural language inference (Dagan et al., 2006) is known to be a complex task in the limit, most MNLI examples can be resolved with relatively shallow processing. SQuAD exhibits a similar trend: we see a significant performance drop when 3 or fewer layers are allowed to change (e.g. freezing through layer 8 or higher), consistent with where RSA finds the greatest change. From our truncation experiment, we similarly see that only five layers are needed to achieve comparable performance to the full model. Dependency parsing performance drops even more rapidly-in both experiments-consistent with the results from RSA. This is surprising, since probing analysis (Goldberg, 2019; Marvin and Linzen, 2018) suggests that many syntactic phenomena are well-captured by the pre-trained model, and diagnostics for dependency parsing in particular (Tenney et al., 2019b,a; Hewitt and Manning, 2019; Clark et al., 2019) show strong performance from probes on frozen models. Yet Figure 4 : Effects of fine-tuning at earlier layers of BERT. We note that the MNLI evaluation accuracy and SQuAD F1 score approach the full model performance by layer 6, whereas the dependency parsing LAS seems to require more layers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 80, |
|
"end": 102, |
|
"text": "(Tenney et al., 2019a)", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 432, |
|
"end": 452, |
|
"text": "(Dagan et al., 2006)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1070, |
|
"end": 1086, |
|
"text": "(Goldberg, 2019;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1087, |
|
"end": 1111, |
|
"text": "Marvin and Linzen, 2018)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 1248, |
|
"end": 1272, |
|
"text": "(Tenney et al., 2019b,a;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1273, |
|
"end": 1298, |
|
"text": "Hewitt and Manning, 2019;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1299, |
|
"end": 1318, |
|
"text": "Clark et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1377, |
|
"end": 1385, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Layer Ablations", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "as observed with the structural probes ( Figure 1 ) there is headroom available, and it appears that to capture it requires changing deeper parts of the model. We hypothesize that this effect may come from the hierarchical nature of parsing, which requires additional layers to determine the full tree structure. Fully reconciling these observations would be a promising direction for future work.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 41, |
|
"end": 49, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Layer Ablations", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Finally, we ask whether the effects of fine-tuning are general: do they apply only to inputs that look like the fine-tuning data, or do they lead to broader changes in behavior? This is usually explored by behavioral methods, in which a model is trained on one domain and evaluated on anotherfor example, the mismatched evaluation for MNLI (Williams et al., 2018) -but this analysis is limited by the availability of labeled data. By using RSA, we can test this in an unsupervised manner.", |
|
"cite_spans": [ |
|
{ |
|
"start": 340, |
|
"end": 363, |
|
"text": "(Williams et al., 2018)", |
|
"ref_id": "BIBREF61" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Out-of-Domain Behavior", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We use RSA to compare the fine-tuned model to Base and observe the degree of similarity when inputs are drawn from different corpora. We use random samples from the development sets for MNLI (as premise [SEP] hypothesis) and SQuAD (as question [SEP] passage) as in-domain for their respective models, 9 and as the out-of-domain control we use random Wikipedia sentences (which resemble the pretraining domain). As in Section 5.1, we use the Figure 5 : Comparison of the representations in the MNLI (left) and SQuAD (right) fine-tuned models and those of BERT Base, with the different lines corresponding to examples coming from various datasets. These graphs show that fine-tuning models only lead to shallow changes, consolidated to the last few layers. Also, we see that fine-tuning has a much greater impact on the token representations of in-domain data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 441, |
|
"end": 449, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Out-of-Domain Behavior", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "representations of n = 5000 tokens as our stimuli for each comparison. 10 Results for the MNLI and SQuAD fine-tuned models are shown in Figure 5 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 144, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Out-of-Domain Behavior", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Although we see that all models diverge from BERT Base in the top layers, there is a significantly larger change in the representations on indomain examples. This suggests that fine-tuning is specific to the target domain. For other examples, such as the Wikipedia sentences which resemble the pre-training data, the similarity score with BERT Base is much higher. This suggests that fine-tuning leads the model to change its representations for the new domain but to continue to behave more like the Base model otherwise. This final result again shows that fine-tuning is conservative and suggests room for improvement in model generalization to out-of-domain sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Out-of-Domain Behavior", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In this paper, we employ three complementary analysis methods to gain insight into effects of fine-tuning on the representations produced by BERT. From supervised probing analyses, we find that the linguistic structures discovered during pretraining remain available after fine-tuning, though this information is not strengthened by tuning on benchmark tasks such as MNLI and SQuAD. In light of prior studies Jia and Liang, 2017) which have shown that end-task models often fall back on simple heuristics, our results are especially interesting: they suggest that the model has the option of using stronger features, but chooses to use heuristics instead.", |
|
"cite_spans": [ |
|
{ |
|
"start": 409, |
|
"end": 429, |
|
"text": "Jia and Liang, 2017)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Next, our results using RSA and layer ablations show that the changes from fine-tuning alter a fraction of the model capacity, specifically within the top few layers (up to some variation across tasks). Also, although fine-tuning has a significant effect on the representations of in-domain sentences, the representations of out-of-domain examples remain much closer to those of the pre-trained model. Overall, these conclusions suggest that finetuning-as currently practiced-is a conservative process: preserving linguistic features, affecting only a few layers, and specific to in-domain examples. While the standard fine-tuning recipe undeniably leads to strong performance on many tasks, there appears to be room for improvement: an opportunity to refine this transfer step-potentially by utilizing more of the model capacity-to better the generalization and transferability.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Finally, in this work, we pulled from a range of analysis techniques to understand very finegrained aspects of model representations (via probing classifiers) and coarse-grained ones (via RSA). An important direction for future work is the development of new techniques which allow for more exploration of the middle ground. Given available techniques, we can illuminate broadly that models are changing and test hypotheses about specific features (with probing tasks or attention analyses). New principled methods for discovering which features change will be invaluable for a deeper understanding of these models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "\u21e4 Work done as member of the Google AI Residency program https://ai.google/research/ join-us/ai-residency/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "See Belinkov and Glass (2019) andRogers et al. (2020) for a survey of probing methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use the original TensorFlow(Abadi et al., 2015) implementation from https://github.com/ google-research/bert.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We provide additional details of the experiments and datasets in Appendix C for the purpose of reproducibility.4 The dependency labeling task is from the English Web Treebank(Silveira et al., 2014), SPR corresponds to SPR1 fromTeichert et al. (2017), and relations is Task 8 from Se-mEval 2010(Hendrickx et al., 2010). All of the other tasks are from OntoNotes 5.0(Weischedel et al., 2013).5 We also explored the effects of fine-tuning on the top layer of BERT to provide additional insight into whether this linguistic information may be lost from the top layers even if still present elsewhere. For results, see Appendix A.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note that Hall Maudslay et al. (2020) has recently raised concern about these metrics, but we follow the original method ofHewitt and Manning (2019) for the most comparable results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A similar story emerges when repeating the edge probing models on the last layer of BERT; see Appendix A.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We note that probing techniques are more robust to this, since they learn to focus on relevant features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note that these are unseen during fine-tuning, although RSA scores do not change significantly if the MNLI or SQuAD training sets are used.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We also tested single-sentence examples from MNLI and SQuAD by only taking the premise and question respectively; the trends were similar toFigure 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank our anonymous reviewers for their helpful feedback; Deepak Ramachandran, Kelvin Guu, and Slav Petrov for providing feedback on an early draft of this paper; and Tim Dozat for his help implementing the fine-tuning task for dependency parsing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowldgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Higher-order comparisons of sentence encoder representations", |
|
"authors": [ |
|
{ |
|
"first": "Mostafa", |
|
"middle": [], |
|
"last": "Abdou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Artur", |
|
"middle": [], |
|
"last": "Kulmizev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Low", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5837--5844", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1593" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mostafa Abdou, Artur Kulmizev, Felix Hill, Daniel M. Low, and Anders S\u00f8gaard. 2019. Higher-order com- parisons of sentence encoder representations. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 5837- 5844, Hong Kong, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Blackbox meets blackbox: Representational similarity & stability analysis of neural language models and brains", |
|
"authors": [ |
|
{ |
|
"first": "Samira", |
|
"middle": [], |
|
"last": "Abnar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lisa", |
|
"middle": [], |
|
"last": "Beinborn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rochelle", |
|
"middle": [], |
|
"last": "Choenni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Willem", |
|
"middle": [], |
|
"last": "Zuidema", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "191--203", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-4820" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samira Abnar, Lisa Beinborn, Rochelle Choenni, and Willem Zuidema. 2019. Blackbox meets blackbox: Representational similarity & stability analysis of neural language models and brains. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 191-203, Florence, Italy. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "How does bert answer questions? a layer-wise analysis of transformer representations", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Betty Van Aken", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Winter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Lser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gers", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Betty van Aken, Benjamin Winter, Alexander Lser, and Felix A. Gers. 2019. How does bert answer ques- tions? a layer-wise analysis of transformer repre- sentations.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "What do neural machine translation models learn about morphology?", |
|
"authors": [ |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nadir", |
|
"middle": [], |
|
"last": "Durrani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fahim", |
|
"middle": [], |
|
"last": "Dalvi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hassan", |
|
"middle": [], |
|
"last": "Sajjad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Glass", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "861--872", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1080" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yonatan Belinkov, Nadir Durrani, Fahim Dalvi, Hassan Sajjad, and James Glass. 2017. What do neural ma- chine translation models learn about morphology? In Proceedings of the 55th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 861-872, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Analysis methods in neural language processing: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Glass", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "49--72", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00254" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yonatan Belinkov and James Glass. 2019. Analysis methods in neural language processing: A survey. Transactions of the Association for Computational Linguistics, 7:49-72.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Correlating neural and symbolic representations of language", |
|
"authors": [ |
|
{ |
|
"first": "Grzegorz", |
|
"middle": [], |
|
"last": "Chrupa\u0142a", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Afra", |
|
"middle": [], |
|
"last": "Alishahi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2952--2962", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1283" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Grzegorz Chrupa\u0142a and Afra Alishahi. 2019. Corre- lating neural and symbolic representations of lan- guage. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguis- tics, pages 2952-2962, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "What does BERT look at? an analysis of BERT's attention", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Urvashi", |
|
"middle": [], |
|
"last": "Khandelwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "276--286", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-4828" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark, Urvashi Khandelwal, Omer Levy, and Christopher D. Manning. 2019. What does BERT look at? an analysis of BERT's attention. In Pro- ceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 276-286, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Visualizing and measuring the geometry of BERT", |
|
"authors": [ |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Coenen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Reif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ann", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Been", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Pearce", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernanda", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Vi\u00e9gas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Wattenberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andy Coenen, Emily Reif, Ann Yuan, Been Kim, Adam Pearce, Fernanda B. Vi\u00e9gas, and Martin Wat- tenberg. 2019. Visualizing and measuring the geom- etry of BERT. CoRR, abs/1906.02715.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "The pascal recognising textual entailment challenge", |
|
"authors": [ |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Glickman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernardo", |
|
"middle": [], |
|
"last": "Magnini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the First International Conference on Machine Learning Challenges: Evaluating Predictive Uncertainty Visual Object Classification, and Recognizing Textual Entailment, MLCW'05", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--190", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/11736790_9" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ido Dagan, Oren Glickman, and Bernardo Magnini. 2006. The pascal recognising textual entailment challenge. In Proceedings of the First Inter- national Conference on Machine Learning Chal- lenges: Evaluating Predictive Uncertainty Visual Object Classification, and Recognizing Textual En- tailment, MLCW'05, pages 177-190, Berlin, Hei- delberg. Springer-Verlag.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Deep biaffine attention for neural dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Dozat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ICLR (Poster). OpenReview.net", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Timothy Dozat and Christopher D. Manning. 2017. Deep biaffine attention for neural dependency pars- ing. In ICLR (Poster). OpenReview.net.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Assessing composition in sentence vector representations", |
|
"authors": [ |
|
{ |
|
"first": "Allyson", |
|
"middle": [], |
|
"last": "Ettinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Elgohary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Phillips", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1790--1801", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Allyson Ettinger, Ahmed Elgohary, Colin Phillips, and Philip Resnik. 2018. Assessing composition in sen- tence vector representations. In Proceedings of the 27th International Conference on Computational Linguistics, pages 1790-1801, Santa Fe, New Mex- ico, USA. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Linking artificial and human neural representations of language", |
|
"authors": [ |
|
{ |
|
"first": "Jon", |
|
"middle": [], |
|
"last": "Gauthier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "529--539", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1050" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jon Gauthier and Roger Levy. 2019. Linking artificial and human neural representations of language. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 529- 539, Hong Kong, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Assessing bert's syntactic abilities", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Goldberg. 2019. Assessing bert's syntactic abili- ties. CoRR, abs/1901.05287.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Annotation artifacts in natural language inference data", |
|
"authors": [ |
|
{ |
|
"first": "Suchin", |
|
"middle": [], |
|
"last": "Gururangan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Swabha", |
|
"middle": [], |
|
"last": "Swayamdipta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "107--112", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2017" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suchin Gururangan, Swabha Swayamdipta, Omer Levy, Roy Schwartz, Samuel Bowman, and Noah A. Smith. 2018. Annotation artifacts in natural lan- guage inference data. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 107-112, New Orleans, Louisiana. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A tale of a probe and a parser", |
|
"authors": [ |
|
{ |
|
"first": "Rowan", |
|
"middle": [], |
|
"last": "Hall Maudslay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Valvoda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tiago", |
|
"middle": [], |
|
"last": "Pimentel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7389--7395", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.659" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rowan Hall Maudslay, Josef Valvoda, Tiago Pimentel, Adina Williams, and Ryan Cotterell. 2020. A tale of a probe and a parser. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 7389-7395, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "SemEval-2010 task 8: Multi-way classification of semantic relations between pairs of nominals", |
|
"authors": [ |
|
{ |
|
"first": "Iris", |
|
"middle": [], |
|
"last": "Hendrickx", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Su", |
|
"middle": [ |
|
"Nam" |
|
], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zornitsa", |
|
"middle": [], |
|
"last": "Kozareva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diarmuid\u00f3", |
|
"middle": [], |
|
"last": "S\u00e9aghdha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Pennacchiotti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lorenza", |
|
"middle": [], |
|
"last": "Romano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stan", |
|
"middle": [], |
|
"last": "Szpakowicz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 5th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "33--38", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iris Hendrickx, Su Nam Kim, Zornitsa Kozareva, Preslav Nakov, Diarmuid\u00d3 S\u00e9aghdha, Sebastian Pad\u00f3, Marco Pennacchiotti, Lorenza Romano, and Stan Szpakowicz. 2010. SemEval-2010 task 8: Multi-way classification of semantic relations be- tween pairs of nominals. In Proceedings of the 5th International Workshop on Semantic Evalua- tion, pages 33-38, Uppsala, Sweden. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Designing and interpreting probes with control tasks", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Hewitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2733--2743", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1275" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Hewitt and Percy Liang. 2019. Designing and in- terpreting probes with control tasks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Interna- tional Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP), pages 2733-2743, Hong Kong, China. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A structural probe for finding syntax in word representations", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Hewitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4129--4138", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1419" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Hewitt and Christopher D. Manning. 2019. A structural probe for finding syntax in word repre- sentations. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4129-4138, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "exbert: A visual analysis tool to explore learned representations in transformers models", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Hoover", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hendrik", |
|
"middle": [], |
|
"last": "Strobelt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Gehrmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.05276" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin Hoover, Hendrik Strobelt, and Sebastian Gehrmann. 2019. exbert: A visual analysis tool to explore learned representations in transformers models. arXiv preprint arXiv:1910.05276.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Visualisation and'diagnostic classifiers' reveal how recurrent and recursive neural networks process hierarchical structure", |
|
"authors": [ |
|
{ |
|
"first": "Dieuwke", |
|
"middle": [], |
|
"last": "Hupkes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Veldhoen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Willem", |
|
"middle": [], |
|
"last": "Zuidema", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "61", |
|
"issue": "", |
|
"pages": "907--926", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dieuwke Hupkes, Sara Veldhoen, and Willem Zuidema. 2018. Visualisation and'diagnostic classi- fiers' reveal how recurrent and recursive neural net- works process hierarchical structure. Journal of Ar- tificial Intelligence Research, 61:907-926.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Attention is not Explanation", |
|
"authors": [ |
|
{ |
|
"first": "Sarthak", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "3543--3556", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1357" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarthak Jain and Byron C. Wallace. 2019. Attention is not Explanation. In Proceedings of the 2019 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long and Short Pa- pers), pages 3543-3556, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Adversarial examples for evaluating reading comprehension systems", |
|
"authors": [ |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2021--2031", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1215" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robin Jia and Percy Liang. 2017. Adversarial exam- ples for evaluating reading comprehension systems. In Proceedings of the 2017 Conference on Empiri- cal Methods in Natural Language Processing, pages 2021-2031, Copenhagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Teaching syntax by adversarial distraction", |
|
"authors": [ |
|
{ |
|
"first": "Juho", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Malon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asim", |
|
"middle": [], |
|
"last": "Kadav", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the First Workshop on Fact Extraction and VERification (FEVER)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "79--84", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-5512" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Juho Kim, Christopher Malon, and Asim Kadav. 2018. Teaching syntax by adversarial distraction. In Pro- ceedings of the First Workshop on Fact Extraction and VERification (FEVER), pages 79-84, Brussels, Belgium. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Revealing the dark secrets of bert", |
|
"authors": [ |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Kovaleva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexey", |
|
"middle": [], |
|
"last": "Romanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Rogers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Rumshisky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.08593" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Olga Kovaleva, Alexey Romanov, Anna Rogers, and Anna Rumshisky. 2019. Revealing the dark secrets of bert. arXiv preprint arXiv:1908.08593.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Representational similarity analysis -connecting the branches of systems neuroscience", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Kriegeskorte", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Mur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Bandettini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Front Syst Neurosci", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "N. Kriegeskorte, M. Mur, and P. Bandettini. 2008. Representational similarity analysis -connecting the branches of systems neuroscience. Front Syst Neu- rosci, 2:4.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Content and cluster analysis: assessing representational similarity in neural systems", |
|
"authors": [ |
|
{ |
|
"first": "Aarre", |
|
"middle": [], |
|
"last": "Laakso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Garrison", |
|
"middle": [], |
|
"last": "Cottrell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Philosophical psychology", |
|
"volume": "13", |
|
"issue": "1", |
|
"pages": "47--76", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aarre Laakso and Garrison Cottrell. 2000. Content and cluster analysis: assessing representational sim- ilarity in neural systems. Philosophical psychology, 13(1):47-76.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Albert: A lite bert for self-supervised learning of language representations", |
|
"authors": [ |
|
{ |
|
"first": "Zhenzhong", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingda", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piyush", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Soricut", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. Albert: A lite bert for self-supervised learn- ing of language representations.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Linguistic knowledge and transferability of contextual representations", |
|
"authors": [ |
|
{ |
|
"first": "Nelson", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1073--1094", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1112" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nelson F. Liu, Matt Gardner, Yonatan Belinkov, Matthew E. Peters, and Noah A. Smith. 2019. Lin- guistic knowledge and transferability of contextual representations. In Proceedings of the 2019 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long and Short Pa- pers), pages 1073-1094, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Targeted syntactic evaluation of language models", |
|
"authors": [ |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Marvin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tal", |
|
"middle": [], |
|
"last": "Linzen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1192--1202", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1151" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rebecca Marvin and Tal Linzen. 2018. Targeted syn- tactic evaluation of language models. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1192-1202, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Berts of a feather do not generalize together: Large variability in generalization across models with similar test set performance", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Mccoy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junghyun", |
|
"middle": [], |
|
"last": "Min", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tal", |
|
"middle": [], |
|
"last": "Linzen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1911.02969" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R Thomas McCoy, Junghyun Min, and Tal Linzen. 2019a. Berts of a feather do not generalize to- gether: Large variability in generalization across models with similar test set performance. arXiv preprint arXiv:1911.02969.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Right for the wrong reasons: Diagnosing syntactic heuristics in natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Mccoy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tal", |
|
"middle": [], |
|
"last": "Linzen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3428--3448", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1334" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom McCoy, Ellie Pavlick, and Tal Linzen. 2019b. Right for the wrong reasons: Diagnosing syntactic heuristics in natural language inference. In Proceed- ings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3428-3448, Florence, Italy. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Syntactic data augmentation increases robustness to inference heuristics", |
|
"authors": [ |
|
{ |
|
"first": "Junghyun", |
|
"middle": [], |
|
"last": "Min", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Mccoy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Pitler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tal", |
|
"middle": [], |
|
"last": "Linzen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.11999" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junghyun Min, R Thomas McCoy, Dipanjan Das, Emily Pitler, and Tal Linzen. 2020. Syntactic data augmentation increases robustness to inference heuristics. arXiv preprint arXiv:2004.11999.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Insights on representational similarity in neural networks with canonical correlation", |
|
"authors": [ |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Morcos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maithra", |
|
"middle": [], |
|
"last": "Raghu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samy", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Neural Information Processing Systems 31", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5727--5736", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ari Morcos, Maithra Raghu, and Samy Bengio. 2018. Insights on representational similarity in neural net- works with canonical correlation. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa- Bianchi, and R. Garnett, editors, Advances in Neu- ral Information Processing Systems 31, pages 5727- 5736. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2227--2237", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1202" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018a. Deep contextualized word rep- resentations. In Proceedings of the 2018 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long Papers), pages 2227-2237, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Dissecting contextual word embeddings: Architecture and representation", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wen-Tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1499--1509", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1179" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Peters, Mark Neumann, Luke Zettlemoyer, and Wen-tau Yih. 2018b. Dissecting contextual word embeddings: Architecture and representation. In Proceedings of the 2018 Conference on Em- pirical Methods in Natural Language Processing, pages 1499-1509, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "To tune or not to tune? adapting pretrained representations to diverse tasks", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 4th Workshop on Representation Learning for NLP (RepL4NLP-2019)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7--14", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-4302" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew E. Peters, Sebastian Ruder, and Noah A. Smith. 2019. To tune or not to tune? adapting pre- trained representations to diverse tasks. In Proceed- ings of the 4th Workshop on Representation Learn- ing for NLP (RepL4NLP-2019), pages 7-14, Flo- rence, Italy. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "2020. Information-theoretic probing for linguistic structure", |
|
"authors": [ |
|
{ |
|
"first": "Tiago", |
|
"middle": [], |
|
"last": "Pimentel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Valvoda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rowan", |
|
"middle": [], |
|
"last": "Hall Maudslay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ran", |
|
"middle": [], |
|
"last": "Zmigrod", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.03061" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tiago Pimentel, Josef Valvoda, Rowan Hall Maudslay, Ran Zmigrod, Adina Williams, and Ryan Cotterell. 2020. Information-theoretic probing for linguistic structure. arXiv preprint arXiv:2004.03061.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Hypothesis only baselines in natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Poliak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Naradowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aparajita", |
|
"middle": [], |
|
"last": "Haldar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Seventh Joint Conference on Lexical and Computational Semantics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "180--191", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/S18-2023" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Poliak, Jason Naradowsky, Aparajita Haldar, Rachel Rudinger, and Benjamin Van Durme. 2018. Hypothesis only baselines in natural language in- ference. In Proceedings of the Seventh Joint Con- ference on Lexical and Computational Semantics, pages 180-191, New Orleans, Louisiana. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "jiant: A software toolkit for research on general-purpose text understanding models", |
|
"authors": [ |
|
{ |
|
"first": "Yada", |
|
"middle": [], |
|
"last": "Pruksachatkun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Yeres", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haokun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Phang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phu Mon", |
|
"middle": [], |
|
"last": "Htut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Tenney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel R", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2003.02249" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yada Pruksachatkun, Phil Yeres, Haokun Liu, Jason Phang, Phu Mon Htut, Alex Wang, Ian Tenney, and Samuel R Bowman. 2020. jiant: A software toolkit for research on general-purpose text understanding models. arXiv preprint arXiv:2003.02249.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Exploring the limits of transfer learning with a unified text-to-text transformer", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharan", |
|
"middle": [], |
|
"last": "Narang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Matena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanqi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter J", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "21", |
|
"issue": "140", |
|
"pages": "1--67", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text trans- former. Journal of Machine Learning Research, 21(140):1-67.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "SQuAD: 100,000+ questions for machine comprehension of text", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2383--2392", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1264" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: 100,000+ questions for machine comprehension of text. In Proceedings of the 2016 Conference on Empirical Methods in Natu- ral Language Processing, pages 2383-2392, Austin, Texas. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Universal coding, information, prediction, and estimation", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Rissanen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1984, |
|
"venue": "IEEE Transactions on Information Theory", |
|
"volume": "30", |
|
"issue": "4", |
|
"pages": "629--636", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Rissanen. 1984. Universal coding, information, pre- diction, and estimation. IEEE Transactions on In- formation Theory, 30(4):629-636.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "2020. A primer in bertology: What we know about how bert works", |
|
"authors": [ |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Rogers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Kovaleva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Rumshisky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2002.12327" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anna Rogers, Olga Kovaleva, and Anna Rumshisky. 2020. A primer in bertology: What we know about how bert works. arXiv preprint arXiv:2002.12327.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Understanding learning dynamics of language models with SVCCA", |
|
"authors": [ |
|
{ |
|
"first": "Naomi", |
|
"middle": [], |
|
"last": "Saphra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lopez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "3257--3267", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1329" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Naomi Saphra and Adam Lopez. 2019. Understand- ing learning dynamics of language models with SVCCA. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 3257-3267, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Is attention interpretable?", |
|
"authors": [ |
|
{ |
|
"first": "Sofia", |
|
"middle": [], |
|
"last": "Serrano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2931--2951", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1282" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sofia Serrano and Noah A. Smith. 2019. Is attention interpretable? In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 2931-2951, Florence, Italy. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "A gold standard dependency corpus for English", |
|
"authors": [ |
|
{ |
|
"first": "Natalia", |
|
"middle": [], |
|
"last": "Silveira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Dozat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Catherine", |
|
"middle": [], |
|
"last": "De Marneffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miriam", |
|
"middle": [], |
|
"last": "Connor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Bauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Natalia Silveira, Timothy Dozat, Marie-Catherine de Marneffe, Samuel Bowman, Miriam Connor, John Bauer, and Christopher D. Manning. 2014. A gold standard dependency corpus for English. In Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC- 2014).", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "2020. Investigating transferability in pretrained language models", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Tamkin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trisha", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Davide", |
|
"middle": [], |
|
"last": "Giovanardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.14975" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Tamkin, Trisha Singh, Davide Giovanardi, and Noah Goodman. 2020. Investigating transferabil- ity in pretrained language models. arXiv preprint arXiv:2004.14975.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Semantic proto-role labeling", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Teichert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Poliak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Gormley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Thirty-First AAAI Conference on Artificial Intelligence (AAAI-17)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Teichert, Adam Poliak, Benjamin Van Durme, and Matthew R Gormley. 2017. Semantic proto-role labeling. In Thirty-First AAAI Conference on Artifi- cial Intelligence (AAAI-17).", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "BERT rediscovers the classical NLP pipeline", |
|
"authors": [ |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Tenney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4593--4601", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1452" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ian Tenney, Dipanjan Das, and Ellie Pavlick. 2019a. BERT rediscovers the classical NLP pipeline. In Proceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4593- 4601, Florence, Italy. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "What do you learn from context? probing for sentence structure in contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Tenney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Xia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Berlin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Poliak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Mccoy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Najoung", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ian Tenney, Patrick Xia, Berlin Chen, Alex Wang, Adam Poliak, R Thomas McCoy, Najoung Kim, Benjamin Van Durme, Sam Bowman, Dipanjan Das, and Ellie Pavlick. 2019b. What do you learn from context? probing for sentence structure in contextu- alized word representations. In International Con- ference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "Diagnostic classifiers revealing how neural networks process hierarchical structure", |
|
"authors": [ |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Veldhoen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dieuwke", |
|
"middle": [], |
|
"last": "Hupkes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Willem", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Zuidema", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "CoCo@ NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "69--77", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sara Veldhoen, Dieuwke Hupkes, Willem H Zuidema, et al. 2016. Diagnostic classifiers revealing how neural networks process hierarchical structure. In CoCo@ NIPS, pages 69-77.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "Analyzing the structure of attention in a transformer language model", |
|
"authors": [ |
|
{ |
|
"first": "Jesse", |
|
"middle": [], |
|
"last": "Vig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "63--76", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-4808" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jesse Vig and Yonatan Belinkov. 2019. Analyzing the structure of attention in a transformer language model. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 63-76, Florence, Italy. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "The bottom-up evolution of representations in the transformer: A study with machine translation and language modeling objectives", |
|
"authors": [ |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Voita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4395--4405", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1448" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elena Voita, Rico Sennrich, and Ivan Titov. 2019a. The bottom-up evolution of representations in the trans- former: A study with machine translation and lan- guage modeling objectives. In Proceedings of the 2019 Conference on Empirical Methods in Natu- ral Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4395-4405, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "Analyzing multi-head self-attention: Specialized heads do the heavy lifting, the rest can be pruned", |
|
"authors": [ |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Voita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Talbot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fedor", |
|
"middle": [], |
|
"last": "Moiseev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5797--5808", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1580" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elena Voita, David Talbot, Fedor Moiseev, Rico Sen- nrich, and Ivan Titov. 2019b. Analyzing multi-head self-attention: Specialized heads do the heavy lift- ing, the rest can be pruned. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 5797-5808, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF58": { |
|
"ref_id": "b58", |
|
"title": "Informationtheoretic probing with minimum description length", |
|
"authors": [ |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Voita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2003.12298" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elena Voita and Ivan Titov. 2020. Information- theoretic probing with minimum description length. arXiv preprint arXiv:2003.12298.", |
|
"links": null |
|
}, |
|
"BIBREF59": { |
|
"ref_id": "b59", |
|
"title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2019. GLUE: A multi-task benchmark and analysis plat- form for natural language understanding. In Inter- national Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF60": { |
|
"ref_id": "b60", |
|
"title": "Ontonotes release 5.0 ldc2013t19. Linguistic Data Consortium, Philadelphia", |
|
"authors": [ |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Weischedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martha", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitchell", |
|
"middle": [], |
|
"last": "Marcus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lance", |
|
"middle": [], |
|
"last": "Ramshaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ann", |
|
"middle": [], |
|
"last": "Taylor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Kaufman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michelle", |
|
"middle": [], |
|
"last": "Franchini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ralph Weischedel, Martha Palmer, Mitchell Marcus, Eduard Hovy, Sameer Pradhan, Lance Ramshaw, Nianwen Xue, Ann Taylor, Jeff Kaufman, Michelle Franchini, et al. 2013. Ontonotes release 5.0 ldc2013t19. Linguistic Data Consortium, Philadel- phia, PA, 23.", |
|
"links": null |
|
}, |
|
"BIBREF61": { |
|
"ref_id": "b61", |
|
"title": "A broad-coverage challenge corpus for sentence understanding through inference", |
|
"authors": [ |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1112--1122", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sen- tence understanding through inference. In Proceed- ings of the 2018 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Volume 1 (Long Papers), pages 1112-1122. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF62": { |
|
"ref_id": "b62", |
|
"title": "Learning and evaluating general linguistic intelligence", |
|
"authors": [ |
|
{ |
|
"first": "Dani", |
|
"middle": [], |
|
"last": "Yogatama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cyprien", |
|
"middle": [], |
|
"last": "De Masson D'autume", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jerome", |
|
"middle": [], |
|
"last": "Connor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Kocisky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Chrzanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lingpeng", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angeliki", |
|
"middle": [], |
|
"last": "Lazaridou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wang", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1901.11373" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dani Yogatama, Cyprien de Masson d'Autume, Jerome Connor, Tomas Kocisky, Mike Chrzanowski, Ling- peng Kong, Angeliki Lazaridou, Wang Ling, Lei Yu, Chris Dyer, et al. 2019. Learning and evalu- ating general linguistic intelligence. arXiv preprint arXiv:1901.11373.", |
|
"links": null |
|
}, |
|
"BIBREF63": { |
|
"ref_id": "b63", |
|
"title": "Conll 2017 shared task: Multilingual parsing from raw text to universal dependencies", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Zeman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Popel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Milan", |
|
"middle": [], |
|
"last": "Straka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Hajic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Filip", |
|
"middle": [], |
|
"last": "Ginter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juhani", |
|
"middle": [], |
|
"last": "Luotolahti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sampo", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Potthast", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francis", |
|
"middle": [], |
|
"last": "Tyers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Badmaeva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Memduh", |
|
"middle": [], |
|
"last": "Gokirmak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Nedoluzhko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Silvie", |
|
"middle": [], |
|
"last": "Cinkova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Hajic Jr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaroslava", |
|
"middle": [], |
|
"last": "Hlavacova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V\u00e1clava", |
|
"middle": [], |
|
"last": "Kettnerov\u00e1", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zdenka", |
|
"middle": [], |
|
"last": "Uresova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jenna", |
|
"middle": [], |
|
"last": "Kanerva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stina", |
|
"middle": [], |
|
"last": "Ojala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Missil\u00e4", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dima", |
|
"middle": [], |
|
"last": "Taji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nizar", |
|
"middle": [], |
|
"last": "Habash", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Herman", |
|
"middle": [], |
|
"last": "Leung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Catherine", |
|
"middle": [], |
|
"last": "De Marneffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manuela", |
|
"middle": [], |
|
"last": "Sanguinetti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Simi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroshi", |
|
"middle": [], |
|
"last": "Kanayama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Valeria", |
|
"middle": [], |
|
"last": "De-Paiva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--19", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Zeman, Martin Popel, Milan Straka, Jan Ha- jic, Joakim Nivre, Filip Ginter, Juhani Luotolahti, Sampo Pyysalo, Slav Petrov, Martin Potthast, Fran- cis Tyers, Elena Badmaeva, Memduh Gokirmak, Anna Nedoluzhko, Silvie Cinkova, Jan Hajic jr., Jaroslava Hlavacova, V\u00e1clava Kettnerov\u00e1, Zdenka Uresova, Jenna Kanerva, Stina Ojala, Anna Mis- sil\u00e4, Christopher D. Manning, Sebastian Schuster, Siva Reddy, Dima Taji, Nizar Habash, Herman Le- ung, Marie-Catherine de Marneffe, Manuela San- guinetti, Maria Simi, Hiroshi Kanayama, Valeria de- Paiva, Kira Droganova, H\u00e9ctor Mart\u00ednez Alonso, \u00c7 agr \u00c7\u00f6ltekin, Umut Sulubacak, Hans Uszkor- eit, Vivien Macketanz, Aljoscha Burchardt, Kim Harris, Katrin Marheinecke, Georg Rehm, Tolga Kayadelen, Mohammed Attia, Ali Elkahky, Zhuoran Yu, Emily Pitler, Saran Lertpradit, Michael Mandl, Jesse Kirchner, Hector Fernandez Alcalde, Jana Str- nadov\u00e1, Esha Banerjee, Ruli Manurung, Antonio Stella, Atsuko Shimada, Sookyoung Kwak, Gustavo Mendonca, Tatiana Lando, Rattima Nitisaroj, and Josie Li. 2017. Conll 2017 shared task: Multilingual parsing from raw text to universal dependencies. In Proceedings of the CoNLL 2017 Shared Task: Multi- lingual Parsing from Raw Text to Universal Depen- dencies, pages 1-19, Vancouver, Canada. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF64": { |
|
"ref_id": "b64", |
|
"title": "Language modeling teaches you more than translation does: Lessons learned through auxiliary syntactic task analysis", |
|
"authors": [ |
|
{ |
|
"first": "Kelly", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "359--361", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kelly Zhang and Samuel Bowman. 2018. Language modeling teaches you more than translation does: Lessons learned through auxiliary syntactic task analysis. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpret- ing Neural Networks for NLP, pages 359-361.", |
|
"links": null |
|
}, |
|
"BIBREF65": { |
|
"ref_id": "b65", |
|
"title": "Aligning books and movies: Towards story-like visual explanations by watching movies and reading books", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Zemel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Urtasun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Torralba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Fidler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "2015 IEEE International Conference on Computer Vision (ICCV)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "19--27", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICCV.2015.11" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y. Zhu, R. Kiros, R. Zemel, R. Salakhutdinov, R. Ur- tasun, A. Torralba, and S. Fidler. 2015. Aligning books and movies: Towards story-like visual expla- nations by watching movies and reading books. In 2015 IEEE International Conference on Computer Vision (ICCV), pages 19-27.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Comparison of the representations from BERT base and various fine-tuned models, when tested on Wikipedia examples. The dependency probing model starts to diverge from BERT Base around layer 5, matching previous results from edge probing. For the MNLI and SQuAD models, the differences from the Base model arise in the top layers of the network.", |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"text": "Figure 3(partial freezing) andFigure 4(truncation) show the effect on task performance.", |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure" |
|
} |
|
} |
|
} |
|
} |