{
"paper_id": "2022",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T16:43:39.460300Z"
},
"title": "Resilience of Named Entity Recognition Models Under Adversarial Attack",
"authors": [
{
"first": "Sudeshna",
"middle": [],
"last": "Das",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Indian Institute of Technology Kharagpur",
"location": {}
},
"email": "sudeshna.das@iitkgp.ac.in"
},
{
"first": "Jiaul",
"middle": [
"H"
],
"last": "Paik",
"suffix": "",
"affiliation": {},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Named entity recognition (NER) is a popular language processing task with wide applications. Progress in NER has been noteworthy, as evidenced by the F1 scores obtained on standard datasets. In practice, however, the end-user uses an NER model on their dataset out-of-the-box, on text that may not be pristine. In this paper we present four modelagnostic adversarial attacks to gauge the resilience of NER models in such scenarios. Our experiments on four state-of-the-art NER methods with five English datasets suggest that the NER models are over-reliant on case information and do not utilise contextual information well. As such, they are highly susceptible to adversarial attacks based on these features.",
"pdf_parse": {
"paper_id": "2022",
"_pdf_hash": "",
"abstract": [
{
"text": "Named entity recognition (NER) is a popular language processing task with wide applications. Progress in NER has been noteworthy, as evidenced by the F1 scores obtained on standard datasets. In practice, however, the end-user uses an NER model on their dataset out-of-the-box, on text that may not be pristine. In this paper we present four modelagnostic adversarial attacks to gauge the resilience of NER models in such scenarios. Our experiments on four state-of-the-art NER methods with five English datasets suggest that the NER models are over-reliant on case information and do not utilise contextual information well. As such, they are highly susceptible to adversarial attacks based on these features.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Named entity recognition (NER) is a popular language processing task that involves identifying and classifying named entities in text (Mayhew et al., 2020) . Progress in NER has been rapid and noteworthy, especially in the current age of deep learning . The general impetus in deep learning-based NER has been to develop models that incorporate context better (Akbik et al., 2018; Devlin et al., 2019; Manning et al., 2014) and are resilient to noise such as inconsistencies in case information (Mayhew et al., 2019; Bodapati et al., 2019; Mayhew et al., 2020) . There has, however, been modest focus on determining the extent to which state-of-the-art NER models succeed in doing so. Identifying the weaknesses of NER models can help drive focused work to ameliorate them and move NER beyond marginal improvements in F1 scores (Stanislawek et al., 2019) .",
"cite_spans": [
{
"start": 134,
"end": 155,
"text": "(Mayhew et al., 2020)",
"ref_id": "BIBREF21"
},
{
"start": 360,
"end": 380,
"text": "(Akbik et al., 2018;",
"ref_id": "BIBREF0"
},
{
"start": 381,
"end": 401,
"text": "Devlin et al., 2019;",
"ref_id": "BIBREF11"
},
{
"start": 402,
"end": 423,
"text": "Manning et al., 2014)",
"ref_id": "BIBREF20"
},
{
"start": 495,
"end": 516,
"text": "(Mayhew et al., 2019;",
"ref_id": "BIBREF22"
},
{
"start": 517,
"end": 539,
"text": "Bodapati et al., 2019;",
"ref_id": "BIBREF6"
},
{
"start": 540,
"end": 560,
"text": "Mayhew et al., 2020)",
"ref_id": "BIBREF21"
},
{
"start": 828,
"end": 854,
"text": "(Stanislawek et al., 2019)",
"ref_id": "BIBREF27"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Adversarial attacks designed for NLP models largely focus on classification tasks (Wallace et al., 2019; Ren et al., 2019; Jia et al., 2019; Wallace et al., 2019; Papernot et al., 2016) . Many existing studies work with vector representations (Ebrahimi et al., 2018; Zhao et al., 2018) , which are not intuitively interpretable by humans. Such methods require white-box access to the models (Ren et al., 2019) . The additional requirement of human intervention to adjudge the quality of adversarial samples generated may also be involved (Alzantot et al., 2018) .",
"cite_spans": [
{
"start": 82,
"end": 104,
"text": "(Wallace et al., 2019;",
"ref_id": "BIBREF30"
},
{
"start": 105,
"end": 122,
"text": "Ren et al., 2019;",
"ref_id": "BIBREF25"
},
{
"start": 123,
"end": 140,
"text": "Jia et al., 2019;",
"ref_id": "BIBREF16"
},
{
"start": 141,
"end": 162,
"text": "Wallace et al., 2019;",
"ref_id": "BIBREF30"
},
{
"start": 163,
"end": 185,
"text": "Papernot et al., 2016)",
"ref_id": "BIBREF24"
},
{
"start": 243,
"end": 266,
"text": "(Ebrahimi et al., 2018;",
"ref_id": "BIBREF12"
},
{
"start": 267,
"end": 285,
"text": "Zhao et al., 2018)",
"ref_id": "BIBREF32"
},
{
"start": 391,
"end": 409,
"text": "(Ren et al., 2019)",
"ref_id": "BIBREF25"
},
{
"start": 538,
"end": 561,
"text": "(Alzantot et al., 2018)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Adversarial NER has broadly seen two types of approaches: (a) adversarial training, and (b) adversarial evaluation. Adversarial training of NER models involves introducing small perturbations in the training data to make models robust (Bekoulis et al., 2018) . Such perturbations are introduced in the text representation level Bai et al., 2020; Huang et al., 2022) . The adversarial evaluation of NER models, on the other hand, involves benchmarking the models on synthetically generated data (Lin et al., 2021; Simoncini and Spanakis, 2021) . We follow the latter line of investigation.",
"cite_spans": [
{
"start": 235,
"end": 258,
"text": "(Bekoulis et al., 2018)",
"ref_id": "BIBREF5"
},
{
"start": 328,
"end": 345,
"text": "Bai et al., 2020;",
"ref_id": "BIBREF3"
},
{
"start": 346,
"end": 365,
"text": "Huang et al., 2022)",
"ref_id": "BIBREF15"
},
{
"start": 494,
"end": 512,
"text": "(Lin et al., 2021;",
"ref_id": "BIBREF19"
},
{
"start": 513,
"end": 542,
"text": "Simoncini and Spanakis, 2021)",
"ref_id": "BIBREF26"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We present four model-agnostic adversarial attacks targeted at NER models. Our task-specific approach allows us to generate natural language adversaries that work with pre-trained models and are easily interpretable by humans. In principle, our work is similar to the label-preserving substitutions explored by Ren et al. (2019) and the wordsubstitution methods explored by Alzantot et al. (2018) , although they do not evaluate their methods on NER. Generating adversarial data for evaluating NER models is explored by Simoncini and Spanakis (2021) using BERT to replace and/or add non-named entity tokens to text. Lin et al. (2021) also use pre-trained BERT to generate contextlevel adversarial attacks to evaluate NER models. In contrast to their work, we use simple rulebased methods for generating adversarial data. Our method has the advantage of not requiring retraining or fine-tuning of pre-trained models.",
"cite_spans": [
{
"start": 311,
"end": 328,
"text": "Ren et al. (2019)",
"ref_id": "BIBREF25"
},
{
"start": 374,
"end": 396,
"text": "Alzantot et al. (2018)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The datasets and models we use are all openly available, aiding reproducibility. 1 Further, our ex- (Nadeau and Sekine, 2007) , for this study.",
"cite_spans": [
{
"start": 100,
"end": 125,
"text": "(Nadeau and Sekine, 2007)",
"ref_id": "BIBREF23"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The CoNLL-2003 (CoNLL) dataset consists of news articles from the Reuters Corpus (Tjong Kim Sang and De Meulder, 2003) . In keeping with the standard evaluation schemes, we report results only on the test split of the dataset.",
"cite_spans": [
{
"start": 92,
"end": 118,
"text": "Sang and De Meulder, 2003)",
"ref_id": "BIBREF29"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "CoNLL-2003",
"sec_num": null
},
{
"text": "WikiGold The WikiGold dataset (WIKI) comprises of manually annotated English Wikipedia articles (Balasuriya et al., 2009) .",
"cite_spans": [
{
"start": 96,
"end": 121,
"text": "(Balasuriya et al., 2009)",
"ref_id": "BIBREF4"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "CoNLL-2003",
"sec_num": null
},
{
"text": "The English dataset (FIRE) from the NER for Indian Languages task at FIRE 2013 comprises of text crawled from Indian websites as well as Wikipedia articles.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "FIRE NER 2013",
"sec_num": null
},
{
"text": "NIST IE-ER 1999 IEER refers to the gold standard NEWSWIRE development test data for the NIST 1999 IE-ER Evaluation available with NLTK (Steven Bird and Klein, 2009). GMB 2.2 The Groningen Meaning Bank 2.2 dataset comprises of public domain texts that include news articles, stories, jokes, and transcripts. NLP tools are used to provide a preliminary annotation which is then updated by a combination of human experts, NLP tools, and crowd-sourcing to yield a silver-standard corpus (Bos et al., 2017) .",
"cite_spans": [
{
"start": 483,
"end": 501,
"text": "(Bos et al., 2017)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "FIRE NER 2013",
"sec_num": null
},
{
"text": "We use four named entity recognizers for our experiments, all of which are open-source. Of these, spaCy is the current state-of-the-art in terms of document processing speed (Choi et al., 2015) and Flair is near the current state-of-the-art. 2 Flair NER The Flair named entity recognizer is based on neural character embeddings. It uses contextual neural string embeddings that are obtained by pre-training on large, unlabelled corpora. Every sentence is represented in the form of string embeddings which are then stacked with pre-computed uncased GloVe embeddings, before being passed through a BiLSTM-CRF architecture that generates labels for each word (Akbik et al., 2018) .",
"cite_spans": [
{
"start": 174,
"end": 193,
"text": "(Choi et al., 2015)",
"ref_id": "BIBREF10"
},
{
"start": 242,
"end": 243,
"text": "2",
"ref_id": null
},
{
"start": 657,
"end": 677,
"text": "(Akbik et al., 2018)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Methods",
"sec_num": "3"
},
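{
"text": "As a point of reference, a minimal sketch of querying the pre-trained Flair tagger in Python is given below; the model identifier and API calls follow Flair's publicly documented quick-start interface and are illustrative rather than a verbatim excerpt of our evaluation scripts.\n\nfrom flair.data import Sentence\nfrom flair.models import SequenceTagger\n\n# Load the pre-trained 4-class English NER tagger and label one sentence.\ntagger = SequenceTagger.load('ner')\nsentence = Sentence('George Washington went to Washington .')\ntagger.predict(sentence)\nprint(sentence.get_spans('ner'))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Methods",
"sec_num": "3"
},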
{
"text": "spaCy NER spaCy's named entity recognizer employs a transition-based entity recognition methodology where state changes are triggered by actions. It uses trigram CNNs with residual connections that transform context-independent vectors into context-sensitive vectors (Honnibal, 2016) .",
"cite_spans": [
{
"start": 267,
"end": 283,
"text": "(Honnibal, 2016)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Methods",
"sec_num": "3"
},
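{
"text": "Analogously, the spaCy recognizer can be queried through its standard pipeline API; the sketch below assumes a pre-trained English pipeline with an 'ner' component (the model name is illustrative) and is not a verbatim excerpt of our evaluation scripts.\n\nimport spacy\n\n# Load a pre-trained English pipeline and read off the predicted entity spans.\nnlp = spacy.load('en_core_web_sm')\ndoc = nlp('Alice moved from London to the United Nations headquarters .')\nprint([(ent.text, ent.label_) for ent in doc.ents])",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Methods",
"sec_num": "3"
},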
{
"text": "CoreNLP NER CoreNLP NER (Manning et al., 2014) is based on linear chain Conditional Random Field (CRF) sequence models of arbitrary order (Finkel et al., 2005) . For our experiments, we use the caseless model that ignores capitalization as well as the Truecase annotator that attempts to rectify incorrect casing, in addition to the default model.",
"cite_spans": [
{
"start": 138,
"end": 159,
"text": "(Finkel et al., 2005)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Methods",
"sec_num": "3"
},
{
"text": "DeepPavlov NER DeepPavlov's named entity recognition model uses the English cased model of BERT with 12 layers, 768 hidden nodes, 12 attention heads, and 110M parameters (Devlin et al., 2019) . The first sub-word representation of each word is passed through a dense layer to generate labels (Burtsev et al., 2018) . ",
"cite_spans": [
{
"start": 170,
"end": 191,
"text": "(Devlin et al., 2019)",
"ref_id": "BIBREF11"
},
{
"start": 292,
"end": 314,
"text": "(Burtsev et al., 2018)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Methods",
"sec_num": "3"
},
{
"text": "In this section we describe the design of two broad types of adversarial attacks on NER models.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Adversarial Attacks",
"sec_num": "4"
},
{
"text": "Case is one of the strongest indicators of named entities in English (Mayhew et al., 2020) and it is well known that case affects the performance of NER models (Mayhew et al., 2019; Bodapati et al., 2019) . We formulate two adversarial attacks that emulate data where (i) case information may be unavailable, such as informal texts, and (ii) case information is unreliable, such as text extracted from PDF or OCR-ed documents.",
"cite_spans": [
{
"start": 69,
"end": 90,
"text": "(Mayhew et al., 2020)",
"ref_id": "BIBREF21"
},
{
"start": 160,
"end": 181,
"text": "(Mayhew et al., 2019;",
"ref_id": "BIBREF22"
},
{
"start": 182,
"end": 204,
"text": "Bodapati et al., 2019)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Case-based Adversarial Attacks",
"sec_num": "4.1"
},
{
"text": "In case ablation, we drop the case information while keeping the rest of the text intact. The caseablated named entities attempt to fool the NER models into misclassifying them as non-entities. This allows us to quantify what percentage of the correctly identified named entities rely completely on case information.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Case Ablation",
"sec_num": "4.1.1"
},
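{
"text": "A minimal Python sketch of case ablation is shown below; it assumes the data is held as a list of token strings and is an illustration of the attack rather than our exact implementation.\n\ndef ablate_case(tokens):\n    # Drop all case information while leaving the tokens otherwise intact.\n    return [token.lower() for token in tokens]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Case Ablation",
"sec_num": "4.1.1"
},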
{
"text": "In this setup, we randomly capitalise N percent of the tokens in each dataset, where N is the percentage of actual named entity tokens in the corresponding original text. The randomly capitalised tokens attempt to fool the model into marking them as named entities. We choose N rather than an arbitrary value in order to maintain the distribution of capitalised and lowercase tokens in the datasets.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Case Aberration",
"sec_num": "4.1.2"
},
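{
"text": "A minimal Python sketch of case aberration is shown below; it assumes tokens and their gold IO tags are held as parallel lists, and it is an illustration of the attack rather than our exact implementation.\n\nimport random\n\ndef aberrate_case(tokens, tags):\n    # Capitalise as many randomly chosen tokens as there are entity tokens,\n    # so the proportion of capitalised tokens in the dataset is preserved.\n    n = sum(1 for tag in tags if tag != 'O')\n    chosen = set(random.sample(range(len(tokens)), n))\n    return [t.capitalize() if i in chosen else t for i, t in enumerate(tokens)]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Case Aberration",
"sec_num": "4.1.2"
},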
{
"text": "The surrounding text of a named entity is arguably the most useful feature in identifying named entities. All the NER models we evaluate attempt to capture context to leverage this information. We formulate two adversarial attacks that attempt to determine how well such information is captured by these models.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Context-based Adversarial Attacks",
"sec_num": "4.2"
},
{
"text": "We create local perturbations for named entities. That is, we change the immediately surrounding text of the named entities while retaining syntactic structure and a semblance of semantics. To achieve this, we replace named entities of each class by named entities of the other two classes, with an equal probability. The local context of a named entity attempts to fool the NER model into classifying it incorrectly. This attack is similar in nature to the data augmentation procedure used by Lin et al. (2021). However, they restrict named entity substitutions within the same entity class. Since we carry out inter-class entity substitutions, we posit that our method is better able to detect when NER models rely on memorising named entity tokens.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Context Perturbation",
"sec_num": "4.2.1"
},
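{
"text": "A minimal Python sketch of context perturbation is shown below; it assumes entity mentions have already been extracted into per-class pools of surface forms, and it is an illustration of the attack rather than our exact implementation.\n\nimport random\n\ndef perturb(mentions, pools):\n    # mentions: list of (surface_form, enamex_class) in document order.\n    # pools: dict mapping each enamex class to a list of surface forms.\n    out = []\n    for surface, cls in mentions:\n        other = random.choice([c for c in pools if c != cls])\n        # The slot keeps its original, context-determined gold label.\n        out.append((random.choice(pools[other]), cls))\n    return out",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Context Perturbation",
"sec_num": "4.2.1"
},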
{
"text": "We alter the context of named entities on a global scale. To achieve this, we randomly select named entities with equal probability and place them in random locations in the text. In almost all cases, the text becomes grammatically incorrect, as is illustrated in Table 1 . Thus, neither semantics nor syntactic rules are maintained, effectively altering the global contextual frame of named entities. In this case, it is desirable for models to misclassify named entities. That is, we consider a model to be better if it is susceptible to this attack. This is based on our hypothesis that a model that captures context better should perform worse when the context is meaningless.",
"cite_spans": [],
"ref_spans": [
{
"start": 264,
"end": 271,
"text": "Table 1",
"ref_id": "TABREF0"
}
],
"eq_spans": [],
"section": "Context Alteration",
"sec_num": "4.2.2"
},
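{
"text": "A minimal Python sketch of context alteration is shown below; it assumes tokens and gold IO tags are held as parallel lists, and it is an illustration of the attack rather than our exact implementation.\n\nimport random\n\ndef alter(tokens, tags):\n    # Pull out the entity tokens and re-insert them at random positions,\n    # destroying the global contextual frame of the named entities.\n    entity = [(t, g) for t, g in zip(tokens, tags) if g != 'O']\n    rest = [(t, g) for t, g in zip(tokens, tags) if g == 'O']\n    for pair in entity:\n        rest.insert(random.randrange(len(rest) + 1), pair)\n    return [t for t, g in rest], [g for t, g in rest]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Context Alteration",
"sec_num": "4.2.2"
},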
{
"text": "We follow the CoNLL-2003 Shared Task guidelines to report the F1 scores (Tjong Kim Sang and De Meulder, 2003) . Compatible classes are clubbed with the closest enamex class (such as, GPE (Geo-political entity) is clubbed with LOCATION for spaCy, BERT, and the GMB dataset). The class labels present in different datasets/produced by different models do not always have a close one-to-one correspondence to the class labels in other datasets/produced by other models. Thus, non-enamex entities are considered to be non-entities to provide a fair comparison across datasets and models. NER models and datasets also differ in their tagging schemes. Since it is not possible to map IO tags to IOB or IOBES, and IOB tags to IOBES (Cho et al., 2013) , we map all tags into the IO scheme. The mapping of compatible entity classes and tagging schemes causes our evaluation results to differ from the officially reported scores of these NER models. Table 2 shows the F1 scores of the models on the original dataset. This gives us the benchmark against which we compare the performance for the different data variants. ",
"cite_spans": [
{
"start": 83,
"end": 109,
"text": "Sang and De Meulder, 2003)",
"ref_id": "BIBREF29"
},
{
"start": 725,
"end": 743,
"text": "(Cho et al., 2013)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [
{
"start": 940,
"end": 947,
"text": "Table 2",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "5"
},
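{
"text": "The tag normalisation used in the evaluation can be summarised by the small Python helper below; it is a sketch of the IO mapping described above, assuming tags are strings such as 'B-PER', 'I-LOC', or 'O', rather than a verbatim excerpt of our evaluation code.\n\ndef to_io(tag):\n    # Collapse IOB / IOBES prefixes (B-, I-, E-, S-) into the IO scheme.\n    return 'O' if tag == 'O' else 'I-' + tag.split('-', 1)[1]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "5"
},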
{
"text": "We observe significantly large performance drops for every model with respect to model performance on the original datasets. This is unsurprising, as case information is an important indicator of named entities.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Case ablation",
"sec_num": "6.1"
},
{
"text": "If we consider the CoreNLP-c scores as the upper bound (since this model is trained on caseless data and hence, reflects the ability of NER models to work on caseless data), we still notice large drops in F1 scores for the other models. This reflects the tendency of NER models to over-rely on case information. Among the cased models, we find BERT to be the better performer with Flair trailing as a close competitor. This is an interesting finding as it suggests that cased BERT is more resilient to case-based adversarial attacks than Flair, which uses uncased GloVe embeddings.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Case ablation",
"sec_num": "6.1"
},
{
"text": "We observe large drops in performance for the case aberration attack. The performance for CoreNLP-t is worse than that of CoreNLP-c, which suggests that truecasing is not as effective as caseless training. Among the case-sensitive models, we find Flair outperforming other models. The performance drop for case aberration is slightly less than that for case ablation. ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Case aberration",
"sec_num": "6.2"
},
{
"text": "Despite including mechanisms to incorporate contextual information, NER models show large performance drops under context perturbation attacks. Since an NER model is highly likely to have come across \"London\" as a LOCATION and \"Alice\" as a PERSON during training, it predicts them as such, ignoring the local context in which they appear. Despite large performance drops in general, Flair outperforms other models for all five datasets. This suggests that Flair captures local context better, likely due to the use of character embeddings. ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Context perturbation",
"sec_num": "6.3"
},
{
"text": "We note here that unlike the previous experiments, it is desirable to have higher percentage drops in performance for the context alteration attacks. 3 All the models show drops in performance. This hints at NER models having a tendency to learn the names themselves during training, rather than relying on the context in which the names appear. The magnitude of drops in performance is generally less than that observed for context perturbation, which suggests that NER models capture the local context of named entities better than their global context. Flair shows the largest performance drops, closely trailed by BERT.",
"cite_spans": [
{
"start": 150,
"end": 151,
"text": "3",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Context alteration",
"sec_num": "6.4"
},
{
"text": "The adversarial evaluation of NLP models rely either on human-generated adversaries (Kaushik et al., 2019) or automated adversary generation with human-in-the-loop (Alzantot et al., 2018 ). However, it is possible to do away with human intervention for generating adversarial samples for the task of NER, as we demonstrate. Further, unlike existing work, our approach for adversarial evaluation does not require any re-training or finetuning of models for adversarial data creation.",
"cite_spans": [
{
"start": 84,
"end": 106,
"text": "(Kaushik et al., 2019)",
"ref_id": "BIBREF17"
},
{
"start": 164,
"end": 186,
"text": "(Alzantot et al., 2018",
"ref_id": "BIBREF1"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "7"
},
{
"text": "The generalizability of NER models can also be evaluated with the proposed approaches. In particular, context perturbation can be used as an alternative to studying the effect of named entities that have not been seen during training (Augenstein et al., 2017) with the same label.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "7"
},
{
"text": "In this paper, we present an adversarial evaluation of four popular named-entity recognizers on five English datasets. The four model-agnostic adversarial attacks we present do not require white-box access to pre-trained NER models. Our experiments show that the popular NER models are overreliant on the case information and under-utilise the contextual information. Since NER is a prerequisite for a large number of NLP tasks, further work for improvement in these directions is warranted.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions",
"sec_num": "8"
},
{
"text": "https://github.com/das-sudeshna/adversarial-ner",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "The F1 score of the current state-of-the-art model is 0.935. (Flair's F1 score is 0.931.) Since a pre-trained model",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Lower F1 scores are also desirable. However, low F1 scores can also be caused due to a model being poor generally and not specifically due to the inability to capture global context. Thus, we cannot draw concrete conclusions from the absolute F1 scores.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Contextual string embeddings for sequence labeling",
"authors": [
{
"first": "Alan",
"middle": [],
"last": "Akbik",
"suffix": ""
},
{
"first": "Duncan",
"middle": [],
"last": "Blythe",
"suffix": ""
},
{
"first": "Roland",
"middle": [],
"last": "Vollgraf",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 27th International Conference on Computational Linguistics",
"volume": "",
"issue": "",
"pages": "1638--1649",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Alan Akbik, Duncan Blythe, and Roland Vollgraf. 2018. Contextual string embeddings for sequence labeling. In Proceedings of the 27th International Conference on Computational Linguistics, pages 1638-1649.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Generating natural language adversarial examples",
"authors": [
{
"first": "Moustafa",
"middle": [],
"last": "Alzantot",
"suffix": ""
},
{
"first": "Yash",
"middle": [],
"last": "Sharma",
"suffix": ""
},
{
"first": "Ahmed",
"middle": [],
"last": "Elgohary",
"suffix": ""
},
{
"first": "Bo-Jhang",
"middle": [],
"last": "Ho",
"suffix": ""
},
{
"first": "Mani",
"middle": [],
"last": "Srivastava",
"suffix": ""
},
{
"first": "Kai-Wei",
"middle": [],
"last": "Chang",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "2890--2896",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Moustafa Alzantot, Yash Sharma, Ahmed Elgohary, Bo-Jhang Ho, Mani Srivastava, and Kai-Wei Chang. 2018. Generating natural language adversarial ex- amples. In Proceedings of the Conference on Em- pirical Methods in Natural Language Processing, pages 2890-2896.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Generalisation in named entity recognition: A quantitative analysis",
"authors": [
{
"first": "Isabelle",
"middle": [],
"last": "Augenstein",
"suffix": ""
},
{
"first": "Leon",
"middle": [],
"last": "Derczynski",
"suffix": ""
},
{
"first": "Kalina",
"middle": [],
"last": "Bontcheva",
"suffix": ""
}
],
"year": 2017,
"venue": "Computer Speech & Language",
"volume": "44",
"issue": "",
"pages": "61--83",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Isabelle Augenstein, Leon Derczynski, and Kalina Bontcheva. 2017. Generalisation in named entity recognition: A quantitative analysis. Computer Speech & Language, 44:61-83.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Adversarial named entity recognition with pos label embedding",
"authors": [
{
"first": "Yuxuan",
"middle": [],
"last": "Bai",
"suffix": ""
},
{
"first": "Yu",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Bin",
"middle": [],
"last": "Xia",
"suffix": ""
},
{
"first": "Yun",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Ziye",
"middle": [],
"last": "Zhu",
"suffix": ""
}
],
"year": 2020,
"venue": "2020 International Joint Conference on Neural Networks",
"volume": "",
"issue": "",
"pages": "1--8",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yuxuan Bai, Yu Wang, Bin Xia, Yun Li, and Ziye Zhu. 2020. Adversarial named entity recognition with pos label embedding. In 2020 International Joint Conference on Neural Networks, pages 1-8. IEEE.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Named entity recognition in wikipedia",
"authors": [
{
"first": "Dominic",
"middle": [],
"last": "Balasuriya",
"suffix": ""
},
{
"first": "Nicky",
"middle": [],
"last": "Ringland",
"suffix": ""
},
{
"first": "Joel",
"middle": [],
"last": "Nothman",
"suffix": ""
},
{
"first": "Tara",
"middle": [],
"last": "Murphy",
"suffix": ""
},
{
"first": "James R",
"middle": [],
"last": "Curran",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the Workshop on The People's Web Meets NLP",
"volume": "",
"issue": "",
"pages": "10--18",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dominic Balasuriya, Nicky Ringland, Joel Nothman, Tara Murphy, and James R Curran. 2009. Named entity recognition in wikipedia. In Proceedings of the Workshop on The People's Web Meets NLP, pages 10-18.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Adversarial training for multi-context joint entity and relation extraction",
"authors": [
{
"first": "Giannis",
"middle": [],
"last": "Bekoulis",
"suffix": ""
},
{
"first": "Johannes",
"middle": [],
"last": "Deleu",
"suffix": ""
},
{
"first": "Thomas",
"middle": [],
"last": "Demeester",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Develder",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "2830--2836",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Giannis Bekoulis, Johannes Deleu, Thomas Demeester, and Chris Develder. 2018. Adversarial training for multi-context joint entity and relation extraction. In Proceedings of the Conference on Empirical Meth- ods in Natural Language Processing, pages 2830- 2836.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Robustness to capitalization errors in named entity recognition",
"authors": [
{
"first": "Sravan",
"middle": [],
"last": "Bodapati",
"suffix": ""
},
{
"first": "Hyokun",
"middle": [],
"last": "Yun",
"suffix": ""
},
{
"first": "Yaser",
"middle": [],
"last": "Al-Onaizan",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 5th Workshop on Noisy User-generated Text",
"volume": "",
"issue": "",
"pages": "237--242",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sravan Bodapati, Hyokun Yun, and Yaser Al-Onaizan. 2019. Robustness to capitalization errors in named entity recognition. In Proceedings of the 5th Work- shop on Noisy User-generated Text, pages 237-242.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "The groningen meaning bank",
"authors": [
{
"first": "Johan",
"middle": [],
"last": "Bos",
"suffix": ""
},
{
"first": "Valerio",
"middle": [],
"last": "Basile",
"suffix": ""
},
{
"first": "Kilian",
"middle": [],
"last": "Evang",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Noortje",
"suffix": ""
},
{
"first": "Johannes",
"middle": [],
"last": "Venhuizen",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Bjerva",
"suffix": ""
}
],
"year": 2017,
"venue": "Handbook of linguistic annotation",
"volume": "",
"issue": "",
"pages": "463--496",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Johan Bos, Valerio Basile, Kilian Evang, Noortje J Venhuizen, and Johannes Bjerva. 2017. The gronin- gen meaning bank. In Handbook of linguistic anno- tation, pages 463-496.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Deeppavlov: Open-source library for dialogue systems",
"authors": [
{
"first": "Mikhail",
"middle": [],
"last": "Burtsev",
"suffix": ""
},
{
"first": "Alexander",
"middle": [],
"last": "Seliverstov",
"suffix": ""
},
{
"first": "Rafael",
"middle": [],
"last": "Airapetyan",
"suffix": ""
},
{
"first": "Mikhail",
"middle": [],
"last": "Arkhipov",
"suffix": ""
},
{
"first": "Dilyara",
"middle": [],
"last": "Baymurzina",
"suffix": ""
},
{
"first": "Nickolay",
"middle": [],
"last": "Bushkov",
"suffix": ""
},
{
"first": "Olga",
"middle": [],
"last": "Gureenkova",
"suffix": ""
},
{
"first": "Taras",
"middle": [],
"last": "Khakhulin",
"suffix": ""
},
{
"first": "Yurii",
"middle": [],
"last": "Kuratov",
"suffix": ""
},
{
"first": "Denis",
"middle": [],
"last": "Kuznetsov",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of ACL 2018",
"volume": "",
"issue": "",
"pages": "122--127",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mikhail Burtsev, Alexander Seliverstov, Rafael Airapetyan, Mikhail Arkhipov, Dilyara Baymurz- ina, Nickolay Bushkov, Olga Gureenkova, Taras Khakhulin, Yurii Kuratov, Denis Kuznetsov, et al. 2018. Deeppavlov: Open-source library for dia- logue systems. In Proceedings of ACL 2018, pages 122-127.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Named entity recognition with multiple segment representations",
"authors": [
{
"first": "Han-Cheol",
"middle": [],
"last": "Cho",
"suffix": ""
}
],
"year": 2013,
"venue": "Information Processing & Management",
"volume": "49",
"issue": "4",
"pages": "954--965",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Han-Cheol Cho et al. 2013. Named entity recognition with multiple segment representations. Information Processing & Management, 49(4):954-965.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "It depends: Dependency parser comparison using a web-based evaluation tool",
"authors": [
{
"first": "D",
"middle": [],
"last": "Jinho",
"suffix": ""
},
{
"first": "Joel",
"middle": [],
"last": "Choi",
"suffix": ""
},
{
"first": "Amanda",
"middle": [],
"last": "Tetreault",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Stent",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing",
"volume": "",
"issue": "",
"pages": "387--396",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jinho D Choi, Joel Tetreault, and Amanda Stent. 2015. It depends: Dependency parser comparison using a web-based evaluation tool. In Proceedings of the 53rd Annual Meeting of the Association for Compu- tational Linguistics and the 7th International Joint Conference on Natural Language Processing, pages 387-396.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding",
"authors": [
{
"first": "Jacob",
"middle": [],
"last": "Devlin",
"suffix": ""
},
{
"first": "Ming-Wei",
"middle": [],
"last": "Chang",
"suffix": ""
},
{
"first": "Kenton",
"middle": [],
"last": "Lee",
"suffix": ""
},
{
"first": "Kristina",
"middle": [],
"last": "Toutanova",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
"volume": "",
"issue": "",
"pages": "4171--4186",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understand- ing. In Proceedings of the Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 4171-4186.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Hotflip: White-box adversarial examples for text classification",
"authors": [
{
"first": "Javid",
"middle": [],
"last": "Ebrahimi",
"suffix": ""
},
{
"first": "Anyi",
"middle": [],
"last": "Rao",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Lowd",
"suffix": ""
},
{
"first": "Dejing",
"middle": [],
"last": "Dou",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "31--36",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Javid Ebrahimi, Anyi Rao, Daniel Lowd, and Dejing Dou. 2018. Hotflip: White-box adversarial exam- ples for text classification. In Proceedings of the 56th Annual Meeting of the Association for Compu- tational Linguistics, pages 31-36.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Incorporating non-local information into information extraction systems by gibbs sampling",
"authors": [
{
"first": "Jenny",
"middle": [
"Rose"
],
"last": "Finkel",
"suffix": ""
},
{
"first": "Trond",
"middle": [],
"last": "Grenager",
"suffix": ""
},
{
"first": "Christopher D",
"middle": [],
"last": "Manning",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of the 43rd Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "363--370",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jenny Rose Finkel, Trond Grenager, and Christopher D Manning. 2005. Incorporating non-local informa- tion into information extraction systems by gibbs sampling. In Proceedings of the 43rd Annual Meet- ing of the Association for Computational Linguis- tics, pages 363-370.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Extract-select: A span selection framework for nested named entity recognition with generative adversarial training",
"authors": [
{
"first": "Peixin",
"middle": [],
"last": "Huang",
"suffix": ""
},
{
"first": "Xiang",
"middle": [],
"last": "Zhao",
"suffix": ""
},
{
"first": "Minghao",
"middle": [],
"last": "Hu",
"suffix": ""
},
{
"first": "Yang",
"middle": [],
"last": "Fang",
"suffix": ""
},
{
"first": "Xinyi",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Weidong",
"middle": [],
"last": "Xiao",
"suffix": ""
}
],
"year": 2022,
"venue": "Findings of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "85--96",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Peixin Huang, Xiang Zhao, Minghao Hu, Yang Fang, Xinyi Li, and Weidong Xiao. 2022. Extract-select: A span selection framework for nested named entity recognition with generative adversarial training. In Findings of the Association for Computational Lin- guistics, pages 85-96.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Certified robustness to adversarial word substitutions",
"authors": [
{
"first": "Robin",
"middle": [],
"last": "Jia",
"suffix": ""
},
{
"first": "Aditi",
"middle": [],
"last": "Raghunathan",
"suffix": ""
},
{
"first": "Kerem",
"middle": [],
"last": "G\u00f6ksel",
"suffix": ""
},
{
"first": "Percy",
"middle": [],
"last": "Liang",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Robin Jia, Aditi Raghunathan, Kerem G\u00f6ksel, and Percy Liang. 2019. Certified robustness to adversar- ial word substitutions. In Proceedings of the Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Learning the difference that makes a difference with counterfactually-augmented data",
"authors": [
{
"first": "Divyansh",
"middle": [],
"last": "Kaushik",
"suffix": ""
},
{
"first": "Eduard",
"middle": [],
"last": "Hovy",
"suffix": ""
},
{
"first": "Zachary",
"middle": [],
"last": "Lipton",
"suffix": ""
}
],
"year": 2019,
"venue": "International Conference on Learning Representations",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Divyansh Kaushik, Eduard Hovy, and Zachary Lipton. 2019. Learning the difference that makes a dif- ference with counterfactually-augmented data. In International Conference on Learning Representa- tions.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "A survey on deep learning for named entity recognition",
"authors": [
{
"first": "Jing",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Aixin",
"middle": [],
"last": "Sun",
"suffix": ""
},
{
"first": "Jianglei",
"middle": [],
"last": "Han",
"suffix": ""
},
{
"first": "Chenliang",
"middle": [],
"last": "Li",
"suffix": ""
}
],
"year": 2020,
"venue": "IEEE Transactions on Knowledge and Data Engineering",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jing Li, Aixin Sun, Jianglei Han, and Chenliang Li. 2020. A survey on deep learning for named entity recognition. IEEE Transactions on Knowledge and Data Engineering.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Rockner: A simple method to create adversarial examples for evaluating the robustness of named entity recognition models",
"authors": [
{
"first": "Wenyang",
"middle": [],
"last": "Bill Yuchen Lin",
"suffix": ""
},
{
"first": "Jun",
"middle": [],
"last": "Gao",
"suffix": ""
},
{
"first": "Ryan",
"middle": [],
"last": "Yan",
"suffix": ""
},
{
"first": "Xiang",
"middle": [],
"last": "Moreno",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Ren",
"suffix": ""
}
],
"year": 2021,
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "3728--3737",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Bill Yuchen Lin, Wenyang Gao, Jun Yan, Ryan Moreno, and Xiang Ren. 2021. Rockner: A simple method to create adversarial examples for evaluat- ing the robustness of named entity recognition mod- els. In Proceedings of the Conference on Empiri- cal Methods in Natural Language Processing, pages 3728-3737.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "The stanford corenlp natural language processing toolkit",
"authors": [
{
"first": "D",
"middle": [],
"last": "Christopher",
"suffix": ""
},
{
"first": "Mihai",
"middle": [],
"last": "Manning",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Surdeanu",
"suffix": ""
},
{
"first": "Jenny",
"middle": [
"Rose"
],
"last": "Bauer",
"suffix": ""
},
{
"first": "Steven",
"middle": [],
"last": "Finkel",
"suffix": ""
},
{
"first": "David",
"middle": [],
"last": "Bethard",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Mc-Closky",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of 52nd annual meeting of the association for computational linguistics",
"volume": "",
"issue": "",
"pages": "55--60",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Christopher D Manning, Mihai Surdeanu, John Bauer, Jenny Rose Finkel, Steven Bethard, and David Mc- Closky. 2014. The stanford corenlp natural language processing toolkit. In Proceedings of 52nd annual meeting of the association for computational lin- guistics, pages 55-60.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Robust named entity recognition with truecasing pretraining",
"authors": [
{
"first": "Stephen",
"middle": [],
"last": "Mayhew",
"suffix": ""
},
{
"first": "Gupta",
"middle": [],
"last": "Nitish",
"suffix": ""
},
{
"first": "Dan",
"middle": [],
"last": "Roth",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence",
"volume": "34",
"issue": "",
"pages": "8480--8487",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Stephen Mayhew, Gupta Nitish, and Dan Roth. 2020. Robust named entity recognition with truecasing pretraining. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 8480- 8487.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "ner and pos when nothing is capitalized",
"authors": [
{
"first": "Stephen",
"middle": [],
"last": "Mayhew",
"suffix": ""
},
{
"first": "Tatiana",
"middle": [],
"last": "Tsygankova",
"suffix": ""
},
{
"first": "Dan",
"middle": [],
"last": "Roth",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing",
"volume": "",
"issue": "",
"pages": "6257--6262",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Stephen Mayhew, Tatiana Tsygankova, and Dan Roth. 2019. ner and pos when nothing is capitalized. In Proceedings of the Conference on Empirical Meth- ods in Natural Language Processing and the 9th In- ternational Joint Conference on Natural Language Processing, pages 6257-6262.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "A survey of named entity recognition and classification",
"authors": [
{
"first": "David",
"middle": [],
"last": "Nadeau",
"suffix": ""
},
{
"first": "Satoshi",
"middle": [],
"last": "Sekine",
"suffix": ""
}
],
"year": 2007,
"venue": "Lingvisticae Investigationes",
"volume": "30",
"issue": "",
"pages": "3--26",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "David Nadeau and Satoshi Sekine. 2007. A sur- vey of named entity recognition and classification. Lingvisticae Investigationes, 30(1):3-26.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "Crafting adversarial input sequences for recurrent neural networks",
"authors": [
{
"first": "Nicolas",
"middle": [],
"last": "Papernot",
"suffix": ""
},
{
"first": "Patrick",
"middle": [],
"last": "Mcdaniel",
"suffix": ""
},
{
"first": "Ananthram",
"middle": [],
"last": "Swami",
"suffix": ""
},
{
"first": "Richard",
"middle": [],
"last": "Harang",
"suffix": ""
}
],
"year": 2016,
"venue": "IEEE Military Communications Conference",
"volume": "",
"issue": "",
"pages": "49--54",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Nicolas Papernot, Patrick McDaniel, Ananthram Swami, and Richard Harang. 2016. Crafting ad- versarial input sequences for recurrent neural net- works. In IEEE Military Communications Confer- ence, pages 49-54. IEEE.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Generating natural language adversarial examples through probability weighted word saliency",
"authors": [
{
"first": "Yihe",
"middle": [],
"last": "Shuhuai Ren",
"suffix": ""
},
{
"first": "Kun",
"middle": [],
"last": "Deng",
"suffix": ""
},
{
"first": "Wanxiang",
"middle": [],
"last": "He",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Che",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "1085--1097",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Shuhuai Ren, Yihe Deng, Kun He, and Wanxiang Che. 2019. Generating natural language adversarial ex- amples through probability weighted word saliency. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1085-1097.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "Seqattack: On adversarial attacks for named entity recognition",
"authors": [
{
"first": "Walter",
"middle": [],
"last": "Simoncini",
"suffix": ""
},
{
"first": "Gerasimos",
"middle": [],
"last": "Spanakis",
"suffix": ""
}
],
"year": 2021,
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "308--318",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Walter Simoncini and Gerasimos Spanakis. 2021. Se- qattack: On adversarial attacks for named entity recognition. In Proceedings of the Conference on Empirical Methods in Natural Language Process- ing, pages 308-318.",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "Named entity recognition -is there a glass ceiling?",
"authors": [
{
"first": "Tomasz",
"middle": [],
"last": "Stanislawek",
"suffix": ""
},
{
"first": "Anna",
"middle": [],
"last": "Wr\u00f3blewska",
"suffix": ""
},
{
"first": "Alicja",
"middle": [],
"last": "W\u00f3jcicka",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Ziembicki",
"suffix": ""
},
{
"first": "Przemyslaw",
"middle": [],
"last": "Biecek",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 23rd Conference on Computational Natural Language Learning",
"volume": "",
"issue": "",
"pages": "624--633",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tomasz Stanislawek, Anna Wr\u00f3blewska, Alicja W\u00f3j- cicka, Daniel Ziembicki, and Przemyslaw Biecek. 2019. Named entity recognition -is there a glass ceiling? In Proceedings of the 23rd Conference on Computational Natural Language Learning, pages 624-633.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "Natural Language Processing with Python",
"authors": [
{
"first": "Edward",
"middle": [],
"last": "Loper",
"suffix": ""
},
{
"first": "Steven",
"middle": [],
"last": "Bird",
"suffix": ""
},
{
"first": "Ewan",
"middle": [],
"last": "Klein",
"suffix": ""
}
],
"year": 2009,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Edward Loper Steven Bird and Ewan Klein. 2009. Nat- ural Language Processing with Python.",
"links": null
},
"BIBREF29": {
"ref_id": "b29",
"title": "Introduction to the conll-2003 shared task: language-independent named entity recognition",
"authors": [
{
"first": "Erik F Tjong Kim",
"middle": [],
"last": "Sang",
"suffix": ""
},
{
"first": "Fien",
"middle": [],
"last": "De Meulder",
"suffix": ""
}
],
"year": 2003,
"venue": "Proceedings of the 7th conference on Natural language learning at HLT-NAACL 2003",
"volume": "",
"issue": "",
"pages": "142--147",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Erik F Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the conll-2003 shared task: language-independent named entity recognition. In Proceedings of the 7th conference on Natural lan- guage learning at HLT-NAACL 2003, pages 142- 147.",
"links": null
},
"BIBREF30": {
"ref_id": "b30",
"title": "Universal adversarial triggers for attacking and analyzing nlp",
"authors": [
{
"first": "Eric",
"middle": [],
"last": "Wallace",
"suffix": ""
},
{
"first": "Shi",
"middle": [],
"last": "Feng",
"suffix": ""
},
{
"first": "Nikhil",
"middle": [],
"last": "Kandpal",
"suffix": ""
},
{
"first": "Matt",
"middle": [],
"last": "Gardner",
"suffix": ""
},
{
"first": "Sameer",
"middle": [],
"last": "Singh",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing",
"volume": "",
"issue": "",
"pages": "2153--2162",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Eric Wallace, Shi Feng, Nikhil Kandpal, Matt Gardner, and Sameer Singh. 2019. Universal adversarial trig- gers for attacking and analyzing nlp. In Proceedings of the Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing, pages 2153-2162.",
"links": null
},
"BIBREF31": {
"ref_id": "b31",
"title": "Astral: adversarial trained lstm-cnn for named entity recognition. Knowledge-Based Systems",
"authors": [
{
"first": "Jiuniu",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Wenjia",
"middle": [],
"last": "Xu",
"suffix": ""
},
{
"first": "Xingyu",
"middle": [],
"last": "Fu",
"suffix": ""
},
{
"first": "Guangluan",
"middle": [],
"last": "Xu",
"suffix": ""
},
{
"first": "Yirong",
"middle": [],
"last": "Wu",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "197",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jiuniu Wang, Wenjia Xu, Xingyu Fu, Guangluan Xu, and Yirong Wu. 2020. Astral: adversarial trained lstm-cnn for named entity recognition. Knowledge- Based Systems, 197:105842.",
"links": null
},
"BIBREF32": {
"ref_id": "b32",
"title": "Generating natural adversarial examples",
"authors": [
{
"first": "Zhengli",
"middle": [],
"last": "Zhao",
"suffix": ""
},
{
"first": "Dheeru",
"middle": [],
"last": "Dua",
"suffix": ""
},
{
"first": "Sameer",
"middle": [],
"last": "Singh",
"suffix": ""
}
],
"year": 2018,
"venue": "6th International Conference on Learning Representations",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Zhengli Zhao, Dheeru Dua, and Sameer Singh. 2018. Generating natural adversarial examples. In 6th International Conference on Learning Representa- tions.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"text": "Dataset variants. Class* denotes named entities that should desirably be misclassified from their context.",
"type_str": "figure",
"uris": null,
"num": null
},
"TABREF0": {
"content": "<table><tr><td>periments do not require white-box access to the</td></tr><tr><td>models.</td></tr><tr><td>2 Data</td></tr><tr><td>We use five openly available general do-</td></tr><tr><td>main datasets that contain the enamex</td></tr><tr><td>classes (LOCATION, PERSON, &amp; ORGANI-</td></tr><tr><td>ZATION)</td></tr></table>",
"num": null,
"type_str": "table",
"text": "Data description: frequency of named entities.",
"html": null
},
"TABREF1": {
"content": "<table/>",
"num": null,
"type_str": "table",
"text": "F1 scores on original datasets.",
"html": null
},
"TABREF2": {
"content": "<table/>",
"num": null,
"type_str": "table",
"text": "F1 scores on case ablated datasets. High F1 score and low percentage drops are desirable.",
"html": null
},
"TABREF3": {
"content": "<table/>",
"num": null,
"type_str": "table",
"text": "F1 scores on case aberrated datasets. High F1 score and low percentage drops are desirable.",
"html": null
},
"TABREF4": {
"content": "<table/>",
"num": null,
"type_str": "table",
"text": "F1 scores on context perturbed datasets. High F1 score and low percentage drops are desirable.",
"html": null
},
"TABREF5": {
"content": "<table/>",
"num": null,
"type_str": "table",
"text": "F1 scores on context altered datasets. High percentage drops are desirable.",
"html": null
}
}
}
}