|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:07:54.145677Z" |
|
}, |
|
"title": "Semi-Automated Labeling of Requirement Datasets for Relation Extraction", |
|
"authors": [ |
|
{ |
|
"first": "Jeremias", |
|
"middle": [], |
|
"last": "Bohn", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Technical University of Munich", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "jeremias.bohn@tum.de" |
|
}, |
|
{ |
|
"first": "Jannik", |
|
"middle": [], |
|
"last": "Fischbach", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "jannik.fischbach@qualicen.de" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Schmitt", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "martin@cis.lmu.de" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Vogelsang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Cologne", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "vogelsang@cis.uni-koeln.de" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Creating datasets manually by human annotators is a laborious task that can lead to biased and inhomogeneous labels. We propose a flexible, semi-automatic framework for labeling data for relation extraction. Furthermore, we provide a dataset of preprocessed sentences from the requirements engineering domain, including a set of automatically created as well as hand-crafted labels. In our case study, we compare the human and automatic labels and show that there is a substantial overlap between both annotations.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Creating datasets manually by human annotators is a laborious task that can lead to biased and inhomogeneous labels. We propose a flexible, semi-automatic framework for labeling data for relation extraction. Furthermore, we provide a dataset of preprocessed sentences from the requirements engineering domain, including a set of automatically created as well as hand-crafted labels. In our case study, we compare the human and automatic labels and show that there is a substantial overlap between both annotations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "While recent advances in Natural Language Processing have yielded high-quality language models such as BERT (Devlin et al., 2019) , GPT-3 (Brown et al., 2020) and ELECTRA (Clark et al., 2020) which are able to continue sentences, fill in masked words and correctly parse human language, using these models for most use-case scenarios still requires them to be trained on a down-stream task using labeled data. For some tasks, e.g. sentiment analysis of reviews, creating datasets is relatively easy as large databases with annotations already exist (such as the IMDb movie review dataset (Maas et al., 2011) ). However, training a model on niche tasks often demands hand-crafting new datasets from spread-out documents. This is usually done by humans who collect, preprocess, and annotate sentences which is a laborious task and can result in biased and/or inhomogeneous labeling, e.g. if annotation instructions were not understood correctly or left room for subjective interpretation. This becomes especially apparent if multiple, non-expert individuals are involved in this process. In requirements engineering, we usually work with large documents written in natural language (Mich et al., 2004; Kassab et al., 2014) which describe the specifications of a software project, usually classified as either functional requirements, specifying what functionality the system should provide, and non-functional requirements, specifying in what way the system should implement those functions. However, these documents are often updated during the life cycle of the project and span up to multiple hundreds of pages, depending on the project size. Keeping track of all the changes and maintaining the software based on the requirement document can soon become a challenge (Fischbach et al., 2020) which is why an automatic conversion to, e.g., UML diagrams can come in handy. To do so, it is necessary to parse the relations between entities from the written text into a structured format, thus creating a comparable corpus of requirements in natural language and the same relation in a formal language. In this paper, we propose a semi-automatic approach that, given a clean, grammatically correct sentence stating a software requirement, outputs a labeling corresponding to the relation the requirement describes based on a small set of pre-defined rules of word dependency relations. This should reduce human bias manifesting in labels as the annotator does not actively choose the labels for each word anymore but instead defines abstract rules which provide for homogeneous, deterministic labeling and reduce the amount of labor for creating such datasets. This automatically annotated data can then be used for training a more powerful model, as shown by Schmitt et al. (2020) . We summarize our main contributions as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 129, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 158, |
|
"text": "GPT-3 (Brown et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 171, |
|
"end": 191, |
|
"text": "(Clark et al., 2020)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 588, |
|
"end": 607, |
|
"text": "(Maas et al., 2011)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1180, |
|
"end": 1199, |
|
"text": "(Mich et al., 2004;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1200, |
|
"end": 1220, |
|
"text": "Kassab et al., 2014)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1768, |
|
"end": 1792, |
|
"text": "(Fischbach et al., 2020)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 2757, |
|
"end": 2778, |
|
"text": "Schmitt et al. (2020)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We provide a high-quality, preprocessed dataset of 2,093 requirement sentences together with 1,848 automatically created labels and another 199 manually created labels for a subset of the automatically labeled sentences as a resource for further research projects.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We provide a flexible, semi-automatic framework for data annotation of the relation extraction domain based on dependency parsing and pattern matching.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We conduct a case study on the said framework on requirement document sentences, showing its annotation results are matching those of humans to a substantial degree.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Gamallo et al. 2012propose a simple Open Information Extraction system based on dependency parse trees. The algorithm extracts triples with two arguments and a sentence part relating those. However, the patterns are not very sophisticated and put a large part of the sentence into the relation. Hence, this approach is not suitable for our use case as we would eventually like to generate object diagrams from the relations we extracted. Erkan et al. (2007) use dependency parse trees to extract relations between proteins from sentences. They do so by classifying whether a sentence, given a dependency tree, describes a relation between any pair of proteins occurring in the sentence using semi-supervised harmonic functions and support vector machines. However, their entities (the protein names) are already annotated which is not the case if we only have the raw sentences as in our approach. Mausam et al. (2012) use dependency trees and a labeled bootstrap dataset to automatically generate patterns for information extraction, unlike our approach which does not require to annotate any data manually but instead to produce patterns. While this approach might be able to extract simple triples well, one needs either a larger annotated dataset, defeating the purpose of our work, or the patterns might not generalize well, thus being unsuitable for constructing a qualitative annotated corpus. Reddy et al. (2016) propose an algorithm to automatically extract logical expressions from dependency parse trees for question answering. These were then converted into a graph indicating the relations between the named entities in the sentence by applying semantic parsing. However, this approach always converts the entire sentence into a graph and may include information that is irrelevant for a dataset that is to be generated. Inago et al. 2019use a rule-based approach on dependency trees to process natural language car parking instructions with decision trees for automated driving systems. Unlike our data (or most datasets in general), sentences of the application domain are very short and similar in structure. While our approach could be effectively converted into a decision tree, it is easier to construct rules with our pattern engine for more complex data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 438, |
|
"end": 457, |
|
"text": "Erkan et al. (2007)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 898, |
|
"end": 918, |
|
"text": "Mausam et al. (2012)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1401, |
|
"end": 1420, |
|
"text": "Reddy et al. (2016)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "3 Corpus Creation", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "For our dataset, we use 19 publicly available requirement documents in the English language from the PURE dataset (Ferrari et al., 2017) , with a large topical variety, including governmental institution software in military and scientific fields, inventory management systems and video games. All documents are provided in .PDF, .HTML or .DOC format. From these, we manually extracted 2,104 requirement sentences (1,639 functional, 465 nonfunctional requirements).", |
|
"cite_spans": [ |
|
{ |
|
"start": 114, |
|
"end": 136, |
|
"text": "(Ferrari et al., 2017)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "As we want to automatically dependency parse our sentences, we have to ensure that all input to the model is grammatically and orthographically sound. We also have to ensure that any unnecessary information is removed to not confuse the parser. Therefore, we manually applied the following formatting operations to each sentence during data extraction:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 Splitting of enumerations into multiple sentences, adjusting words if necessary to make the sentence sound (e.g., nounification of verbs); e.g., \u2022 Removal of abbreviations after written-out expressions (e.g., in \"automated teller machine (ATM)\", the \"(ATM)\" is dropped)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 Removal of requirement reference numbers \u2022 Lower-casing of words if they are not abbreviations (e.g., \"NOT\" becomes \"not\")", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 Remove brackets around additional plural 's' (e.g., \"socket(s)\" becomes \"sockets\")", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 Exchanging \"/\" with \"and\" or \"or\" where applicable and possible given the context (e.g. \"The system should support adding/deleting files\" becomes \"The system should support adding and deleting files\")", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 Unification of the possessive 's' preceding symbols (\"'\" and \" \" are changed to \"'\")", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 Removal of duplicate sentences (11 in total)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing", |
|
"sec_num": "3.2" |
|
}, |
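{

"text": "Although we applied these operations manually, several of them are mechanical. The following is a minimal sketch, assuming plain Python and the re module, of how a few of the rules could be automated; the function and rule set are illustrative, not the procedure we actually used:\n\nimport re\n\ndef normalize(sentence):\n    # Remove brackets around an additional plural 's', e.g. 'socket(s)' -> 'sockets'\n    sentence = re.sub(r'\\(s\\)', 's', sentence)\n    # Drop an all-caps abbreviation following its written-out expression,\n    # e.g. 'automated teller machine (ATM)' -> 'automated teller machine'\n    sentence = re.sub(r' ?\\([A-Z]{2,}\\)', '', sentence)\n    # Collapse extra inter-punctuation such as repeated spaces\n    sentence = re.sub(r' {2,}', ' ', sentence)\n    # Unify possessive apostrophe variants to a plain apostrophe\n    sentence = sentence.replace('\\u00b4', \"'\").replace('\\u2019', \"'\")\n    return sentence.strip()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Preprocessing",

"sec_num": "3.2"

},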
|
{ |
|
"text": "After these preprocessing steps, the average sentence length is 19.87 words, the maximum is 69 words and the minimum 4 words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "These final 2,093 sentences (1,628 functional, 465 non-functional requirements) are parsed to extract dependencies using the Neural Adobe-UCSD Parser (Mrini et al., 2020) which achieved state-ofthe-art performance on the Penn Treebank dataset (Marcus et al., 1993) . Based on these dependencies, we handcraft a total of 102 patterns to label 91.03%", |
|
"cite_spans": [ |
|
{ |
|
"start": 150, |
|
"end": 170, |
|
"text": "(Mrini et al., 2020)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 243, |
|
"end": 264, |
|
"text": "(Marcus et al., 1993)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Labeling", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "of the functional and 78.71% of the non-functional sentences without any further human interaction. Each pattern is a sequence of triples (l, dp, c) where l is a label, dp a sequence of dependency labels forming a path downwards a dependency tree and c a Boolean value indicating whether all children (direct and indirect) should be left out from labeling or not. Each sequence applies all or a subset of the following entity tags to the sentences:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Labeling", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 ent1: The main entity of the requirement. Either the acting component or the component on which a constraint is applied (if there is no second entity)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Labeling", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 rel: The relation/action of the requirement.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Labeling", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 ent2: The passive entity of the requirement. Either the component on which an action is performed or which is involved in the action passively", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Labeling", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 cond: Any modifier of the requirement. Can further specify the requirement or put conditions on it how or when it will be applied.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Labeling", |
|
"sec_num": "3.3" |
|
}, |
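{

"text": "To make the pattern format concrete, the following minimal sketch shows how such a pattern can be written down as a Python literal (illustrative, mirroring row 1 of Table 2; not necessarily the exact representation used in our released code):\n\n# A pattern is a sequence of triples (label, dependency-label path, exclude_children).\nPATTERN = [\n    ('rel', ['root'], True),            # label only the root token itself\n    ('ent1', ['root', 'nsubj'], False), # label the subject and all its children\n    ('ent2', ['root', 'dobj'], False),\n    ('cond', ['root', 'advcl'], False),\n]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Labeling",

"sec_num": "3.3"

},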
|
{ |
|
"text": "An excerpt of automatic annotations can be found in Table 1 . Each pattern is applied using tree traversal: for each label that is to be applied, a sequence of dependency labels (optionally with modifiers) is given, starting at the root. The algorithm checks whether the current nodes have any direct children connected to them with the current dependency label of the sequence. If so, we check whether these children have children connected to them with the next label in the sequence. If not, the pattern fitting is stopped and no labeling is applied to the sentence. If we reach the end of the sequence, the final node is labeled with the given label and, depending on a parameter, all of its children, too. A simple example can be found in Table 2 , row 1. Dependency labels can include modifiers to allow for more complex patterns:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 59, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 744, |
|
"end": 751, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Labeling", |
|
"sec_num": "3.3" |
|
}, |
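{

"text": "The traversal described above can be sketched as follows (assuming a minimal tree node class with a children dict keyed by dependency label and a descendants() helper; this is an illustrative re-implementation, not our released code):\n\ndef match_path(node, path):\n    # Follow the dependency-label path downwards; return the reached nodes,\n    # or an empty list if the path cannot be completed.\n    nodes = [node]\n    for dep in path:\n        nodes = [c for n in nodes for c in n.children.get(dep, [])]\n        if not nodes:\n            return []\n    return nodes\n\ndef apply_pattern(root, pattern):\n    # Every part of the pattern must match, otherwise no labeling is applied.\n    labeling = {}\n    for label, path, exclude_children in pattern:\n        matched = match_path(root, path[1:])  # each path starts at 'root'\n        if not matched:\n            return None\n        for node in matched:\n            labeling[node] = label\n            if not exclude_children:\n                for descendant in node.descendants():\n                    labeling[descendant] = label\n    return labeling",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Labeling",

"sec_num": "3.3"

},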
|
{ |
|
"text": "\u2022 Starting with !, the pattern matching will remove any node that has one or more children with the given dependency label. Thus, no step downwards the tree is taken A bulk entry ent1 can be used to add rel many assets ent2 . The HATS-GUI ent1 shall interact with the Host OS to compare rel time stamps ent2 for files cond . The BE ent1 shall be able to apply rel corrections ent2 based on state count and/or quantizer power measurement data cond . \u2022 .. lets us traverse back to the parent of the current node. This allows us to check nodes for their existence without including them in the actual labeling A selection of patterns used can be found in Table 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 652, |
|
"end": 659, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Labeling", |
|
"sec_num": "3.3" |
|
}, |
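{

"text": "A sketch of how these modifiers, together with the keyword constraints of the form dep=word used in Table 2, could be folded into a single traversal step (again illustrative; the handling of a keyword constraint on the root itself is omitted):\n\ndef step(nodes, dep):\n    if dep == '..':\n        # Traverse back to the parent of the current node\n        return [n.parent for n in nodes if n.parent is not None]\n    if dep.startswith('!'):\n        # Keep only nodes without a child of the given label; no step downwards\n        return [n for n in nodes if not n.children.get(dep[1:], [])]\n    label, _, word = dep.partition('=')\n    children = [c for n in nodes for c in n.children.get(label, [])]\n    # An optional '=word' part restricts the match to a specific keyword\n    return [c for c in children if not word or c.text == word]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Labeling",

"sec_num": "3.3"

},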
|
{ |
|
"text": "In our setting, one sentence usually holds one relation, however, this is not the case for conjunctions of multiple main clauses or instructions. Due to current limitations of our engine (see Section 6), the relation of the first main clause is always chosen, however, this depends on the pattern design. Even though we only use requirements written in English, a large portion of the rules could be applied to data in different languages as the Universal Dependencies (Schuster and Manning, 2016) rely on the concept of primacy of content, allowing for very similar dependency trees. However, patterns explicitly using keywords may not generalize well for other languages. The code for the labeling task as well as the labeled data can be found on GitHub 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 469, |
|
"end": 497, |
|
"text": "(Schuster and Manning, 2016)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Labeling", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Given our automatically labeled data, we evaluate the quality of the labels by comparing its output to human annotations. To do so, we randomly sample 199 sentences (10.77%) from the 1848 sentences which were automatically labeled. Two of the authors then annotated these sentences manually. The annotators were given the descriptions of each label type, but had no access to the actual labeling from the algorithm. Annotators collaboratively labeled the data, discussing the labeling for each sentence and agreeing upon a single valid labeling. We then calculate inter-rater reliability with the Cohen's \u03ba between the human annotators and the automatic annotator, once over all labels and once as average inter-reliability per sentence (i.e., we calculate one Cohen's \u03ba score per sentence and average over all sentences -this considers each sentence equally while the overall score puts more weight on longer sentences). The results can be found in Table 3 . While the overall score puts more weight on long sentences, the sentence average provides us an approximation of the reliability of our automatic annotator for any sentence. According to the taxonomy of Landis and Koch (Landis and Koch, 1977) , the per sentence average \u03ba value indicates a substantial inter-annotator agreement, the overall \u03ba a moderate agreement. While the main acting entity is extracted very well with almost perfect agreement according to Landis and Koch, extracting relational modifiers proofs to be the hardest with only moderate agreement between our automatic approach and the human annotators. This is mostly due to the nature of the label itself, spanning a large variety of modifiers from conditions to entities not involved in the relation itself. While one could split the cond label into multiple different labels, this would increase the number of patterns required a lot. Alternatively, one might reduce the coverage of the labeling in general but we focused on including as much information as possible. The relatively low score for ent2 mainly arises from sentences containing multiple relations where many words describe a passive entity for other relations than the one of the main sentence. Our approach currently is not able to effectively extract multiple relations from a single sentence yet. This is also the reason why the score rel is lower than the one for ent1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1174, |
|
"end": 1202, |
|
"text": "Koch (Landis and Koch, 1977)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 950, |
|
"end": 957, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "4" |
|
}, |
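{

"text": "The two aggregation modes can be reproduced as follows (a sketch using sklearn.metrics.cohen_kappa_score; gold and auto stand for hypothetical per-sentence lists of per-token tags):\n\nfrom statistics import mean\nfrom sklearn.metrics import cohen_kappa_score\n\ndef agreement(gold, auto):\n    # Overall score: pool all tokens, which weights longer sentences more\n    overall = cohen_kappa_score(\n        [t for sent in gold for t in sent],\n        [t for sent in auto for t in sent])\n    # Sentence average: one kappa per sentence, then the unweighted mean\n    # (sentences on which kappa is undefined would need special handling)\n    per_sentence = mean(cohen_kappa_score(g, a) for g, a in zip(gold, auto))\n    return overall, per_sentence",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation",

"sec_num": "4"

},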
|
{ |
|
"text": "While our approach works well for requirements documents -after all, relations between software entities and modifications of these relations can be extracted well by syntactically parsing the sentence structure -this does not apply to word labels which require a semantic understanding of the input. For example, if we were to create labels for Named Entity Recognition, our algorithm would fail as Pattern Description ('rel', ['root'] , True) ('ent1', ['root', 'nsubj'] , False) ('ent2', ['root', 'dobj'] , False) ('cond', ['root', 'advcl'], False) Simple pattern, sets the root of the sentence as the relation (only this single word), the entire nominal subject as the acting entity, the entire direct object as the passive entity. An adverbial clause is treated as a relation modifier. ('rel', ['root=capable', 'prep=of', 'pcomp'] , True) ('ent1', ['root', 'nsubj'] , False) ('ent2', ['root', 'prep=of', 'pcomp', 'prep=in', 'pobj'] , False) ('cond', ['root', 'advcl'] , False)", |
|
"cite_spans": [ |
|
{ |
|
"start": 420, |
|
"end": 436, |
|
"text": "('rel', ['root']", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 454, |
|
"end": 471, |
|
"text": "['root', 'nsubj']", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 481, |
|
"end": 506, |
|
"text": "('ent2', ['root', 'dobj']", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 516, |
|
"end": 550, |
|
"text": "('cond', ['root', 'advcl'], False)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 790, |
|
"end": 834, |
|
"text": "('rel', ['root=capable', 'prep=of', 'pcomp']", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 843, |
|
"end": 869, |
|
"text": "('ent1', ['root', 'nsubj']", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 879, |
|
"end": 935, |
|
"text": "('ent2', ['root', 'prep=of', 'pcomp', 'prep=in', 'pobj']", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 945, |
|
"end": 971, |
|
"text": "('cond', ['root', 'advcl']", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations", |
|
"sec_num": "5" |
|
}, |
|
|
{ |
|
"text": "Prepositional sentences starting with \"in case\" are labeled as requirement modifier (we have to traverse the tree upwards again to include the 'in' as well). it is not possible to find syntactic rules to distinguish between, e.g., an organization and a person. Also, the algorithm fails in some cases if either rules are not specific enough or the dependency parser mistakenly adds dependencies between sentence parts where there is no dependency between them. The latter may especially occur frequently if the sentences were not preprocessed well which is why our algorithm is not suitable as a classifier in general (if we, on the other hand, use our data as training input for a Transformer model (Vaswani et al., 2017) , it may overcome these strict syntactic requirements and generalize better on real-world data).", |
|
"cite_spans": [ |
|
{ |
|
"start": 700, |
|
"end": 722, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this paper, we present a novel approach for data labeling which allows users to annotate sentences for relation extraction within a shorter time period compared to manual annotation while at the same time having a consistent labeling scheme for the entire dataset. Our approach exploits syntactic features which are the integral foundation of most relation extraction tasks. For the future, it would be helpful to implement an automatic extraction of requirement sentences by, e.g., training a classifier to identify relevant sen-tences in plain text or .PDF documents as well as a semi-automatic approach with human validation for preprocessing sentences into grammatically and orthographically sound ones. We plan on extending the pattern engine our algorithm relies on, e.g., allowing for recursive patterns to parse nested sentences and to extract multiple relations from one sentence as well as optional pattern parts to reduce redundancy (e.g., a sentence where the active entity is the nominal subject, the relation the dependency tree root and the passive entity the direct object may have a relation modifier in an adverbial clause. As of the current state, this requires two patterns (exponentially increasing with the number of optional dependencies) while with a pattern where this adverbial clause is considered optional, we only need a single pattern).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion & Outlook", |
|
"sec_num": "6" |
|
}, |
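{

"text": "To illustrate the planned optional pattern parts, a single pattern such as the following hypothetical one could replace the two patterns needed today (the trailing 'optional' flag is a proposed extension, not an existing feature of our engine):\n\n# Hypothetical syntax: a part marked as optional is applied when it matches,\n# but does not invalidate the pattern when it does not.\nPATTERN = [\n    ('rel', ['root'], True),\n    ('ent1', ['root', 'nsubj'], False),\n    ('ent2', ['root', 'dobj'], False),\n    ('cond', ['root', 'advcl'], False, 'optional'),\n]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Conclusion & Outlook",

"sec_num": "6"

},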
|
{ |
|
"text": "https://github.com/JeremiasBohn/ RequirementRelationExtractor", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Language models are few-shot learners", |
|
"authors": [ |
|
{

"first": "Tom",

"middle": [

"B"

],

"last": "Brown",

"suffix": ""

},

{

"first": "Benjamin",

"middle": [],

"last": "Mann",

"suffix": ""

},

{

"first": "Nick",

"middle": [],

"last": "Ryder",

"suffix": ""

},

{

"first": "Melanie",

"middle": [],

"last": "Subbiah",

"suffix": ""

},

{

"first": "Jared",

"middle": [],

"last": "Kaplan",

"suffix": ""

},

{

"first": "Prafulla",

"middle": [],

"last": "Dhariwal",

"suffix": ""

},

{

"first": "Arvind",

"middle": [],

"last": "Neelakantan",

"suffix": ""

},

{

"first": "Pranav",

"middle": [],

"last": "Shyam",

"suffix": ""

},

{

"first": "Girish",

"middle": [],

"last": "Sastry",

"suffix": ""

},

{

"first": "Amanda",

"middle": [],

"last": "Askell",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2005.14165" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. arXiv preprint arXiv:2005.14165.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Electra: Pre-training text encoders as discriminators rather than generators", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{

"first": "Quoc",

"middle": [

"V"

],

"last": "Le",

"suffix": ""

},

{

"first": "Christopher",

"middle": [

"D"

],

"last": "Manning",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2003.10555" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark, Minh-Thang Luong, Quoc V Le, and Christopher D Manning. 2020. Electra: Pre-training text encoders as discriminators rather than genera- tors. arXiv preprint arXiv:2003.10555.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Semi-supervised classification for extracting protein interaction sentences using dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "G\u00fcne\u015f", |
|
"middle": [], |
|
"last": "Erkan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arzucan\u00f6zg\u00fcr", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dragomir", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Radev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "228--237", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G\u00fcne\u015f Erkan, Arzucan\u00d6zg\u00fcr, and Dragomir R. Radev. 2007. Semi-supervised classification for extracting protein interaction sentences using dependency pars- ing. In Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Process- ing and Computational Natural Language Learning (EMNLP-CoNLL), pages 228-237, Prague, Czech Republic. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Pure: A dataset of public requirements documents", |
|
"authors": [ |
|
{ |
|
"first": "Alessio", |
|
"middle": [], |
|
"last": "Ferrari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giorgio", |
|
"middle": [ |
|
"Oronzo" |
|
], |
|
"last": "Spagnolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefania", |
|
"middle": [], |
|
"last": "Gnesi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 IEEE 25th International Requirements Engineering Conference (RE)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "502--505", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/RE.2017.29" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alessio Ferrari, Giorgio Oronzo Spagnolo, and Stefa- nia Gnesi. 2017. Pure: A dataset of public require- ments documents. In 2017 IEEE 25th International Requirements Engineering Conference (RE), pages 502-505.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "What makes agile test artifacts useful? an activity-based quality model from a practitioners' perspective", |
|
"authors": [ |
|
{ |
|
"first": "Jannik", |
|
"middle": [], |
|
"last": "Fischbach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Henning", |
|
"middle": [], |
|
"last": "Femmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Mendez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Davide", |
|
"middle": [], |
|
"last": "Fucci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Vogelsang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 14th ACM / IEEE International Symposium on Empirical Software Engineering and Measurement (ESEM), ESEM '20", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3382494.3421462" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jannik Fischbach, Henning Femmer, Daniel Mendez, Davide Fucci, and Andreas Vogelsang. 2020. What makes agile test artifacts useful? an activity-based quality model from a practitioners' perspective. In Proceedings of the 14th ACM / IEEE International Symposium on Empirical Software Engineering and Measurement (ESEM), ESEM '20, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Dependency-based open information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Pablo", |
|
"middle": [], |
|
"last": "Gamallo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Garcia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Santiago", |
|
"middle": [], |
|
"last": "Fern\u00e1ndez-Lanza", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Joint Workshop on Unsupervised and Semi-Supervised Learning in NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "10--18", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pablo Gamallo, Marcos Garcia, and Santiago Fern\u00e1ndez-Lanza. 2012. Dependency-based open information extraction. In Proceedings of the Joint Workshop on Unsupervised and Semi- Supervised Learning in NLP, pages 10-18, Avignon, France. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Parsing parking instructions for self-driving cars into spatial semantic descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Akari", |
|
"middle": [], |
|
"last": "Inago", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroshi", |
|
"middle": [], |
|
"last": "Tsukahara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ichiro", |
|
"middle": [], |
|
"last": "Kobayashi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "J. Comput", |
|
"volume": "14", |
|
"issue": "5", |
|
"pages": "328--338", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Akari Inago, Hiroshi Tsukahara, and Ichiro Kobayashi. 2019. Parsing parking instructions for self-driving cars into spatial semantic descriptions. J. Comput., 14(5):328-338.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "State of practice in requirements engineering: contemporary data", |
|
"authors": [ |
|
{ |
|
"first": "Mohamad", |
|
"middle": [], |
|
"last": "Kassab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Neill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phillip", |
|
"middle": [], |
|
"last": "Laplante", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Innovations in Systems and Software Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohamad Kassab, Colin Neill, and Phillip Laplante. 2014. State of practice in requirements engineering: contemporary data. Innovations in Systems and Soft- ware Engineering.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "The measurement of observer agreement for categorical data", |
|
"authors": [ |
|
{

"first": "J",

"middle": [

"Richard"

],

"last": "Landis",

"suffix": ""

},
|
{ |
|
"first": "Gary", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Koch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1977, |
|
"venue": "Biometrics", |
|
"volume": "33", |
|
"issue": "1", |
|
"pages": "159--174", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Richard Landis and Gary G. Koch. 1977. The mea- surement of observer agreement for categorical data. Biometrics, 33(1):159-174.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Learning word vectors for sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Maas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Daly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "142--150", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng, and Christopher Potts. 2011. Learning word vectors for sentiment analy- sis. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Hu- man Language Technologies, pages 142-150, Port- land, Oregon, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Building a large annotated corpus of English: The Penn Treebank", |
|
"authors": [ |
|
{ |
|
"first": "Mitchell", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Marcus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beatrice", |
|
"middle": [], |
|
"last": "Santorini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [ |
|
"Ann" |
|
], |
|
"last": "Marcinkiewicz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Computational Linguistics", |
|
"volume": "19", |
|
"issue": "2", |
|
"pages": "313--330", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mitchell P. Marcus, Beatrice Santorini, and Mary Ann Marcinkiewicz. 1993. Building a large annotated corpus of English: The Penn Treebank. Computa- tional Linguistics, 19(2):313-330.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Open language learning for information extraction", |
|
"authors": [ |
|
{

"first": "",

"middle": [],

"last": "Mausam",

"suffix": ""

},

{

"first": "Michael",

"middle": [],

"last": "Schmitz",

"suffix": ""

},

{

"first": "Stephen",

"middle": [],

"last": "Soderland",

"suffix": ""

},

{

"first": "Robert",

"middle": [],

"last": "Bart",

"suffix": ""

},

{

"first": "Oren",

"middle": [],

"last": "Etzioni",

"suffix": ""

}
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "523--534", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mausam, Michael Schmitz, Stephen Soderland, Robert Bart, and Oren Etzioni. 2012. Open language learn- ing for information extraction. In Proceedings of the 2012 Joint Conference on Empirical Methods in Nat- ural Language Processing and Computational Natu- ral Language Learning, pages 523-534, Jeju Island, Korea. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Market research for requirements analysis using linguistic tools", |
|
"authors": [ |
|
{ |
|
"first": "Luisa", |
|
"middle": [], |
|
"last": "Mich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mariangela", |
|
"middle": [], |
|
"last": "Franch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierluigi Novi", |
|
"middle": [], |
|
"last": "Inverardi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luisa Mich, Mariangela Franch, and Pierluigi Novi Inverardi. 2004. Market research for requirements analysis using linguistic tools. Requirements Engi- neering.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Rethinking self-attention: Towards interpretability in neural parsing", |
|
"authors": [ |
|
{ |
|
"first": "Khalil", |
|
"middle": [], |
|
"last": "Mrini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franck", |
|
"middle": [], |
|
"last": "Dernoncourt", |
|
"suffix": "" |
|
}, |
|
{

"first": "Quan",

"middle": [

"Hung"

],

"last": "Tran",

"suffix": ""

},

{

"first": "Trung",

"middle": [],

"last": "Bui",

"suffix": ""

},

{

"first": "Walter",

"middle": [],

"last": "Chang",

"suffix": ""

},

{

"first": "Ndapa",

"middle": [],

"last": "Nakashole",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "731--742", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.findings-emnlp.65" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Khalil Mrini, Franck Dernoncourt, Quan Hung Tran, Trung Bui, Walter Chang, and Ndapa Nakashole. 2020. Rethinking self-attention: Towards inter- pretability in neural parsing. In Findings of the As- sociation for Computational Linguistics: EMNLP 2020, pages 731-742, Online. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Transforming dependency structures to logical forms for semantic parsing", |
|
"authors": [ |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oscar", |
|
"middle": [], |
|
"last": "T\u00e4ckstr\u00f6m", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Steedman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "127--140", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00088" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siva Reddy, Oscar T\u00e4ckstr\u00f6m, Michael Collins, Tom Kwiatkowski, Dipanjan Das, Mark Steedman, and Mirella Lapata. 2016. Transforming dependency structures to logical forms for semantic parsing. Transactions of the Association for Computational Linguistics, 4:127-140.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "An unsupervised joint system for text generation from knowledge graphs and semantic parsing", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Schmitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sahand", |
|
"middle": [], |
|
"last": "Sharifzadeh", |
|
"suffix": "" |
|
}, |
|
{

"first": "Volker",

"middle": [],

"last": "Tresp",

"suffix": ""

},

{

"first": "Hinrich",

"middle": [],

"last": "Sch\u00fctze",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7117--7130", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.577" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Schmitt, Sahand Sharifzadeh, Volker Tresp, and Hinrich Sch\u00fctze. 2020. An unsupervised joint sys- tem for text generation from knowledge graphs and semantic parsing. In Proceedings of the 2020 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 7117-7130, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Enhanced English Universal Dependencies: An improved representation for natural language understanding tasks", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{

"first": "Christopher",

"middle": [

"D"

],

"last": "Manning",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2371--2378", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Schuster and Christopher D. Manning. 2016. Enhanced English Universal Dependencies: An im- proved representation for natural language under- standing tasks. In Proceedings of the Tenth Inter- national Conference on Language Resources and Evaluation (LREC'16), pages 2371-2378, Portoro\u017e, Slovenia. European Language Resources Associa- tion (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "\"The system has to include a) [...] b) [...] c) [...]\" becomes 3 sentences, each including exactly one of the requirements \u2022 Removal of extra inter-punctuation (additional spaces, dots, commas, etc.) \u2022 Removal of references to sections, tables, figures, or other requirements of the document as they are not relevant for extracting the relation of the sentence itself" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "SentenceWhile flying two MAE AVs Beyond Line Of Sight cond , the TCS ent1 shall provide rel full control functionality ent2 of each AV cond . NPAC SMS ent1 shall default rel the EDR Indicator ent2 to False cond ." |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "Examples of Labeling" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"3\">Labels considered Sentence Avg. Overall</td></tr><tr><td>All labels</td><td>0.632</td><td>0.576</td></tr><tr><td>rel only</td><td>0.790</td><td>0.720</td></tr><tr><td>ent1 only</td><td>0.855</td><td>0.822</td></tr><tr><td>ent2 only</td><td>0.619</td><td>0.561</td></tr><tr><td>cond only</td><td>0.532</td><td>0.543</td></tr></table>", |
|
"text": "Examples of Patterns" |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "" |
|
} |
|
} |
|
} |
|
} |