|
{ |
|
"paper_id": "D19-1029", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:11:08.362470Z" |
|
}, |
|
"title": "Multi-Input Multi-Output Sequence Labeling for Joint Extraction of Fact and Condition Tuples from Scientific Text", |
|
"authors": [ |
|
{ |
|
"first": "Tianwen", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Harbin Institute of Technology", |
|
"location": { |
|
"settlement": "Harbin, Heilongjiang", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "twjiang@ir.hit.edu.cn" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Notre Dame", |
|
"location": { |
|
"region": "Indiana", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "tzhao2@nd.edu" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Harbin Institute of Technology", |
|
"location": { |
|
"settlement": "Harbin, Heilongjiang", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "bqin@ir.hit.edu.cn" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Harbin Institute of Technology", |
|
"location": { |
|
"settlement": "Harbin, Heilongjiang", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "tliu@ir.hit.edu.cn" |
|
}, |
|
{ |
|
"first": "Nitesh", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Chawla", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Notre Dame", |
|
"location": { |
|
"region": "Indiana", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "nchawla@nd.edu" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Notre Dame", |
|
"location": { |
|
"region": "Indiana", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "mjiang2@nd.edu" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Condition is essential in scientific statement. Without the conditions (e.g., equipment, environment) that were precisely specified, facts (e.g., observations) in the statements may no longer be valid. Existing ScienceIE methods, which aim at extracting factual tuples from scientific text, do not consider the conditions. In this work, we propose a new sequence labeling framework (as well as a new tag schema) to jointly extract the fact and condition tuples from statement sentences. The framework has (1) a multi-output module to generate one or multiple tuples and (2) a multi-input module to feed in multiple types of signals as sequences. It improves F1 score relatively by 4.2% on BioNLP2013 and by 6.2% on a new bio-text dataset for tuple extraction.", |
|
"pdf_parse": { |
|
"paper_id": "D19-1029", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Condition is essential in scientific statement. Without the conditions (e.g., equipment, environment) that were precisely specified, facts (e.g., observations) in the statements may no longer be valid. Existing ScienceIE methods, which aim at extracting factual tuples from scientific text, do not consider the conditions. In this work, we propose a new sequence labeling framework (as well as a new tag schema) to jointly extract the fact and condition tuples from statement sentences. The framework has (1) a multi-output module to generate one or multiple tuples and (2) a multi-input module to feed in multiple types of signals as sequences. It improves F1 score relatively by 4.2% on BioNLP2013 and by 6.2% on a new bio-text dataset for tuple extraction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Conditions such as environment and equipment provide validation supports for facts, while the facts focus on scientific observation and hypothesis in scientific literature (Miller, 1947) . Existing ScienceIE methods, which extract (subject, relational phrase, object)-tuples from scientific text, do not distinguish the roles of fact and condition. Simply adding a tuple classification module has two weak points: (1) one tuple may have different roles in different sentences; (2) the tuples in one sentence have high dependencies with each other, for example, given a statement sentence in a biochemistry paper (Tomilin et al., 2016) : \"We observed that ... alkaline pH increases the activity of TRPV5/V6 channels in Jurkat T cells.\"", |
|
"cite_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 186, |
|
"text": "(Miller, 1947)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 612, |
|
"end": 634, |
|
"text": "(Tomilin et al., 2016)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "an existing system (Stanovsky et al., 2018) would return one tuple as below: Figure 1: Our framework has two modules: (1) a multiinput module (bottom) based on a multi-head encoderdecoder model with multi-input gates;", |
|
"cite_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 43, |
|
"text": "(Stanovsky et al., 2018)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Encoder Decoder LSTMd Feed Forward Feed Forward Softmax Softmax {! \" # , ! % # , \u2026 ! ' # } {! \" ( , ! % ( , \u2026 ! ) ( } Feed Forward Feed Forward ...", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(2) a multioutput module (top) of a relation name tagging layer and a tuple completion tagging layer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(alkaline pH, increases, activity of TRPV5/V6 channels in Jurkat T cells). where (a) the object should just be the channel's activity and (b) the condition tuple (TRPV5/V6 channels, in, Jurkat T cells) was not found. Note that the term \"TRPV5/V6 channels\" is not only the concept in the fact tuple's object but also the condition tuple's subject. In this work, we define the joint tuple extraction task as a multi-output sequence labeling problem. First, we create a new tag schema: Non-\"O\" tags are formatted as \"B/I-XYZ\", where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 X \u2208 {fact, condition};", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Y \u2208 {1: subject; 2: relation; 3: object};", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Z \u2208 {concept, attribute, relational phrase}. Note that if Y=\"2\" then Z=\"p\". So, the number of non-\"O\" tags is 20. Now each fact/condition tuple can be represented as a tag sequence. Moreover, it is the first work in sequence labeling that concepts and attributes are separated. The fact tuple in the example will ideally be: (alkaline pH, increases, {TRPV5/V6 channels : activity}). Figure 1 shows our framework. Multiple tag sequences are generated after the LSTMd decoder, each of which represents a fact or condition tuple. This multi-output module has two layers: one is a relation name tagging layer that predicts the tags of relational phrases and determines the number of output sequences; the other is a tuple completion tagging layer that generates the tag sequences for completing the fact and condition tuples.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 385, |
|
"end": 393, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
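
{

"text": "To make the schema concrete, here is a minimal Python sketch (ours, not part of the paper) that enumerates the \"B/I-XYZ\" tags and confirms the count of 20 non-\"O\" tags:\n\nfrom itertools import product\n\nX = [\"f\", \"c\"]  # fact, condition\nYZ = [(\"1\", \"c\"), (\"1\", \"a\"),  # subject: concept or attribute\n      (\"3\", \"c\"), (\"3\", \"a\"),  # object: concept or attribute\n      (\"2\", \"p\")]              # relation: relational phrase only (Y=\"2\" forces Z=\"p\")\ntags = [b + \"-\" + x + y + z for b, x, (y, z) in product([\"B\", \"I\"], X, YZ)]\nassert len(tags) == 20  # plus \"O\" gives 21 tags in total",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},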
|
{ |
|
"text": "To address the challenge of modeling the complex tag schema, besides language model, we incorporate as much information as possible from upstream tools such as Part-of-Speech tagging (POS), Concept detection, Attribute name extraction, and Phrase mining (CAP). And we transform them into tag sequences as the model input. We observe strong dependencies between the token's POS/CAP tags and target tags. We appreciate the high accuracy of existing techniques making the multi-input sequences available for new datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The multi-input multi-output sequence labeling framework is named as MIMO. Experiments demonstrate that it improves F1 score relatively by 6.2% over state-of-the-art models for tuple extraction on a new bio-text dataset we will introduce in the later section. When transferred to the BioNLP2013 dataset without additional training, it improves F1 score relatively by 4.2%. We apply MIMO to a large set of 15.5M MEDLINE papers and construct a knowledge graph: An example can be found in Figure 4 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 486, |
|
"end": 494, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We built a system with GUI ( Figure 2 ) to collect a new dataset for the joint tuple extraction purpose, named Biomedical Conditional Fact Extraction (BioCFE). Three participants (experts in biomedical domain) manually annotated the fact and condition tuples from statement sentences from 31 paper abstracts in the MEDLINE database. The an- notation procedure took over 30 minutes on average for each paper. Here is a brief guide to the system. First, the users merged the token(s) into a span. Second, they gave a proper number of fact and/or condition tuple(s), where the proper number is not fixed but depends on the concrete sentence. Each tuple has five slots (subject's concept, subject's attribute, relation phrase, object's concept, and object's attribute). Third, they dragged the spans filling into the slots. If the three annotations are inconsistent, we filtered out the case. Eventually we have 756 fact tuples and 654 condition tuples from 336 annotated sentences. It is common to see one sentence having multiple facts and/or conditions, and actually 61%/52% statement sentences have more than one fact/condition tuples.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 37, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A New Dataset", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our approach has two modules: (1) a multi-input module that harnesses recent NLP development to process the text for input sequences from multiple tasks and feeds them into a multi-head encoderdecoder model with multi-input gates;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "(2) a multioutput module that generates multiple tuple tag sequences for fact and condition tuples, which consists of a relation name tagging layer and a tuple completion tagging layer, as shown in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 198, |
|
"end": 206, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Preprocessing for input sequences: Following fundamental NLP techniques have achieved high accuracy requiring no additional training with labeled data: Language Model (LM) (Howard and Ruder, 2018) , POS (Labeau et al., 2015) , CAP (Luan et al., 2018; Jiang et al., 2017; Shang et al., 2018; Wang et al., 2018a) . For any given input sentence, we tokenize it and represent each token by its word embedding (pre-trained GloVe vector in this paper). Then we get another three input sequences by the input sentence and the above three fundamental NLP techniques. (1) A pre-trained LSTM-based language model takes the sentence as input and returns semantic embedding sequence, where the dependencies between a token and its predecessors in distant contexts are preserved. 2We employ NLTK tool to generate the POS tag sequence for the given sentence. The POS tag sequence indicates syntactic patterns of the words in a sentence, that is the dependencies between POS tags and output tags, like verbs (e.g., VBD) and predicates (e.g., B-f2p).", |
|
"cite_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 196, |
|
"text": "(Howard and Ruder, 2018)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 203, |
|
"end": 224, |
|
"text": "(Labeau et al., 2015)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 250, |
|
"text": "(Luan et al., 2018;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 251, |
|
"end": 270, |
|
"text": "Jiang et al., 2017;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 290, |
|
"text": "Shang et al., 2018;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 291, |
|
"end": 310, |
|
"text": "Wang et al., 2018a)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Multi-Input Module", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "(3) Multiple complementary IE techniques are used to detect concepts, attributes and phrases from the given sentences, being merged and resulting a CAP sequence. We make tags in the format of \"B/Ic/a/p\" for the tokens of concepts, attributes, and phrases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Multi-Input Module", |
|
"sec_num": "3.1" |
|
}, |
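
{

"text": "A minimal sketch of this preprocessing (assuming a GloVe lookup table, a pre-trained language model exposing an embed() method, and a CAP detector; these names are ours, not the paper's):\n\nimport nltk\n\ndef build_input_sequences(sentence, glove, lm, cap_detector):\n    tokens = nltk.word_tokenize(sentence)\n    word_seq = [glove.get(t.lower()) for t in tokens]   # pre-trained word embeddings\n    lm_seq = lm.embed(tokens)                           # (1) semantic embedding sequence\n    pos_seq = [tag for _, tag in nltk.pos_tag(tokens)]  # (2) POS tags via NLTK\n    cap_seq = cap_detector.tag(tokens)                  # (3) merged B/I-c/a/p sequence\n    return word_seq, lm_seq, pos_seq, cap_seq",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Multi-Input Module",

"sec_num": "3.1"

},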
|
{ |
|
"text": "Each sequence encodes a specific type of dependencies. A combination of multi-type dependencies learns the complicated dependencies on the 21 tuple tags better than any sole type. LM learns the dependencies between a token and its predecessors in distant contexts, which helps predict the position of subject, relation, and object. POS encodes the syntactic features of words. Dependencies between the POS tag and tuple tag (e.g., \"VBD\" and \"B-f2p\") can be modeled. We also spot high dependencies between the CAP tag and tuple tag. For example, the tokens of \"B/I-c\" (concept) and \"B/I-a\" (attribute) tags have high probability of being labeled as \"B/I-XYc\" and \"B/I-XYa\" in the output sequences, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Multi-Input Module", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Multi-head Encoder-Decoder: We investigate two neural models as encoder: one is bidirectional LSTM (BiLSTM), the other is the renown, bidirectional encoder representations from Transformers (BERT). We adopt a LSTM structure as the decoding layer (LSTMd) (Zheng et al., 2017) . We observe that the input sequences may have different tag predictability on different sentences. For short sentences, POS and CAP are more useful (modeling local dependencies); for long sentences, LM is more effective (modeling distant dependencies). In order to secure the model's robustness on massive data, we apply a multi-head mechanism to the encoder-decoder model. Each head of the encoder-decoder is fed with one type of input sequence, and they are combined at the end of decoder layer. Thus, the tag prediction becomes more stable than using a simple encoderdecoder without the multi-head. Multi-input gates: We adopt the multi-input gates in ResNet (He et al., 2016) to take the most use of the multi-input sequences. We add the gates to the input of BiLSTM or BERT encoder, the input of LSTMd decoder, and the multi-output module.", |
|
"cite_spans": [ |
|
{ |
|
"start": 254, |
|
"end": 274, |
|
"text": "(Zheng et al., 2017)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 938, |
|
"end": 955, |
|
"text": "(He et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Multi-Input Module", |
|
"sec_num": "3.1" |
|
}, |
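
{

"text": "A minimal PyTorch sketch of the multi-head encoder with input gates (our reading of the description above, not the authors' released code): each head is a BiLSTM over one type of input sequence, and gated head outputs are summed before the LSTMd decoder.\n\nimport torch\nimport torch.nn as nn\n\nclass MultiHeadEncoder(nn.Module):\n    def __init__(self, input_dims, hidden=300):\n        super().__init__()\n        self.heads = nn.ModuleList(\n            [nn.LSTM(d, hidden, bidirectional=True, batch_first=True) for d in input_dims])\n        self.gates = nn.ModuleList(\n            [nn.Linear(2 * hidden, 2 * hidden) for _ in input_dims])\n\n    def forward(self, inputs):  # one (batch, len, dim) tensor per input sequence type\n        outs = []\n        for x, lstm, gate in zip(inputs, self.heads, self.gates):\n            h, _ = lstm(x)\n            outs.append(torch.sigmoid(gate(h)) * h)  # gate each head's output\n        return sum(outs)  # heads combined into one representation",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Multi-Input Module",

"sec_num": "3.1"

},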
|
{ |
|
"text": "We propose to generate multiple output sequences. As annotating multiple tuples from one sentence is common, a token may have different expected tags in the tuples. On BioCFE, we observe that 93.8% statement sentences make multiple tuples: 21.7% of the sentences have at least one token that appears in at least one fact tuple and at least one condition tuple, expecting tags \"B/I-fYZ\" and \"B/I-cYZ\"; 18.1% of the sentences have at least one token that appears in one condition tuple as a part of subject and in another condition tuple as a part of object, expecting tags \"B/I-c1Z\" and \"B/I-c3Z\". Therefore, we extend the typical one-output sequence labeling to a multi-output design.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Output Module", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Then what is the number of output sequences? We reveal the significant role of relation names in making tuples. If we tagged the relation names out, for each relation name, of tags beginning with \"B-f2p\" as a fact's and \"B-c2p\" as a condition's, the module would generate an output sequence, respectively. Then we extract all possible tuples, whose relation has been specified, from every output sequence. Two observations on the annotated data support this idea: We transform each of the 1,410 tuples into a tag sequence. For the same sentence, if the tuples' relation names are the same, we merge their tag sequences into one and then use the matching function in (Stanovsky et al., 2018) to recover the tuples. First, 0 token has conflicting tags among the 240 merged sequences. Second, the recovery has 0 missing or wrong tuple. So, generating one output sequence and completing the tuples per relation name is practical.", |
|
"cite_spans": [ |
|
{ |
|
"start": 666, |
|
"end": 690, |
|
"text": "(Stanovsky et al., 2018)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Output Module", |
|
"sec_num": "3.2" |
|
}, |
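
{

"text": "The merging check can be sketched as follows (our illustration): tag sequences of tuples that share a relation name are merged position-wise, and the assertion encodes the observation that no token had conflicting tags.\n\nfrom collections import defaultdict\n\ndef merge_by_relation(tuples_with_tags):\n    # tuples_with_tags: list of (relation_name, tag_sequence) pairs for one sentence\n    grouped = defaultdict(list)\n    for rel, tags in tuples_with_tags:\n        grouped[rel].append(tags)\n    merged = {}\n    for rel, seqs in grouped.items():\n        combined = []\n        for toks in zip(*seqs):  # position-wise merge across sequences\n            non_o = [t for t in toks if t != \"O\"]\n            assert len(set(non_o)) <= 1  # no conflicting tags observed on BioCFE\n            combined.append(non_o[0] if non_o else \"O\")\n        merged[rel] = combined\n    return merged",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Multi-Output Module",

"sec_num": "3.2"

},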
|
{ |
|
"text": "The multi-output module has two layers: one is a relation name tagging layer and the other is a tuple completion tagging layer. Relation name tagging (RNT) layer: It consists of feed-forward neural networks (FFNs) and softmax layers. Decoded vectors are fed into the FFNs and the softmax predict the probability distribution of tags on fact and condition, respectively:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Output Module", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "p f i = softmax(FFN f RN T (d i )), (1) p c i = softmax(FFN c RN T (d i )). (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Output Module", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where f is for fact and c for condition. d i denotes the i-th token's vector given by the LSTMd. Now we have two tag sequences, one for fact and the other for condition. As we have argued with one-output, extracting tuples from the \"twooutput\" sequences cannot resolve the tag conflicts, either. Here we extract only the relation names: Here we take condition sequences as an example to describe the details of the method. When predicting the j-th tag sequence, we define the position embedding of the i-th token as follows, representing the relative position to the j-th relation name's tag \"B-c2p\":", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Output Module", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "{r f 1 , r f 2 , \u2022 \u2022 \u2022 , r f n } denotes", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Output Module", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "v c i,j = g emb (r c j , i).", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Multi-Output Module", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Thus, the tag probability distributions of the i-th token in the condition tag sequences are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Output Module", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 p (r c 1 ) i = softmax(FFN c T CT (v c i,1 + d i )), p (r c 2 ) i = softmax(FFN c T CT (v c i,2 + d i )), \u2022 \u2022 \u2022 , p (r c m ) i = softmax(FFN c T CT (v c i,m + d i )).", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Multi-Output Module", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Similarly, we have the following tag distributions for the i-th token in the fact tag sequences:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Output Module", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 p (r f 1 ) i = softmax(FFN f T CT (v f i,1 + d i )), p (r f 2 ) i = softmax(FFN f T CT (v f i,2 + d i )), \u2022 \u2022 \u2022 , p (r f n ) i = softmax(FFN f T CT (v f i,n + d i )),", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Multi-Output Module", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where v f i,j is the position embedding of the i-th token in the j-th fact sequence, representing the relative position to the relation name's tag \"B-f2p\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Output Module", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Finally, we apply the matching function in (Stanovsky et al., 2018) to complete and extract the tuples (i.e., the concepts and/or attributes in the subjects and objects) for each output sequence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 67, |
|
"text": "(Stanovsky et al., 2018)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Output Module", |
|
"sec_num": "3.2" |
|
}, |
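
{

"text": "Putting Eqs. (1)-(5) together, a minimal PyTorch sketch of the multi-output module (our reading, not the authors' released code; the maximum relative distance is our assumption):\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass MultiOutputModule(nn.Module):\n    def __init__(self, d_dec, n_tags, max_dist=100):\n        super().__init__()\n        self.ffn_f_rnt = nn.Linear(d_dec, n_tags)             # FFN^f_RNT, Eq. (1)\n        self.ffn_c_rnt = nn.Linear(d_dec, n_tags)             # FFN^c_RNT, Eq. (2)\n        self.ffn_f_tct = nn.Linear(d_dec, n_tags)             # FFN^f_TCT, Eq. (5)\n        self.ffn_c_tct = nn.Linear(d_dec, n_tags)             # FFN^c_TCT, Eq. (4)\n        self.pos_emb = nn.Embedding(2 * max_dist + 1, d_dec)  # g_emb, Eq. (3)\n        self.max_dist = max_dist\n\n    def _position(self, length, r):\n        # v_{i,j}: embedding of each token's offset from the j-th relation name at index r\n        offsets = torch.arange(length) - r\n        return self.pos_emb(offsets.clamp(-self.max_dist, self.max_dist) + self.max_dist)\n\n    def forward(self, d, fact_rels, cond_rels):\n        # d: (len, d_dec) decoded vectors; *_rels: token indices of \"B-f2p\"/\"B-c2p\" tags\n        p_f = F.softmax(self.ffn_f_rnt(d), dim=-1)            # Eq. (1)\n        p_c = F.softmax(self.ffn_c_rnt(d), dim=-1)            # Eq. (2)\n        fact_seqs = [F.softmax(self.ffn_f_tct(self._position(d.size(0), r) + d), dim=-1)\n                     for r in fact_rels]                      # Eq. (5), one per r^f_j\n        cond_seqs = [F.softmax(self.ffn_c_tct(self._position(d.size(0), r) + d), dim=-1)\n                     for r in cond_rels]                      # Eq. (4), one per r^c_j\n        return p_f, p_c, fact_seqs, cond_seqs",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Multi-Output Module",

"sec_num": "3.2"

},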
|
{ |
|
"text": "Given a sentence s, the loss function of the relation name tagging layer can be written as below:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Function and Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "s RN T = \u2212 N s i=1 (log(p f i,y f i ) + log(p c i,y c i )),", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Loss Function and Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where p f i,y and p c i,y are the probability of predicting y as the tag of the i-th token in the fact and condition tag sequences, respectively. y f i and y c i are the observed tag of the i-th token in the fact and condition tuple, respectively. N s is the length of the sentence s.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Function and Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The loss function of the tuple completion tagging layer is consisted of two parts, loss on fact tuples and loss on condition tuples:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Function and Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 s T CT = s f act + s cond. , s f act = \u2212 N s i=1 n j=1 log(p (r f j ) i,y f i,j ), s cond. = \u2212 N s i=1 m j=1 log(p (r c j ) i,y c i,j ),", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Loss Function and Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where n and m are the number of fact and condition tag sequences for the sentence s, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Function and Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "p (r f j ) i,y f i,j and p (r c j ) i,y c i,j", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Function and Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "are the probability of predicting y f i,j and y c i,j as the tag of the i-th token in the j-th fact and condition tag sequence, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Function and Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The overall loss function for optimization is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Function and Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "= RN T + T CT = s\u2208S ( s RN T + s T CT ),", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Loss Function and Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where S is the set of statement sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Function and Training", |
|
"sec_num": "3.3" |
|
}, |
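
{

"text": "A sketch of Eqs. (6)-(8) for one sentence (tensor shapes and names are our assumptions: p_* are (len, n_tags) probability tensors, y_* are (len,) gold tag indices, and *_seqs are lists over the n fact / m condition output sequences):\n\nimport torch\n\ndef nll(p, y):  # negative log-likelihood of gold tags y under distributions p\n    return -torch.log(p.gather(-1, y.unsqueeze(-1))).sum()\n\ndef mimo_loss(p_f, p_c, y_f, y_c, fact_seqs, cond_seqs, y_fact, y_cond):\n    l_rnt = nll(p_f, y_f) + nll(p_c, y_c)                        # Eq. (6)\n    l_fact = sum(nll(p, y) for p, y in zip(fact_seqs, y_fact))   # Eq. (7), facts\n    l_cond = sum(nll(p, y) for p, y in zip(cond_seqs, y_cond))   # Eq. (7), conditions\n    return l_rnt + l_fact + l_cond                               # Eq. (8), summed over S",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Loss Function and Training",

"sec_num": "3.3"

},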
|
{ |
|
"text": "Training details: On one hand, Equations (6) and 7show that the error signal can be propagated from the RNT/TCT layers to the encoder-decoder model. On the other hand, the RNT layer specifies the relation names, or say, the tokens that have tags \"B/I-f2p\" and \"B/I-c2p\" for each tag sequence in the TCT layer. So we cannot have smooth gradients for back propagation from the TCT layer to the RNT layer. So, in order to have good learning effectiveness, the quality of predicting relation names has been secured beforehand. We pre-train the RNT layer with the multi-input module till the relation name's tag prediction achieves a higherthan-0.8 F1 score. Then we plug the TCT layer onto the RNT layer and train the entire framework to generate the multi-output tag sequences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Function and Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We evaluate the performance of condition/fact tag prediction and tuple extraction by the proposed MIMO model, its variants, and state-of-the-art models on the newly annotated BioCFE dataset and transferred to the BioNLP2013 dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Datasets: Statistics of BioCFE has been given in the Section 2. Additionally, the attribute-related tags take 11.7% and 9.4% of non-\"O\" tags in fact and condition tuples, respectively. So, it is important to distinguish concept and attribute. To the best of our knowledge, it is the first time that conditional information was carefully annotated on biomedical literature. We use the system in Figure 2 to annotate a subset of BioNLP2013 Cancer Genetics (CG) task dataset (N\u00e9dellec et al., 2013) . We have 197 fact tuples and 173 condition tuples. We use this BioNLP dataset as an extra test set for task of fact and condition tuples extraction, but the model will not be trained on this dataset. Validation: The ratio of training:validation:test is 60:8:32. For BioCFE, the evaluation set has 242 fact tuples and 209 condition tuples (on average from 108 sentences). We repeat five times, evaluate the performance, and report average results. Evaluation metrics: For tag prediction, We use standard metrics, precision, recall, and F1 scores. We have similar observations on Micro F1 scores as Macro F1 scores, so we report Macro F1 only. For evaluating tuple extraction, we use pair-wise comparison to match the extracted and groundtruth tuples. We evaluate the correctness on the tuple's five slots using the same metrics. Baselines: We compare with statistical sequence labeling methods: Structured Support Vector Machine (SVM) (Tsochantaridis et al., 2005) and Conditional random field (CRF) (Lafferty et al., 2001) . We compare with a neural sequence labeling method, BiLSTM-LSTMd (Zheng et al., 2017) . We replace its encoder with BERT (Devlin et al., 2018) to make it a more competitive baseline. We also compare against two renown OpenIE systems, Stanford OpenIE (Angeli et al., 2015) and AllenNLP OpenIE (Stanovsky et al., 2018) followed by a condition/fact classification.", |
|
"cite_spans": [ |
|
{ |
|
"start": 472, |
|
"end": 495, |
|
"text": "(N\u00e9dellec et al., 2013)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1431, |
|
"end": 1460, |
|
"text": "(Tsochantaridis et al., 2005)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1496, |
|
"end": 1519, |
|
"text": "(Lafferty et al., 2001)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1586, |
|
"end": 1606, |
|
"text": "(Zheng et al., 2017)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 1642, |
|
"end": 1663, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1813, |
|
"end": 1837, |
|
"text": "(Stanovsky et al., 2018)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 394, |
|
"end": 402, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4.1" |
|
}, |
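
{

"text": "Our reading of the pair-wise tuple matching, as a sketch (assuming an extracted tuple counts as correct only when all five slots match a ground-truth tuple):\n\ndef tuple_prf(extracted, gold):\n    # each tuple: (subj_concept, subj_attr, relation_phrase, obj_concept, obj_attr)\n    matched = sum(1 for t in extracted if t in gold)\n    p = matched / len(extracted) if extracted else 0.0\n    r = matched / len(gold) if gold else 0.0\n    f1 = 2 * p * r / (p + r) if p + r else 0.0\n    return p, r, f1",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Setup",

"sec_num": "4.1"

},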
|
{ |
|
"text": "We enhance statistical sequence labeling models with multi-input signals for fairness, and train them for fact tuple and condition tuple extrac-tion separately. In the neural baselines (BiLSTM-LSTMd and BERT-LSTMd), fact extraction and condition extraction share the encoder-decoder model and use different, proper parameters in the linear-softmax layer. Hyperparameters: The multi-input module has a BiLSTM/BERT encoder and a LSTM decoder. The word embeddings were obtained from GloVe (Pennington et al., 2014) with the dimension size d W E = 50. The language model dimension size ns d LM = 200. The size of POS tag embedding is d P OS = 6. The size of CAP tag embedding is d CAP = 3. The number of LSTM units in the encoding layer is 300. The number of transformer units in the BERT encoding layer is 768.", |
|
"cite_spans": [ |
|
{ |
|
"start": 486, |
|
"end": 511, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In this section, we present overall performance, ablation study, error analysis, and efficiency. Table 1 shows that the proposed multi-input multioutput sequence labeling model with a BERT encoder consistently performs the best over all the baselines on tag prediction and tuple extraction. Compared to BiLSTM-LSTMd, BiLSTM-based MIMO improves F1 score relatively by 7.1% on tag prediction and by 8.8% on tuple extraction; compared to BERT-LSTMd, BERT-based MIMO improve F1 by 4.7% and 6.2% on the two tasks, respectively. Apparently the BERT encoder significantly improves the performance (by 16.9-17.2% on tag prediction and 7.7-10.3% on tuple extraction). And the MIMO design can further improve it. Neural sequence labeling models perform better than OpenIE systems and statistical methods. Neural sequence labeling models are more adaptive to learning structures with the new tag schema. Open IE method plus a condition/fact classification is not effective.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 104, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results on BioCFE", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Compared to BERT-LSTMd, the BERT-based MIMO improves precision and recall relatively by 8.3% and 1.3% on tag prediction; and relatively by 3.1% and 9.3% on tuple extraction, respectively. When the tags were more precisely predicted, the tuple's five slots would be more accurately filled, and we would have more complete tuples.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall Performance", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "We also observe that the improvements on condition's tags/tuples are consistently bigger than the improvements on fact's tag/tuples. It demonstrates that the MIMO design recognizes the role of conditions in the statement sentences better. (Tsochantaridis et al., 2005) 32.68 25.80 28.83 / 32.76, 24.71 47.62 46.15 46.87 / 45.01, 48.72 CRF (Lafferty et al., 2001 ) 60.07 41.92 49.37 / 56.23, 41.87 65.19 62.44 63.78 / 64.07, 63.44 BiLSTM-LSTMd (Zheng et al., 2017 61 Table 2 compares variants of the proposed model to evaluate the effectiveness of the following components: (1) multi-input sequences, such as none, or one (in LM, POS, and CAP), double combination, or triple combination;", |
|
"cite_spans": [ |
|
{ |
|
"start": 239, |
|
"end": 334, |
|
"text": "(Tsochantaridis et al., 2005) 32.68 25.80 28.83 / 32.76, 24.71 47.62 46.15 46.87 / 45.01, 48.72", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 339, |
|
"end": 361, |
|
"text": "(Lafferty et al., 2001", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 362, |
|
"end": 462, |
|
"text": ") 60.07 41.92 49.37 / 56.23, 41.87 65.19 62.44 63.78 / 64.07, 63.44 BiLSTM-LSTMd (Zheng et al., 2017", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 466, |
|
"end": 473, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Overall Performance", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "(2) multi-input encoder model, BiLSTM or BERT;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": "(3) multi-output module, with the RNT layer only (generating one fact tag sequence and one condition tag sequence) or a combination of RNT and TCT layers (generating multiple sequences for each tuple type).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": "Multi-input sequences: When the choices of the encoder model and multi-output layers are specified, we observe that triple combination of input sequences performs better than double combinations and the double combinations win over the sole input. An additional sequence makes a relative F1 improvement by 1.0-2.4%. The triple combination improves F1 relatively by 3.2-4.1%. This demonstrates that the three types of input sequences encode complementary information for learning dependencies in the proposed tag schema. First, the language model learns the dependencies between a token and its predecessors in distant contexts. Having the LM sequence recognizes subjects and objects relative to the relation names and reduces the false positives of \"B/I-X1Z\" and \"B/I-X3Z\". Second, the POS tag encodes the token's syntactic feature. Having the POS sequence improves the precision of tag prediction. For example, verbs and prepositions (e.g., \"in\", \"during\") often act as the relation name of facts and conditions, respectively; conjunction words (e.g., \"that\", \"which\") indicate subordinate clauses, so the noun phrase before the conjunction word is likely to be the subject of the tuple given by the clause. Third, the formerly-detected concepts, attribute names, and phrases are absolutely useful for tagging the slots of subjects and objects. In other words, the tags \"B/I-c\" and \"B/I-a\" in the CAP sequence are strongly associated with the target tags \"B/I-XYc\" and \"B/I-XYa\", respectively. Encoder in the multi-input module: Comparing the middle three columns (BiLSTM-based encoder) and the right-hand three columns (BERTbased encoder), one can easily tell the significant improvement brought by the BERT model. Layers in the multi-output module: If the multioutput models have both RNT and TCT layers, the F1 score is relatively 1.4-5.0% higher than the models that have the RNT layer only. Moreover, the recall is improved relatively by 1.5-9.0%. So the TCT layer, which generates multiple tag sequences for each type of tuple (i.e., fact and condition), plays a very important role in recognizing the multiple tuples from one statement sentence. Table 3 presents the confusion matrices made by the BERT-based MIMO on predicting non-\"O\" tags for facts and conditions, respectively. The columns are predicted tags and the rows are actual ones. Perfect results would be diagonal matrices.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 2154, |
|
"end": 2161, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": "We observe that the numbers at the diagonal are consistently bigger than the numbers on the corresponding row and column. The accuracy scores are 0.905 for predicting fact tags and 0.908 for predicting condition tags. Of the 182 actual \"B-f2p\", the model predicted that 175 were \"B-f2p\"; of the 186 actual \"B-c2p\", it predicted that one was \"I-c1c\" and one was \"I-c3c\". It demonstrates the high accuracy (0.961 and 0.989) of extracting relation names for multi-output generation. The ovals in each confusion matrix present the most significant type of error. Of a small set of actual subjects, the model predicted them as objects, and vise versa, though the fact/condition role and concept role were correctly predicted.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "The dashed circles show the second frequent type of error. Of the actual \"I-f2p\" tokens, the model predicted that 7 were \"B-f2p\"; for the actual \"I-c2p\", it predicted that 6 were \"B-c2p\". Basically, it was because of missing the beginning word of the relational phrases. Of the actual \"B-f3a\" tokens, the model predicted 6 were \"I-f2p\". Future work will aim at improving the prediction of the boundaries of long relational phrases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "All the experiments were conducted on 16 Graphics Cards (GeForce GTX 1080 Ti), where one individual model only used 1 GPU. Each model was trained for 1,000 epochs. For the BiLSTM-LSTMd MIMOs, the pre-training took 2.4 hours and the re-training (TCT layer) took 0.4 hour. For the BERT-LSTMd MIMOs of the best performance, the pre-training took 3.5 hours and the retraining took 0.9 hour. It took 5.7 hours to extract fact and condition tuples from 141 million sentences in the MEDLINE text data. It is comparable with existing approaches in terms of scalability.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Efficiency", |
|
"sec_num": "4.2.4" |
|
}, |
|
{ |
|
"text": "As shown in Table 3 , the BERT-LSTMd MIMO model achieves an F1 score of 0.790 on tuple extraction from BioNLP2013. Note that the model was trained on BioCFE that has no overlapping sentence with BioNLP2013. This score is comparable with the testing F1 score on the BioCFE (0.808), which demonstrates the effectiveness and reliability of the proposed model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 19, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results on BioNLP2013", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Our model improves the F1 score relatively by 4.2% over the best baseline BERT-LSTMd. The improvement on recall is more substantial: It improves recall relatively by 5.8%. It was because of the design of the multi-output module: the TCT layer generates multiple tag sequences based on the relation names predicted by the RNT layer. A token in a statement sentence may have different roles in different tuples of the same type (fact or condition). For example, given the following statement sentence:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results on BioNLP2013", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "\"Immunohistochemical staining of the tumors demonstrated a decreased number of blood vessels in the treatment group versus the controls.\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results on BioNLP2013", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The proposed model is able to find one fact tuple and two condition tuples precisely:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results on BioNLP2013", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "- -Condition 1: (blood vessels,in,treatment group) -Condition 2: (treatment group,versus,controls) Note that the concept \"treatment group\" acts as the object of Condition Tuple 1 (having tags \"B/I-c3c\") and the subject of Condition Tuple 2 (having tags \"B/I-c1c\"). The multi-output design tackled this issue while other models could not. Compared with BioCFE: On BioCFE, the F1 score on condition tuple extraction is a bit higher than that on fact tuple extraction (81.64 vs 79.94). On BioNLP2013, we have the opposite observation (78.58 vs 79.42). They are still comparable but if we look at the error cases, we find that most of the false predictions of condition tuple come from long sentences (having more than 30 words). And 35% of the sentences in BioNLP are long sentences, while only 5% in Bio CFE are long. Long dependency modeling is always challenging for IE, especially condition extraction. We will study it in the future work. being valid in the graph. As we have applied our model to the large MEDLINE dataset, Figure 4 visualizes the fact and condition tuples extracted from four statement sentences about \"cell proliferation\". On the left side, we find (1) \"VPA treatment\" and the \"incubation\" of \"HDLs\" increased cell proliferation, while (2)\"Chlorin e6-PDT\" and the \"inhibition\" of \"MiR-199a-5p\" decreased cell proliferation. On the right, we are aware of the conditions of the factual claims. They describe the methodology of the observation (e.g., \"using\", \"in combination with\") or the context (e.g., \"in\" a specific disease or \"from\" specific animals). In some other cases, we find the temperature and pH values are detected as the conditions of observations.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1026, |
|
"end": 1034, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results on BioNLP2013", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "5 Related Work", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Visualized Case Study", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Information extraction in scientific literature, e.g., computer science, biology and chemistry, has been receiving much attention in recent years. Scien-ceIE in computer science focus on concept recognition and factual relation extraction (Luan et al., 2017; G\u00e1bor et al., 2018; Luan et al., 2018) . Sci-enceIE in biological literature aims at identifying the relationships between biological concepts (i.e., proteins, diseases, drugs and genes) (Kang et al., 2012; Xu et al., 2018) . Rule-based approaches were used in early studies (Rindflesch and Fiszman, 2003; Kang et al., 2012) . Recently, a wide line of neural network models have been proposed and outperformed traditional methods (Wang et al., 2018b; Xu et al., 2018; . Wang et al. (2018b) investigated different kinds of word embeddings on different NLP tasks in the biological domain. employed attentionbased neural networks to extract chemical-protein relations. Xu et al. (2018) used the BiLSTM model to recognize the drug interaction. In our work, we extract biological relational facts as well as their conditions. The condition tuples are essential to interpreting the factual claims.", |
|
"cite_spans": [ |
|
{ |
|
"start": 239, |
|
"end": 258, |
|
"text": "(Luan et al., 2017;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 278, |
|
"text": "G\u00e1bor et al., 2018;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 279, |
|
"end": 297, |
|
"text": "Luan et al., 2018)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 446, |
|
"end": 465, |
|
"text": "(Kang et al., 2012;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 466, |
|
"end": 482, |
|
"text": "Xu et al., 2018)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 534, |
|
"end": 564, |
|
"text": "(Rindflesch and Fiszman, 2003;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 565, |
|
"end": 583, |
|
"text": "Kang et al., 2012)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 689, |
|
"end": 709, |
|
"text": "(Wang et al., 2018b;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 710, |
|
"end": 726, |
|
"text": "Xu et al., 2018;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 729, |
|
"end": 748, |
|
"text": "Wang et al. (2018b)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 925, |
|
"end": 941, |
|
"text": "Xu et al. (2018)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Scientific Information Extraction", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Open IE refers to the extraction of (subject, relation, object)-triples from plain text (Angeli et al., 2015; Stanovsky et al., 2018; Saha et al., 2018; Wang et al., 2018a) . The schema for the relations does not need to be specified in advance. Distant supervision has been widely used because the size of the benchmark data is often limited (Banko et al., 2007; Wu and Weld, 2010) . Stanovsky et al. (2018) proposed supervised neural methods for OpenIE. The idea was to transform annotated tuples into tags and learn via sequence tagging. We create a new tag schema and propose a novel sequence labeling framework.", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 109, |
|
"text": "(Angeli et al., 2015;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 110, |
|
"end": 133, |
|
"text": "Stanovsky et al., 2018;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 134, |
|
"end": 152, |
|
"text": "Saha et al., 2018;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 153, |
|
"end": 172, |
|
"text": "Wang et al., 2018a)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 343, |
|
"end": 363, |
|
"text": "(Banko et al., 2007;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 364, |
|
"end": 382, |
|
"text": "Wu and Weld, 2010)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 385, |
|
"end": 408, |
|
"text": "Stanovsky et al. (2018)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Open-Domain IE", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Statistical models have been studied for long, including Hidden Markov Models (HMM), Support Vector Machine (SVM), and Conditional Random Fields (CRF) (Lafferty et al., 2001; Tsochantaridis et al., 2005; Passos et al., 2014; Luo et al., 2015; Li et al., 2018) . However, these methods rely heavily on hand-crafted features. Then neural network models become popular and obtain more promising performance than traditional statistical methods (Yang and Mitchell, 2017; Zheng et al., 2017; Wang et al., 2019; Yu et al., 2019) . So, we use them as strong baselines.", |
|
"cite_spans": [ |
|
{ |
|
"start": 151, |
|
"end": 174, |
|
"text": "(Lafferty et al., 2001;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 175, |
|
"end": 203, |
|
"text": "Tsochantaridis et al., 2005;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 204, |
|
"end": 224, |
|
"text": "Passos et al., 2014;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 225, |
|
"end": 242, |
|
"text": "Luo et al., 2015;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 243, |
|
"end": 259, |
|
"text": "Li et al., 2018)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 441, |
|
"end": 466, |
|
"text": "(Yang and Mitchell, 2017;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 467, |
|
"end": 486, |
|
"text": "Zheng et al., 2017;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 487, |
|
"end": 505, |
|
"text": "Wang et al., 2019;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 506, |
|
"end": 522, |
|
"text": "Yu et al., 2019)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence Labeling for IE", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We present a new problem to find conditional information in scientific statements. We created a new tag schema for jointly extracting condi-tion and fact tuples from scientific text. We proposed a multi-input multi-output sequence labeling model to utilize results from well-established related tasks and extract an uncertain number of fact(s)/condition(s). Our model yields improvement over all the baselines on a newly annotated dataset BioCFE and a public dataset BioNLP2013. We argue that structured representations of knowledge, such as fact/condition tuple, for scientific statements will enable more intelligent downstream applications. In the future work, we will explore the use of the structured tuples to bridge the gap between text content and knowledge-based applications, such as knowledge-based scientific literature search.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "This work was done when the first author was visiting the University of Notre Dame.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": " Predicted 151 4 4 1 8 1 143 4 1 1 10 1 3 40 1 4 2 4 14 1 1 1 1 1 ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1, |
|
"end": 88, |
|
"text": "Predicted 151 4 4 1 8 1 143 4 1 1 10 1 3 40 1 4 2 4 14 1 1 1 1 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Actual", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "BiLSTM-based Encoder (%) BERT-based Encoder (%)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "BiLSTM-based Encoder (%) BERT-based Encoder (%)", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Table 2: The proposed MIMO that employs (a) multi-input Language ModelsB-f1c I-f1c B-f1a I-f1a B-f2p I-f2p B-f3c I-f3c B-f3a I-f3a", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Table 2: The proposed MIMO that employs (a) multi-input Language ModelsB-f1c I-f1c B-f1a I-f1a B-f2p I-f2p B-f3c I-f3c B-f3a I-f3a", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "B-f1c I-f1c B-f1a I-f1a B-f2p I-f2p B-f3c I-f3c B-f3a I-f3a", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "B-f1c I-f1c B-f1a I-f1a B-f2p I-f2p B-f3c I-f3c B-f3a I-f3a", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Leveraging linguistic structure for open domain information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Melvin Jose Johnson", |
|
"middle": [], |
|
"last": "References Gabor Angeli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Premkumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "344--354", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "References Gabor Angeli, Melvin Jose Johnson Premkumar, and Christopher D Manning. 2015. Leveraging linguis- tic structure for open domain information extraction. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Lan- guage Processing (Volume 1: Long Papers), vol- ume 1, pages 344-354.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Open information extraction from the web", |
|
"authors": [ |
|
{ |
|
"first": "Michele", |
|
"middle": [], |
|
"last": "Banko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Cafarella", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Soderland", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Broadhead", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "IJ-CAI", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "2670--2676", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michele Banko, Michael J Cafarella, Stephen Soder- land, Matthew Broadhead, and Oren Etzioni. 2007. Open information extraction from the web. In IJ- CAI, volume 7, pages 2670-2676.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Semeval-2018 task 7: Semantic relation extraction and classification in scientific papers", |
|
"authors": [ |
|
{ |
|
"first": "Kata", |
|
"middle": [], |
|
"last": "G\u00e1bor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Davide", |
|
"middle": [], |
|
"last": "Buscaldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anne-Kathrin", |
|
"middle": [], |
|
"last": "Schumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Behrang", |
|
"middle": [], |
|
"last": "Qasemizadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifa", |
|
"middle": [], |
|
"last": "Zargayouna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thierry", |
|
"middle": [], |
|
"last": "Charnois", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "SemEval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "679--688", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kata G\u00e1bor, Davide Buscaldi, Anne-Kathrin Schu- mann, Behrang QasemiZadeh, Haifa Zargayouna, and Thierry Charnois. 2018. Semeval-2018 task 7: Semantic relation extraction and classification in sci- entific papers. In SemEval, pages 679-688.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Deep residual learning for image recognition", |
|
"authors": [ |
|
{ |
|
"first": "Kaiming", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiangyu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaoqing", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "770--778", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recog- nition. In CVPR, pages 770-778.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Universal language model fine-tuning for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Howard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "328--339", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeremy Howard and Sebastian Ruder. 2018. Universal language model fine-tuning for text classification. In Proceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 328-339.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Metapad: Meta pattern discovery from massive text corpora", |
|
"authors": [ |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingbo", |
|
"middle": [], |
|
"last": "Shang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taylor", |
|
"middle": [], |
|
"last": "Cassidy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lance", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Kaplan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Hanratty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiawei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "877--886", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Meng Jiang, Jingbo Shang, Taylor Cassidy, Xiang Ren, Lance M Kaplan, Timothy P Hanratty, and Jiawei Han. 2017. Metapad: Meta pattern discovery from massive text corpora. In Proceedings of the 23rd ACM SIGKDD International Conference on Knowl- edge Discovery and Data Mining, pages 877-886. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "The role of \"condition\": A novel scientific knowledge graph representation and construction model", |
|
"authors": [ |
|
{ |
|
"first": "Tianwen", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nitesh", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Chawla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1634--1642", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianwen Jiang, Tong Zhao, Bing Qin, Ting Liu, Nitesh V Chawla, and Meng Jiang. 2019. The role of \"condition\": A novel scientific knowledge graph representation and construction model. In Proceed- ings of the 25th ACM SIGKDD International Con- ference on Knowledge Discovery & Data Mining, pages 1634-1642. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Using rule-based natural language processing to improve disease normalization in biomedical text", |
|
"authors": [ |
|
{ |
|
"first": "Ning", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bharat", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zubair", |
|
"middle": [], |
|
"last": "Afzal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Van Mulligen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Kors", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Journal of the American Medical Informatics Association", |
|
"volume": "20", |
|
"issue": "5", |
|
"pages": "876--881", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ning Kang, Bharat Singh, Zubair Afzal, Erik M van Mulligen, and Jan A Kors. 2012. Using rule-based natural language processing to improve disease nor- malization in biomedical text. Journal of the Amer- ican Medical Informatics Association, 20(5):876- 881.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Non-lexical neural architecture for fine-grained pos tagging", |
|
"authors": [ |
|
{ |
|
"first": "Matthieu", |
|
"middle": [], |
|
"last": "Labeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "L\u00f6ser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Allauzen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "232--237", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthieu Labeau, Kevin L\u00f6ser, and Alexandre Al- lauzen. 2015. Non-lexical neural architecture for fine-grained pos tagging. In Proceedings of the 2015 Conference on Empirical Methods in Natural Lan- guage Processing, pages 232-237.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando Cn", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Lafferty, Andrew McCallum, and Fernando CN Pereira. 2001. Conditional random fields: Prob- abilistic models for segmenting and labeling se- quence data.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Truepie: Discovering reliable patterns in pattern-based information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xikun", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Qu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Hanratty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiawei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1675--1684", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qi Li, Meng Jiang, Xikun Zhang, Meng Qu, Timothy P Hanratty, Jing Gao, and Jiawei Han. 2018. Truepie: Discovering reliable patterns in pattern-based infor- mation extraction. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 1675-1684. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Extracting chemical-protein relations using attention-based neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Sijia", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feichen", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ravikumar", |
|
"middle": [], |
|
"last": "Komandur Elayavilli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanshan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Majid", |
|
"middle": [], |
|
"last": "Rastegar-Mojarad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vipin", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongfang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Database", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sijia Liu, Feichen Shen, Ravikumar Komandur Elayav- illi, Yanshan Wang, Majid Rastegar-Mojarad, Vipin Chaudhary, and Hongfang Liu. 2018. Extract- ing chemical-protein relations using attention-based neural networks. Database, 2018:bay102.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Multi-task identification of entities, relations, and coreferencefor scientific knowledge graph construction", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mari", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proc. Conf. Empirical Methods Natural Language Process. (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Luan, Luheng He, Mari Ostendorf, and Hannaneh Hajishirzi. 2018. Multi-task identification of enti- ties, relations, and coreferencefor scientific knowl- edge graph construction. In Proc. Conf. Empirical Methods Natural Language Process. (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Scientific information extraction with semisupervised neural tagging", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mari", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2641--2651", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Luan, Mari Ostendorf, and Hannaneh Hajishirzi. 2017. Scientific information extraction with semi- supervised neural tagging. In Proceedings of the 2017 Conference on Empirical Methods in Natu- ral Language Processing, pages 2641-2651, Copen- hagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Joint entity recognition and disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Gang", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaojiang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zaiqing", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "879--888", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gang Luo, Xiaojiang Huang, Chin-Yew Lin, and Za- iqing Nie. 2015. Joint entity recognition and disam- biguation. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Pro- cessing, pages 879-888.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "The nature of scientific statements", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Miller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1947, |
|
"venue": "Philosophy of Science", |
|
"volume": "14", |
|
"issue": "3", |
|
"pages": "219--223", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David L Miller. 1947. The nature of scientific state- ments. Philosophy of Science, 14(3):219-223.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Overview of bionlp shared task 2013", |
|
"authors": [ |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "N\u00e9dellec", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Bossy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jin-Dong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jung-Jae", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoko", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sampo", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierre", |
|
"middle": [], |
|
"last": "Zweigenbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the BioNLP Shared Task 2013 Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--7", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Claire N\u00e9dellec, Robert Bossy, Jin-Dong Kim, Jung- Jae Kim, Tomoko Ohta, Sampo Pyysalo, and Pierre Zweigenbaum. 2013. Overview of bionlp shared task 2013. In Proceedings of the BioNLP Shared Task 2013 Workshop, pages 1-7. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Lexicon infused phrase embeddings for named entity resolution", |
|
"authors": [ |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Passos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vineet", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mc-Callum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1404.5367" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandre Passos, Vineet Kumar, and Andrew Mc- Callum. 2014. Lexicon infused phrase embed- dings for named entity resolution. arXiv preprint arXiv:1404.5367.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word representation. In EMNLP, pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "The interaction of domain knowledge and linguistic structure in natural language processing: interpreting hypernymic propositions in biomedical text", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Rindflesch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcelo", |
|
"middle": [], |
|
"last": "Fiszman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Journal of biomedical informatics", |
|
"volume": "36", |
|
"issue": "6", |
|
"pages": "462--477", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas C Rindflesch and Marcelo Fiszman. 2003. The interaction of domain knowledge and linguis- tic structure in natural language processing: inter- preting hypernymic propositions in biomedical text. Journal of biomedical informatics, 36(6):462-477.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Open information extraction from conjunctive sentences", |
|
"authors": [ |
|
{ |
|
"first": "Swarnadeep", |
|
"middle": [], |
|
"last": "Saha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2288--2299", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Swarnadeep Saha et al. 2018. Open information ex- traction from conjunctive sentences. In COLING, pages 2288-2299.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Automated phrase mining from massive text corpora", |
|
"authors": [ |
|
{ |
|
"first": "Jingbo", |
|
"middle": [], |
|
"last": "Shang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jialu", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clare", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Voss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiawei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IEEE Transactions on Knowledge and Data Engineering", |
|
"volume": "30", |
|
"issue": "10", |
|
"pages": "1825--1837", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingbo Shang, Jialu Liu, Meng Jiang, Xiang Ren, Clare R Voss, and Jiawei Han. 2018. Automated phrase mining from massive text corpora. IEEE Transactions on Knowledge and Data Engineering, 30(10):1825-1837.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Supervised open information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Stanovsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "885--895", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gabriel Stanovsky, Julian Michael, Luke Zettlemoyer, and Ido Dagan. 2018. Supervised open information extraction. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), volume 1, pages 885-895.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Trpv5/v6 channels mediate ca2+ influx in jurkat t cells under the control of extracellular ph", |
|
"authors": [ |
|
{ |
|
"first": "Victor", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Tomilin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alena", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Cherezova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuri", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Negulyaev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Svetlana", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Semenova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Journal of cellular biochemistry", |
|
"volume": "117", |
|
"issue": "1", |
|
"pages": "197--206", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victor N Tomilin, Alena L Cherezova, Yuri A Neg- ulyaev, and Svetlana B Semenova. 2016. Trpv5/v6 channels mediate ca2+ influx in jurkat t cells under the control of extracellular ph. Journal of cellular biochemistry, 117(1):197-206.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Large margin methods for structured and interdependent output variables", |
|
"authors": [ |
|
{ |
|
"first": "Ioannis", |
|
"middle": [], |
|
"last": "Tsochantaridis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thorsten", |
|
"middle": [], |
|
"last": "Joachims", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Hofmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yasemin", |
|
"middle": [], |
|
"last": "Altun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Journal of machine learning research", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "1453--1484", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ioannis Tsochantaridis, Thorsten Joachims, Thomas Hofmann, and Yasemin Altun. 2005. Large mar- gin methods for structured and interdependent out- put variables. Journal of machine learning research, 6(Sep):1453-1484.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Open information extraction with meta-pattern discovery in biomedical literature", |
|
"authors": [ |
|
{ |
|
"first": "Xuan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinyin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiawei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 ACM International Conference on Bioinformatics, Computational Biology, and Health Informatics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "291--300", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xuan Wang, Yu Zhang, Qi Li, Yinyin Chen, and Ji- awei Han. 2018a. Open information extraction with meta-pattern discovery in biomedical literature. In Proceedings of the 2018 ACM International Confer- ence on Bioinformatics, Computational Biology, and Health Informatics, pages 291-300. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "A novel unsupervised approach for precise temporal slot filling from incomplete and noisy temporal contexts", |
|
"authors": [ |
|
{ |
|
"first": "Xueying", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haiqiao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiyu", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "The World Wide Web Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3328--3334", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xueying Wang, Haiqiao Zhang, Qi Li, Yiyu Shi, and Meng Jiang. 2019. A novel unsupervised approach for precise temporal slot filling from incomplete and noisy temporal contexts. In The World Wide Web Conference, pages 3328-3334. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "A comparison of word embeddings for the biomedical natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Yanshan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sijia", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naveed", |
|
"middle": [], |
|
"last": "Afzal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Majid", |
|
"middle": [], |
|
"last": "Rastegar-Mojarad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liwei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feichen", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Kingsbury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongfang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of biomedical informatics", |
|
"volume": "87", |
|
"issue": "", |
|
"pages": "12--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yanshan Wang, Sijia Liu, Naveed Afzal, Majid Rastegar-Mojarad, Liwei Wang, Feichen Shen, Paul Kingsbury, and Hongfang Liu. 2018b. A compari- son of word embeddings for the biomedical natural language processing. Journal of biomedical infor- matics, 87:12-20.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Open information extraction using wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Weld", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 48th annual meeting of the association for computational linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "118--127", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fei Wu and Daniel S Weld. 2010. Open information extraction using wikipedia. In Proceedings of the 48th annual meeting of the association for compu- tational linguistics, pages 118-127. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Leveraging biomedical resources in bi-lstm for drug-drug interaction extraction", |
|
"authors": [ |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiufeng", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhehuan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IEEE Access", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "33432--33439", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bo Xu, Xiufeng Shi, Zhehuan Zhao, and Wei Zheng. 2018. Leveraging biomedical resources in bi-lstm for drug-drug interaction extraction. IEEE Access, 6:33432-33439.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Leveraging knowledge bases in lstms for improving machine reading", |
|
"authors": [ |
|
{ |
|
"first": "Bishan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1436--1446", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bishan Yang and Tom Mitchell. 2017. Leveraging knowledge bases in lstms for improving machine reading. In Proceedings of the 55th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), volume 1, pages 1436- 1446.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Tablepedia: Automating pdf table reading in an experimental evidence exploration and analytic system", |
|
"authors": [ |
|
{ |
|
"first": "Wenhao", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zongze", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qingkai", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "The World Wide Web Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3615--3619", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenhao Yu, Zongze Li, Qingkai Zeng, and Meng Jiang. 2019. Tablepedia: Automating pdf table read- ing in an experimental evidence exploration and an- alytic system. In The World Wide Web Conference, pages 3615-3619. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Joint extraction of entities and relations based on a novel tagging scheme", |
|
"authors": [ |
|
{ |
|
"first": "Suncong", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongyun", |
|
"middle": [], |
|
"last": "Bao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuexing", |
|
"middle": [], |
|
"last": "Hao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1227--1236", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suncong Zheng, Feng Wang, Hongyun Bao, Yuexing Hao, Peng Zhou, and Bo Xu. 2017. Joint extrac- tion of entities and relations based on a novel tag- ging scheme. In Proceedings of the 55th Annual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers), volume 1, pages 1227-1236.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": ",-\" , *+,-% \u2026 *+,-' } {./01 \" , ./01 % \u2026 ./01 )" |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Annotation by four steps: (1) merge token(s) into a span; (2) make slots for a new tuple; (3) drag spans into the slots; (4) save annotations." |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "the n relation names (beginning with \"B-f2p\" tag) in fact tuples and {r c 1 , r c 2 , \u2022 \u2022 \u2022 , r c m } denotes the m relation names (beginning with \"B-c2p\" tag) in condition tuples. Tuple completion tagging (TCT) Layer: This layer predicts n fact tag sequences and m condition tag sequences. Each sequence is generated by a FFN and a softmax layer. The FFN obtains the relation name from the RNT layer. The FFN's input also includes the token's vectors from the encoder-decoder model of the multi-input module." |
|
}, |
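The TCT layer described in the caption above lends itself to a small illustration. The following is a minimal, hypothetical PyTorch sketch, not the authors' implementation: the names d_model (encoder-decoder token-vector size), d_rel (relation-representation size from the RNT layer), and n_tags (tag vocabulary size) are all assumptions. It only shows how a single FFN-plus-softmax can emit one tag sequence per detected relation.

```python
# Minimal sketch (assumed names/shapes, not the paper's code) of Tuple
# Completion Tagging: for each relation found by the RNT layer, a
# feed-forward network plus softmax labels every token of the sentence.
import torch
import torch.nn as nn

class TCTLayer(nn.Module):
    def __init__(self, d_model: int, d_rel: int, n_tags: int):
        super().__init__()
        # The FFN consumes a token vector concatenated with a relation vector.
        self.ffn = nn.Sequential(
            nn.Linear(d_model + d_rel, d_model),
            nn.ReLU(),
            nn.Linear(d_model, n_tags),
        )

    def forward(self, token_vecs: torch.Tensor, rel_vecs: torch.Tensor):
        # token_vecs: (batch, seq_len, d_model) from the multi-input encoder-decoder
        # rel_vecs:   (batch, n_rels, d_rel), one vector per relation from the RNT layer
        b, t, _ = token_vecs.shape
        n = rel_vecs.shape[1]
        tok = token_vecs.unsqueeze(1).expand(b, n, t, token_vecs.shape[-1])
        rel = rel_vecs.unsqueeze(2).expand(b, n, t, rel_vecs.shape[-1])
        logits = self.ffn(torch.cat([tok, rel], dim=-1))
        # One tag distribution per token and per relation: n_rels tag sequences.
        return logits.softmax(dim=-1)  # (batch, n_rels, seq_len, n_tags)
```

In this reading, running the module once over the n fact-relation vectors and once over the m condition-relation vectors yields the n + m output tag sequences the caption mentions.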
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "2: The proposed MIMO that employs (a) multi-input Language Models, POS tags, and Concept-Attribute-Phrase sequences, (b) multi-output tag sequences, (c) BERT-based encoder performs the best on tuple extraction." |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Confusion matrices on predicting fact tags (Top) and condition tags (Bottom) in BioCFE data." |
|
}, |
|
"FIGREF6": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Structuring tuples detected from four statement sentences that mention \"cell proliferation\" into a snapshot of scientific knowledge graph with fact tuples on the left and condition tuples on the right." |
|
}, |
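The caption above describes assembling fact and condition tuples into a knowledge-graph snapshot. Below is a rough, hypothetical sketch of that assembly; the tuple format, the build_snapshot name, and the example strings (taken from the Tomilin et al. (2016) sentence cited in the references) are illustrative assumptions, not the paper's code or data.

```python
# Toy sketch (assumed tuple format) of building a knowledge-graph snapshot:
# fact and condition tuples from the same statement are linked by statement id.
from collections import defaultdict

def build_snapshot(statements):
    """statements: list of dicts with (subject, relation, object) triples, e.g.
    {"facts": [("TRPV5/V6 channels", "mediate", "Ca2+ influx")],
     "conds": [("Ca2+ influx", "in", "Jurkat T cells")]}"""
    edges = defaultdict(list)
    for stmt_id, stmt in enumerate(statements):
        for subj, rel, obj in stmt["facts"]:
            edges[(subj, obj)].append(("fact", rel, stmt_id))
        for subj, rel, obj in stmt["conds"]:
            # Condition edges qualify the fact edges of the same statement.
            edges[(subj, obj)].append(("condition", rel, stmt_id))
    return dict(edges)
```

Keying every edge by its statement id keeps each fact attached to the conditions under which it was asserted, which is the point of the snapshot shown in the figure.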
|
"TABREF1": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Prec. Rec. F1 / F1 F act , F1 Cond. Prec. Rec. F1 / F1 F act , F1 Cond.", |
|
"num": null, |
|
"content": "<table><tr><td>Methods</td><td/><td colspan=\"2\">Tag Prediction (%)</td><td colspan=\"2\">Tuple Extraction(%)</td></tr><tr><td>Allennlp OpenIE (Stanovsky et al., 2018)</td><td>-</td><td>-</td><td>-</td><td>42.60 38.22</td><td>40.29 / -, -</td></tr><tr><td>Stanford OpenIE (Angeli et al., 2015)</td><td>-</td><td>-</td><td>-</td><td>47.11 41.62</td><td>44.19 / -, -</td></tr><tr><td>Structured SVM</td><td/><td/><td/><td/><td/></tr></table>" |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": ".0873.41 / 76.01, 70.75 81.06 80.53 80.79 / 79.94, 81.64 Table 1: The proposed MIMO outperforms existing methods on tag prediction and tuple extraction in the BioCFE dataset. The MIMO with BERT-based encoder performs the best. Higher score performs better.", |
|
"num": null, |
|
"content": "<table><tr><td/><td>.00 56.26 58.53 / 65.16, 51.78 71.57 66.55 68.97 / 69.51, 68.41</td></tr><tr><td>BERT-LSTMd</td><td>70.07 70.19 70.13 / 74.30, 65.88 78.64 73.67 76.08 / 76.14, 75.99</td></tr><tr><td>MIMO (BiLSTM based)</td><td>67.80 58.24 62.66 / 66.67, 58.58 75.35 74.67 75.01 / 74.91, 75.10</td></tr><tr><td>MIMO (BERT based)</td><td>75.91 71</td></tr></table>" |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "", |
|
"num": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF6": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "The BERT-LSTMd MIMO model performs the best on tuple extraction in BioNLP2013.", |
|
"num": null, |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |