|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:06:30.603745Z" |
|
}, |
|
"title": "Entity-Enriched Neural Models for Clinical Question Answering", |
|
"authors": [ |
|
{

"first": "Bhanu",

"middle": [
"Pratap",
"Singh"
],

"last": "Rawat",

"suffix": "",

"affiliation": {},

"email": "brawat@umass.edu"

},
|
{ |
|
"first": "Wei-Hung", |
|
"middle": [], |
|
"last": "Weng", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "MIT-IBM Watson AI Lab", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "So", |
|
"middle": [], |
|
"last": "Yeon", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "MIT-IBM Watson AI Lab", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Preethi", |
|
"middle": [], |
|
"last": "Raghavan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "MIT-IBM Watson AI Lab", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Szolovits", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "MIT-IBM Watson AI Lab", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We explore state-of-the-art neural models for question answering on electronic medical records and improve their ability to generalize better on previously unseen (paraphrased) questions at test time. We enable this by learning to predict logical forms as an auxiliary task along with the main task of answer span detection. The predicted logical forms also serve as a rationale for the answer. Further, we also incorporate medical entity information in these models via the ERNIE (Zhang et al., 2019a) architecture. We train our models on the large-scale emrQA dataset and observe that our multi-task entity-enriched models generalize to paraphrased questions \u223c 5% better than the baseline BERT model.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We explore state-of-the-art neural models for question answering on electronic medical records and improve their ability to generalize better on previously unseen (paraphrased) questions at test time. We enable this by learning to predict logical forms as an auxiliary task along with the main task of answer span detection. The predicted logical forms also serve as a rationale for the answer. Further, we also incorporate medical entity information in these models via the ERNIE (Zhang et al., 2019a) architecture. We train our models on the large-scale emrQA dataset and observe that our multi-task entity-enriched models generalize to paraphrased questions \u223c 5% better than the baseline BERT model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The field of question answering (QA) has seen significant progress with several resources, models and benchmark datasets. Pre-trained neural language encoders like BERT (Devlin et al., 2019) and its variants (Seo et al., 2016; Zhang et al., 2019b) have achieved near-human or even better performance on popular open-domain QA tasks such as SQuAD 2.0 (Rajpurkar et al., 2016) . While there has been some progress in biomedical QA on medical literature (\u0160uster and Daelemans, 2018; Tsatsaronis et al., 2012) , existing models have not been similarly adapted to clinical domain on electronic medical records (EMRs).", |
|
"cite_spans": [ |
|
{ |
|
"start": 169, |
|
"end": 190, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 208, |
|
"end": 226, |
|
"text": "(Seo et al., 2016;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 227, |
|
"end": 247, |
|
"text": "Zhang et al., 2019b)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 350, |
|
"end": 374, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 451, |
|
"end": 479, |
|
"text": "(\u0160uster and Daelemans, 2018;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 480, |
|
"end": 505, |
|
"text": "Tsatsaronis et al., 2012)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Community-shared large-scale datasets like em-rQA (Pampari et al., 2018) allow us to apply stateof-the-art models, establish benchmarks, innovate and adapt them to clinical domain-specific needs. emrQA enables question answering from electronic medical records (EMRs) where a question is asked by a physician against a patient's medical record (clinical notes). Thus, we adapt these models for EMR QA while focusing on model generalization via the following. (1) learning to predict the logical form (a structured semantic representation that captures the answering needs corresponding to a natural language question) along with the answer and (2) incorporating medical entity embeddings into models for EMR QA. We now examine the motivation behind these.", |
|
"cite_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 72, |
|
"text": "(Pampari et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A physician interacting with a QA system on EMRs may ask the same question in several different ways; a physician may frame a question as: \"Is the patient allergic to penicillin?\" whereas the other could frame it as \"Does penicillin cause any allergic reactions to the patient?\". Since paraphrasing is a common form of generalization in natural language processing (NLP) (Bhagat et al., 2009 ), a QA model should be able to generalize well to such paraphrased question variants that may not be seen during training (and avoid simply memorizing the questions). However, current state-of-the-art models do not consider the use of meta-information such as the semantic parse or logical form of the questions in unstructured QA. In order to give the model the ability to understand the semantic information about answering needs of a question, we frame our problem in a multitask learning setting where the primary task is extractive QA and the auxiliary task is the logical form prediction of the question.", |
|
"cite_spans": [ |
|
{ |
|
"start": 371, |
|
"end": 391, |
|
"text": "(Bhagat et al., 2009", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Fine-tuning on medical copora (MIMIC-III, PubMed (Johnson et al., 2016; Lee et al., 2020) ) helps models like BERT align their representations according to medical vocabulary (since they are previously trained on open-domain corpora such as WikiText (Zhu et al., 2015) ). However, another challenge for developing EMR QA models is that different physicians can use different medical terminology to express the same entity; e.g., \"heart attack\" vs. \"myocardial infarction\". Mapping these phrases to the same UMLS semantic type 1 as disease or syndrome (dsyn) provides common information between such medical terminologies. Incorporating such entity information about tokens in the context and question can further improve the performance of QA models for the clinical domain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 71, |
|
"text": "(MIMIC-III, PubMed (Johnson et al., 2016;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 72, |
|
"end": 89, |
|
"text": "Lee et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 250, |
|
"end": 268, |
|
"text": "(Zhu et al., 2015)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our contributions are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "1. We establish state-of-the-art benchmarks for EMR QA on a large clinical question answering dataset, emrQA (Pampari et al., 2018) 2. We demonstrate that incorporating an auxiliary task of predicting the logical form of a question helps the proposed models generalize well over unseen paraphrases, improving the overall performance on emrQA by \u223c 5% over BERT (Devlin et al., 2019) and by \u223c 3.5% over clinicalBERT (Alsentzer et al., 2019) . We support this hypothesis by running our proposed model over both emrQA and another clinical QA dataset, MADE (Jagannatha et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 131, |
|
"text": "(Pampari et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 381, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 414, |
|
"end": 438, |
|
"text": "(Alsentzer et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 552, |
|
"end": 577, |
|
"text": "(Jagannatha et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "3. The predicted logical form for unseen paraphrases helps in understanding the model better and provides a rationale (explanation) for why the answer was predicted for the provided question. This information is critical in clinical domain as it provides an accompanying answer justification for clinicians.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "4. We incorporate medical entity information by including entity embeddings via the ERNIE (Zhang et al., 2019a) architecture (Zhang et al., 2019a) and observe that the model accuracy and ability to generalize goes up by \u223c 3% over BERT base (Devlin et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 111, |
|
"text": "(Zhang et al., 2019a)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
|
{ |
|
"start": 240, |
|
"end": 261, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We formulate the EMR QA problem as a reading comprehension task. Given a natural language question (asked by a physician) and a context, where the context is a set of contiguous sentences from a patient's EMR (unstructured clinical notes), the task is to predict the answer span from the given context. Along with the (question, context, answer) triplet, also available as input are clinical entities extracted from the question and context. Also available as input is the, logical form (LF) that is a structured representation that captures answering needs of the question through entities, attributes and relations required to be in the answer (Pampari et al., 2018) . A question may have multiple paraphrases where all paraphrases map to the same LF (and the same answer, fig. 1 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 646, |
|
"end": 668, |
|
"text": "(Pampari et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 775, |
|
"end": 781, |
|
"text": "fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2" |
|
}, |
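{
"text": "To make the input and output of this formulation concrete, the following is a minimal, hypothetical example of a single training instance; the field names, character offsets and the logical form string are illustrative assumptions, not the exact emrQA schema.\n\n# Hypothetical EMR QA training instance (illustrative only).\nexample = {\n    'question': 'Is the patient allergic to penicillin?',\n    'context': 'Allergies: penicillin (rash). No known food allergies.',\n    'answer_span': (11, 28),  # character offsets of the answer in the context\n    'entities': {'penicillin': 'clnd', 'rash': 'sosy'},  # token -> UMLS semantic type\n    'logical_form': 'AllergyEvent (|medication|) [reaction=x]'  # shared by all paraphrases\n}",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Problem Formulation",
"sec_num": "2"
},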
|
{ |
|
"text": "In this section, we briefly describe BERT (Devlin et al., 2019) , ERNIE (Zhang et al., 2019a ) and our proposed model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 42, |
|
"end": 63, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 72, |
|
"end": 92, |
|
"text": "(Zhang et al., 2019a", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "BERT (Devlin et al., 2019) uses multi-layer bidirectional Transformer (Vaswani et al., 2017) networks to encode contextualised language representations. BERT representations are learned from two tasks: masked language modeling (Taylor, 1953) and next sentence prediction task. We chose BERT model as pre-trained BERT models can be finetuned with just one additional inference layer and it achieved state-of-the-art results for a wide range of tasks such as question answering, such as SQuAD (Rajpurkar et al., 2016 (Rajpurkar et al., , 2018 , and multiple language inference tasks, such as MultiNLI (Williams et al., 2017) . clinicalBERT (Alsentzer et al., 2019) yielded superior performance on clinical-related NLP tasks such as i2b2 named entity recognition (NER) challenges (Uzuner et al., 2011) . It was created by further fine-tuning of BERT base with biomedical and clinical corpus (MIMIC-III) (Johnson et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 26, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 70, |
|
"end": 92, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 227, |
|
"end": 241, |
|
"text": "(Taylor, 1953)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 491, |
|
"end": 514, |
|
"text": "(Rajpurkar et al., 2016", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 515, |
|
"end": 540, |
|
"text": "(Rajpurkar et al., , 2018", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 599, |
|
"end": 622, |
|
"text": "(Williams et al., 2017)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 638, |
|
"end": 662, |
|
"text": "(Alsentzer et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 777, |
|
"end": 798, |
|
"text": "(Uzuner et al., 2011)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 900, |
|
"end": 922, |
|
"text": "(Johnson et al., 2016)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bidirectional Encoder Representations from Transformers (BERT)", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We adopt the ERNIE framework (Zhang et al., 2019a) to integrate the entity-level clinical concept information into the BERT architecture, which", |
|
"cite_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 50, |
|
"text": "(Zhang et al., 2019a)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Enhanced Language Representation with Informative Entities (ERNIE)", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "has not yet been explored in the previous works. Figure 2 : The network architecture of our multi-task learning question answering model (M-cERNIE). The question and context are provided to a multi-head attention model (orange) and are also passed through MetaMap to extract clinical entities which are passed through a separate multi-head attention (yellow). The token and entity representations are then passed through an information fusion layer (blue) to extract entity-enriched token representations which are then used for answer span prediction. The pooled sequence representation from the information fusion layer is passed through logical form inference layer to predict the logical form.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 49, |
|
"end": 57, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Enhanced Language Representation with Informative Entities (ERNIE)", |
|
"sec_num": "3.2" |
|
}, |
|
|
{ |
|
"text": "ERNIE has shown significant improvement in different entity typing and relation classification tasks, as it utilises the extra entity information which is provided from knowledge graphs. ERNIE uses BERT for extracting contextualized token embeddings and a multi-head attention model to generate entity embeddings. These two set of embeddings are aligned and provided as an input to an information fusion layer which provides entity-enriched token embeddings. For a token (w j ) and its aligned entity (e k = f (w j )), the information fusion process is as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Enhanced Language Representation with Informative Entities (ERNIE)", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h j = \u03c3(W (i) t w (i) j + W (i) e e (i) k + b (i) )", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Enhanced Language Representation with Informative Entities (ERNIE)", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Here h j represents the entity enriched token embedding, \u03c3 is the non-linear activation function, W t refers to an affine layer for token embeddings and W e refers to an affine layer for entity embeddings. For the tokens without corresponding entities, the information fusion process becomes:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Enhanced Language Representation with Informative Entities (ERNIE)", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h j = \u03c3(W (i) t w (i) j + b (i) )", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Enhanced Language Representation with Informative Entities (ERNIE)", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Initially, each entity embedding is assigned randomly and is fine-tuned along with token embeddings throughout the training procedure. The ERNIE architecture would be applicable to the model even if the logical forms are not available.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Enhanced Language Representation with Informative Entities (ERNIE)", |
|
"sec_num": "3.2" |
|
}, |
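{
"text": "As a minimal sketch of the information fusion step in Equations (1) and (2), the PyTorch module below adds an aligned entity embedding to each token embedding through two affine layers and a non-linearity; the dimensions, the GELU activation and the convention that entity id 0 marks tokens without an aligned entity are our assumptions rather than the exact ERNIE implementation.\n\nimport torch.nn as nn\n\nclass InformationFusion(nn.Module):\n    # Sketch of Eq. (1)/(2): h_j = sigma(W_t w_j + W_e e_k + b), where the entity\n    # term is dropped for tokens that have no aligned entity.\n    def __init__(self, token_dim, entity_dim, num_entity_types, hidden_dim):\n        super().__init__()\n        self.entity_emb = nn.Embedding(num_entity_types + 1, entity_dim, padding_idx=0)\n        self.w_t = nn.Linear(token_dim, hidden_dim)\n        self.w_e = nn.Linear(entity_dim, hidden_dim, bias=False)\n        self.act = nn.GELU()\n\n    def forward(self, token_states, entity_ids):\n        # token_states: (batch, seq_len, token_dim); entity_ids: (batch, seq_len),\n        # with id 0 for tokens that were not aligned to any clinical entity.\n        ent = self.w_e(self.entity_emb(entity_ids))\n        mask = (entity_ids > 0).unsqueeze(-1).float()\n        return self.act(self.w_t(token_states) + mask * ent)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Enhanced Language Representation with Informative Entities (ERNIE)",
"sec_num": "3.2"
},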
|
{ |
|
"text": "In order to improve the ability of a QA model to generalize better over paraphrases, it helps to provide the model information about the logical form that links these paraphrases. Since the answer to all the paraphrased questions is the same (and hence, logical form is the same), we constructed a multitask learning framework to incorporate the logical form information into the model. Thus, along with predicting the answer span, we added an auxiliary task to also predict the corresponding logical form of the question. Multi-task learning provides an inductive bias to enhance the primary task's performance via auxiliary tasks . In our setting, the primary task is span detection of the answer and the auxiliary task is logical form prediction for both emrQA and MADE (both datasets are explained in detail in \u00a7 4). The final loss for our model is defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning for Extractive QA", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L model = \u03c9L lf + (1 \u2212 \u03c9)L span ,", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Multi-task Learning for Extractive QA", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where \u03c9 is the weightage given to the loss of aux-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning for Extractive QA", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "illary task (L lf ), logical form prediction. L span", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning for Extractive QA", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "is loss for answer span prediction and L model is the final loss for our proposed model. The multitask learning model can work with both BERT and ERNIE as the base model. Figure 2 depicts the proposed multi-task model to predict both the answer and logical form given a question and ERNIE architecture that is used to learn entity-enriched token embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 179, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Multi-task Learning for Extractive QA", |
|
"sec_num": "3.3" |
|
}, |
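{
"text": "As a minimal sketch of Equation (3), the function below combines the answer span loss and the logical form classification loss with the weight \u03c9; the use of cross-entropy over start/end positions and over logical form classes, and the default weight value, are our assumptions about a typical BERT-style implementation rather than the exact training code.\n\nimport torch.nn.functional as F\n\ndef multitask_loss(start_logits, end_logits, lf_logits,\n                   start_positions, end_positions, lf_labels, omega=0.3):\n    # L_span: standard extractive QA loss over the answer start/end positions.\n    span_loss = 0.5 * (F.cross_entropy(start_logits, start_positions) +\n                       F.cross_entropy(end_logits, end_positions))\n    # L_lf: logical form prediction from the pooled sequence representation.\n    lf_loss = F.cross_entropy(lf_logits, lf_labels)\n    # Eq. (3): L_model = omega * L_lf + (1 - omega) * L_span\n    return omega * lf_loss + (1 - omega) * span_loss",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Multi-task Learning for Extractive QA",
"sec_num": "3.3"
},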
|
{ |
|
"text": "We used emrQA 2 and MADE 3 datasets for our experiments. We provide a brief summary of each dataset and the methodology followed to split these datasets into train and test sets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "emrQA The emrQA corpus (Pampari et al., 2018) is the only community-shared clinical QA dataset that consists of questions, posed by physicians against electronic medical records (EMRs) of a patient, along with their answers. The dataset was developed by leveraging existing annotations available for other clinical natural language processing (NLP) tasks (i2b2 challenge datasets (Uzuner et al., 2011) ). It is a credible resource for clinical QA as logical forms that are generated by a physician help slot fill question templates and extract corresponding answers from annotated notes. Multiple question templates can be mapped to the same logical form (LF), as shown in Table 1 , and are referred to as paraphrases of each other. The emrQA corpus has over 1M + question, logical form, and answer/evidence triplets, an example of a context, question, its logical form and a paraphrase is shown in Fig 1. The evidences are the sentences from the clinical note that are relevant to a particular question. There are total 30 logical forms in the emrQA dataset 4 . MADE MADE 1.0 (Jagannatha et al., 2019) dataset was hosted as an adverse drug reactions (ADRs) and medication extraction challenge from EMRs. This dataset was converted into a QA dataset by following the same procedure as enumerated in the literature of emrQA (Pampari et al., 2018) . MADE QA dataset is smaller than emrQA, as emrQA consists of multiple datasets taken from i2b2 (Uzuner et al., 2011) whereas MADE only has specific relations and entity mentions to that of ADRs and medications. This resulted in a clinical QA dataset which has different properties as compared to emrQA. MADE also has lesser number of logical forms (8 LFs) as compared to emrQA because of fewer entities and relations. The 8 LFs for MADE are provided in Appendix B.", |
|
"cite_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 45, |
|
"text": "(Pampari et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 380, |
|
"end": 401, |
|
"text": "(Uzuner et al., 2011)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1077, |
|
"end": 1102, |
|
"text": "(Jagannatha et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1323, |
|
"end": 1345, |
|
"text": "(Pampari et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1442, |
|
"end": 1463, |
|
"text": "(Uzuner et al., 2011)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 673, |
|
"end": 680, |
|
"text": "Table 1", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 899, |
|
"end": 905, |
|
"text": "Fig 1.", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The emrQA dataset is generated using a semiautomated process that normalizes real physician questions to create question templates, associates expert annotated logical forms with each template and slot fills them using annotations for various NLP tasks from i2b2 challenge datasets (for e.g., fig. 1 ). emrQA is rich in paraphrases as physicians often tend to express the same information need in different ways. As shown in Table. 1, all paraphrases of a question map to the same logical form. Thus, if a model has observed some of the paraphrases it should be able to generalize to the others effectively with the help of their shared logical form \"MedicationEvent (|medication|) [dosage=x]\". In order to simulate this, and test the true capability of the model to generalize to unseen paraphrased questions, we create a splitting scheme and refer to it as paraphraselevel split.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 293, |
|
"end": 299, |
|
"text": "fig. 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 425, |
|
"end": 431, |
|
"text": "Table.", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Train/test splits", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The basic idea is that some of question templates would be observed by the model during training and remaining would be used during validation and testing. The steps taken for creating this split are enumerated below:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase-level split", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "1. First, the clinical notes are separated into train, val and test sets. Then the question, logical form and context triplets are generated for each set resulting in the full dataset. Here the context is the set of contiguous sentences from the EMR.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase-level split", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "2. Then for each logical form (LF), 70% of its corresponding question templates are chosen for train dataset and the rest are kept for validation and test dataset. Considering the LF shown in Table 1 , four of the question templates (QT tr ) would be assigned for training and two (QT v/t ) of them would be assigned for validation/testing. So any sample in training dataset whose question is generated from the question template set Q v/t would be discarded. Similarly, any sample with a question generated from the question template set Q tr would be discarded.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 199, |
|
"text": "Table 1", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Paraphrase-level split", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "3. To compare the generalizability performance of our model, we keep the training dataset with both set of question templates (QT tr + QT v/t ) as well. Essentially, a baseline model which has observed all the question templates (QT tr + QT v/t ) should be able to perform better on the QT v/t set as compared to a model which has only observed QT tr set. This comparison would help us in measuring the improvement in performance with the help of logical forms even when a set of question templates are not observed by the model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase-level split", |
|
"sec_num": null |
|
}, |
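{
"text": "A minimal sketch of the paraphrase-level split enumerated above; the 70% ratio follows step 2, while the data structures (a mapping from logical forms to their question templates, and a template id stored with each sample) are our assumptions.\n\nimport random\n\ndef paraphrase_level_split(lf_to_templates, seed=0):\n    # For each logical form, keep ~70% of its question templates for training\n    # and hold out the rest for validation/testing (step 2 above).\n    rng = random.Random(seed)\n    train_templates, heldout_templates = set(), set()\n    for lf, templates in lf_to_templates.items():\n        templates = list(templates)\n        rng.shuffle(templates)\n        cut = int(0.7 * len(templates))\n        train_templates.update(templates[:cut])\n        heldout_templates.update(templates[cut:])\n    return train_templates, heldout_templates\n\ndef filter_samples(samples, allowed_templates):\n    # Discard any (question, context, answer) sample whose question was generated\n    # from a template outside the allowed set (steps 2-3 above).\n    return [s for s in samples if s['template_id'] in allowed_templates]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Paraphrase-level split",
"sec_num": null
},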
|
{ |
|
"text": "The dataset statistics for both emrQA and MADE are shown in Table 2 . The training set with both question template sets (QT tr + QT v/t ) is shown with '(r)' appended as suffix, as it is essentially a random split, whereas the training set with the question template (QT tr ) is appended with '(pl)' for paraphrase-level split. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 67, |
|
"text": "Table 2", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Paraphrase-level split", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this section, we briefly discuss the experimental settings, clinical entity extraction method, implementation details of our proposed model and evaluation metrics for our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "As a reading comprehension style task, the model has to identify the span of the answer given the question-context pair. For both emrQA and MADE dataset, the span is marked as the answer to the question and the sentence is marked as the evidence. Hence, we perform extractive question answering at two levels: sentence and paragraph.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Sentence setting: For this setting, the evidence sentence which contains the answer span is provided as the context to the question and the model has to predict the span of the answer, given the question.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Paragraph setting: Clinical notes are noisy and often contain incomplete sentences, lists and embedded tables making it difficult to segment paragraphs in notes. Hence, we decided to define the context as evidence sentence and 15 \u2212 20 sentences around it. We randomly chose the length of the paragraph (l para ) and another number less than the length of the paragraph (l pre < l para ). We chose l pre contiguous sentences which exist prior to the evidence sentence in the EMR and (l para \u2212l pre ) sentences after the evidence sentence. We adopted this strategy because the model could have benefited from the information that the evidence sentence is exactly in the middle of a fixed length paragraph.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "The model has to predict the span of the answer from the l para sentences long paragraph (context) given the question. The datasets are appended by '-p' and '-s' for paragraph and sentence settings respectively. The sentence setting is a relatively easier setting, for the model, compared to the paragraph setting because the scope of the answer is narrowed down to lesser number of tokens and there is less noise. For both settings, as also mentioned in \u00a7 4, we kept the train set where all the question templates (paraphrases) are observed by the model during training and that is referred with '(r)' prefix, suggesting 'random' selection and no filtering based on question templates (paraphrases). All these dataset abbreviations are shown in the first column of Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 766, |
|
"end": 773, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "5.1" |
|
}, |
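{
"text": "A minimal sketch of the paragraph-setting context construction described above: the paragraph length l_para and the number of preceding sentences l_pre are sampled so that the evidence sentence is not always centered in the context. The 15-20 sentence range comes from the text; the function signature and the inclusion of the evidence sentence inside the sampled window are our assumptions.\n\nimport random\n\ndef build_paragraph_context(sentences, evidence_idx, rng=random):\n    # Sample the paragraph length l_para and an offset l_pre < l_para, then take\n    # l_pre sentences before the evidence sentence and the remaining ones after it,\n    # so the evidence sentence does not always sit in the middle of the context.\n    l_para = rng.randint(15, 20)\n    l_pre = rng.randint(0, l_para - 1)\n    start = max(0, evidence_idx - l_pre)\n    end = min(len(sentences), evidence_idx + (l_para - l_pre))\n    return ' '.join(sentences[start:end])",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental Setting",
"sec_num": "5.1"
},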
|
{ |
|
"text": "MetaMap (Aronson, 2001) uses a knowledgeintensive approach to discover different clinical concepts referred to in the text according to unified medical language system (UMLS) (Bodenreider, 2004) . The clinical ontologies, such as SNOMED (Spackman et al., 1997) and RxNorm (Liu et al., 2005) , embedded in MetaMap are quite useful in extracting \u223c 127 entities across diagnosis, medication, procedure and sign/symptoms. We shortlisted these entities (semantic types) by mapping them to the entities which were used for creating logical forms of the questions as these are the main entities for which the question has been posed. The selected entities are: acab, aggp, anab, anst, bpoc, cgab, clnd, diap, emod, evnt, fndg, inpo, lbpr, lbtr, phob, qnco, sbst, sosy and topp. Their descriptions are provided in Appendix C.", |
|
"cite_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 194, |
|
"text": "(Bodenreider, 2004)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 237, |
|
"end": 260, |
|
"text": "(Spackman et al., 1997)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 272, |
|
"end": 290, |
|
"text": "(Liu et al., 2005)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extracting Entity Information", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "These filtered entities (Table 7) , extracted from MetaMap, are provided to ERNIE. A separate embedding space is defined for the entity embeddings which are passed through a multi-head attention layer (Vaswani et al., 2017) before interacting with token embeddings in the information fusion layer. The entity-enriched token embeddings are then used to predict the span of the answer from the context. We fine-tuned these entity embeddings along with the token embeddings, as opposed to using learned entities and not fine-tuning during downstream tasks (Zhang et al., 2019a) . The architecture is illustrated in Fig 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 201, |
|
"end": 223, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 553, |
|
"end": 574, |
|
"text": "(Zhang et al., 2019a)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 24, |
|
"end": 33, |
|
"text": "(Table 7)", |
|
"ref_id": "TABREF15" |
|
}, |
|
{ |
|
"start": 612, |
|
"end": 617, |
|
"text": "Fig 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Extracting Entity Information", |
|
"sec_num": "5.2" |
|
}, |
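{
"text": "A minimal sketch of how the shortlisted semantic types could be mapped to the entity-id sequence consumed by the entity embedding layer; the annotation format (character-offset spans labelled with a semantic type) and the id assignment are our assumptions about the pipeline, not MetaMap's actual output format.\n\nSELECTED_TYPES = ['acab', 'aggp', 'anab', 'anst', 'bpoc', 'cgab', 'clnd', 'diap',\n                  'emod', 'evnt', 'fndg', 'inpo', 'lbpr', 'lbtr', 'phob', 'qnco',\n                  'sbst', 'sosy', 'topp']\nTYPE_TO_ID = {t: i + 1 for i, t in enumerate(SELECTED_TYPES)}  # 0 = no entity\n\ndef align_entities(token_offsets, entity_spans):\n    # token_offsets: (start, end) character offsets of each wordpiece token.\n    # entity_spans: (start, end, semantic_type) spans, filtered to SELECTED_TYPES.\n    ids = [0] * len(token_offsets)\n    for start, end, sem_type in entity_spans:\n        if sem_type not in TYPE_TO_ID:\n            continue\n        for i, (ts, te) in enumerate(token_offsets):\n            if ts < end and te > start:  # token overlaps the entity span\n                ids[i] = TYPE_TO_ID[sem_type]\n    return ids",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Extracting Entity Information",
"sec_num": "5.2"
},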
|
{ |
|
"text": "The BERT model was released with pre-trained weights as BERT base and BERT large . BERT base has lesser number of parameters but achieved stateof-the-art results on a number of open-domain NLP tasks. We performed our experiments with BERT base and hence, from here onwards we refer to BERT base as BERT. A fine-tuned version of BERT base on clinical notes was released as clin-icalBERT (cBERT) (Alsentzer et al., 2019) . We use cBERT as the multi-head attention model for getting the token representations in ERNIE. We refer to this version of ERNIE, with entities from MetaMap, as cERNIE for clinical ERNIE. Our final multi-task learning model, incorporated with an auxillary task of predicting logical forms, is referred to as M-cERNIE for multi-task clinical ERNIE. The code for all the models is provided at https://github.com/emrQA/bionlp_acl20. Evaluation Metrics For our extractive question answering task, we utilised exact match and F1score for evaluation as per earlier literature (Rajpurkar et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 394, |
|
"end": 418, |
|
"text": "(Alsentzer et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 991, |
|
"end": 1015, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "5.3" |
|
}, |
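{
"text": "For reference, a minimal sketch of the exact match and token-level F1 metrics in the style of the SQuAD evaluation (Rajpurkar et al., 2016); the simple lower-casing and whitespace tokenization used here is an assumption and is slightly simpler than the official script.\n\nfrom collections import Counter\n\ndef normalize(text):\n    return text.lower().split()\n\ndef exact_match(prediction, gold):\n    return float(normalize(prediction) == normalize(gold))\n\ndef f1_score(prediction, gold):\n    pred_tokens, gold_tokens = normalize(prediction), normalize(gold)\n    common = Counter(pred_tokens) & Counter(gold_tokens)\n    overlap = sum(common.values())\n    if overlap == 0:\n        return 0.0\n    precision = overlap / len(pred_tokens)\n    recall = overlap / len(gold_tokens)\n    return 2 * precision * recall / (precision + recall)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Implementation Details",
"sec_num": "5.3"
},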
|
{ |
|
"text": "In this section, we compare the results of all the models that we introduced in \u00a7 3. With the help of different experiments, we try to analyse whether the induced entity and logical form information Table 3 : F1-score and exact match values for Models on emrQA and MADE. The '-s' suffix refers to the sentence setting and '-p' refers to the paragraph setting for the context provided in our reading comprehension style QA task. The '(pl)' refers to the paraphrase-level and '(r)' refers to the random split as explained in \u00a7 4. BERT refers to BERT base , cBERT refers to clinical-BERT, cERNIE refers to clinicalERNIE and M-cERNIE refers to the multi-task learning clinicalERNIE model. help the model in achieving better performance or not. We also analyse the logical form predictions to understand whether it provides a rationale for the answer predicted by our proposed model. The compiled results for all the models are shown in Table 3 . The hyper-parameter values for the best performing models are provided in Appendix A.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 199, |
|
"end": 206, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 932, |
|
"end": 939, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Does clinical entity information improve models' performance? Across all settings, the F1score of cERNIE improves by \u223c 2\u22125% over BERT and \u223c 0.75 \u2212 3% over cBERT. The exact match performance improved by \u223c 3 \u2212 4.5 over BERT and 1.5 \u2212 3.25% over cBERT. Also, as expected, the performance in sentence setting (-s) improved relatively more than it did in paragraph-setting. The entity-enriched tokens help in identifying the tokens which are required by the question. For example, in Fig. 3 , the token 'infiltrative' in the question as well as the context get highlighted with the help of the identified entity 'topp' (therapeutic or preventive procedure) and then relevant tokens in the context, chest x ray, get highlighted with the relevant entity 'diap' (diagnostic procedure). This information aids the model in narrowing down its focus to highlighted diagnostic procedures in the context for answer extraction.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 479, |
|
"end": 485, |
|
"text": "Fig. 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Question: How was diffuse infiltrative topp diagnosed fndg ?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Context: Earlier that day, pt had a chest x ray diap which showed diffuse infiltrative topp process concerning for ARDS.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Answer: chest x ray Figure 3 : An example of a question, context, their extracted entities and expected answer.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 28, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In order to answer this question, we compared the performance of our M-cERNIE model to cERNIE model and observed an improvement of 1.1 \u2212 2.5% in F1-score and an improvement of 1.4 \u2212 1.8% in exact match performance. Here as well, the performance improvement is more for sentence setting (-s) as compared to the paragraph setting (-p) . This helps the model in understanding the information need expressed in the question and helps in narrowing down its focus to certain tokens as the candidate answer. As seen in example 3, the logical form helps in understanding that the 'dose' of 'medication' needs to be extracted from the context where 'dose' was already highlighted with the help of the entity embedding of 'qnco'. Overall, the performance of our proposed model improves the F1-score by 1.2 \u2212 7.7% and exactmatch by 3.1 \u2212 6.8% over BERT model. Thus, embedding clinical entity information with the help of further fine-tuning, entity-enriching and logical form prediction help the model in performing better over the unseen paraphrases by a significant margin. For emrQA, the performance of M-cERNIE is still below the upper bound performance of the cBERT model which is achieved when all the question templates are observed (emrQA-s/p (r)) by the model but for MADE, in sentence setting (-s), the performance of M-cERNIE is even better than the upper bound model performance. For MADE-p dataset the performance dropped a little when the LF prediction information is added to the model which might be because MADE-p only has 8 logical forms (Appendix B) in total, resulting in low variety between the questions. Thus, the auxiliary task did not add much value to the learning of the base model (cERNIE) at paragraph level.", |
|
"cite_spans": [ |
|
{ |
|
"start": 328, |
|
"end": 332, |
|
"text": "(-p)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Does logical form information help the model generalize better?", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Does the model provide a supporting rationale via logical form (LF) prediction? We analyzed the performance of M-cERNIE on MADE-s and emrQA-s datasets for logical form prediction, as we saw most improvement in sentence setting (-s). We calculated macro-weighted precision, recall and F1-score for logical form classification. The model achieved a F1-score of \u223c 0.45 \u2212 0.59 for both datasets, as shown in Table 4 , exact match setting. We analysed the confusion matrix of predicted LF and observed that the model mainly gets confused between the logical forms which convey similar semantic information as shown in Fig. 4 As we can see in Fig. 4 that both logical forms refer to quite similar information, hence, we decided to obtain performance metrics (precision, recall and F1-score) in relaxed setting. We designed this relaxed setting to create a more realistic setting, where the tokens of predicted and actual logical forms are matched rather than the whole logical form. An example of logical form tokenization is shown in Fig. 5 . The model achieves a F1-score of 0.92 for emrQA-s and 0.84 for MADE-s in relaxed setting (Table 4) . This suggests that the model can efficiently identify important semantic information from the question, which is critical for efficient QA. During inference, the M-cERNIE models yield a rationale regarding a new test question (unseen paraphrase) by predicting the logical form of the question as an auxiliary task. For ex, the LF in Fig. 1 provides a rationale that any lab or procedure event related to the condition event needs to be extracted from the EMR for diagnosis. Can logical form information be induced in multi-class QA tasks as well? To answer this question, we performed another experiment where the model has to classify the evidence sentences from the non-evidence sentences making it a twoclass classification task. The model would be provided a tuple of question and a sentence and it has to predict whether the sentence is evidence or not?", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 404, |
|
"end": 411, |
|
"text": "Table 4", |
|
"ref_id": "TABREF9" |
|
}, |
|
{ |
|
"start": 613, |
|
"end": 619, |
|
"text": "Fig. 4", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 637, |
|
"end": 643, |
|
"text": "Fig. 4", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1029, |
|
"end": 1035, |
|
"text": "Fig. 5", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 1127, |
|
"end": 1136, |
|
"text": "(Table 4)", |
|
"ref_id": "TABREF9" |
|
}, |
|
{ |
|
"start": 1472, |
|
"end": 1478, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Does logical form information help the model generalize better?", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The final loss of the model (L model ) changes to:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Does logical form information help the model generalize better?", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "L model = \u03c9L lf + (1 \u2212 \u03c9)L evidence (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Does logical form information help the model generalize better?", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where \u03c9 is the weightage given to the loss of auxillary task (L lf ), logical form prediction. L evidence is loss for evidence classification and L model is the final loss for our proposed model. We conducted our experiments on emrQA dataset as evidence sentences were provided in it. In the multi-class setting, the [CLS] token representation would be used for evidence classification as well as logical form prediction. The multi-task entity enriched model (M-cERNIE) achieved an absolute improvement of 6% over cBERT and 4% over cERNIE. This suggests that the inductive bias introduced via LF prediction does help in improving the overall performance of the model for multi-class QA as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Does logical form information help the model generalize better?", |
|
"sec_num": null |
|
}, |
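{
"text": "A minimal sketch of the loss in Equation (4) for the evidence classification experiment, where the span loss is replaced by an evidence/non-evidence loss computed from the [CLS] representation; the two-logit head and the cross-entropy choice are our assumptions.\n\nimport torch.nn.functional as F\n\ndef multitask_evidence_loss(evidence_logits, lf_logits, evidence_labels, lf_labels, omega=0.3):\n    # evidence_logits: (batch, 2) evidence vs. non-evidence scores from the [CLS]\n    # representation; lf_logits: (batch, num_logical_forms).\n    evidence_loss = F.cross_entropy(evidence_logits, evidence_labels)\n    lf_loss = F.cross_entropy(lf_logits, lf_labels)\n    # Eq. (4): L_model = omega * L_lf + (1 - omega) * L_evidence\n    return omega * lf_loss + (1 - omega) * evidence_loss",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Does logical form information help the model generalize better?",
"sec_num": null
},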
|
{ |
|
"text": "In the general domain, BERT-based models are on the top of different leader boards across various tasks, including QA tasks (Rajpurkar et al., 2018 (Rajpurkar et al., , 2016 . The authors of applied BERT to the MS-MARCO passage retrieval QA task and observed improvement over state of the art results. further extended the work by combining BERT with re-ranking of predictions for queries that will be issued for each document. However, BERT-based models have not been adapted to answering physician questions on EMRs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 147, |
|
"text": "(Rajpurkar et al., 2018", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 148, |
|
"end": 173, |
|
"text": "(Rajpurkar et al., , 2016", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In case of domain-specific QA, logical forms or semantic parse are typically used to integrate the domain knowledge associated with a KB-based (knowledge base) structured QA datasets, where a model is learnt for mapping a natural language question to a LF. GeoQuery (Zelle and Mooney, 1996), and ATIS (Dahl et al., 1994) , are the oldest known manually generated question-LF annotations on closed-domain databases. QALD (Lopez et al., 2013) , FREE 917 (Cai and Yates, 2013) , SIMPLEQuestions (Bordes et al., 2015) contain hundreds of hand-crafted questions and their corresponding database queries. Prior work has also used LFs as a way to generate questions via crowdsourcing (Wang et al., 2015) . WEBQuestions (Berant et al., 2013) contains thousands of questions from Google search where the LFs are learned as latent representations in helping answer questions from Freebase. Prior work has not investigated the utility of logical forms in unstructured QA, especially as a means to generalize the QA model across different paraphrases of a question.", |
|
"cite_spans": [ |
|
{ |
|
"start": 266, |
|
"end": 276, |
|
"text": "(Zelle and", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 277, |
|
"end": 295, |
|
"text": "Mooney, 1996), and", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 296, |
|
"end": 320, |
|
"text": "ATIS (Dahl et al., 1994)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 420, |
|
"end": 440, |
|
"text": "(Lopez et al., 2013)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 452, |
|
"end": 473, |
|
"text": "(Cai and Yates, 2013)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 492, |
|
"end": 513, |
|
"text": "(Bordes et al., 2015)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 677, |
|
"end": 696, |
|
"text": "(Wang et al., 2015)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 712, |
|
"end": 733, |
|
"text": "(Berant et al., 2013)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "There have been efforts on using multi-task learning for efficient question answering, such as the authors of (McCann et al., 2018) tried to learn multiple tasks together resulting in an overall boost in the performance of the model on SQuAD (Rajpurkar et al., 2016) . Similarly, the authors of (Lu et al., 2019) also utilised the information across different tasks which lie at the intersection of vision and natural language processing to improve the performance of their model across all tasks. The authors of (Rawat et al., 2019) utilised weak supervision to the model while predicting the answer but not much work has been done to incorporate the logical form of the question for unstructured question answering in a multi-task setting. Hence, we decided to explore this direction and incorporate the structured semantic information of the questions for extractive question answering.", |
|
"cite_spans": [ |
|
{ |
|
"start": 242, |
|
"end": 266, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 295, |
|
"end": 312, |
|
"text": "(Lu et al., 2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The proposed entity-enriched QA models trained with an auxiliary task improve over the state-of-theart models by about 3 \u2212 6% across the large-scale clinical QA dataset, emrQA (Pampari et al., 2018) (as well as MADE (Jagannatha et al., 2019) ). We also show that multitask learning for logical forms along with the answer results in better generalizing over unseen paraphrases for EMR QA. The predicted logical forms also serve as an accompanying justification to the answer and help in adding credibility to the predicted answer for the physician.", |
|
"cite_spans": [ |
|
{ |
|
"start": 176, |
|
"end": 198, |
|
"text": "(Pampari et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 241, |
|
"text": "(Jagannatha et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Most of the hyper-parameters across our models remained same: learning rate: 2e \u2212 5, weight decay: 1e \u2212 5, warm-up proportion: 10% and hidden dropout probability: 0.1. The parameters that varied across models for different datasets are enumerated in the Table 6 . The hyper-parametsrs provided in Table 6 are for all models in a particular dataset. This also suggests that even after adding an auxiliary task, the proposed model doesn't need a lot of hyper-parameter tuning. C Selected entities from MetaMap", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 254, |
|
"end": 261, |
|
"text": "Table 6", |
|
"ref_id": "TABREF13" |
|
}, |
|
{ |
|
"start": 297, |
|
"end": 304, |
|
"text": "Table 6", |
|
"ref_id": "TABREF13" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Model Hyper-parameters", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The list of selected semantic types in the form of entities and their brief descriptors are provided in Table 7 . ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 111, |
|
"text": "Table 7", |
|
"ref_id": "TABREF15" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Model Hyper-parameters", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://metamap.nlm.nih.gov/ SemanticTypesAndGroups.shtml", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/panushri25/emrQA 3 https://bio-nlp.org/index.php/ projects/39-nlp-challenges 4 https://github.com/panushri25/emrQA/ blob/master/templates/templates-all.csv", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work is supported by MIT-IBM Watson AI Lab, Cambridge, MA USA.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgement", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Publicly available clinical bert embeddings. NAACL-HLT Clinical NLP Workshop", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Alsentzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Willie", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Hung", |
|
"middle": [], |
|
"last": "Boag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Weng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tristan", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Naumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mcdermott", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Alsentzer, John R Murphy, Willie Boag, Wei- Hung Weng, Di Jin, Tristan Naumann, and Matthew McDermott. 2019. Publicly available clinical bert embeddings. NAACL-HLT Clinical NLP Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Effective mapping of biomedical text to the umls metathesaurus: the metamap program", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Alan R Aronson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "AMIA", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan R Aronson. 2001. Effective mapping of biomedi- cal text to the umls metathesaurus: the metamap pro- gram. In AMIA, page 17.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Semantic parsing on freebase from question-answer pairs", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Berant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Chou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Frostig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1533--1544", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan Berant, Andrew Chou, Roy Frostig, and Percy Liang. 2013. Semantic parsing on freebase from question-answer pairs. In Proceedings of the 2013 Conference on Empirical Methods in Natural Lan- guage Processing, pages 1533-1544.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Acquiring paraphrases from text corpora", |
|
"authors": [ |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Bhagat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siddharth", |
|
"middle": [], |
|
"last": "Patwardhan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the fifth international conference on Knowledge capture", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "161--168", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rahul Bhagat, Eduard Hovy, and Siddharth Patward- han. 2009. Acquiring paraphrases from text corpora. In Proceedings of the fifth international conference on Knowledge capture, pages 161-168.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "The unified medical language system (umls): integrating biomedical terminology", |
|
"authors": [ |
|
{ |
|
"first": "Olivier", |
|
"middle": [], |
|
"last": "Bodenreider", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Nucleic acids research", |
|
"volume": "32", |
|
"issue": "1", |
|
"pages": "267--270", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Olivier Bodenreider. 2004. The unified medical lan- guage system (umls): integrating biomedical termi- nology. Nucleic acids research, 32(suppl 1):D267- D270.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Large-scale simple question answering with memory networks", |
|
"authors": [ |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Usunier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sumit", |
|
"middle": [], |
|
"last": "Chopra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1506.02075" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antoine Bordes, Nicolas Usunier, Sumit Chopra, and Jason Weston. 2015. Large-scale simple question answering with memory networks. arXiv preprint arXiv:1506.02075.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Large-scale semantic parsing via schema matching and lexicon extension", |
|
"authors": [ |
|
{ |
|
"first": "Qingqing", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Yates", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "ACL", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "423--433", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qingqing Cai and Alexander Yates. 2013. Large-scale semantic parsing via schema matching and lexicon extension. In ACL, volume 1, pages 423-433.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Expanding the scope of the atis task: The atis-3 corpus", |
|
"authors": [ |
|
{ |
|
"first": "Deborah A", |
|
"middle": [], |
|
"last": "Dahl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Madeleine", |
|
"middle": [], |
|
"last": "Bates", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Fisher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kate", |
|
"middle": [], |
|
"last": "Hunicke-Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Pallett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Pao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Rudnicky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Shriberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--48", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deborah A Dahl, Madeleine Bates, Michael Brown, William Fisher, Kate Hunicke-Smith, David Pallett, Christine Pao, Alexander Rudnicky, and Elizabeth Shriberg. 1994. Expanding the scope of the atis task: The atis-3 corpus. In HLT, pages 43-48.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. NAACL-HLT.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Overview of the first natural language processing challenge for extracting medication, indication, and adverse drug events from electronic health record notes (made 1.0). Drug safety", |
|
"authors": [ |
|
{ |
|
"first": "Abhyuday", |
|
"middle": [], |
|
"last": "Jagannatha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feifan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weisong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "42", |
|
"issue": "", |
|
"pages": "99--111", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhyuday Jagannatha, Feifan Liu, Weisong Liu, and Hong Yu. 2019. Overview of the first natural lan- guage processing challenge for extracting medica- tion, indication, and adverse drug events from elec- tronic health record notes (made 1.0). Drug safety, 42(1):99-111.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Mimiciii, a freely accessible critical care database", |
|
"authors": [ |
|
{ |
|
"first": "Alistair EW", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom J", |
|
"middle": [], |
|
"last": "Pollard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li-wei H", |
|
"middle": [], |
|
"last": "Lehman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mengling", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Ghassemi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Moody", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Szolovits", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leo Anthony", |
|
"middle": [], |
|
"last": "Celi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger G", |
|
"middle": [], |
|
"last": "Mark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Scientific data", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alistair EW Johnson, Tom J Pollard, Lu Shen, H Lehman Li-wei, Mengling Feng, Moham- mad Ghassemi, Benjamin Moody, Peter Szolovits, Leo Anthony Celi, and Roger G Mark. 2016. Mimic- iii, a freely accessible critical care database. Scien- tific data, 3:160035.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Biobert: a pre-trained biomedical language representation model for biomedical text mining", |
|
"authors": [ |
|
{ |
|
"first": "Jinhyuk", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wonjin", |
|
"middle": [], |
|
"last": "Yoon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungdong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Donghyeon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunkyu", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chan", |
|
"middle": [], |
|
"last": "Ho So", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaewoo", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Bioinformatics", |
|
"volume": "36", |
|
"issue": "4", |
|
"pages": "1234--1240", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2020. Biobert: a pre-trained biomed- ical language representation model for biomedical text mining. Bioinformatics, 36(4):1234-1240.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Rxnorm: prescription for electronic drug information exchange", |
|
"authors": [ |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Moore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vikraman", |
|
"middle": [], |
|
"last": "Ganesan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stuart", |
|
"middle": [], |
|
"last": "Nelson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "17--23", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simon Liu, Wei Ma, Robin Moore, Vikraman Gane- san, and Stuart Nelson. 2005. Rxnorm: prescription for electronic drug information exchange. IT profes- sional, 7(5):17-23.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Evaluating question answering over linked data", |
|
"authors": [ |
|
{ |
|
"first": "Vanessa", |
|
"middle": [], |
|
"last": "Lopez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christina", |
|
"middle": [], |
|
"last": "Unger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Cimiano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Enrico", |
|
"middle": [], |
|
"last": "Motta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Web Semantics: Science, Services and Agents on the World Wide Web", |
|
"volume": "21", |
|
"issue": "", |
|
"pages": "3--13", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vanessa Lopez, Christina Unger, Philipp Cimiano, and Enrico Motta. 2013. Evaluating question answering over linked data. Web Semantics: Science, Services and Agents on the World Wide Web, 21:3-13.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "12-in-1: Multi-task vision and language representation learning", |
|
"authors": [ |
|
{ |
|
"first": "Jiasen", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vedanuj", |
|
"middle": [], |
|
"last": "Goswami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcus", |
|
"middle": [], |
|
"last": "Rohrbach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1912.02315" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiasen Lu, Vedanuj Goswami, Marcus Rohrbach, Devi Parikh, and Stefan Lee. 2019. 12-in-1: Multi-task vision and language representation learning. arXiv preprint arXiv:1912.02315.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "The natural language decathlon", |
|
"authors": [ |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Mccann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Shirish Keskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Multitask learning as question answering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1806.08730" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bryan McCann, Nitish Shirish Keskar, Caiming Xiong, and Richard Socher. 2018. The natural language de- cathlon: Multitask learning as question answering. arXiv preprint arXiv:1806.08730.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Passage re-ranking with bert", |
|
"authors": [ |
|
{ |
|
"first": "Rodrigo", |
|
"middle": [], |
|
"last": "Nogueira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1901.04085" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rodrigo Nogueira and Kyunghyun Cho. 2019. Pas- sage re-ranking with bert. arXiv preprint arXiv:1901.04085.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Document expansion by query prediction", |
|
"authors": [ |
|
{ |
|
"first": "Rodrigo", |
|
"middle": [], |
|
"last": "Nogueira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1904.08375" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rodrigo Nogueira, Wei Yang, Jimmy Lin, and Kyunghyun Cho. 2019. Document expansion by query prediction. arXiv preprint arXiv:1904.08375.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "emrqa: A large corpus for question answering on electronic medical records", |
|
"authors": [ |
|
{ |
|
"first": "Anusri", |
|
"middle": [], |
|
"last": "Pampari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preethi", |
|
"middle": [], |
|
"last": "Raghavan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anusri Pampari, Preethi Raghavan, Jennifer Liang, and Jian Peng. 2018. emrqa: A large corpus for question answering on electronic medical records. EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Know what you don't know: Unanswerable questions for squad", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1806.03822" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Robin Jia, and Percy Liang. 2018. Know what you don't know: Unanswerable ques- tions for squad. arXiv preprint arXiv:1806.03822.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "SQuAD: 100,000+ questions for machine comprehension of text", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.05250" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: 100,000+ questions for machine comprehension of text. arXiv preprint arXiv:1606.05250.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Naranjo question answering using end-to-end multitask learning model", |
|
"authors": [ |
|
{ |
|
"first": "Bhanu Pratap Singh", |
|
"middle": [], |
|
"last": "Rawat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2547--2555", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bhanu Pratap Singh Rawat, Fei Li, and Hong Yu. 2019. Naranjo question answering using end-to-end multi- task learning model. In Proceedings of the 25th ACM SIGKDD International Conference on Knowl- edge Discovery & Data Mining, pages 2547-2555. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Bidirectional attention flow for machine comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Minjoon", |
|
"middle": [], |
|
"last": "Seo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aniruddha", |
|
"middle": [], |
|
"last": "Kembhavi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Farhadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1611.01603" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minjoon Seo, Aniruddha Kembhavi, Ali Farhadi, and Hannaneh Hajishirzi. 2016. Bidirectional attention flow for machine comprehension. arXiv preprint arXiv:1611.01603.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Snomed rt: a reference terminology for health care", |
|
"authors": [ |
|
{ |
|
"first": "Kent A", |
|
"middle": [], |
|
"last": "Spackman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keith E", |
|
"middle": [], |
|
"last": "Campbell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger A", |
|
"middle": [], |
|
"last": "C\u00f4t\u00e9", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proceedings of the AMIA annual fall symposium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kent A Spackman, Keith E Campbell, and Roger A C\u00f4t\u00e9. 1997. Snomed rt: a reference terminology for health care. In Proceedings of the AMIA annual fall symposium, page 640. American Medical Informat- ics Association.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Clicr: a dataset of clinical case reports for machine reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "\u0160uster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Daelemans", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1803.09720" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simon\u0160uster and Walter Daelemans. 2018. Clicr: a dataset of clinical case reports for machine reading comprehension. arXiv preprint arXiv:1803.09720.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "cloze procedure\": A new tool for measuring readability", |
|
"authors": [ |
|
{ |
|
"first": "Wilson L", |
|
"middle": [], |
|
"last": "Taylor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1953, |
|
"venue": "Journalism Bulletin", |
|
"volume": "30", |
|
"issue": "4", |
|
"pages": "415--433", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wilson L Taylor. 1953. \"cloze procedure\": A new tool for measuring readability. Journalism Bulletin, 30(4):415-433.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Bioasq: A challenge on large-scale biomedical semantic indexing and question answering", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Tsatsaronis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Schroeder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgios", |
|
"middle": [], |
|
"last": "Paliouras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yannis", |
|
"middle": [], |
|
"last": "Almirantis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ion", |
|
"middle": [], |
|
"last": "Androutsopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Gaussier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Gallinari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thierry", |
|
"middle": [], |
|
"last": "Artieres", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael R", |
|
"middle": [], |
|
"last": "Alvers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Zschunke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "AAAI Fall Symposium Series", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George Tsatsaronis, Michael Schroeder, Georgios Paliouras, Yannis Almirantis, Ion Androutsopoulos, Eric Gaussier, Patrick Gallinari, Thierry Artieres, Michael R Alvers, Matthias Zschunke, et al. 2012. Bioasq: A challenge on large-scale biomedical se- mantic indexing and question answering. In 2012 AAAI Fall Symposium Series.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "i2b2/va challenge on concepts, assertions, and relations in clinical text", |
|
"authors": [ |
|
{ |
|
"first": "Ozlem", |
|
"middle": [], |
|
"last": "Uzuner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brett R", |
|
"middle": [], |
|
"last": "South", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuying", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Scott L", |
|
"middle": [], |
|
"last": "DuVall", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Journal of the American Medical Informatics Association", |
|
"volume": "18", |
|
"issue": "5", |
|
"pages": "552--556", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ozlem Uzuner, Brett R South, Shuying Shen, and Scott L DuVall. 2011. 2010 i2b2/va challenge on concepts, assertions, and relations in clinical text. Journal of the American Medical Informatics Asso- ciation, 18(5):552-556.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Building a semantic parser overnight", |
|
"authors": [ |
|
{ |
|
"first": "Yushi", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Berant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "ACL-IJCNLP", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1332--1342", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yushi Wang, Jonathan Berant, and Percy Liang. 2015. Building a semantic parser overnight. In ACL- IJCNLP, volume 1, pages 1332-1342.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Multimodal multitask representation learning for pathology biobank metadata prediction", |
|
"authors": [ |
|
{ |
|
"first": "Wei-Hung", |
|
"middle": [], |
|
"last": "Weng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuannan", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fraser", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Po-Hsuan Cameron", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.07846" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei-Hung Weng, Yuannan Cai, Angela Lin, Fraser Tan, and Po-Hsuan Cameron Chen. 2019. Multi- modal multitask representation learning for pathol- ogy biobank metadata prediction. arXiv preprint arXiv:1909.07846.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "A broad-coverage challenge corpus for sentence understanding through inference", |
|
"authors": [ |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel R", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1704.05426" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adina Williams, Nikita Nangia, and Samuel R Bow- man. 2017. A broad-coverage challenge corpus for sentence understanding through inference. arXiv preprint arXiv:1704.05426.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Learning to parse database queries using inductive logic programming", |
|
"authors": [ |
|
{ |
|
"first": "John M", |
|
"middle": [], |
|
"last": "Zelle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond J", |
|
"middle": [], |
|
"last": "Mooney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Proceedings of the national conference on artificial intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1050--1055", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John M Zelle and Raymond J Mooney. 1996. Learn- ing to parse database queries using inductive logic programming. In Proceedings of the national con- ference on artificial intelligence, pages 1050-1055.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Ernie: Enhanced language representation with informative entities", |
|
"authors": [ |
|
{ |
|
"first": "Zhengyan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1905.07129" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhengyan Zhang, Xu Han, Zhiyuan Liu, Xin Jiang, Maosong Sun, and Qun Liu. 2019a. Ernie: En- hanced language representation with informative en- tities. arXiv preprint arXiv:1905.07129.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Sg-net: Syntax-guided machine reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Zhuosheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuwei", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junru", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sufeng", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.05147" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhuosheng Zhang, Yuwei Wu, Junru Zhou, Sufeng Duan, and Hai Zhao. 2019b. Sg-net: Syntax-guided machine reading comprehension. arXiv preprint arXiv:1908.05147.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Aligning books and movies: Towards story-like visual explanations by watching movies and reading books", |
|
"authors": [ |
|
{ |
|
"first": "Yukun", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Zemel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raquel", |
|
"middle": [], |
|
"last": "Urtasun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Torralba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanja", |
|
"middle": [], |
|
"last": "Fidler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the IEEE international conference on computer vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "19--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhut- dinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In Proceedings of the IEEE inter- national conference on computer vision, pages 19- 27.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "A synthetic example of a clinical context, question, its logical form and the expected answer." |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "Two similar questions with different logical forms (LFs) but overlapping answer conditions." |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "MedicationEvent (x) given {ConditionEvent (|problem|) OR SymptomEvent (|problem|)} Tokenized: ['MedicationEvent (x)', 'given', 'ConditionEvent (|pr oblem|)', 'OR', 'SymptomEvent (|problem|)']" |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "Tokenized logical form (LF)." |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"content": "<table><tr><td>Token: Multi-Head Attention</td><td>Entity: Multi-Head Attention</td></tr><tr><td>[CLS] Ques: Has the patient ever gone into edema? Context: Has the [SEP] ? edema edema or Extremities</td><td>.</td></tr></table>", |
|
"html": null, |
|
"text": "Extremities no clubbing, ... cyanosis or edema.", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "", |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Train, validation and test data splits.", |
|
"num": null |
|
}, |
|
"TABREF7": { |
|
"type_str": "table", |
|
"content": "<table><tr><td>Logical Form: LabEvent (|test|) [abnormalResultFlag=Y, date=|date|,</td></tr><tr><td>result=x] OR ProcedureEvent (|test|) [abnormalResultFlag=Y, date=|d</td></tr><tr><td>ate|, result=x] OR VitalEvent (|test|) [date=|date|, (result=x)>vital.ref</td></tr><tr><td>high] OR VitalEvent (|test|) [date=|date|, (result=x)<vital.reflow] OR</td></tr><tr><td>[{LabEvent (|test|) [date=|date|, abnormalResultFlag=Y] OR Procedur</td></tr><tr><td>eEvent (|test|) [date=|date|, abnormalResultFlag=Y] OR VitalEvent (|t</td></tr><tr><td>est|) [date=|date|]} reveals {ConditionEvent (x) OR SymptomEvent</td></tr><tr><td>(x)}]</td></tr><tr><td>Q2: What were the abnormal results of BMI?</td></tr><tr><td>Logical Form: LabEvent (|test|) [date=x, (result=x)<lab.reflow] OR L</td></tr><tr><td>abEvent (|test|) [date=x, (result=x)>lab.refhigh] OR VitalEvent (|test|)</td></tr><tr><td>[date=x, (result=x)<vital.reflow] OR VitalEvent (|test|) [date=x, (result</td></tr><tr><td>=x)>vital.refhigh]</td></tr></table>", |
|
"html": null, |
|
"text": ". What were the results of the abnormal BMI on 2094-12-02?", |
|
"num": null |
|
}, |
|
"TABREF9": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Precision, Recall and F1-score for logical form prediction.", |
|
"num": null |
|
}, |
|
"TABREF11": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Macro-weighted precision, recall and F1-score of Proposed Models on Test Dataset (Multi-choice QA). For the model names, c: clinical; M: multitask.", |
|
"num": null |
|
}, |
|
"TABREF13": { |
|
"type_str": "table", |
|
"content": "<table><tr><td>B Logical forms (LFs) for MADE dataset</td></tr><tr><td>1. MedicationEvent (|medication|) [sig=x]</td></tr><tr><td>2. MedicationEvent (|medication|) causes Condi-</td></tr><tr><td>tionEvent (x) OR SymptomEvent (x)</td></tr><tr><td>3. MedicationEvent (|medication|) given Condi-</td></tr><tr><td>tionEvent (x) OR SymptomEvent (x)</td></tr><tr><td>4. [ProcedureEvent (|treatment|) given/conducted</td></tr><tr><td>ConditionEvent (x) OR SymptomEvent (x)] OR</td></tr><tr><td>[MedicationEvent (|treatment|) given Condition-</td></tr><tr><td>Event (x) OR SymptomEvent (x)]</td></tr><tr><td>5. MedicationEvent (x) CheckIfNull ([enddate])</td></tr><tr><td>OR MedicationEvent (x) [enddate>currentDate]</td></tr><tr><td>OR ProcedureEvent (x) [date=x] given Condition-</td></tr><tr><td>Event (|problem|) OR SymptomEvent (|problem|)</td></tr><tr><td>6. MedicationEvent (x) CheckIfNull ([enddate])</td></tr><tr><td>OR MedicationEvent (x) [enddate>currentDate]</td></tr><tr><td>given ConditionEvent (|problem|) OR Symp-</td></tr><tr><td>tomEvent (|problem|)</td></tr><tr><td>7. MedicationEvent (|treatment|) OR Proce-</td></tr><tr><td>dureEvent (|treatment|) given ConditionEvent (x)</td></tr><tr><td>OR SymptomEvent (x)</td></tr><tr><td>8. MedicationEvent (|treatment|) OR Proce-</td></tr><tr><td>dureEvent (|treatment|) improves/worsens/causes</td></tr><tr><td>ConditionEvent (x) OR SymptomEvent (x)</td></tr></table>", |
|
"html": null, |
|
"text": "Hyper-parameter values across different datasets.", |
|
"num": null |
|
}, |
|
"TABREF15": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Selected semantic types as per MetaMap and their brief descriptions.", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |