|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:06:42.347930Z" |
|
}, |
|
"title": "Interactive Extractive Search over Biomedical Corpora", |
|
"authors": [ |
|
{ |
|
"first": "Hillel", |
|
"middle": [], |
|
"last": "Taub-Tabib", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Allen Institute for AI", |
|
"location": { |
|
"settlement": "Tel Aviv", |
|
"country": "Israel" |
|
} |
|
}, |
|
"email": "hillelt@allenai.org" |
|
}, |
|
{ |
|
"first": "Micah", |
|
"middle": [], |
|
"last": "Shlain", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Allen Institute for AI", |
|
"location": { |
|
"settlement": "Tel Aviv", |
|
"country": "Israel" |
|
} |
|
}, |
|
"email": "micahs@allenai.org" |
|
}, |
|
{ |
|
"first": "Shoval", |
|
"middle": [], |
|
"last": "Sadde", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Allen Institute for AI", |
|
"location": { |
|
"settlement": "Tel Aviv", |
|
"country": "Israel" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Lahav", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tel Aviv University", |
|
"location": { |
|
"settlement": "Tel-Aviv", |
|
"country": "Israel" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Matan", |
|
"middle": [], |
|
"last": "Eyal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Allen Institute for AI", |
|
"location": { |
|
"settlement": "Tel Aviv", |
|
"country": "Israel" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yaara", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Allen Institute for AI", |
|
"location": { |
|
"settlement": "Tel Aviv", |
|
"country": "Israel" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Allen Institute for AI", |
|
"location": { |
|
"settlement": "Tel Aviv", |
|
"country": "Israel" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We present a system that allows life-science researchers to search a linguistically annotated corpus of scientific texts using patterns over dependency graphs, as well as using patterns over token sequences and a powerful variant of boolean keyword queries. In contrast to previous attempts to dependency-based search, we introduce a lightweight query language that does not require the user to know the details of the underlying linguistic representations, and instead to query the corpus by providing an example sentence coupled with simple markup. Search is performed at an interactive speed due to efficient linguistic graphindexing and retrieval engine. This allows for rapid exploration, development and refinement of user queries. We demonstrate the system using example workflows over two corpora: the PubMed corpus including 14,446,243 PubMed abstracts and the CORD-19 dataset 1 , a collection of over 45,000 research papers focused on COVID-19 research. The system is publicly available at https://allenai. github.io/spike", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We present a system that allows life-science researchers to search a linguistically annotated corpus of scientific texts using patterns over dependency graphs, as well as using patterns over token sequences and a powerful variant of boolean keyword queries. In contrast to previous attempts to dependency-based search, we introduce a lightweight query language that does not require the user to know the details of the underlying linguistic representations, and instead to query the corpus by providing an example sentence coupled with simple markup. Search is performed at an interactive speed due to efficient linguistic graphindexing and retrieval engine. This allows for rapid exploration, development and refinement of user queries. We demonstrate the system using example workflows over two corpora: the PubMed corpus including 14,446,243 PubMed abstracts and the CORD-19 dataset 1 , a collection of over 45,000 research papers focused on COVID-19 research. The system is publicly available at https://allenai. github.io/spike", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Recent years have seen a surge in the amount of accessible Life Sciences data. Search engines like Google Scholar, Microsoft Academic Search or Semantic Scholar allow researchers to search for published papers based on keywords or concepts, but search results often include thousands of papers and extracting the relevant information from the papers is a problem not addressed by the search engines. This paradigm works well when the information need can be answered by reviewing a number of papers from the top of the search results. However, when the information need requires extraction of information nuggets from many papers 1 https://pages.semanticscholar.org/coronavirus-research (e.g. all chemical-protein interactions or all risk factors for a disease) the task becomes challenging and researchers will typically resort to curated knowledge bases or designated survey papers in case ones are available.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We present a search system that works in a paradigm which we call Extractive Search, and which allows rapid information seeking queries that are aimed at extracting facts, rather than documents. Our system combines three query modes: boolean, sequential and syntactic, targeting different stages of the analysis process, and different extraction scenarios. Boolean queries ( \u00a74.1) are the most standard, and look for the existence of search terms, or groups of search terms, in a sentence, regardless of their order. These are very powerful for finding relevant sentences, and for co-occurrence searches. Sequential queries ( \u00a74.2) focus on the order and distance between terms. They are intuitive to specify and are very effective where the text includes \"anchor-words\" near the entity of interest. Lastly, syntactic queries ( \u00a74.4) focus on the linguistic constructions that connect the query words to each other. Syntactic queries are very powerful, and can work also where the concept to be extracted does not have clear linear anchors. However, they are also traditionally hard to specify and require strong linguistic background to use. Our systems lowers their barrier of entry with a specification-by-example interface.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our proposed system is based on the following components. Minimal but powerful query languages. There is an inherent trade-off between simplicity and control. On the one extreme, web search engines like Google Search offer great simplicity, but very little control, over the exact information need. On the other extreme, information extraction patternspecification languages like UIMA Ruta offer great precision and control, but also expose a low-level view of the text and come with over hundred-page manual. 2 Our system is designed to offer high degree of expressivity, while remaining simple to grasp: the syntax and functionality can be described in a few paragraphs. The three query languages are designed to share the same syntax to the extent possible, to facilitate knowledge transfer between them and to ease the learning curve. Linguistic Information, Captures, and Expansions. Each of the three query types are linguistically informed, and the user can condition not only on the word forms, but also on their lemmas, parts-of-speech tags, and identified entity types. The user can also request to capture some of the search terms, and to expand them to a linguistic context. For example, in a boolean search query looking for a sentence that contains the lemmas \"treat\" and \"treatment\" ('lemma=treat|treatment'), a chemical name ('entity=SIMPLE CHEMICAL') and the word \"infection\" ('infection'), a user can mark the chemical name and the word \"infection\" as captures. This will yield a list of chemical/infection pairs, together with the sentence from which they originated, all of which contain the words relating to treatments. Capturing the word \"infection\" is not very useful on its own: all matches result in the exact same word. But, by expanding the captured word to its surrounding linguistic environment, the captures list will contain terms such as \"PEDV infection\", \"acyclovir-resistant HSV infection\" and \"secondary bacterial infection\". Running this query over PubMed allows us to create a large and relatively focused list in just a few seconds. The list can then be downloaded as a CSV file for further processing. The search becomes extractive: we are not only looking for documents, but also, by use of captures, extract information from them. Sentence Focus, Contextual Restrictions. As our system is intended for extraction of information, it works at the sentence level. However, each sentence is situated in a context, and we allow secondary queries to condition on that context, for example by looking for sentences that appear in paragraphs that contain certain words, or which appear in papers with certain words in their titles, in papers with specific MeSH terms, in papers whose abstracts include specific terms, etc. This combines the focus and information density of a sentence, which is the main target of the extraction, with the rich signals available in its surrounding context. Interactive Speed. Central to the approach is an indexed solution, based on (Valenzuela-Esc\u00e1rcega et al., 2020) , that allows to perform all types of queries efficiently over very large corpora, while getting results almost immediately. This allows the users to interactively refine their queries and improve them based on the feedback from the results. This contrasts with machine learning based solutions that, even neglecting the development time, require substantially longer turnaround times between query and results from a large corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 510, |
|
"end": 511, |
|
"text": "2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 3000, |
|
"end": 3035, |
|
"text": "(Valenzuela-Esc\u00e1rcega et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The primary paradigm for navigating large scientific collections such as MEDLINE/PubMed 3 is document-level search. The most immediate document-level searching technique is boolean search (\"keyword search\"). However, these methods suffer from an inability to capture the concepts aimed for by the user, as biomedical terms may have different names in different sub-fields and as the user may not always know exactly what they are looking for. To overcome this issue several databases offer semantic searching by exploiting MeSH terms that indicate related concepts. While in some cases MeSH terms can be assigned automatically, e.g (Mork et al., 2013) , in others obtaining related concepts require a manual assignment which is laborious to obtain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 632, |
|
"end": 651, |
|
"text": "(Mork et al., 2013)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Existing Information Discovery Approaches", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Beyond the methods incorporated in the literature databases themselves, there are numerous external tools for biomedical document searching. Thalia (Soto et al., 2018) is a system for semantic searching over PubMed. It can recognize different types of concepts occurring in Biomedical abstracts, and additionally enables search based on abstract metadata; LIVIVO (M\u00fcller et al., 2017) takes the task of vertically integrating information from divergent research areas in the life sciences; SWIFT-Review 4 offers iterative screening by reranking the results based on the user's inputs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 363, |
|
"end": 384, |
|
"text": "(M\u00fcller et al., 2017)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Existing Information Discovery Approaches", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "All of these solutions are focused on the document level, which can be limiting: they often surface hundreds of papers or more, requiring careful reading, assessing and filtering by the user, in order to locate the relevant facts they are looking for.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Existing Information Discovery Approaches", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To complement document searching, some systems facilitate automatic extraction of biomedical concepts, or patterns, from documents. Such systems are often equipped with analysis capabilities of the extracted information. For example, NaCTem has created systems that extract biomedical entities, relations and events. 5 ; ExaCT and RobotReviewer (Kiritchenko et al., 2010; Marshall et al., 2015 ) take a RCT report and retrieve sentences that match certain study characteristics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 345, |
|
"end": 371, |
|
"text": "(Kiritchenko et al., 2010;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 372, |
|
"end": 393, |
|
"text": "Marshall et al., 2015", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Existing Information Discovery Approaches", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To improve the development of automatic document selection and information extraction the BioNLP community organized a series of shared tasks (Kim et al., 2009 (Kim et al., , 2011 N\u00e9dellec et al., 2013; Segura Bedmar et al., 2013; Jin-Dong et al., 2019) . The tasks address a diverse set of biomed topics addressed by a range of NLP-based techniques. While effective, such systems require annotated training data and substantial expertise to produce. As such, they are restricted to several \"head\" information extraction needs, those that enjoy a wide community interest and support. The long tail of information needs of \"casual\" researchers remain mostly un-addressed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 159, |
|
"text": "(Kim et al., 2009", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 160, |
|
"end": 179, |
|
"text": "(Kim et al., , 2011", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 202, |
|
"text": "N\u00e9dellec et al., 2013;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 203, |
|
"end": 230, |
|
"text": "Segura Bedmar et al., 2013;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 253, |
|
"text": "Jin-Dong et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Existing Information Discovery Approaches", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Existing approaches to information extraction from bio-medical data suffer from significant practical limitations. Techniques based on supervised training require extensive data collection and annotation (Kim et al., 2009 (Kim et al., , 2011 N\u00e9dellec et al., 2013; Segura Bedmar et al., 2013; , or a high degree of technical savviness in producing high quality data sets from distant supervision (Peng et al., 2017; Verga et al., 2017; Wang et al., 2019) . On the other hand, rule based engines are generally too complex to be used directly by domain experts and require a linguist or an NLP specialist to operate. Furthermore, both rule based and supervised systems typically operate in a pipeline approach where an NER engine identifies the relevant entities and subsequent extraction models identify the relations between them. This approach is often problematic in real world biomedical IE scenarios, where relevant entities often cannot be extracted by stock NER models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 204, |
|
"end": 221, |
|
"text": "(Kim et al., 2009", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 222, |
|
"end": 241, |
|
"text": "(Kim et al., , 2011", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 242, |
|
"end": 264, |
|
"text": "N\u00e9dellec et al., 2013;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 265, |
|
"end": 292, |
|
"text": "Segura Bedmar et al., 2013;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 396, |
|
"end": 415, |
|
"text": "(Peng et al., 2017;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 416, |
|
"end": 435, |
|
"text": "Verga et al., 2017;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 436, |
|
"end": 454, |
|
"text": "Wang et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interactive IE Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To address these limitations we present a system allowing domain experts to interactively query linguistically annotated datasets of scientific re-search papers, using a novel multifaceted query language which we designed, and which supports boolean search, sequential patterns search, and byexample syntactic search (Shlain et al., 2020) , as well as specification of search terms whose matches should be captured or expanded. The queries can be further restricted by contextual information. We demonstrate the system on two datasets: a comprehensive dataset of PubMed abstracts and a dataset of full text papers focused on COVID-19 research.", |
|
"cite_spans": [ |
|
{ |
|
"start": 317, |
|
"end": 338, |
|
"text": "(Shlain et al., 2020)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interactive IE Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Comparison to existing systems. In contrast to document level search solutions, the results returned by our system are sentences which include highlighted spans that directly answer the user's information need. In contrast to supervised IE solutions, our solution does not require a lengthy process of data collection and labeling or a precise definition of the problem settings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interactive IE Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Compared to rule based systems our system differentiates itself in a number of ways: (i) our query engine automatically translates lightly tagged natural language sentences to syntactic queries (queryby-example) thus allowing domain experts to benefit from the advantages of syntactic patterns without a deep understanding of syntax; (ii) our queries run against indexed data, allowing our translated syntactic queries to run at interactive speed; and (iii) our system does not rely on relation schemas and does not make assumptions about the number of arguments involved or their types.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interactive IE Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In many respects, our system is similar to the PropMiner system (Akbik et al., 2013) for exploratory relation extraction (Akbik et al., 2014) . Both PropMiner and our system support byexample queries in interactive speed. However, the query languages we describe in section 4 are significantly more expressive than PropMiner's language, which supports only binary relations. Furthermore, compared to PropMiner, our annotation pipeline was optimized specifically for the biomedical domain and our system is freely available online. Technical details. The datasets were annotated for biomedical entities and syntax using a custom SciSpacy pipeline (Neumann et al., 2019) 6 , and the syntactic trees were enriched to BART format using pyBART (Tiktinsky et al., 2020) . The annotated data is indexed using the Odinson engine (Valenzuela-Esc\u00e1rcega et al., 2020).", |
|
"cite_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 84, |
|
"text": "(Akbik et al., 2013)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 121, |
|
"end": 141, |
|
"text": "(Akbik et al., 2014)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 646, |
|
"end": 670, |
|
"text": "(Neumann et al., 2019) 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 739, |
|
"end": 763, |
|
"text": "(Tiktinsky et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interactive IE Approach", |
|
"sec_num": "3" |
|
}, |
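
{

"text": "As a minimal sketch of the kind of token-level annotation (lemma, part-of-speech tag, entity type, dependency edge) that the queries described below condition on, the following uses an off-the-shelf SciSpacy model; it is not the system's actual pipeline, which combines a custom SciSpacy pipeline, pyBART enhancement and Odinson indexing, and the model name 'en_core_sci_sm' is assumed here for illustration only.\n\n# Minimal sketch: per-token annotations of the kind the boolean,\n# sequential and syntactic queries condition on.\n# Assumes the SciSpacy model 'en_core_sci_sm' is installed.\nimport spacy\n\nnlp = spacy.load('en_core_sci_sm')\ndoc = nlp('BMP-6 induces the phosphorylation of Smad1.')\n\nfor tok in doc:\n    # word form, lemma, POS tag, dependency label, syntactic head\n    print(tok.text, tok.lemma_, tok.tag_, tok.dep_, tok.head.text)\n\nfor ent in doc.ents:\n    # entity mentions; labels depend on the NER model used\n    print(ent.text, ent.label_)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Interactive IE Approach",

"sec_num": "3"

},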
|
{ |
|
"text": "Boolean queries are the standard in information retrieval (IR): the user provides a set of terms that should, and should not, appear in a document, and the system returns a set of documents that adhere to these constraints. This is a familiar and intuitive model, which can be very effective for initial data exploration as well as for extraction tasks that focus on co-occurrence. We depart from standard boolean queries and extend them by (a) allowing to condition on different linguistic aspects of each token; (b) allowing capturing of terms into named variables; and (c) allowing linguistic expansion of the captured terms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Boolean Queries", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The simplest boolean query is a list of terms, where each term is a word, i.e: 'infection asymptomatic fatal' The semantics is that all the terms must appear in the query. A term can be made optional by prefixing it with a '?' symbol ('infection asymptomatic ?fatal' ). Each term can also specify a list of alternatives: 'fatal|deadly|lethal'. Beyond words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Boolean Queries", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In addition to matching words, terms can also specify linguistic properties:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Boolean Queries", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "lemmas, parts-of-speech, and domain-specific entity-types:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Boolean Queries", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "'lemma=infect entity=DISEASE'. Conditions can also be combined: 'lemma=cause|reason&tag=NN'. We find that the ability to search for domain-specific types is very effective in boolean queries, as it allows to search for concepts rather than words. In addition to exact match, we also support matching on regular expressions ('lemma=/caus.*/'). The field names word, lemma, entity, tag can be shortened to w,l,e,t.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Boolean Queries", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Captures. Central to our extractive approach is the ability to designate specific search term to be captured. Capturing is indicated by prefixing the term with ':' (for an automatically-named capture) or with 'name:' (for a named capture). The query 'fatal asymptomatic d:e=DISEASE' will look for sentences that contain the terms 'fatal' and 'asymptomatic' as well as a name of a disease, and will capture the disease name under a variable \"d\". Each query result will be a sentence with a single disease captured. If several diseases appear in the same sentence, each one will be its own result. The user can then focus on the captured entities, and export the entire query result to a CSV file, in which each row contains the sentence, its source, and the captured variables. In the current examples, the result will be a list of disease names that co-occur with \"fatal\" and \"asymptomatic\". We can also issue a query such as 'chem:e=SIMPLE CHEMICAL d:e=DISEASE'", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Boolean Queries", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "to get a list of chemical-disease co-occurrences. Using additional terms, we can narrow down to co-occurrences with specific words, and by using contextual restrictions ( \u00a74.3) we can focus on cooccurrences in specific papers or domains. Expansions. Finally, for captured terms we also support linguistic expansions. After the term is matched, we can expand it to a larger linguistic environment based on the underlying syntactic sentence representation. An expansion is expressed by prefixing a term with angle brackets :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Boolean Queries", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "' inf:infection asymptomatic fatal' will capture the word \"infection\" under the variable \"inf\" and expand it to its surrounding noun-phrase, capturing phrases like \"malarialike infection\", \"asymptomatic infection\", \"chronic infection\" and \"a mild subclinical infection 9\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Boolean Queries", |
|
"sec_num": "4.1" |
|
}, |
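
{

"text": "To make the capture semantics concrete, here is a minimal sketch, assuming sentences are available as lists of token dictionaries with 'word', 'lemma', 'entity' and 'tag' fields; it illustrates the query semantics only, not the system's indexed implementation.\n\n# Sketch: evaluate the boolean query 'fatal asymptomatic d:e=DISEASE'\n# over one annotated sentence. A query is a list of\n# (field, allowed values, capture name or None) terms.\ndef match_boolean(tokens, terms):\n    captures = {}\n    for field, values, capture in terms:\n        hits = [t for t in tokens if t[field] in values]\n        if not hits:\n            return None  # every term must appear somewhere in the sentence\n        if capture:\n            captures[capture] = hits[0]['word']  # first hit, for simplicity\n    return captures\n\nsentence = [\n    {'word': 'Rabies', 'lemma': 'rabies', 'entity': 'DISEASE', 'tag': 'NN'},\n    {'word': 'is', 'lemma': 'be', 'entity': '', 'tag': 'VBZ'},\n    {'word': 'fatal', 'lemma': 'fatal', 'entity': '', 'tag': 'JJ'},\n    {'word': 'yet', 'lemma': 'yet', 'entity': '', 'tag': 'RB'},\n    {'word': 'asymptomatic', 'lemma': 'asymptomatic', 'entity': '', 'tag': 'JJ'},\n]\n\nquery = [('word', {'fatal'}, None),\n         ('word', {'asymptomatic'}, None),\n         ('entity', {'DISEASE'}, 'd')]\nprint(match_boolean(sentence, query))  # {'d': 'Rabies'}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Boolean Queries",

"sec_num": "4.1"

},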
|
{ |
|
"text": "While boolean queries allow terms to appear in any order, we sometimes care about the exact linear placements of words with respect to each other. The term-specification, capture and expansion syntax is the same as in boolean queries, but here terms must match as in the query. 'interspecies transmission' looks for the exact phrase \"interspecies transmission\" and 'tag=NNS transmission' looks for the word transmission immediately preceded by a plural noun. By capturing the noun ('which:tag=NNS transmission') we obtain a list of terms that includes the words \"bacteria\", \"diseases\", \"nuclei\" and \"crossspecies\". Wildcards. sequential queries can also use wildcard symbols: * (matching any single word), '...' (0 or more words), '...2-5...' (2-5 words). The query 'interspecies kind:...1-3... transmission' looks for the words \"interspecies\" and \"transmission\" with 1 to 3 intervening words, capturing the intervening words under \"kind\". First results include \"host-host\", \"zoonotic\", \"virus\", \"TSE agent\", \"and interclass\". Repetitions. We also allow to specify repetitions of terms. To do so, the term is enclosed in [ ] and followed by a quantifier. We support the standard list of regular expression quantifiers: *, +, ?, {n,m}. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequential (Token) Queries", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Each query can be associated with contextual restrictions, which are secondary queries that oper- ate on the same data and restrict the set of sentences that are considered for the main queries. These queries currently have the syntax of the Lucene query language. 7 Our system allows the secondary queries to condition on the paragraph the sentence appears in, and on the title, abstract, authors, publication data, publication venue and MeSH terms of the paper the sentence appears in. Additional sources of information are easy to add. For example, adding the contextual restriction '#d +title:cancer +mesh:\"Age Distribution\"' restricts a query results to sentences from papers which have the word \"cancer\" in their title and whose MeSH terms include \"Age Distribution\". Similarly '#d +title:/corona.*/ +year: [2015 TO 2020]' restricts queries to include sentences from papers published between 2015 and 2020 and have a word starting with corona in their title.", |
|
"cite_spans": [ |
|
{ |
|
"start": 265, |
|
"end": 266, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual Restrictions", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "These secondary queries greatly increase the power of boolean, sequential and syntactic queries: one could look for interspecies transmissions that relate to certain diseases, or for sentence-level disease-chemical co-occurrences in papers that discuss specific sub-populations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual Restrictions", |
|
"sec_num": "4.3" |
|
}, |
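
{

"text": "As a minimal sketch of what such a restriction does (the actual system evaluates the '#d ...' part with Lucene over its index; the field names below are assumed for illustration), the following filters paper metadata with the same semantics as '#d +title:cancer +mesh:\"Age Distribution\"'; only sentences from papers that pass the filter would then be considered by the main query.\n\n# Sketch: contextual-restriction semantics over in-memory paper metadata.\ndef passes_restriction(paper):\n    return ('cancer' in paper['title'].lower()\n            and 'Age Distribution' in paper['mesh'])\n\npapers = [\n    {'title': 'Age Distribution of Cancer Risk', 'mesh': ['Age Distribution']},\n    {'title': 'Influenza surveillance', 'mesh': ['Influenza, Human']},\n]\n\nallowed = [p for p in papers if passes_restriction(p)]\nprint([p['title'] for p in allowed])  # only the first paper passes",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Contextual Restrictions",

"sec_num": "4.3"

},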
|
{ |
|
"text": "Recent advances in machine learning brought with them accurate syntactic parsing, but parse-trees remain hard to use. We remedy this by employing a novel query language we introduced in (Shlain et al., 2020) which is based on the principle of query-by-example.", |
|
"cite_spans": [ |
|
{ |
|
"start": 186, |
|
"end": 207, |
|
"text": "(Shlain et al., 2020)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example-based Syntactic Queries", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "The query is specified by starting with a simple natural language sentence that conveys the desired syntactic structure, for example, 'BMP-6 induces the phosphorylation of Smad1'. Then, words can be marked as anchor words (that need to match exactly) or capture nodes (that are variables). Words can also be neither anchor or capture, in which case they only support the scaffolding of the sentence. The system then translates the sentence with the captures and anchors syntax into a syntactic query graph, which is presented to the user. The user can then restrict capture nodes from \"match anything\" to matching specific terms (using the term specification syntax as in boolean or token queries) and can likewise relax the exact-match constraints on anchor words. Like in other query types, capture nodes can be marked for expansion. The syntactic graph is then matched against the pre-parsed and indexed corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example-based Syntactic Queries", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "This simple markup provides a rich syntax-based query system, while alleviating the user from the need to know linguistic syntax.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example-based Syntactic Queries", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "For example, consider the query below, the details of which will be discussed shortly:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example-based Syntactic Queries", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "' p1:[e=GENE OR GENE PRODUCT]BMP-6 $induces the $phosphorylation $of p2:Smad1'", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example-based Syntactic Queries", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "The words 'induce', 'phosphorylation' and 'of' are anchors (designated by '$'), while 'p1' and 'p2' are captures for 'BMP-6' and 'Smad1'. Both capture nodes are marked for expansion using angle braces (' '). Node p1 is restricted to match tokens with the same entity type of BMP-6 (indicated by 'e=GENE OR...'). The query can be shortened by omitting the entity type and retaining only the entity restriction ('e'):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example-based Syntactic Queries", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Here, the entity type is inferred by the system from the entity type of The graph for the query is displayed in Figure 1 . It has 5 tokens in a specific syntactic configuration determined by directed labeled edges. The 1st token must have the entity tag of 'GENE OR...', the 2nd, 3rd, and 4th tokens must be the exact words \"induces phosphorylation of\", and the 5th is unconstrained.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 120, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Example-based Syntactic Queries", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Sentences whose syntactic graph has a subgraph that aligns to the query and adheres to the constraints will match the query. Example of matching sentences are: -ERK p 1 activation induces phosphorylation of Elk-1 p 2 . -Thrombopoietin p 1 activates human platelets and induces tyrosine phosphorylation of p80/85 cortactin p2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example-based Syntactic Queries", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "The sentence tokens corresponding to the p1 and p2 graph nodes will be bound to variables with these names: {p1=ERK, p2=Elk-1} for the first sentence and {p1=Thrombopoietin, p2=p80/85 cortactin} for the second.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example-based Syntactic Queries", |
|
"sec_num": "4.4" |
|
}, |
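
{

"text": "As a rough sketch of what matching the query graph involves (the actual system matches against pre-parsed, Odinson-indexed pyBART graphs; here we use plain spaCy with its standard English dependency labels, and the model name 'en_core_web_sm' is assumed for illustration), the following walks a dependency parse and extracts (p1, p2) pairs for the 'X induces phosphorylation of Y' configuration.\n\n# Sketch: subject / prepositional-object pairs around\n# 'induces ... phosphorylation of ...' in a dependency parse.\nimport spacy\n\nnlp = spacy.load('en_core_web_sm')\n\ndef match_induces_phosphorylation(doc):\n    for tok in doc:\n        if tok.lemma_ != 'induce':\n            continue\n        subjects = [c for c in tok.children if c.dep_ == 'nsubj']\n        objects = [c for c in tok.children\n                   if c.dep_ == 'dobj' and c.lemma_ == 'phosphorylation']\n        for obj in objects:\n            preps = [c for c in obj.children\n                     if c.dep_ == 'prep' and c.lower_ == 'of']\n            for prep in preps:\n                pobjs = [c for c in prep.children if c.dep_ == 'pobj']\n                if subjects and pobjs:\n                    yield subjects[0].text, pobjs[0].text\n\ndoc = nlp('Thrombopoietin induces phosphorylation of cortactin.')\nprint(list(match_induces_phosphorylation(doc)))  # [('Thrombopoietin', 'cortactin')]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Example-based Syntactic Queries",

"sec_num": "4.4"

},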
|
{ |
|
"text": "We describe a workflow which is based on using our extractive search system over a corpus of all PubMed abstracts. While the described researcher is hypothetical, the results we discuss are real.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Consider a medical researcher who is trying to compile an up to date list of the risk factors for stroke. A PubMed search for \"risk factors for stroke\" yields 3317 results, and reading through all results is impractical. A Google query for the same phrase brings out an info box from NHLBI 9 listing 16 common risk factors including high blood pressure, diabetes, heart disease, etc. Having a curated list which clearly outlines the risk factors is helpful, but curated lists or survey papers will often not include rare or recent research findings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The researcher thus turns to extractive search and tries an exploratory boolean query:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "'risk factor stroke' . The figure shows the top results for the query and the majority of sentences retrieved indeed specify specific risk factors for stroke. This is an improvement over the PubMed results as the researcher can quickly identify the risk factors discussed without going through the different papers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Furthermore, the top results contain risk factors like migrane or C804A polymorphism not listed in the NHLBI knowledge base. However, the full result list is lengthy and extracting all the risk factors from it manually would be tedious. Instead, the researcher notes that many of the top results are variations on the \"X is a risk factor for stroke\" structure. She thus continues by issuing the following syntactic query, where a capture labeled r is used to directly capture the risk factors:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "9 https://www.nhlbi.nih.gov/ health-topics/stroke The figure shows the top results for the query and the risk factors are indeed labeled with r as expected. Unfortunately, some of the captured risk factors names are not fully expanded. For example, we capture syndrome instead of metabolic syndrome and temperature instead of low temperature. Being interested in capturing the full names, the researcher adds angle brackets ' ' to expand the captured elements:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "' r:Diabetes is a $risk $factor for $stroke'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The full names are now captured as expected.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Now that that researcher has verified that the query yields relevant results, she clicks the download button to download the full result set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The resulting tab separated file has 1212 rows. Each row includes a result sentence, the captured elements in it (in this case, just the risk factor), and their offsets. Using a spreadsheet to group the rows by risk factor and order the results by frequency, the researcher obtains a list of 640 unique risk factors, 114 of them appearing more than once in the data. Figure 2a lists the top results.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 367, |
|
"end": 376, |
|
"text": "Figure 2a", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
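
{

"text": "The grouping step can also be scripted; a minimal sketch, assuming the exported file is named 'stroke_risk_factors.tsv' and the capture column is named 'r' (both names are assumptions, as the exact export schema is not spelled out here), ranks the captured risk factors by frequency.\n\n# Sketch: rank captured risk factors by frequency from the exported TSV.\nimport pandas as pd\n\ndf = pd.read_csv('stroke_risk_factors.tsv', sep='\\t')\ncounts = df['r'].str.lower().value_counts()\n\nprint(counts.head(20))  # most frequent risk factors\nprint((counts > 1).sum(), 'risk factors appear more than once')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Example Workflow: Risk-factors",

"sec_num": "5"

},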
|
{ |
|
"text": "Reviewing the list, the researcher decides that she's not interested in general risk factors, but rather in diseases only. She modifies the query by adding an entity restriction to the 'r' capture:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "As seen in the query graph, even though the researcher didn't specify the exact entity type, the query parser correctly resolved it to DISEASE. The results now include diseases like sleep apnoea and hypertension but do not include smoking, age and alcohol (see Figure 2b) .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 261, |
|
"end": 271, |
|
"text": "Figure 2b)", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Analyzing the results, the researcher now wants to compare the risk factors in the general population to ones listed in research papers dealing with children and infants. Luckily, such papers are indexed with corresponding MeSH terms and the researcher can utilize this fact by appending '#d mesh:Child mesh:Infant -mesh:Adult' to her query. In cases where a desired MeSH term does not exist, an alternative approach is filtering the results based on words in the abstract or title. For example, appending '#d abstract:child abstract:children' to a query will ensure that the result sentences come from abstracts which contain the word child or the word children.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Happy with the results of the initial query, the researcher can further augment her list by querying for other structures which identify risk factors (e.g. \"'r:Diabetes $causes $stroke'\", \"'$risk $factors for $stroke $include r:Diabetes'\", etc.).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Importantly, once the researcher has identified one or more effective queries to extract the risk factors for stroke, the queries can easily be modified in useful ways. For example, with a small modification to our original query we can extract: risk factors for cancer:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "'r:Diabetes is a $risk $factor for $cacner' diseases which can be caused by smoking:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "'$Smoking is a $risk $factor for d:[e]stroke'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "ad-hoc KB of (risk factor, disease) tuples (for self use or as an easily queryable public resource):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "'r:Diabetes is a $risk $ factor for d:[e]stroke'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: Risk-factors", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The COVID-19 Open Research Dataset (Wang et al., 2020 ) is a collection of 45,000 research papers, including over 33,000 with full text, about COVID-19 and the coronavirus family. The corpus was released by the Allen Institute for AI and associated partners in an attempt to encourage researchers to apply recent advances in NLP to the data to generate insights.", |
|
"cite_spans": [ |
|
{ |
|
"start": 35, |
|
"end": 53, |
|
"text": "(Wang et al., 2020", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: CORD-19", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Identifying COVID-19 Aliases Since the CORD-19 corpus includes papers about the entire Coronavirus family of viruses, it's useful to identify papers and sentences dealing specifically with COVID-19. Before converging on the acronym COVID-19 researchers have referred to the virus by many names: nCov-19, SARS-COV-ii, novel coronavirus, etc. Luckily, it's fairly easy to identify many of these aliases using a sequential pattern:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: CORD-19", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "'novel coronavirus ( alias:...1-2... )'", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: CORD-19", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The pattern looks for the words \"novel coronavirus\" followed by an open parenthesis, one-or-two words which are to be captured under the 'alias' variable, and a closing parenthesis. The query retrieves 52 unique candidate aliases for COVID-19, though some of them refer to older coronaviruses such as \"MERS\", or non-relevant terms such as \"Fig2\". After ranking by frequency and validating the results, we can reuse the pattern on newly retrieved aliases to extend the list. Through this iterative process we quickly compile a list of 47 aliases. We marked all occurrences of these terms in the underlying corpus as a new entity type, COVID-19, and re-indexed the dataset with this entity information. Exploring Drugs and Treatments. To explore drugs and treatments for COVID-19 we search the corpus for chemicals co-occuring with the COVID-19 entity using a boolean query:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: CORD-19", |
|
"sec_num": "6" |
|
}, |
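
{

"text": "A rough approximation of this sequential pattern over plain whitespace-tokenized text (the actual system matches it over the annotated token index) can be sketched as follows; it scans for \"novel coronavirus\" followed by a parenthesized one- or two-word alias.\n\n# Sketch: approximate 'novel coronavirus ( alias:...1-2... )' over raw text.\ndef find_aliases(text):\n    toks = text.replace('(', ' ( ').replace(')', ' ) ').split()\n    aliases = []\n    for i in range(len(toks) - 3):\n        if [t.lower() for t in toks[i:i + 3]] == ['novel', 'coronavirus', '(']:\n            inside, j = [], i + 3\n            while j < len(toks) and toks[j] != ')' and len(inside) < 2:\n                inside.append(toks[j])\n                j += 1\n            if inside and j < len(toks) and toks[j] == ')':\n                aliases.append(' '.join(inside))\n    return aliases\n\nprint(find_aliases('The novel coronavirus ( 2019-nCoV ) spread rapidly.'))  # ['2019-nCoV']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Example Workflow: CORD-19",

"sec_num": "6"

},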
|
{ |
|
"text": "'chemical:e=SIMPLE CHEMICAL|CHEMICAL e=COVID-19'", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: CORD-19", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Table 1(a) shows the top matching chemicals by frequency. While some of the substances listed like Chloroquine and Remdesivir are drugs being tested for treating COVID-19, others are only hypothesized as useful or appear in other contexts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: CORD-19", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "To guide the search toward therapeutic substances in different stages of maturity we can add indicative terms to the query. For example, the following query can be used to detect substances at the stage of clinical trials:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: CORD-19", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "'chemical:e=SIMPLE CHEMICAL|CHEMICAL e=COVID-19 l=trial|experiment', while adding 'l=suggest|hypothesize|candidate' can assist in detecting substances in ideation stage. Table 1 (b,c) shows the frequency distributions of the chemicals resulting from the two queries. While the queries are very basic and include only a few terms for each category, the difference is clearly noticeable: while the Malaria drug Chloroquine tops both lists, the antiviral drug Remdesivir which is currently tested for COVID-19 is second on the list of trial related drugs but does not appear at all as a top result for ideation related drugs. Importantly, entity co-mention queries like the ones above rely on the availability and accuracy of underlying NER models. As we've seen in Section 5, in cases where the relevant types are not extracted by NER, syntactic queries can be used instead. For example the following query captures sentences including chemicals being used on patients (the abstract or paragraph are required to include COVID-19 related terms).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 177, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Example Workflow: CORD-19", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "'he was $treated $with a chem:treatment #d paragraph:ncov* paragraph:covid* abstract:ncov* abstract:covid*'", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Workflow: CORD-19", |
|
"sec_num": "6" |
|
}, |
|
{

"text": "Table 2: Top elements occurring in the syntactic \"treated with X\" configuration (note that this query does not rely on NER information): ribavirin (11), oseltamivir (9), ECMO (6), convalescent plasma (4), TCM (3), LPV/r (3), three fusions of MSCs (2), supportive care (2), protective conditions (2), lopinavir/ritonavir (2), intravenous remdesivir (2), hydroxychloroquine (2), HCQ (2), glucocorticoids (2), FPV (2), effective isolation (2), chloroquine (2), caution (2), bDMARDs (2), azithromycin (2), ARBs (2), antivirals (2), ACE inhibitors (2), 500 mg chloroquine (2), masks (1).",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Example Workflow: CORD-19",

"sec_num": "6"

},
|
{ |
|
"text": "The top results by frequency are shown in Table 2 . The top ranking results show many of the chemicals obtained by equivalent boolean queries 10 , but interestingly, they also contain non-chemical treatments like supportive care, isolation and masks. This demonstrates a benefit of using entity agnostic syntactic patterns even in cases where a strong NER model exists.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 42, |
|
"end": 50, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Example Workflow: CORD-19", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "While the workflows discussed above pertain mainly to the medical domain, the system is optimized for the broader life science domain. Here are a sample of additional queries, showing different potential use-cases. Which genes regulate a cell process: ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "More Examples", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We presented a search system that targets extracting facts from a biomed corpus and demonstrated its utility in a research and a clinical context over CORD-19 and PubMed. The system works in an Extractive Search paradigm which allows rapid information seeking practices in 3 modes: boolean, sequential and syntactic. The interactive and flexible nature of the system makes it suitable for users in different levels of sophistication.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "https://uima.apache.org/d/ ruta-current/tools.ruta.book.pdf", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.ncbi.nlm.nih.gov/pubmed/ 4 https://www.sciome.com/swift-review/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.nactem.ac.uk/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "All abstracts underwent sentence splitting, tokenization, tagging, parsing and NER using all the 4 NER models available in SciSpacy", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://lucene.apache.org/core/6_ 0_0/queryparser/org/apache/lucene/ queryparser/classic/package-summary.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "' p1:[e]BMP-6 $induces the $phosphorylation $of p2:Smad1'", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Similarly, we could specify '$[lemma]induces', resulting in the restriction 'lemma=induce' instead of 'word=induces' for the anchor.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "to get a more comprehensive coverage we can issue queries for other syntactic structures like ' chem:chemical was used $in $treatment' and combine the results of the different queries.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Acknowledgements The work performed at BIU is supported by funding from the Europoean Research Council (ERC) under the Europoean Union's Horizon 2020 research and innovation programme, grant agreement No. 802774 (iEX-TRACT).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "acknowledgement", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Propminer: A workflow for interactive information extraction and exploration using dependency trees", |
|
"authors": [ |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Akbik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oresti", |
|
"middle": [], |
|
"last": "Konomi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michail", |
|
"middle": [], |
|
"last": "Melnikov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "157--162", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan Akbik, Oresti Konomi, and Michail Melnikov. 2013. Propminer: A workflow for interactive infor- mation extraction and exploration using dependency trees. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics: Sys- tem Demonstrations, pages 157-162.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Exploratory relation extraction in large text corpora", |
|
"authors": [ |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Akbik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thilo", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christoph", |
|
"middle": [], |
|
"last": "Boden", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2087--2096", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan Akbik, Thilo Michael, and Christoph Boden. 2014. Exploratory relation extraction in large text corpora. In Proceedings of COLING 2014, the 25th International Conference on Computational Linguis- tics: Technical Papers, pages 2087-2096.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Overview of the regulatory network of plant seed development (seedev) task at the bionlp shared task", |
|
"authors": [ |
|
{ |
|
"first": "Estelle", |
|
"middle": [], |
|
"last": "Chaix", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bertrand", |
|
"middle": [], |
|
"last": "Dubreucq", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdelhak", |
|
"middle": [], |
|
"last": "Fatihi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dialekti", |
|
"middle": [], |
|
"last": "Valsamou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Bossy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mouhamadou", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louise", |
|
"middle": [], |
|
"last": "Del\u00e9ger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierre", |
|
"middle": [], |
|
"last": "Zweigenbaum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philippe", |
|
"middle": [], |
|
"last": "Bessieres", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Loic", |
|
"middle": [], |
|
"last": "Lepiniec", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 4th bionlp shared task workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--11", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Estelle Chaix, Bertrand Dubreucq, Abdelhak Fatihi, Dialekti Valsamou, Robert Bossy, Mouhamadou Ba, Louise Del\u00e9ger, Pierre Zweigenbaum, Philippe Bessieres, Loic Lepiniec, et al. 2016. Overview of the regulatory network of plant seed development (seedev) task at the bionlp shared task 2016. In Proceedings of the 4th bionlp shared task workshop, pages 1-11.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Overview of the bacteria biotope task at bionlp shared task 2016", |
|
"authors": [ |
|
{ |
|
"first": "Louise", |
|
"middle": [], |
|
"last": "Del\u00e9ger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Bossy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Estelle", |
|
"middle": [], |
|
"last": "Chaix", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mouhamadou", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 4th BioNLP shared task workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "12--22", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Louise Del\u00e9ger, Robert Bossy, Estelle Chaix, Mouhamadou Ba, Arnaud Ferr\u00e9, Philippe Bessieres, and Claire N\u00e9dellec. 2016. Overview of the bacteria biotope task at bionlp shared task 2016. In Proceed- ings of the 4th BioNLP shared task workshop, pages 12-22.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Proceedings of The 5th Workshop on BioNLP Open Shared Tasks. Association for Computational Linguistics", |
|
"authors": [], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kim Jin-Dong, N\u00e9dellec Claire, Bossy Robert, and Del\u00e9ger Louise, editors. 2019. Proceedings of The 5th Workshop on BioNLP Open Shared Tasks. Asso- ciation for Computational Linguistics, Hong Kong, China.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Overview of bionlp'09 shared task on event extraction", |
|
"authors": [ |
|
{ |
|
"first": "Jin-Dong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoko", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sampo", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshinobu", |
|
"middle": [], |
|
"last": "Kano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun'ichi", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the BioNLP 2009 workshop companion volume for shared task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jin-Dong Kim, Tomoko Ohta, Sampo Pyysalo, Yoshi- nobu Kano, and Jun'ichi Tsujii. 2009. Overview of bionlp'09 shared task on event extraction. In Pro- ceedings of the BioNLP 2009 workshop companion volume for shared task, pages 1-9.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Overview of genia event task in bionlp shared task", |
|
"authors": [ |
|
{ |
|
"first": "Jin-Dong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toshihisa", |
|
"middle": [], |
|
"last": "Takagi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Akinori", |
|
"middle": [], |
|
"last": "Yonezawa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the BioNLP Shared Task 2011 Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7--15", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jin-Dong Kim, Yue Wang, Toshihisa Takagi, and Aki- nori Yonezawa. 2011. Overview of genia event task in bionlp shared task 2011. In Proceedings of the BioNLP Shared Task 2011 Workshop, pages 7-15. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Exact: Automatic extraction of clinical trial characteristics from journal publications. BMC medical informatics and decision making", |
|
"authors": [ |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simona", |
|
"middle": [], |
|
"last": "Berry De Bruijn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Carini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ida", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1186/1472-6947-10-56" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Svetlana Kiritchenko, Berry de Bruijn, Simona Carini, Joel Martin, and Ida Sim. 2010. Exact: Automatic extraction of clinical trial characteristics from jour- nal publications. BMC medical informatics and de- cision making, 10:56.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "RobotReviewer: evaluation of a system for automatically assessing bias in clinical trials", |
|
"authors": [ |
|
{ |
|
"first": "Iain J", |
|
"middle": [], |
|
"last": "Marshall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jo\u00ebl", |
|
"middle": [], |
|
"last": "Kuiper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron C", |
|
"middle": [], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Journal of the American Medical Informatics Association", |
|
"volume": "23", |
|
"issue": "1", |
|
"pages": "193--201", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1093/jamia/ocv044" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iain J Marshall, Jo\u00ebl Kuiper, and Byron C Wallace. 2015. RobotReviewer: evaluation of a system for automatically assessing bias in clinical trials. Jour- nal of the American Medical Informatics Associa- tion, 23(1):193-201.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "The nlm medical text indexer system for indexing biomedical literature. CEUR Workshop Proceedings", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Mork", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Jimeno-Yepes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Aronson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J.G. Mork, Antonio Jimeno-Yepes, and Alan Aronson. 2013. The nlm medical text indexer system for in- dexing biomedical literature. CEUR Workshop Pro- ceedings, 1094.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "LI-VIVO : the vertical search engine for life sciences", |
|
"authors": [ |
|
{ |
|
"first": "Bernd", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christoph", |
|
"middle": [], |
|
"last": "Poley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jana", |
|
"middle": [], |
|
"last": "P\u00f6ssel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Hagelstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "G\u00fcbitz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Datenbank-Spektrum", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s13222-016-0245-2" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bernd M\u00fcller, Christoph Poley, Jana P\u00f6ssel, Alexan- dra Hagelstein, and Thomas G\u00fcbitz. 2017. LI- VIVO : the vertical search engine for life sciences. Datenbank-Spektrum, pages 1-6.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Overview of bionlp shared task 2013", |
|
"authors": [ |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "N\u00e9dellec", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Bossy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jin-Dong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jung-Jae", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoko", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sampo", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierre", |
|
"middle": [], |
|
"last": "Zweigenbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the BioNLP shared task 2013 workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--7", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Claire N\u00e9dellec, Robert Bossy, Jin-Dong Kim, Jung- Jae Kim, Tomoko Ohta, Sampo Pyysalo, and Pierre Zweigenbaum. 2013. Overview of bionlp shared task 2013. In Proceedings of the BioNLP shared task 2013 workshop, pages 1-7.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Iz Beltagy, and Waleed Ammar. 2019. Scispacy: Fast and robust models for biomedical natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "King", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Waleed", |
|
"middle": [], |
|
"last": "Ammar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Neumann, Daniel King, Iz Beltagy, and Waleed Ammar. 2019. Scispacy: Fast and robust models for biomedical natural language processing.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Cross-sentence n-ary relation extraction with graph lstms. Transactions of the Association for", |
|
"authors": [ |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hoifung", |
|
"middle": [], |
|
"last": "Poon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Quirk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wen-Tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "101--115", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nanyun Peng, Hoifung Poon, Chris Quirk, Kristina Toutanova, and Wen-tau Yih. 2017. Cross-sentence n-ary relation extraction with graph lstms. Transac- tions of the Association for Computational Linguis- tics, 5:101-115.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Extraction of drug-drug interactions from biomedical texts (ddiextraction 2013). Association for Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Isabel", |
|
"middle": [], |
|
"last": "Segura Bedmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paloma", |
|
"middle": [], |
|
"last": "Mart\u00ednez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mar\u00eda Herrero", |
|
"middle": [], |
|
"last": "Zazo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Isabel Segura Bedmar, Paloma Mart\u00ednez, and Mar\u00eda Herrero Zazo. 2013. Semeval-2013 task 9: Ex- traction of drug-drug interactions from biomedical texts (ddiextraction 2013). Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Syntactic search by example", |
|
"authors": [ |
|
{ |
|
"first": "Micah", |
|
"middle": [], |
|
"last": "Shlain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hillel", |
|
"middle": [], |
|
"last": "Taub-Tabib", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shoval", |
|
"middle": [], |
|
"last": "Sadde", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of ACL 2020, System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Micah Shlain, Hillel Taub-Tabib, Shoval Sadde, and Yoav Goldberg. 2020. Syntactic search by exam- ple. In Proceedings of ACL 2020, System Demon- strations.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Thalia: semantic search engine for biomedical abstracts", |
|
"authors": [ |
|
{ |
|
"first": "Axel J", |
|
"middle": [], |
|
"last": "Soto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Przyby\u0142a", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sophia", |
|
"middle": [], |
|
"last": "Ananiadou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Bioinformatics", |
|
"volume": "35", |
|
"issue": "10", |
|
"pages": "1799--1801", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1093/bioinformatics/bty871" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Axel J Soto, Piotr Przyby\u0142a, and Sophia Ananiadou. 2018. Thalia: semantic search engine for biomed- ical abstracts. Bioinformatics, 35(10):1799-1801.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "2020. pybart: Evidence-based syntactic transformations for ie", |
|
"authors": [ |
|
{ |
|
"first": "Aryeh", |
|
"middle": [], |
|
"last": "Tiktinsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Reut", |
|
"middle": [], |
|
"last": "Tsarfaty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of ACL 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aryeh Tiktinsky, Yoav Goldberg, and Reut Tsarfaty. 2020. pybart: Evidence-based syntactic transforma- tions for ie. In Proceedings of ACL 2020, System Demonstrations.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Odinson: A fast rule-based information extraction framework", |
|
"authors": [ |
|
{ |
|
"first": "Marco A.", |
|
"middle": [], |
|
"last": "Valenzuela-Esc\u00e1rcega", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gus", |
|
"middle": [], |
|
"last": "Hahn-Powell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dane", |
|
"middle": [], |
|
"last": "Bell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Twelfth International Conference on Language Resources and Evaluation (LREC 2020)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco A. Valenzuela-Esc\u00e1rcega, Gus Hahn-Powell, and Dane Bell. 2020. Odinson: A fast rule-based in- formation extraction framework. In Proceedings of the Twelfth International Conference on Language Resources and Evaluation (LREC 2020), Marseille, France. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Attending to all mention pairs for full abstract biological relation extraction", |
|
"authors": [ |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Verga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emma", |
|
"middle": [], |
|
"last": "Strubell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ofer", |
|
"middle": [], |
|
"last": "Shai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1710.08312" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Patrick Verga, Emma Strubell, Ofer Shai, and Andrew McCallum. 2017. Attending to all mention pairs for full abstract biological relation extraction. arXiv preprint arXiv:1710.08312.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Carissa Schoenick, Nick Botner, and Waleed Ammar. 2019. Extracting evidence of supplement-drug interactions from literature", |
|
"authors": [ |
|
{ |
|
"first": "Lucy", |
|
"middle": [ |
|
"Lu" |
|
], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oyvind", |
|
"middle": [], |
|
"last": "Tafjord", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarthak", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Cohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Skjonsberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carissa", |
|
"middle": [], |
|
"last": "Schoenick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Botner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Waleed", |
|
"middle": [], |
|
"last": "Ammar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.08135" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lucy Lu Wang, Oyvind Tafjord, Sarthak Jain, Arman Cohan, Sam Skjonsberg, Carissa Schoenick, Nick Botner, and Waleed Ammar. 2019. Extracting ev- idence of supplement-drug interactions from litera- ture. arXiv preprint arXiv:1909.08135.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "For example, 'tag=DT [tag=JJ]* [tag=NN]+'.", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"text": "Query Graph of the syntactic query ' p1:[e]BMP-6 $induces the $phosphorylation $of p2:Smad1.'", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"text": "(a) ranked risk factors for stroke (b) ranked disease risk factors", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"text": "Grouped and ranked results'r:Diabetes is a $risk $factor for $stroke'.", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"text": "171), chloroquine (118), nucleotide (115), NCP (87), CR3022 (47), Ksiazek (46), IgG (45), lopinavir/ritonavir (42), ECMO (40), LPV/r (35), corticosteroids (35), oxygen (32), ribavirin (31), lopinavir (31), Hydroxychloroquine (30), amino acid (30), ritonavir (27), corticosteroid (24), Sofosbuvir (22), amino acids (22), HCQ (19), glucocorticoids (19) , ritonavir (5), S-RBD (4), nucleotide (4), Lopinavir (4), CR3022 (4), Ribavirin (3), nucleic acid (3), logP (3), Li (3), ledipasvir (3), IgG (3), HCQ (3), TGEV (2), teicoplanin (2), nelfinavir (2), NCP (2), HWs (2) glucocorticoids (2), ENPEP (2), ECMO (2), darunavir (2), creatinine(2), creatine (2), CQ (2), corticosteroid (2), CEP (2), ARB(2)", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF5": { |
|
"type_str": "figure", |
|
"text": "(a) Unrestricted. (b) with Trial related terms. (c) with Ideation related terms.", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Top chemicals co-occuring with the COVID-19 entity and their counts." |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"5\">' p1:[e]CD95 v:[l]regulates p:[e]apoptosis'</td><td/></tr><tr><td>' host:[e]bat</td><td>is</td><td>a</td><td>$natural</td><td>$host</td><td>of</td></tr><tr><td colspan=\"3\">disease:[e]coronavirus'</td><td/><td/><td/></tr><tr><td colspan=\"5\">Documented LOF mutations in genes:</td><td/></tr><tr><td colspan=\"6\">'$loss $of $function m:[w]mutation in gene:[e]PAX8'</td></tr></table>", |
|
"html": null, |
|
"text": "Which specie is the natural host of a disease:" |
|
} |
|
} |
|
} |
|
} |