|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T06:44:24.899007Z" |
|
}, |
|
"title": "SmartCiteCon: Implicit Citation Context Extraction from Academic Literature Using Supervised Learning", |
|
"authors": [ |
|
{ |
|
"first": "Chenrui", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Wuhan University Wuhan", |
|
"location": { |
|
"settlement": "Hubei", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "chenruiguo@whu.edu.cn" |
|
}, |
|
{ |
|
"first": "Haoran", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Wuhan University Wuhan", |
|
"location": { |
|
"settlement": "Hubei", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "haoran.cui@whu.edu.cn" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Wuhan University Wuhan", |
|
"location": { |
|
"settlement": "Hubei", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jiamin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Wuhan University Wuhan", |
|
"location": { |
|
"settlement": "Hubei", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Wuhan University Wuhan", |
|
"location": { |
|
"settlement": "Hubei", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "weilu@whu.edu.cn" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Old Dominion University Norfolk", |
|
"location": { |
|
"region": "VA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We introduce SmartCiteCon (SCC), a Java API for extracting both explicit and implicit citation context from academic literature in English. The tool is built on a Support Vector Machine (SVM) model trained on a set of 7,058 manually annotated citation context sentences, curated from 34,000 papers in the ACL Anthology. The model with 19 features achieves F 1 =85.6%. SCC supports PDF, XML, and JSON files out-of-box, provided that they are conformed to certain schemas. The API supports single document processing and batch processing in parallel. It takes about 12-45 seconds on average depending on the format to process a document on a dedicated server with 6 multithreaded cores. Using SCC, we extracted 11.8 million citation context sentences from \u223c33.3k PMC papers in the CORD-19 dataset, released on June 13, 2020. The source code is released at https://gitee. com/irlab/SmartCiteCon.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We introduce SmartCiteCon (SCC), a Java API for extracting both explicit and implicit citation context from academic literature in English. The tool is built on a Support Vector Machine (SVM) model trained on a set of 7,058 manually annotated citation context sentences, curated from 34,000 papers in the ACL Anthology. The model with 19 features achieves F 1 =85.6%. SCC supports PDF, XML, and JSON files out-of-box, provided that they are conformed to certain schemas. The API supports single document processing and batch processing in parallel. It takes about 12-45 seconds on average depending on the format to process a document on a dedicated server with 6 multithreaded cores. Using SCC, we extracted 11.8 million citation context sentences from \u223c33.3k PMC papers in the CORD-19 dataset, released on June 13, 2020. The source code is released at https://gitee. com/irlab/SmartCiteCon.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Citations are ubiquitous in scientific publications. With proper citations, statements in research papers are supported by existing works, and readers obtain relevant information beyond the current paper. Citations also form graphs, which provide unique models for ranking, sentimental classification, and plagiarism detection. Therefore, citation analysis plays an important role in helping to understand the deep connection between literature. Accurate citation context recognition is the prerequisite of many downstream applications. Recently, citation context, the text segment that appears around the citation mark in the body text, has been used for enhancing and improving keyphrase extraction (Caragea et al., 2014) and document summarization (Cohan and Goharian, 2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 701, |
|
"end": 723, |
|
"text": "(Caragea et al., 2014)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 751, |
|
"end": 777, |
|
"text": "(Cohan and Goharian, 2015)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There are two types of citation context. Explicit citation contexts (ECC) are sentences containing ci-tation marks. Each citation thus corresponds to one explicit citation context sentence. Implicit citation contexts (ICC) are sentences that are semantically relevant to the cited articles but do not contain citation marks. ICC may appear before or after but may not immediately precede or follow the ECC sentence. One paper could be cited multiple times and each time may have different citation contexts. In the example below, the ECC, containing the citation mark \"(Ma et al. 2004)\", is highlighted in green. The ICC sentences are highlighted in yellow. The nonhighlighted sentence is not a citation context for the given citation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We investigate the impact of semantic constraints on sta tistical word alignment models as prior knowledge.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In (Ma et al. 2004) , bilingual se mantic maps are constructed to guide word alignment. The framework we proposed seamlessly integrates derived semantic similarities into a statistical word alignment model. And we extended monolingual latent se mantic analysis in bilingual application.", |
|
"cite_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 19, |
|
"text": "(Ma et al. 2004)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Most existing tools extract ECC, i.e., sentences containing citation marks. Although the results are highly relevant, the method omits ICC if the author uses multiple sentences to summarize the results. To our best knowledge, there are no offthe-shelf tools dedicated to ICC extraction. Unlike ECC sentences with citation marks, the lack of explicit marks makes citation context recognition challenging.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we develop a Java API that implements a supervised machine learning model trained on 7058 manually labeled sentences to extract both ECC and ICC. The model achieves an F 1 -measure of 85.6%. The Java API can be deployed on a local machine or as a web service.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Several citation context extraction methods have been developed. In Nanba and Okumura (1999) , the scope of the citation context covered several consecutive sentences before and after the sentences with citation marks (i.e., citation sentence), identified based on a referential relationship with the citation sentence. In another work, Markov model was used for identifying citation context (Qazvinian and Radev, 2010) . Sugiyama (2010) described a support vector machine (SVM) and maximum entropy (ME) model for identifying citation sentences using shallow features such as proper nouns and contextual classification of the previous and next sentence (Sugiyama et al., 2010) . They found that the performances of SVM and ME do not exhibit significant differences. The positive samples were selected as sentences including citation marks using regular expression matching, ICC extraction was not covered.", |
|
"cite_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 92, |
|
"text": "Nanba and Okumura (1999)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 392, |
|
"end": 419, |
|
"text": "(Qazvinian and Radev, 2010)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 422, |
|
"end": 437, |
|
"text": "Sugiyama (2010)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 653, |
|
"end": 676, |
|
"text": "(Sugiyama et al., 2010)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "ParsCit is an open-source software commonly used for citation parsing and citation context extraction (Councill et al., 2008) . ParsCit parses citation strings using a Conditional Random Field (CRF) model. The citation context extraction was performed by extracting a fixed window size of 200 characters on either side of the citation mark. GRO-BID (Lopez, 2009) is a library to extract information from scholarly documents. The documentation reports the F 1 -measure of citation context resolution is around 75%, which counts both the correct identification of citation marks and its correct association with bibliographic references.", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 125, |
|
"text": "(Councill et al., 2008)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 349, |
|
"end": 362, |
|
"text": "(Lopez, 2009)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
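
{

"text": "To make the fixed-window strategy concrete, the following minimal Java sketch (for illustration only; it is not ParsCit's actual code, and the method name is hypothetical) extracts 200 characters on either side of a citation mark:\n\n/** Illustrative fixed-window context extraction in the style of ParsCit (hypothetical helper). */\nstatic String windowContext(String bodyText, int markStart, int markEnd) {\n    final int WINDOW = 200; // characters on either side of the citation mark\n    int from = Math.max(0, markStart - WINDOW);\n    int to = Math.min(bodyText.length(), markEnd + WINDOW);\n    return bodyText.substring(from, to);\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},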
|
{ |
|
"text": "In summary, existing citation context extraction tools focus on ECC but ignore ICC, the latter of which includes more sentences semantically related to the cited papers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our system is based on a supervised machine learning model proposed in Lei et al. (2016) , which classifies a sentence into ICC and non-ICC.", |
|
"cite_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 88, |
|
"text": "Lei et al. (2016)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised Machine Learning Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We adopted the ground truth built by Lei et al. (2016) containing 130 articles from 34,000 computational linguistics conference proceedings in ACL Anthology. The original PDF files were converted to XML format using OCR (Sch\u00e4fer and Weitz, 2012) . The training set was labeled by 13 graduate students majoring in information management. The labeling agreement was tested using Cohen's Kappa Coefficient (\u03ba = 0.937). The fi-nal ground truth contains 3,578 positive and 3,480 negative samples. The preprocessing uses Apache OpenNLP for sentence segmentation. Citation marks are identified using regular expressions. Citation marks are then removed, and the original sentences are converted into regular sentences for following analyses such as part-of-speech (POS) tagging. Each sentence is represented by up to 19 features of four types (Table 1 ). The best model using all features achieves 86% F 1 -measure in a the 10-fold cross validation. The SVM outperformed CRF by about 5% in F 1 -measure (Table 2) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 54, |
|
"text": "Lei et al. (2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 220, |
|
"end": 245, |
|
"text": "(Sch\u00e4fer and Weitz, 2012)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 836, |
|
"end": 844, |
|
"text": "(Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 996, |
|
"end": 1005, |
|
"text": "(Table 2)", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Supervised Machine Learning Model", |
|
"sec_num": "3" |
|
}, |
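
{

"text": "As an illustration of the regular-expression step, the sketch below (the pattern is deliberately simplified; SCC's actual patterns are not listed in this paper) matches parenthesized author-year citation marks and removes them to recover a regular sentence for POS tagging:\n\nimport java.util.regex.Matcher;\nimport java.util.regex.Pattern;\n\npublic class CitationMarkDemo {\n    // Simplified author-year pattern; the real SCC patterns are more elaborate.\n    static final Pattern MARK = Pattern.compile(\"\\\\([^()]*\\\\d{4}[a-z]?\\\\)\");\n\n    public static void main(String[] args) {\n        String s = \"In (Ma et al. 2004), bilingual semantic maps are constructed.\";\n        Matcher m = MARK.matcher(s);\n        while (m.find()) {\n            System.out.println(m.group() + \" at [\" + m.start() + \", \" + m.end() + \")\");\n        }\n        // replaceAll resets the matcher; the mark is removed before further analysis.\n        System.out.println(m.replaceAll(\"\").trim());\n    }\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Supervised Machine Learning Model",

"sec_num": "3"

},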
|
{ |
|
"text": "The SCC system completes the extraction in four steps ( Figure 1 ): (1) file type recognition, (2) preprocessing, (3) feature extraction, and (4) sentence classification. The output is a JSON file containing ECC and ICC and other citation-related information. The API was written based on the Springboot framework in Java. The machine learning model was implemented with WEKA.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 56, |
|
"end": 64, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "4" |
|
}, |
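
{

"text": "At a glance, the pipeline can be pictured as the following Java interface; this rendering is for readability only, and the type and method names are hypothetical rather than taken from the released API:\n\nimport java.nio.file.Path;\n\n/** Hypothetical view of the four-step SCC pipeline. */\npublic interface CitationContextPipeline {\n    enum FileType { PDF, TEI_XML, PLOS_XML, S2ORC_JSON, UNSUPPORTED }\n\n    FileType recognizeType(Path input);             // step 1: file type recognition (Section 4.1)\n    String canonicalize(Path input, FileType type); // step 2: schema-specific preprocessing (Section 4.2)\n    double[] features(String candidateSentence);    // step 3: up to 19 features per candidate sentence\n    boolean isCitationContext(double[] features);   // step 4: SVM classification, serialized to JSON\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Architecture",

"sec_num": "4"

},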
|
{ |
|
"text": "SCC first recognizes the uploaded file type. For a PDF file, SCC invokes GROBID and converts it to an XML file under the TEI schema. If an XML file is uploaded as input, SCC checks whether the schema is in compliance with TEI or PloS ONE schema and passes it to corresponding preprocessors. If a JSON file is uploaded, it checks if it is in compliance with the S2ORC schema, published by Semantic Scholar (Lo et al., 2020) . We apply Apache Tika to identify file format. Other format of data files will not be processed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 405, |
|
"end": 422, |
|
"text": "(Lo et al., 2020)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "File Type Recognition", |
|
"sec_num": "4.1" |
|
}, |
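
{

"text": "A minimal sketch of the detection step with Apache Tika is shown below; the class is not part of the released API, and the routing in the comment summarizes the paragraph above:\n\nimport java.io.File;\nimport java.io.IOException;\nimport org.apache.tika.Tika;\n\npublic class FormatCheck {\n    public static void main(String[] args) throws IOException {\n        // Tika inspects the file and returns a MIME type such as\n        // application/pdf, application/xml, or application/json.\n        String mime = new Tika().detect(new File(args[0]));\n        System.out.println(mime);\n        // SCC then routes the file to GROBID (for PDF) or to a schema-specific preprocessor (for XML/JSON).\n    }\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "File Type Recognition",

"sec_num": "4.1"

},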
|
{ |
|
"text": "The preprocessing step reads files passed from the last step with customized preprocessors depending on the schema and prepares a canonicalized XML for feature extraction. This step includes the following modules.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "This module involves removing irrelevant tags from the DOM structure in the XML file. For example, in the PloS ONE XML files, the <fig>, <sub>, and <italic> tags used for marking up figures, superscripts, and italic font are all moved. Only the text inside these tags are retained. The <xref> tags mark positions of citations, which will be used for restoring citations. We use a separate data structure to store the positions of <xref> tags before removing them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tag removal", |
|
"sec_num": "4.2.1" |
|
}, |
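
{

"text": "The paper does not show SCC's XML handling code; the sketch below reproduces the described behavior with jsoup (our choice of library for this illustration) on a toy PLoS-style fragment, recording the <xref> marks before unwrapping the markup tags:\n\nimport java.util.ArrayList;\nimport java.util.List;\nimport org.jsoup.Jsoup;\nimport org.jsoup.nodes.Document;\nimport org.jsoup.nodes.Element;\nimport org.jsoup.parser.Parser;\n\npublic class TagRemoval {\n    public static void main(String[] args) {\n        String xml = \"<p>Prior work <xref rid='B5'>[5]</xref> built <italic>semantic maps</italic>.</p>\";\n        Document doc = Jsoup.parse(xml, \"\", Parser.xmlParser());\n        List<String> citations = new ArrayList<>();\n        for (Element x : doc.select(\"xref\")) {  // remember the citation marks first\n            citations.add(x.attr(\"rid\") + \"=\" + x.text());\n            x.unwrap();                          // keep the mark text, drop the tag\n        }\n        for (Element e : doc.select(\"fig, sub, italic\")) {\n            e.unwrap();                          // only the inner text is retained\n        }\n        System.out.println(doc.text() + \" \" + citations);\n    }\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Tag removal",

"sec_num": "4.2.1"

},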
|
{ |
|
"text": "We compared five commonly used sentence segmentation tools, including the Pragmatic Segmenter by Kevin Dias 1 , lingpine 2 , NLTK 3 , a regular expression parser, and the Stanford CoreNLP (Manning et al., 2014) sentence splitter. The golden standard contains 52 sentences provided by Kevin Dias, which covers most possible sentence forms. According to Dias' comparison, the Pragmatic Segmenter receives an accuracy of 98% and the Stanford CoreNLP's accuracy is 59.6%. In our experiments, the accuracies for Lingpipe, NLTK, and 1 https://github.com/diasks2/pragmatic_ segmenter 2 http://www.alias-i.com/lingpipe/ 3 https://www.nltk.org/ regular expression parsers are 61.5%, 50.0%, and 38.5%, respectively. The Pragmatic Segmenter is implemented by Ruby on Rails. To make our API less dependent on a second programming language, we decided to employ Lingpipe for sentence segmentation. We select up to five sentences before and after the current citation sentence as the candidates for classification. This covers almost all sentences that could be classified as ICC.", |
|
"cite_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 210, |
|
"text": "(Manning et al., 2014)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence segmentation", |
|
"sec_num": "4.2.2" |
|
}, |
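
{

"text": "The candidate selection rule is simple enough to state as code; the sketch below (not taken from the SCC codebase) returns the indices of up to five sentences on each side of the citation sentence:\n\nimport java.util.ArrayList;\nimport java.util.List;\n\npublic class CandidateWindow {\n    /** Indices of ICC candidates around the citation sentence at index i. */\n    static List<Integer> candidates(int i, int nSentences) {\n        List<Integer> out = new ArrayList<>();\n        for (int j = Math.max(0, i - 5); j <= Math.min(nSentences - 1, i + 5); j++) {\n            if (j != i) out.add(j); // the citation sentence itself is the ECC, not an ICC candidate\n        }\n        return out;\n    }\n\n    public static void main(String[] args) {\n        System.out.println(candidates(2, 10)); // prints: [0, 1, 3, 4, 5, 6, 7]\n    }\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Sentence segmentation",

"sec_num": "4.2.2"

},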
|
{ |
|
"text": "Because the input XML may have different schemas, this module takes the processed documents from the above modules and transforms them into a unified schema for feature extraction. The canonicalized schema defines new IDs for chapters, paragraphs, sentences, and citations. The canonicalized XML also includes whether the current sentence contains citation marks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Canonicalization", |
|
"sec_num": "4.2.3" |
|
}, |
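
{

"text": "The canonical schema itself is not published; as a purely hypothetical illustration of the ID scheme, a canonicalized sentence element could be assembled as follows:\n\npublic class CanonicalIds {\n    /** Hypothetical element layout; the real schema's element and attribute names may differ. */\n    static String sentenceElement(int ch, int p, int s, boolean hasCitation, String text) {\n        return \"<s id='c\" + ch + \".p\" + p + \".s\" + s + \"' citation='\" + hasCitation + \"'>\" + text + \"</s>\";\n    }\n\n    public static void main(String[] args) {\n        // prints: <s id='c2.p4.s1' citation='true'>In (Ma et al. 2004), ...</s>\n        System.out.println(sentenceElement(2, 4, 1, true, \"In (Ma et al. 2004), ...\"));\n    }\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Canonicalization",

"sec_num": "4.2.3"

},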
|
{ |
|
"text": "This step extracts 19 features (Table 1) from the canonicalized XML files and represents each candidate sentence as a vector saved in Livsvm files 4 . The SVM model classifies each sentence and outputs a binary indicating whether a sentence is ICC or not. The output JSON file contains citation marks and their positions, citation sentences, and sentences classified as ICC.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 40, |
|
"text": "(Table 1)", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Feature Extraction and Text Classification", |
|
"sec_num": "4.3" |
|
}, |
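
{

"text": "For readers unfamiliar with the LIBSVM sparse format, the sketch below (not taken from the SCC codebase) serializes one candidate sentence's 19-dimensional feature vector as a single line; the feature values here are made up:\n\npublic class LibsvmLine {\n    /** One LIBSVM-format line: label followed by 1-based index:value pairs; zero entries are skipped. */\n    static String toLibsvm(int label, double[] features) {\n        StringBuilder sb = new StringBuilder().append(label);\n        for (int i = 0; i < features.length; i++) {\n            if (features[i] != 0.0) sb.append(' ').append(i + 1).append(':').append(features[i]);\n        }\n        return sb.toString();\n    }\n\n    public static void main(String[] args) {\n        double[] v = new double[19]; // up to 19 features per sentence (Table 1)\n        v[0] = 1.0; v[4] = 0.5; v[18] = 2.0;\n        System.out.println(toLibsvm(1, v)); // prints: 1 1:1.0 5:0.5 19:2.0\n    }\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Feature Extraction and Text Classification",

"sec_num": "4.3"

},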
|
{ |
|
"text": "Users can install SCC on a local machine. The API interface supports 3 modes:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User Interfaces", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "1. Single document mode -using the /extract service;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User Interfaces", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "2. Batch extraction model with files zipped and transferred through TCP/IP -using", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User Interfaces", |
|
"sec_num": "4.3.1" |
|
}, |
|
|
{ |
|
"text": "3. Local extraction model with files retrieved from a local directory -using /localExtract.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User Interfaces", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "In the single document and batch extraction modes, the API will return JSON objects and execution status. For the local extraction mode, the API will return the execution status and the results will be saved in JSON files.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User Interfaces", |
|
"sec_num": "4.3.1" |
|
}, |
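
{

"text": "A minimal client for the single document mode might look like the sketch below; only the /extract path comes from the description above, while the host, port, and request layout are our assumptions:\n\nimport java.net.URI;\nimport java.net.http.HttpClient;\nimport java.net.http.HttpRequest;\nimport java.net.http.HttpResponse;\nimport java.nio.file.Path;\n\npublic class ExtractClient {\n    public static void main(String[] args) throws Exception {\n        HttpRequest req = HttpRequest.newBuilder()\n                .uri(URI.create(\"http://localhost:8080/extract\")) // host and port are assumptions\n                .POST(HttpRequest.BodyPublishers.ofFile(Path.of(args[0])))\n                .build();\n        HttpResponse<String> resp = HttpClient.newHttpClient()\n                .send(req, HttpResponse.BodyHandlers.ofString());\n        System.out.println(resp.statusCode()); // execution status\n        System.out.println(resp.body());       // JSON object with ECC/ICC sentences\n    }\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "User Interfaces",

"sec_num": "4.3.1"

},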
|
{ |
|
"text": "We test the SCC API on a computer with 16GB RAM and an Intel Core i7-8570H CPU@2.20GHz, which has 6 hyperthreaded cores (12 threads in total). In a preliminary experiment, we compare the runtime of processing 10 XML documents using a single process under different JVM heap sizes. The runtimes corresponding to 12GB, 8GB, 4GB, and 2GB are 23.8 min, 12.7 min, 7.3 min, and 7.6 min, respectively. Higher heap does not boost processing speed probably due to garbage collection. Based on the results, in the following experiments, 4GB heap was allocated to JVM. The experiments were set to extract citation context from randomly selected documents in different formats. The datasets include 10 PDF documents from PLoS ONE, 10 XML documents corresponding to the PDF documents, and 10 JSON documents from the CORD-19 dataset. We monitor the system using Jprofiler (version 11) and calculate the median time it takes for processing one document as we vary the number of processes N p . Figure 2 shows that the CPU utilization increases from about 10% and saturates when N p reaches 8. The memory utilization climbs up slowly as N p increases but are mostly well below the maximum allocated heap, because processed documents are not stored in memory anymore. The average processing time for all three types gradually decreases as N p increases but in general, it takes longer to process PDF files than JSON and XML files. The maximum and minimum processing time are shown in Table 3 . The runtime can be further reduced by running the API on a computer with more processes on a multicore server. On average, JSON files take the least time to process. We apply SCC and extract ECC and ICC from the CORD-19 dataset. is an open-access dataset compiled by Allen Institute of Artificial Intelligence about COVID-19, SARS, MERS, and related keyphrases conforming to the S2ORC schema (Lo et al., 2020) . We downloaded the data released on June 13, 2020 including 50,818 and 69,646 full text papers under the PMC and the PDF folders respectively. The PMC folder contains full-text files obtained by parsing JATS 5 XML files available for PMC papers using a custom parser, generated to the same target output JSON format. This resulted in 1,605,695 ECC and 10,215,848 ICC sentences from 33,319 documents. A fraction of documents was not processed due to the lack of citation marks and runtime exceptions. SCC code is released at https://gitee.com/ irlab/SmartCiteCon. The dataset is available on Microsoft OneDrive with a link on the code repository.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1869, |
|
"end": 1886, |
|
"text": "(Lo et al., 2020)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 979, |
|
"end": 987, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1467, |
|
"end": 1474, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "SCC API Performance", |
|
"sec_num": "5" |
|
}, |
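
{

"text": "The experiments vary the number of worker processes Np; the same idea can be sketched with a fixed-size thread pool (the per-document call is a stub, and SCC's actual batch runner may differ):\n\nimport java.nio.file.Files;\nimport java.nio.file.Path;\nimport java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\nimport java.util.concurrent.TimeUnit;\nimport java.util.stream.Stream;\n\npublic class BatchRunner {\n    public static void main(String[] args) throws Exception {\n        int np = Integer.parseInt(args[1]); // degree of parallelism, Np\n        ExecutorService pool = Executors.newFixedThreadPool(np);\n        try (Stream<Path> docs = Files.list(Path.of(args[0]))) {\n            docs.forEach(doc -> pool.submit(() -> process(doc)));\n        }\n        pool.shutdown();\n        pool.awaitTermination(1, TimeUnit.HOURS); // wait for all documents to finish\n    }\n\n    static void process(Path doc) {\n        // stub: invoke the extraction pipeline for one document here\n    }\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "SCC API Performance",

"sec_num": "5"

},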
|
{ |
|
"text": "The results in Table 3 indicate that SCC takes about 45 seconds on average to process a PDF document, which is still relatively slow. Using Jprofiler, we found that more than 90% time was spent on preprocessing, specifically canonicalization, followed by sentence classification (for XML and JSON) or 5 https://jats.nlm.nih.gov/ file type recognition (for PDF). The bottleneck is partially attributed to the word tokenization and POS tagging in the Stanford CoreNLP API. One way to mitigate this problem is to use the Stanford CoreNLP Server 6 . Alternatively, we can use Stanza (Qi et al., 2020) , the successor of Stanford CoreNLP. Empirical results have shown that it is faster than CoreNLP in several NLP tasks. Stanza was written in Python, but we can develop a RESTful service. The slowness can also be attributed to the poor garbage collection in Java, which can impact CPU usage massively. A more systematic and fine-grained profiling is needed to diagnose the root cause of this problem.", |
|
"cite_spans": [ |
|
{ |
|
"start": 579, |
|
"end": 596, |
|
"text": "(Qi et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 22, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Lessons Learned", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We developed SmartCiteCon (SCC), a Java API to extract explicit and implicit citation context from academic literature. The API implements an SVM model achieving an F 1 = 85.6%. SCC accepts XML (in PLoS ONE schema or GROBID schema), PDF, and JSON (in S2ORC schema) formats. The output of SCC is a JSON file containing marked citation contexts and paper metadata if available. We applied SCC on the PMC subset of the CORD-19 dataset and obtained about 11.8 million citation context sentences in which 10.2 million are implicit citation context.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Works", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "One limitation of SCC is that the model was trained on papers in computational linguistics, so more careful evaluation and feature distribution analysis should be performed when applying the model to other domains. In the future, we will explore word embedding models to enrich semantic features and improve scalability by overcoming performance bottlenecks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Works", |
|
"sec_num": "8" |
|
}, |
|
|
{ |
|
"text": "Extracting Citation Context from SCC is different from similar tools such as ParsCit and GROBID in that it extracts both ECC and ICC.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank Zikun Feng for setting up a web-based user interface and Shengwei Lei for constructive discussion.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Citationenhanced keyphrase extraction from research papers: A supervised approach", |
|
"authors": [ |
|
{ |
|
"first": "Cornelia", |
|
"middle": [], |
|
"last": "Caragea", |
|
"suffix": "" |
|
}, |
|
{

"first": "Florin",

"middle": [

"Adrian"

],

"last": "Bulgarov",

"suffix": ""

},

{

"first": "Andreea",

"middle": [],

"last": "Godea",

"suffix": ""

},

{

"first": "Sujatha",

"middle": [

"Das"

],

"last": "Gollapalli",

"suffix": ""

}
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1435--1446", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cornelia Caragea, Florin Adrian Bulgarov, Andreea Godea, and Sujatha Das Gollapalli. 2014. Citation- enhanced keyphrase extraction from research papers: A supervised approach. In Proceedings of the 2014 Conference on Empirical Methods in Natural Lan- guage Processing, EMNLP 2014, October 25-29, 2014, Doha, Qatar, A meeting of SIGDAT, a Special Interest Group of the ACL, pages 1435-1446.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Scientific article summarization using citation-context and article's discourse structure", |
|
"authors": [ |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Cohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazli", |
|
"middle": [], |
|
"last": "Goharian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "390--400", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D15-1045" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arman Cohan and Nazli Goharian. 2015. Scientific ar- ticle summarization using citation-context and arti- cle's discourse structure. In Proceedings of the 2015 Conference on Empirical Methods in Natural Lan- guage Processing, pages 390-400, Lisbon, Portugal. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "ParsCit: an open-source CRF reference string parsing package", |
|
"authors": [ |
|
{ |
|
"first": "Isaac", |
|
"middle": [], |
|
"last": "Councill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lee", |
|
"middle": [], |
|
"last": "Giles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min-Yen", |
|
"middle": [], |
|
"last": "Kan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the Sixth International Conference on Language Resources and Evaluation (LREC'08)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Isaac Councill, C Lee Giles, and Min-Yen Kan. 2008. ParsCit: an open-source CRF reference string pars- ing package. In Proceedings of the Sixth Interna- tional Conference on Language Resources and Eval- uation (LREC'08).", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Research on automatic recognition of academic citation context", |
|
"authors": [ |
|
{ |
|
"first": "Shengwei", |
|
"middle": [], |
|
"last": "Lei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haihua", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yong", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Library and Information Service", |
|
"volume": "60", |
|
"issue": "17", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shengwei Lei, Haihua Chen, Yong Huang, and Wei Lu. 2016. Research on automatic recognition of aca- demic citation context. Library and Information Ser- vice, 60(17).", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "S2ORC: The semantic scholar open research corpus", |
|
"authors": [ |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucy", |
|
"middle": [ |
|
"Lu" |
|
], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rodney", |
|
"middle": [], |
|
"last": "Kinney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Weld", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4969--4983", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.447" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyle Lo, Lucy Lu Wang, Mark Neumann, Rodney Kin- ney, and Daniel Weld. 2020. S2ORC: The semantic scholar open research corpus. In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 4969-4983, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "GROBID: combining automatic bibliographic data recognition and term extraction for scholarship publications", |
|
"authors": [ |
|
{ |
|
"first": "Patrice", |
|
"middle": [], |
|
"last": "Lopez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 13th European Conference on Research and Advanced Technology for Digital Libraries, ECDL'09", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "473--474", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Patrice Lopez. 2009. GROBID: combining auto- matic bibliographic data recognition and term ex- traction for scholarship publications. In Proceed- ings of the 13th European Conference on Re- search and Advanced Technology for Digital Li- braries, ECDL'09, pages 473-474, Berlin, Heidel- berg. Springer-Verlag.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The Stanford CoreNLP natural language processing toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Bauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jenny", |
|
"middle": [], |
|
"last": "Finkel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Bethard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Mc-Closky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Association for Computational Linguistics (ACL) System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher D. Manning, Mihai Surdeanu, John Bauer, Jenny Finkel, Steven J. Bethard, and David Mc- Closky. 2014. The Stanford CoreNLP natural lan- guage processing toolkit. In Association for Compu- tational Linguistics (ACL) System Demonstrations, pages 55-60.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Finding implicit citations in scientific publications", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Murray", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan Murray. 2015. Finding implicit citations in scientific publications. Master's thesis, KTH Royal Insitute of Technology.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Towards multi-paper summarization using reference information", |
|
"authors": [ |
|
{ |
|
"first": "Hidetsugu", |
|
"middle": [], |
|
"last": "Nanba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manabu", |
|
"middle": [], |
|
"last": "Okumura", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of the Sixteenth International Joint Conference on Artificial Intelligence, IJ-CAI '99", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "926--931", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hidetsugu Nanba and Manabu Okumura. 1999. To- wards multi-paper summarization using reference in- formation. In Proceedings of the Sixteenth Interna- tional Joint Conference on Artificial Intelligence, IJ- CAI '99, page 926-931, San Francisco, CA, USA. Morgan Kaufmann Publishers Inc.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Identifying non-explicit citing sentences for citation-based summarization", |
|
"authors": [ |
|
{ |
|
"first": "Vahed", |
|
"middle": [], |
|
"last": "Qazvinian", |
|
"suffix": "" |
|
}, |
|
{

"first": "Dragomir",

"middle": [

"R."

],

"last": "Radev",

"suffix": ""

}
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "555--564", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vahed Qazvinian and Dragomir R. Radev. 2010. Identi- fying non-explicit citing sentences for citation-based summarization. In Proceedings of the 48th Annual Meeting of the Association for Computational Lin- guistics, pages 555-564, Uppsala, Sweden. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Stanza: A python natural language processing toolkit for many human languages", |
|
"authors": [ |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuhao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuhui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Bolton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, ACL 2020, Online", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "101--108", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peng Qi, Yuhao Zhang, Yuhui Zhang, Jason Bolton, and Christopher D. Manning. 2020. Stanza: A python natural language processing toolkit for many human languages. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics: System Demonstrations, ACL 2020, On- line, July 5-10, 2020, pages 101-108. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Combining OCR outputs for logical document structure markup. technical background to the ACL 2012 contributed task", |
|
"authors": [ |
|
{ |
|
"first": "Ulrich", |
|
"middle": [], |
|
"last": "Sch\u00e4fer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Weitz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Special Workshop on Rediscovering 50 Years of Discoveries@ACL 2012", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "104--109", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ulrich Sch\u00e4fer and Benjamin Weitz. 2012. Combining OCR outputs for logical document structure markup. technical background to the ACL 2012 contributed task. In Proceedings of the Special Workshop on Rediscovering 50 Years of Discoveries@ACL 2012, Jeju Island, Korea, July 10, 2012, pages 104-109. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Identifying citing sentences in research papers using supervised learning", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Sugiyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Tripathi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "2010 International Conference on Information Retrieval Knowledge Management (CAMP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "67--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. Sugiyama, T. Kumar, M. Kan, and R. C. Tripathi. 2010. Identifying citing sentences in research pa- pers using supervised learning. In 2010 Interna- tional Conference on Information Retrieval Knowl- edge Management (CAMP), pages 67-72.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "SmartCiteCon architecture.", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"text": "Features of the SVM model. A citation sentence is the sentence containing a citation mark.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"4\">Model Precision Recall F1-measure</td></tr><tr><td>SVM 19</td><td>85.6%</td><td>85.6%</td><td>85.6%</td></tr><tr><td>CRF 19</td><td>82.2%</td><td>79.9%</td><td>80.8%</td></tr></table>" |
|
}, |
|
"TABREF2": { |
|
"text": "Evaluation of SVM and CRF models on 19 features.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>" |
|
}, |
|
"TABREF3": { |
|
"text": "Figure 2: The performance of SCC on a multicore computer. Runtime is normalized at the 177 seconds; the middle panel shows the CPU utilization monitored by Jprofiler; the right panel shows the memory utilization normalized at 4GB.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"2\">XML</td><td colspan=\"2\">PDF</td><td colspan=\"2\">JSON</td></tr><tr><td colspan=\"6\">Max Min Max Min Max Min</td></tr><tr><td>164</td><td>43</td><td>177</td><td>45</td><td>39</td><td>12</td></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"text": "Runtime in seconds for different document formats. The maximum and the minimum runtime are achieved at N p = 1 and N p = 8, respectively.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |