|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:29:33.281129Z" |
|
}, |
|
"title": "TopGuNN: Fast NLP Training Data Augmentation using Large Corpora", |
|
"authors": [ |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Iglesias-Flores", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Pennsylvania",
|
"location": {} |
|
}, |
|
"email": "irebecca@seas.upenn.edu" |
|
}, |
|
{ |
|
"first": "Megha", |
|
"middle": [], |
|
"last": "Mishra", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Pennsylvania",
|
"location": {} |
|
}, |
|
"email": "mmishra@seas.upenn.edu" |
|
}, |
|
{ |
|
"first": "Ajay", |
|
"middle": [], |
|
"last": "Patel", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Pennsylvania",
|
"location": {} |
|
}, |
|
"email": "ajayp@seas.upenn.edu" |
|
}, |
|
{ |
|
"first": "Akanksha", |
|
"middle": [], |
|
"last": "Malhotra", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "akanksha.malhotra@colorado.edu" |
|
}, |
|
{ |
|
"first": "Reno", |
|
"middle": [], |
|
"last": "Kriz", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Pennsylvania",
|
"location": {} |
|
}, |
|
"email": "rekriz@seas.upenn.edu" |
|
}, |
|
{ |
|
"first": "Martha", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "martha.palmer@colorado.edu" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Pennsylvania",
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Acquiring training data for natural language processing systems can be expensive and time-consuming. Given a few training examples crafted by experts, large corpora can be mined for thousands of semantically similar examples that provide useful variability to improve model generalization. We present TopGuNN, a fast contextualized k-NN retrieval system that can efficiently index and search over contextual embeddings generated from large corpora to easily retrieve new diverse training examples. TopGuNN is demonstrated for a semantic role labeling training data augmentation use case over the Gigaword corpus. Using approximate k-NN and an efficient architecture, TopGuNN performs queries over an embedding space of 4.63TB (approximately 1.5B embeddings) in less than a day.",
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Acquiring training data for natural language processing systems can be expensive and time-consuming. Given a few training examples crafted by experts, large corpora can be mined for thousands of semantically similar examples that provide useful variability to improve model generalization. We present TopGuNN, a fast contextualized k-NN retrieval system that can efficiently index and search over contextual embeddings generated from large corpora to easily retrieve new diverse training examples. TopGuNN is demonstrated for a semantic role labeling training data augmentation use case over the Gigaword corpus. Using approximate k-NN and an efficient architecture, TopGuNN performs queries over an embedding space of 4.63TB (approximately 1.5B embeddings) in less than a day.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "To collect training data for natural language processing (NLP) models, researchers have to rely on manual labor-intensive methods like crowdsourcing or hiring domain experts. Rather than relying on such techniques, we present TopGuNN, a system to make it quick and easy for researchers to create a larger training set, starting with just a few examples. Large-scale language models can be effectively used to search for similar words or sentences; however, attempting to extract the most similar words from a large corpus can become intractable and time consuming. Our system Top-GuNN utilizes a fast contextualized k-NN retrieval pipeline to quickly mine for a diverse set of training examples from large corpora. The system first creates a contextual word-level index from a corpus. Then, given a query word in a training example, it finds new sentences with words used in similar contexts to the query word. Figure 1 shows an example of the results of querying for the word \"diagnosis\" used in different contexts. TopGuNN pre-computes BERT contextualized word embeddings over the entire corpus, and then efficiently searches through them when queried using approximate k-NN indexing algorithms. Our system has been designed with efficiency and scalability in mind. We demonstrate its use by indexing the Gigaword corpus, a large corpus for which we pre-computed 1.5B contextualized word embeddings (totaling 4.63TB), and using TopGuNN to run search queries over it. A detailed description of the system's architecture is given in Section 3.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 911, |
|
"end": 919, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our primary use case for TopGuNN was to retrieve more training data for an event extraction and semantic role labeling task. We start with a few example sentences of each event type, identify query words within each example sentence (often the event verb), and then query TopGuNN to find new instances of similar sentences. These candidates are quickly voted on by non-expert human annotators who check the correctness of the semantic type (described in Section 2). Using active learning strategies, these filtered candidates can then be used to better tune TopGuNN's retrieval in the future. We demonstrate how our system can be used to mine for new diverse training data from large corpora with an efficient human-in-the-loop process given just a few samples to start with.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human-in-the-Loop with TopGuNN", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "Our primary use case stems from our work on the DARPA KAIROS program. 1 The DARPA KAIROS program seeks to develop a schemabased AI system that can identify complex events in unstructured text and bring them to the attention of users like intelligence analysts. KAIROS systems are based on an ontology of abstracted event schemas which are complex event templates. Complex event schemas are made up of a series of simpler events, and specify information about participant roles, temporal order, and causal relations between the simpler events. The simplest level event representations used in KAIROS are \"event primitives\". For each event primitive, a definition of the primitive is given along with the event's semantic roles. An example of a KAIROS event primitive is Attack: Each event primitive contained 2-5 example sentences. Prior to TopGuNN, example sentences were selected by linguists who manually retrieved them from a corpus by keyword search. With Top-GuNN, we can find thousands of candidate sentences automatically and then annotators can make a quick pass to filter down to the final set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 71, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Use Case: KAIROS Event Primitives", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Label Conflict.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Use Case: KAIROS Event Primitives", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Some work attempts to create event extraction systems without extensive training data. For instance, discusses how training could be performed using a single \"bleached statement,\" or a definition of an event, without needing a large set of labeled training examples. Rather than relying on such techniques, we design a system to make it quick and easy for annotators to create a larger training set .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Use Case: KAIROS Event Primitives", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "TopGuNN was used to index the Linguistic Data Consortium's English Gigaword Fifth Edition Corpus (Parker et al., 2011) . Gigaword consists of approximately 12 gigabytes of news articles from 7 distinct international news agencies, spanning 16 years from 1994-2010, and contains a total of 183 million sentences and 4.3 billion tokens. 2", |
|
"cite_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 118, |
|
"text": "(Parker et al., 2011)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "TopGuNN creates contextualized word embeddings for each content word in the corpus and for each query word in the query sentences. We use BERT (Devlin et al., 2019a) to create the embeddings because BERT produces contextually-aware embeddings unlike word2vec and GloVe (Mikolov et al., 2013; Pennington et al., 2014) . 3 FastBERT or DistilBERT would also be appropriate choices, but come with an accuracy trade-off for speed (Liu et al., 2020; Sanh et al., 2019) . We also investigated running TopGuNN at the sentence-level using sentence embeddings from SBERT and computing averaged sentence embeddings using BERT (Reimers and Gurevych, 2019) . Qualitatively, the results from using BERT at the word-level gave us diversity in the results that we desired (see Appendix B).", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 165, |
|
"text": "(Devlin et al., 2019a)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 269, |
|
"end": 291, |
|
"text": "(Mikolov et al., 2013;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 292, |
|
"end": 316, |
|
"text": "Pennington et al., 2014)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 319, |
|
"end": 320, |
|
"text": "3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 425, |
|
"end": 443, |
|
"text": "(Liu et al., 2020;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 444, |
|
"end": 462, |
|
"text": "Sanh et al., 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 615, |
|
"end": 643, |
|
"text": "(Reimers and Gurevych, 2019)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Embedding Model", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "A total of 60 event primitives were annotated using TopGuNN. On average, we were given 2 seed sentences per event and 1-2 viable query words per sentence with which to run through TopGuNN. The query word was typically a verb-form of the event. Approximately 120 query sentences were used to retrieve over 10,000 candidate sentences that were later sent through 2 phases of annotation: 1) sentence classification and 2) span annotation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Retrieving Event Primitives", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "After annotators confirm \"yes/no\" on the candidate sentences meeting the event primitive definition, the sentences classified as \"yes\" are sent to semantic role labeling for span annotation using a semantic role labeling tool called Datasaur (Lee, 2019). 4",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Retrieving Event Primitives", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Our system works well in retrieving new, diverse variations of a query word used in contextually similar ways. Below, we display notable retrieved results we found to best showcase the utility of Top-GuNN running over the entire Gigaword corpus for gathering both positive and abstract examples for training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Examples of Retrieved Sentences", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "\u2022 Query Word \"We detected SARS-CoV-2 RNA on eight (36%) of 22 surfaces, as well as on the pillow cover, sheet, and duvet cover,\" demonstrating that presymptomatic patients can easily contaminate environments, the authors said. \"Our data also reaffirm the potential role of surface contamination in the transmission of SARS-CoV-2 and the importance of strict surface hygiene practices, including regarding linens of SARS-CoV-2 patients,\" they said. Retrieved Sentence Also keep in mind that infestations of adware/spyware are the leading cause of a slow computer. Cosine sim. of contaminate and infestations: 0.637 More notable results can be seen in Appendix C.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Positive Example", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To validate our system retrieves more relevant results as the size of the corpus it has access to grows we ran a test comparing the results of TopGuNN retrieval on a subset of Gigaword against full Gigaword (see Appendix D). The cosine similarities of retrieved results on the full Gigaword corpus were significantly higher than those retrieved from the subset. Qualitatively, the results appear to contain more apt variations of the retrieved word used in similar contexts as the query word.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Influence of Corpora Size", |
|
"sec_num": "2.4.1" |
|
}, |
|
{ |
|
"text": "A diagram of TopGuNN is given in Figure 2 . Top-GuNN is engineered to run in multiple stages: 1) Pre-processing, 2) Generating Embeddings, 3) Indexing, and 4) Running Queries.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 41, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System Design", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "During pre-processing we ingest a corpus and perform NLP analysis on each sentence. We use spaCy 5 to generate universal dependency labels and part-of-speech (POS) tags. We use the spaCy annotations to filter down the embeddings to a smaller subset that will be stored and indexed (resulting in a major reduction in the index size).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-Processing", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "During pre-processing we also construct several tables in a database to keep track of which sentence and document each word occurs in and what its POS and dependency labels are. This information is stored in 6 lookup dictionaries in a SQLiteDict 6 database seen in Appendix E.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-Processing", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "For our use case, we parallelized our preprocessing over each file in Gigaword. In a final step, we amalgamate the 6 lookup dictionaries per file into 6 lookup tables for the whole corpus. By doing so, we were able to use multiple CPUs for pre-processing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-Processing", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We partition the 183 million sentences in the Gigaword corpus into 960 sets of approximately 200,000 sentences each. For each partition, we pass batches of 175 sentences through BERT. Each partition is run in parallel using 16 NVIDIA GK210 GPUs on a p2.16xlarge machine with 732GB RAM on AWS, taking approximately 2 days to compute the BERT embeddings for all sentences in Gigaword.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Embeddings", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "BERT tokenizes its input using the WordPiece tokenization scheme (Devlin et al., 2019b for indexing and queries, not on word pieces, so we align BERT's WordPiece tokenization scheme to our word-level tokenization scheme. We aligned the BERT-style model's tokenization with spaCy's tokenization using the method described in a blog post by Sterbak (ste, 2018). 7 We then took the mean of the WordPiece embeddings in a word to represent the embedding for the full word. In order to reduce the number of embeddings we need to store on disk, only content words are kept from each sentence. Content words consist of nonproper nouns, verbs, adverbs, and adjectives only. We use POS tags to identify content words and use dependency labels in conjunction with POS tags to further filter out auxiliary verbs. We store the final filtered embeddings using NumPy's memory mapped format as our underlying data store. 8 We discuss the savings in disk space in Section 4.1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 86, |
|
"text": "(Devlin et al., 2019b", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 361, |
|
"text": "7", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 905, |
|
"end": 906, |
|
"text": "8", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Embeddings", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "All of the embeddings saved in the previous step for each of the 960 partitions are added to an Annoy index, to create 960 Annoy indexes that span our entire corpus. We use Spotify's Annoy indexing system created by Bernhardsson (2018) for approximate k-NN search, which has been shown to be significantly faster than exact k-NN (Patel et al., 2018) . While, there are various competing implementations for approximate k-NN, we ultimately used Annoy to power our similarity search for its 7 https://www.depends-on-the-definition.com/ named-entity-recognition-with-bert/ 8 https://numpy.org/doc/stable/reference/generated/ numpy.memmap.html ability to build and query on-disk indexes and reduce the amount of RAM required for search. 9", |
|
"cite_spans": [ |
|
{ |
|
"start": 216, |
|
"end": 235, |
|
"text": "Bernhardsson (2018)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 329, |
|
"end": 349, |
|
"text": "(Patel et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Indexing", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "TopGuNN allows you to query either a single query word or multiple query words batched together in a search query for performance. The input is a query matrix, which is a matrix of BERT embeddings for all query words in the batch.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Running Queries", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Each query word is queried against the 960 Annoy indexes. In order to retrieve the overall top-N results, we query each Annoy index for its top-N results, and we then combine and sort the results from all the Annoy indexes to return the final compiled top-N results. We use our look-up dictionaries to return the document, the sentence, and the word of each result. Search results from each of the query words over the Annoy indexes are combined at the end and exported to a .tsv for human annotation and active learning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Running Queries", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Sequentially searching each query word against the 960 Annoy indexes before moving on to the next query word is slow. To perform searches more efficiently, we sequentially query each of the 960 Annoy indexes with all query words. This leverages the operating system page cache in such a way that allows for the system to scale better to larger batches of queries. By querying in this manner, we only need to load each of the 960 Annoy index files (each index is~6GB) into memory once, instead of once per query word. This is a constant time fixed cost that we must pay for a single query, but subsequent queries will benefit from not having to load the Annoy index again. This fixed cost of loading the Annoy indexes can be amortized over all queries in a batch (see Table 4 ). 10 Using this method we get performance gains in speed, but we trade it off for higher-memory usage as now we have to hold the intermediate results in memory for all query words in the batch until all Annoy indexes are queried. This means that our memory usage grows linearly with the number of queries in each batch. In practice, we found this trade-off to be tolerable. For a batch of 189 queries, we had a peak memory usage of~70GB.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 767, |
|
"end": 774, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Enhancing Query Performance", |
|
"sec_num": "3.4.1" |
|
}, |
|
{ |
|
"text": "Since this could possibly yield no results if the top-N is sufficiently small and all results are filtered out, we add a parameter that is the number of unique results desired for each query. However, setting top-N to be very large would hinder the performance of the search queries.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Iterative Requery Method", |
|
"sec_num": "3.4.2" |
|
}, |
|
{ |
|
"text": "To strike a balance, we employ an iterative requery method that begins with a low top-N and incrementally requeries, increasing N by k (a configurable parameter) while the number of desired unique results retrieved is not met. A current search is halted once the number of desired unique results is met or terminated if the max top-N threshold is reached without meeting the number of desired unique results. This allows us to search the minimum possible amount of nearest neighbors required to reach the best unique results for maximal performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Iterative Requery Method", |
|
"sec_num": "3.4.2" |
|
}, |
|
{ |
|
"text": "4 Performance Details", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Iterative Requery Method", |
|
"sec_num": "3.4.2" |
|
}, |
|
{ |
|
"text": "The size of the Annoy index relies heavily on two parameters set at build time during post-processing: the number of trees (num_trees) and the number of nodes to inspect during searching (search_k). We also greatly reduce the size of the Annoy index by deciding to exclude non-content words from our index during the Section 3.2 stage.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Index Size", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We use the following heuristic following Patel et al. (2018) to maintain similar search perfor- 10 For example, after searching a query word \"identify\" on a particular Annoy index all subsequent queried words like \"hired\" or \"launched\" on that same Annoy index will leverage the operating system page cache of the Annoy index file and perform faster mance across our indexes: 1 num_trees = 2 max(50,int((num_vecs/3000000.0) * 50.0)) 3 search_k = top_n * num_trees Algorithm 1: Heuristic for Annoy parameters Excluding Non-content Words We computed the number of words in the entire Gigaword corpus to be 4.3B words. We made the decision to exclude non-content words (defined in Section 3.2) which helped us save resources by a factor of 2.8X while maintaining a high search speed. Using content words only for the Gigaword corpus resulted in a total file size of 16TB (see F and G in Figure 2 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 60, |
|
"text": "(2018)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 96, |
|
"end": 98, |
|
"text": "10", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 884, |
|
"end": 892, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Index Size", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "To give an idea of the TopGuNN system's performance on a corpus as large as Gigaword, we report times for building an index for Gigaword and querying it. Our system design is deconstructed into 4 different stages (as previously described in Section 3) separating out the CPU from the GPU processes in order to streamline the workflow and save on costs. For each stage, we utilized a machine with the best RAM and CPU configuration profile for each particular task and only used a machine with GPUs for Stage 2. For pre-processing, we used a total of 384 cores on a CPU cluster. For our \"Generating Embeddings\" stage, we utilized a machine with 732GB RAM and 16 GPUs. For post-processing, we used a 16 core machine with 128GB of RAM.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sample Running Times", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Build Times The times for running the different stages of TopGuNN on the entire Gigaword corpus can be seen in Table 3 . Because the Annoy indexes are partitioned, the first step could be parallelized to further reduce the 19.4 hours. Keeping cost management in mind, we ran this step serially to highlight its relevant use case even with limited budget (our budget was approximately $2,000).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 118, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sample Running Times", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "5 Other NLP Applications 5.1 Sentence-and Document-Level Retrieval For a sentence-level application, TopGuNN could be useful for training data in story generation. In Ippolito et al. (2020) , the author predicts the likely embedding of the next sentence. To facilitate the diversity and speed of candidate sentences used to generate the next sentence in the story, Top-GuNN could be employed with sentence embeddings to retrieve sentences from large corpora. For document-retrieval training data, Kriz et al. (2020) recasts text-simplification as a document-retrieval task. The author generates document-level embeddings from the Newsela corpus using BERT and SBERT and similarly adds them to an Annoy index to find documents with similar complexity levels as the query document.", |
|
"cite_spans": [ |
|
{ |
|
"start": 167, |
|
"end": 189, |
|
"text": "Ippolito et al. (2020)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 497, |
|
"end": 515, |
|
"text": "Kriz et al. (2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sample Running Times", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "DARPA KAIROS' events are similar to the events found in the IARPA BETTER multilingual information retrieval project. 11 A future application of TopGuNN could be querying in English and retrieving training examples in another language (or vice versa) by substituting BERT for GigaBERT (Lan et al., 2020) in TopGuNN. With this modification, TopGuNN could help facilitate multilingual retrieval of training examples.",
|
"cite_spans": [ |
|
{ |
|
"start": 284, |
|
"end": 302, |
|
"text": "(Lan et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual Information Retrieval", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Previous work that parallels our work to search and index large corpora includes projects like Lin et al. (2010) , which created an index of n-gram counts over a web-scale sized corpus. Similarly, as an extension to work completed by Lin et al. (1997) and Gao et al. (2002) , Moore and Lewis (2010) propose a method for gathering domainspecific training data for languages models for use in tasks such as Machine Translation. By utilizing contextual word embeddings from a modern language model like BERT instead of techniques like n-grams or perplexity analysis as seen in previous approaches, TopGuNN aims to achieve higher quality results.", |
|
"cite_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 112, |
|
"text": "Lin et al. (2010)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 234, |
|
"end": 251, |
|
"text": "Lin et al. (1997)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 256, |
|
"end": 273, |
|
"text": "Gao et al. (2002)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Our work directly builds upon prior research on approximate k-NN algorithms for cosine similarity search. We chose to use the Annoy package for indexing our embeddings in TopGuNN for its particular ability to build on-disk indexes, however, another package could be used instead. Aum\u00fcller et al. (2018) discusses various approximate k-NN algorithms that could alternatively be utilized for TopGuNN with alternate trade-offs in speed, memory, and other hardware requirements. By utilizing on-disk indexes on SSDs, which have fast random-access reads and high-throughput, we are able to use significantly cheaper machines than would be required to hold terabytes of indexes in RAM.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "You can get started with TopGuNN on GitHub: https://github.com/Penn-TopGuNN/TopGuNN", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Getting started with TopGuNN", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We have presented a system for fast training data augmentation from a large corpus. To the best of our knowledge, existing search approaches do not make use of contextual word embeddings to produce the high quality diverse results needed in training examples for tasks like our event extraction use case. We have open sourced our efficient, scalable system that makes the most efficient use of human-in-the-loop annotation. We also highlight several other NLP tasks where our system could facilitate training data augmentation in Section 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Future work may include enabling TopGuNN to query for multi-word expressions (i.e. \"put a name to\"), hyphenated expressions (i.e. \"pre-existing conditions\"), or in the form of natural language questions as seen in (Yu et al., 2019) . Finally, identifying antonymy as studied in (Rajana et al., 2017) would be a valuable extension for more finegrained search results as synonyms and antonyms often occupy the same embedding space.", |
|
"cite_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 231, |
|
"text": "(Yu et al., 2019)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 278, |
|
"end": 299, |
|
"text": "(Rajana et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "We would like to thank Erik Bernhardsson for the useful feedback on integrating Annoy indexing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Special thanks to Ashley Nobi for spearheading the annotation effort and Katie Conger at University of Colorado at Boulder for the training sessions on semantic role labeling she gave for the span annotation effort.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "We would like to thank the Fall 2020 semester students of CIS 421/521 -Artificial Intelligence and Leila Pearlman at the University of Pennsylvania, and the University of Colorado at Boulder's Team of Linguists for annotating TopGuNN results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "We would like to thank Ivan Lee, CEO of Datasaur Inc., Hartono Sulaiman and Nadya Nurhafidzah of Datasaur, for providing a seamless annotation tool for us to use and with around-theclock customer service in navigating the system. I would like to thank my post-doc Dr. Mohammad Sadegh Rasooli and my PhD labmate Aditya Kashyap for their invaluable input and constant availability to us throughout the project.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Special thanks to my senior PhD labmate Reno Kriz for his mentorship during this project.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "The first author was funded by NSF for the University of Pennsylvania under grant number DGE-1845298 (the Graduate Research Fellowships Program). This research is also supported in part by the DARPA KAIROS Program (contract FA8750-19-2-1004), the DARPA LwLL Program (contract FA8750-19-2-0201), and the IARPA BETTER Program (contract . Approved for Public Release, Distribution Unlimited. The views and conclusions contained herein are those of the authors and should not be interpreted as necessarily representing the official policies, either expressed or implied, of DARPA, IARPA, or the U.S. Government.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "The U.S. Government is authorized to reproduce and distribute reprints for Governmental purposes. The views and conclusions contained in this publication are those of the authors and should not be interpreted as representing official policies or endorsements of NSF, DARPA, and the U.S. Government.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "And to the timeless 1986 American cult-classic \"Top Gun,\" thanks for the inspiration on naming our retrieval system... I feel the need for speed!", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Our work utilizes BERT and therefore it contains the inherent biases that exist in language models trained on large amounts of unsupervised data collected from the internet. Kurita et al. (2019) analyzes the various biases that exist specifically in BERT.", |
|
"cite_spans": [ |
|
{ |
|
"start": 174, |
|
"end": 194, |
|
"text": "Kurita et al. (2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Ethical Considerations/Discussion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our own tests, we directly observed some of these biases when querying for the DARPA KAIROS DETONATE:EXPLODE event over a subset of Gigaword. Querying the word bombing in the sentence \"Rabee'a owned a drill rig, and his friend had heard stories from elsewhere in Yemen about jets bombing well sites.\" yielded the word Muslim as the top result from the sentence \"Amid the tension, Muslim leaders say their communities are doing more than ever to help in investigations -a point they say is overlooked by many Americans.\" with a cosine similarity of 0.602. Moreover, 9 out of the 20 top results were the words \"muslim\" or \"mosque\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Ethical Considerations/Discussion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "When using TopGuNN to help bootstrap training data for event extraction models or running search queries, care must be taken to ensure these biases do not leak into a downstream applications by a thorough manual review to prevent unintentional harm. Debiasing language models is an active area of research and techniques like Qian et al. (2019) could be utilized to attempt to debias language models at train time that could then replace BERT in TopGuNN.", |
|
"cite_spans": [ |
|
{ |
|
"start": 326, |
|
"end": 344, |
|
"text": "Qian et al. (2019)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Ethical Considerations/Discussion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We explored 3 different embedding models for the TopGuNN system: 1. SBERT automatically has its own sentence representation to retrieve sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Testing Various Embedding Models with TopGuNN", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "2. AVG-BERT uses the mean of the word embeddings as the sentence representation to retrieve sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Testing Various Embedding Models with TopGuNN", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "3. BERT returns results for a single query word and retrieves sentences with words that were used in a similar context as the query word in the query sentence. Note: To show diversity of results for BERT, the Top-10 unique nearest neighbors are shown and not necessarily the first Top-10 as seen in SBERT and AVG-BERT. On Tuesday, Sen. John McCain -who is inexplicably playing second fiddle to Kyl -told ABC: \"I believe that we could move forward with the START treaty and satisfy Senator Kyl's concerns and mine about missile defense and others, and I hope that we can do that.\" 0.848 White House officials, meanwhile, expressed hope of sealing a deal swiftly, perhaps by midweek, and clearing the congressional calendar for a long list of other priorities they aim to accomplish by the end of the year, including ratification of the New START arms treaty with Russia and the repeal of the \"don't ask, don't tell\" policy for gay service members as part of a wider Pentagon policy bill.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Testing Various Embedding Models with TopGuNN", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "While President Barack Obama presses the Senate to embrace a new arms control treaty with Russia, another nuclear pact with Moscow secured final approval after more than four years on Thursday with virtually no notice but potentially significant impact. 0.828", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "0.832", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the interview, Putin also warned that ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "0.832", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Event: Destroy \u2022 Definition: Damage property, organization or natural resource Query Word \"These actions challenge national sovereignty, threaten one country, two systems, and will destroy the city's prosperity and stability,\" she said, referring to slogans of \"Liberate Hong Kong, revolution of our times\" and the act of throwing a Chinese flag in the sea. Retrieved Sentence \"Letting it expire would threaten jobs, harm the environment, weaken our renewable fuel industries, and increase our dependence on foreign oil,\" they wrote. Cosine sim. of destroy and weaken: 0.732 Table 9 : Weaken renewable fuel as a positive example of Destroy.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 577, |
|
"end": 584, |
|
"text": "Table 9", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Positive Example", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Event: Destroy \u2022 Definition: Damage property, organization or natural resource Query Word \"These actions challenge national sovereignty, threaten one country, two systems, and will destroy the city's prosperity and stability,\" she said, referring to slogans of \"Liberate Hong Kong, revolution of our times\" and the act of throwing a Chinese flag in the sea. Retrieved Sentence Adopting an orthodox view, he said in 1976 that a projected budget deficit estimated at 60 billion was \"very scary\" and would \"wreck\" the economy. Cosine sim. of destroy and wreck: 0.752 ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract Example", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We compared the top 10 unique results from a small subset of the Gigaword corpus (400,000 sentences) compared to results ran on the full Gigaword corpus (183 million sentences) for the event primitive Sentence (as in the judicial meaning).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "D TopGuNN Results Using Different Sized Corpora", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Current findings have shown us some interesting, but unexpected results. The cosine similarities of retrieved results for full Gigaword are significantly higher, but TopGuNN still works extremely well on a small subset in terms of quality and diversity of results. Other researchers who need to prioritize highspeed in retrieving positive or abstract examples for their training data could retrieve similar sentences even faster on a smaller subset of a uniform corpus like Gigaword without having to sacrifice much in terms of quality. Table 12 : Top-10 unique results querying the event primitive 'Sentence' (as in the judicial meaning) over a subset of Gigaword (400K sentences) vs. full Gigaword (183M sentences).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 537, |
|
"end": 545, |
|
"text": "Table 12", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "D TopGuNN Results Using Different Sized Corpora", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Cosine Sim", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Query Sentence: \"The judge sentenced him to death.\" 0.742 When she explained to the court that she could not afford to pay, Nowlin was sent to prison. 0.701 \"It matters little if they condemn me, even to the heaviest sentence. 0.695 True, the court could have gone further and actually jailed the two defendants. 0.693", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Retrieved Result", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "He received a life sentence. Gigaword Subset (400K sentences)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Retrieved Result", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A federal judge spared him prison time but ordered him to leave the country within 90 days or be deported.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "0.680", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The jury came within two votes of convicting Megahed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "0.676", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Bush commuted the sentences, and the men are now free. 0.666 At 6:28 p.m., she found him hanged.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "0.670", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "He eventually pleaded guilty to manslaughter and spent 15 years in prison. Seven years later, a Paris court condemned him in absentia to life in jail for the murders. 0.847", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "0.664", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The jury decided unanimously to sentence him to death.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Continued on next page", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The jury convicted him to life in prison, where he will spend the rest of his life. Full Gigaword (183M sentences) 0.841 \"There are some things you just can't run from, this being one of those,\" Rolling told Circuit Judge Stan R. Morris, who accepted the pleas and found him guilty and later sentenced him to death. 0.833", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "0.845", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The presiding judge agreed, sentencing the two young men to life imprisonment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "0.845", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The judge sent them to prison. 0.828 It wasn't until last October -a decade later -that courts sentenced 34 men to 26 years each for the killings. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "0.830", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We demonstrate TopGuNN's ability to perform contextual similarity search of a query word in its corresponding sentence using polysemous words, which have two distinct sentences. Figure 3 and Figure 4 are further examples of querying two distinct sentences with different senses of the same word to retrieve sentences that capture both polysemies. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 186, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 191, |
|
"end": 199, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "F Querying Polysemous Words", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.darpa.mil/program/ knowledge-directed-artificial-intelligence-reasoning-over-schemas", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://catalog.ldc.upenn.edu/LDC2011T07 3 We use the \"bert-base-uncased\" model from the Transformers Python package.(Wolf et al., 2020) 4 https://datasaur.ai/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://spacy.io/ 6 https://pypi.org/project/sqlitedict/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/spotify/annoy", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.iarpa.gov/index.php/research-programs/ better", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Named entity recognition with Bert. Tobias Sterbak Consulting", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Named entity recognition with Bert. Tobias Ster- bak Consulting, Akazienstra\u00dfe 3A, 10823 Berlin, Germany.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Ann-benchmarks: A benchmarking tool for approximate nearest neighbor algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Aum\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Bernhardsson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Faithfull", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Aum\u00fcller, Erik Bernhardsson, and Alexander Faithfull. 2018. Ann-benchmarks: A benchmarking tool for approximate nearest neighbor algorithms.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Annoy: Approximate Nearest Neighbors in C++/Python. Python package version 1", |
|
"authors": [ |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Bernhardsson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik Bernhardsson. 2018. Annoy: Approximate Near- est Neighbors in C++/Python. Python package ver- sion 1.13.0.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Reading the manual: Event extraction as definition comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Yunmo", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tongfei", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Seth", |
|
"middle": [], |
|
"last": "Ebner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aaron", |
|
"middle": [ |
|
"Steven" |
|
], |
|
"last": "White", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yunmo Chen, Tongfei Chen, Seth Ebner, Aaron Steven White, and Benjamin Van Durme. 2020. Reading the manual: Event extraction as definition compre- hension.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019a. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019b. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Toward a unified approach to statistical language modeling for chinese", |
|
"authors": [ |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingjing", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Fu", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACM Transactions on Asian Language Information Processing", |
|
"volume": "1", |
|
"issue": "1", |
|
"pages": "3--33", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/595576.595578" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jianfeng Gao, Joshua Goodman, Mingjing Li, and Kai- Fu Lee. 2002. Toward a unified approach to statis- tical language modeling for chinese. ACM Trans- actions on Asian Language Information Processing, 1(1):3-33.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Toward better storylines with sentence-level language models", |
|
"authors": [ |
|
{ |
|
"first": "Daphne", |
|
"middle": [], |
|
"last": "Ippolito", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douglas", |
|
"middle": [], |
|
"last": "Eck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daphne Ippolito, David Grangier, Douglas Eck, and Chris Callison-Burch. 2020. Toward better story- lines with sentence-level language models.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Recasting text simplification as a document retrieval task", |
|
"authors": [ |
|
{ |
|
"first": "Reno", |
|
"middle": [], |
|
"last": "Kriz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eleni", |
|
"middle": [], |
|
"last": "Miltsakaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Rojas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reno Kriz, Eleni Miltsakaki, Jaime Rojas, Rebecca Iglesias-Flores, Megha Mishra, Marianna Apidi- anaki, and Chris Callison-Burch. 2020. Recasting text simplification as a document retrieval task. In Submission.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Measuring bias in contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Keita", |
|
"middle": [], |
|
"last": "Kurita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nidhi", |
|
"middle": [], |
|
"last": "Vyas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ayush", |
|
"middle": [], |
|
"last": "Pareek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Black", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Keita Kurita, Nidhi Vyas, Ayush Pareek, Alan W Black, and Yulia Tsvetkov. 2019. Measuring bias in contextualized word representations.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "An empirical study of pre-trained transformers for arabic information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Wuwei", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Ritter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wuwei Lan, Yang Chen, Wei Xu, and Alan Ritter. 2020. An empirical study of pre-trained transform- ers for arabic information extraction.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "New tools for web-scale n-grams", |
|
"authors": [ |
|
{ |
|
"first": "Dekang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [ |
|
"Ward" |
|
], |
|
"last": "Church", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Satoshi", |
|
"middle": [], |
|
"last": "Sekine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Yarowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shane", |
|
"middle": [], |
|
"last": "Bergsma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kailash", |
|
"middle": [], |
|
"last": "Patil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Pitler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Lathbury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vikram", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dekang Lin, Kenneth Ward Church, Heng Ji, Satoshi Sekine, David Yarowsky, Shane Bergsma, Kailash Patil, Emily Pitler, Rachel Lathbury, Vikram Rao, et al. 2010. New tools for web-scale n-grams. In LREC.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Chinese language model adaptation based on document classification and multiple domain-specific language models", |
|
"authors": [ |
|
{ |
|
"first": "Chi-Lung", |
|
"middle": [], |
|
"last": "Sung-Chien Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lee-Feng", |
|
"middle": [], |
|
"last": "Tsai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keh-Jiann", |
|
"middle": [], |
|
"last": "Chien", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lin-Shan", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "EUROSPEECH. ISCA", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sung-Chien Lin, Chi-Lung Tsai, Lee-Feng Chien, Keh- Jiann Chen, and Lin-Shan Lee. 1997. Chinese lan- guage model adaptation based on document classifi- cation and multiple domain-specific language mod- els. In EUROSPEECH. ISCA.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Fastbert: a selfdistilling bert with adaptive inference time", |
|
"authors": [ |
|
{ |
|
"first": "Weijie", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiruo", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haotang", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Ju", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Weijie Liu, Peng Zhou, Zhe Zhao, Zhiruo Wang, Haotang Deng, and Qi Ju. 2020. Fastbert: a self- distilling bert with adaptive inference time.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Efficient estimation of word representations in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Kai Chen, G. S. Corrado, and J. Dean. 2013. Efficient estimation of word representations in vector space. In ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Intelligent selection of language model training data", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Robert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Moore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the ACL 2010 Conference Short Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "220--224", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert C. Moore and William Lewis. 2010. Intelligent selection of language model training data. In Pro- ceedings of the ACL 2010 Conference Short Papers, pages 220-224, Uppsala, Sweden. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "English Gigaword Fifth Edition LDC2011T07", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Parker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Graff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junbo", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ke", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuaki", |
|
"middle": [], |
|
"last": "Maeda", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.35111/wk4f-qt80" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert Parker, David Graff, Junbo Kong, Ke Chen, and Kazuaki Maeda. 2011. English Gigaword Fifth Edi- tion LDC2011T07. Web Download.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Magnitude: A fast, efficient universal vector embedding utility package", |
|
"authors": [ |
|
{ |
|
"first": "Ajay", |
|
"middle": [], |
|
"last": "Patel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Sands", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marianna", |
|
"middle": [], |
|
"last": "Apidianaki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "120--126", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-2021" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ajay Patel, Alexander Sands, Chris Callison-Burch, and Marianna Apidianaki. 2018. Magnitude: A fast, efficient universal vector embedding utility package. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 120-126, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word rep- resentation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Pro- cessing (EMNLP), pages 1532-1543, Doha, Qatar. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Reducing gender bias in word-level language models with a gender-equalizing loss function", |
|
"authors": [ |
|
{ |
|
"first": "Yusu", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Urwa", |
|
"middle": [], |
|
"last": "Muaz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jae", |
|
"middle": [ |
|
"Won" |
|
], |
|
"last": "Hyun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: Student Research Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "223--228", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-2031" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yusu Qian, Urwa Muaz, Ben Zhang, and Jae Won Hyun. 2019. Reducing gender bias in word-level language models with a gender-equalizing loss func- tion. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: Stu- dent Research Workshop, pages 223-228, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Learning antonyms with paraphrases and a morphology-aware neural network", |
|
"authors": [ |
|
{ |
|
"first": "Sneha", |
|
"middle": [], |
|
"last": "Rajana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marianna", |
|
"middle": [], |
|
"last": "Apidianaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vered", |
|
"middle": [], |
|
"last": "Shwartz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 6th", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/S17-1002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sneha Rajana, Chris Callison-Burch, Marianna Apid- ianaki, and Vered Shwartz. 2017. Learning antonyms with paraphrases and a morphology- aware neural network. In Proceedings of the 6th", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Joint Conference on Lexical and Computational Semantics (*SEM 2017)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "12--21", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joint Conference on Lexical and Computational Se- mantics (*SEM 2017), pages 12-21, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Sentence-BERT: Sentence embeddings using Siamese BERT-networks", |
|
"authors": [ |
|
{ |
|
"first": "Nils", |
|
"middle": [], |
|
"last": "Reimers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3982--3992", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1410" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- BERT: Sentence embeddings using Siamese BERT- networks. In Proceedings of the 2019 Confer- ence on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 3982-3992, Hong Kong, China. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Distilbert, a distilled version of BERT: smaller, faster, cheaper and lighter", |
|
"authors": [ |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. Distilbert, a distilled version of BERT: smaller, faster, cheaper and lighter. CoRR, abs/1910.01108.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-sql task", |
|
"authors": [ |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michihiro", |
|
"middle": [], |
|
"last": "Yasunaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongxu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zifan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Irene", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qingning", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shanelle", |
|
"middle": [], |
|
"last": "Roman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zilin", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dragomir", |
|
"middle": [], |
|
"last": "Radev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tao Yu, Rui Zhang, Kai Yang, Michihiro Yasunaga, Dongxu Wang, Zifan Li, James Ma, Irene Li, Qingning Yao, Shanelle Roman, Zilin Zhang, and Dragomir Radev. 2019. Spider: A large- scale human-labeled dataset for complex and cross- domain semantic parsing and text-to-sql task.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Retrieved results for two queries with different senses of a polysemous word searching over 183 million sentences (or 1.5B embeddings) in the Gigaword corpus with TopGuNN." |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "TopGuNN runs in four stages: Pre-Processing, Generating Embeddings, Indexing, and Querying" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "TopGuNN results on the polysemous word change." |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "TopGuNN results on the polysemous word acquit." |
|
}, |
|
"TABREF2": { |
|
"content": "<table><tr><td>Abstract Example</td></tr><tr><td>\u2022 Event: Contaminate</td></tr><tr><td>\u2022 Definition: An animal (incl. people) is in-</td></tr><tr><td>fected with a pathogen.</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Shooting down an explosive as a positive example of Disable.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Infestations of computer spyware as an abstract example of Contaminate.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF6": { |
|
"content": "<table><tr><td/><td>Query Time</td></tr><tr><td>Query Batch (n = 189)</td><td>21.4 hours</td></tr><tr><td>First Query (1)</td><td>19.4 hours</td></tr><tr><td colspan=\"2\">Subsequent Queries (2-189) 0.63 minutes</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Build times for TopGuNN on GigawordQuery Times The times for querying TopGuNN on the entire Gigaword corpus can be seen in Ta-", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF7": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Query times for TopGuNN on Gigaword", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF8": { |
|
"content": "<table><tr><td>Method</td><td>Cosine</td><td>Retrieved Result</td></tr><tr><td/><td>Sim</td><td/></tr><tr><td colspan=\"3\">Query Sentence: \"President Barack Obama's hopes of winning Senate approval for a new arms</td></tr><tr><td colspan=\"3\">control treaty with Russia by the end of the year were encouraged Tuesday by two Republican</td></tr><tr><td colspan=\"3\">senators, including John McCain.\"</td></tr><tr><td/><td>0.908</td><td>WASHINGTON -The US Senate, in a key test vote, moved Tuesday</td></tr><tr><td/><td/><td>toward final passage of a nuclear arms pact with Russia, setting up a likely</td></tr><tr><td/><td/><td>foreign policy victory for President Obama and a hard-won achievement</td></tr><tr><td/><td/><td>for Senator John F. Kerry of Massachusetts, who shepherded the treaty</td></tr><tr><td/><td/><td>through fierce GOP opposition.</td></tr><tr><td/><td>0.888</td><td>Fresh from winning Senate approval for a new strategic arms treaty,</td></tr><tr><td/><td/><td>President Barack Obama plans to return to the negotiating table with</td></tr><tr><td/><td/><td>Russia next year in hopes of securing the first legal limits imposed on the</td></tr><tr><td/><td/><td>smaller, battlefield nuclear weapons viewed as most vulnerable to theft or</td></tr><tr><td/><td/><td>diversion.</td></tr><tr><td/><td/><td>Continued on next page</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Results comparing SBERT, AVG-BERT, and BERT", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF9": { |
|
"content": "<table><tr><td/><td/><td>-continued from previous page</td></tr><tr><td>Method</td><td>Cosine</td><td>Retrieved Result</td></tr><tr><td/><td>Sim</td><td/></tr><tr><td/><td>0.882</td><td/></tr><tr><td/><td/><td>Obama has sought.</td></tr><tr><td/><td>0.878</td><td>To press their point, Republicans pushed through a side resolution calling</td></tr><tr><td/><td/><td>on Obama to open talks with Russia on such weapons within a year.</td></tr><tr><td/><td>0.872</td><td>The Senate moved closer Monday to approving a new arms control treaty</td></tr><tr><td/><td/><td>with Russia over the opposition of Republican leaders as lawmakers</td></tr><tr><td/><td/><td>worked on a side deal to assure skeptics that the arms pact would not</td></tr><tr><td/><td/><td>inhibit U.S. plans to build missile defense systems.</td></tr><tr><td/><td>0.872</td><td>Beyond his behind-the-scenes role in negotiating the tax deal with Repub-</td></tr><tr><td/><td/><td>licans -a path that Biden and Obama decided on in a recent conversation</td></tr><tr><td>SBERT</td><td/><td>at the White House, aides say -the vice president has also been trying to</td></tr><tr><td/><td/><td>win Republican votes in the Senate for ratification of the START nuclear</td></tr><tr><td/><td/><td>arms treaty with Russia.</td></tr><tr><td/><td>0.856</td><td/></tr></table>", |
|
"type_str": "table", |
|
"text": "McCain, one of his party's leading voices on national security, said he thought that Republican concerns over missile defense and nuclear modernization could be resolved in time to vote on the so-called New Start treaty during the lame-duck session of Congress this month, as", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF11": { |
|
"content": "<table><tr><td/><td/><td>-continued from previous page</td></tr><tr><td>Method</td><td>Cosine</td><td>Retrieved Result</td></tr><tr><td/><td>Sim</td><td/></tr><tr><td colspan=\"3\">0.896 0.894 Sen. AVG-BERT A Republican senator announced that he would vote for the treaty and two others said they were leaning toward it, and at the same time, Sen. John McCain, R-Ariz., produced separate legislation that could reassure</td></tr><tr><td/><td/><td>fellow Republicans worried about the treaty's impact on missile defense.</td></tr><tr><td/><td>0.893</td><td>Obama brought up the treaty Tuesday during a White House meeting with</td></tr><tr><td/><td/><td>congressional leaders, pressing them to vote this month to strengthen the</td></tr><tr><td/><td/><td>relationship with Russia.</td></tr><tr><td/><td>0.892</td><td>President Barack Obama on Tuesday strongly defended his tax cut deal</td></tr><tr><td/><td/><td>with congressional Republicans against intense criticism from his own</td></tr><tr><td/><td/><td>party, insisting it was \"a good deal for the American people.\"</td></tr><tr><td/><td>0.887</td><td>Sen. 
Harry Reid of Nevada, the majority leader and crucial proponent</td></tr><tr><td/><td/><td>of the repeal, noted that some Republicans had indicated they may try to</td></tr><tr><td/><td/><td>block Senate approval of a nuclear arms treaty with Russia due to their</td></tr><tr><td/><td/><td>pique over the Senate action on the ban on gays in the military.</td></tr><tr><td/><td>0.887</td><td>Obama has insisted that the Senate approve it before the end of the month</td></tr><tr><td/><td/><td>rather than wait until a new Senate with more Republicans takes office,</td></tr><tr><td/><td/><td>and a number of Republican senators have signaled tentative support.</td></tr><tr><td/><td>0.885</td><td>Obama, in his brief remarks Wednesday during a meeting with the pres-</td></tr><tr><td/><td/><td>ident of Poland, suggested that Republicans for the next two years will</td></tr><tr><td/><td/><td>still be defending the Bush tax rates while he is looking forward to a new,</td></tr><tr><td/><td/><td>better code.</td></tr><tr><td colspan=\"3\">Query Word: \"President Barack Obama's hopes of winning Senate approval for a new arms</td></tr><tr><td colspan=\"3\">control treaty with Russia by the end of the year were encouraged Tuesday by two Republican</td></tr><tr><td colspan=\"3\">senators, including John McCain.\"</td></tr><tr><td/><td>0.953</td><td>TWO REPUBLICANS HINT AT HOPE FOR ARMS PACT WITH RUS-</td></tr><tr><td/><td/><td>SIA President Barack Obama's hopes of winning Senate approval for a</td></tr><tr><td/><td/><td>new arms control treaty with Russia by the end of the year were encour-</td></tr><tr><td/><td/><td>aged Tuesday by two Republican senators, including John McCain.</td></tr><tr><td/><td>0.825</td><td>Obama's failure to win passage of comprehensive immigration reform</td></tr><tr><td/><td/><td>was a disappointment to many Latinos, he conceded.</td></tr><tr><td/><td>0.802</td><td>Aides to Reid said they had mapped out a path to securing votes on all 
of</td></tr><tr><td/><td/><td>the legislation, which would mean staying in session until next Thursday,</td></tr><tr><td/><td/><td>two days before Christmas, and potentially returning the week before</td></tr><tr><td/><td/><td>New Year's Day.</td></tr><tr><td/><td>0.793</td><td>While he has a fair chance of securing the votes of the two other</td></tr><tr><td/><td/><td>Democrats, he faces a potential fight with one of those commission-</td></tr><tr><td/><td/><td>ers, Michael J. Copps, who has been public in his support for stricter</td></tr><tr><td/><td/><td>regulation of broadband Internet service.</td></tr><tr><td/><td>0.756</td><td>With a week before Election Day, Perry, who is thought to have the best</td></tr><tr><td>BERT</td><td/><td>chance of gaining a seat for Republicans in the state, is struggling to fend off accusations that he witnessed and covered up the illegal strip search of</td></tr><tr><td/><td/><td>a teenage girl in 1991, when he was a police sergeant in Wareham, Mass.</td></tr><tr><td/><td/><td>Continued on next page</td></tr></table>", |
|
"type_str": "table", |
|
"text": "John McCain of Arizona had previously said he hoped to vote for the treaty as long as concerns over missile defense were addressed, and it was not clear whether he was signaling a shift or using the opportunity to vent his longstanding frustration with Russian behavior.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF12": { |
|
"content": "<table><tr><td/><td/><td>-continued from previous page</td></tr><tr><td>Method</td><td>Cosine</td><td>Retrieved Result</td></tr><tr><td/><td>Sim</td><td/></tr><tr><td/><td>0.755</td><td/></tr><tr><td/><td>0.744</td><td>Republican confidence about capturing control of the House remained</td></tr><tr><td/><td/><td>high, though even Republicans considered the Senate more of a question</td></tr><tr><td/><td/><td>mark, given the number of excruciatingly close races across the country.</td></tr><tr><td/><td>0.731</td><td>Obama bested the chamber in the first two years of his term, passing</td></tr><tr><td/><td/><td>health care legislation and an overhaul of financial regulations over the</td></tr><tr><td/><td/><td>group's heated opposition.</td></tr><tr><td/><td>0.731</td><td>Like most of her 18 opponents nearing the Nov. 28 election, Manigat's</td></tr><tr><td/><td/><td>campaign trail stretches northward from Port-au-Prince to Miami, New</td></tr><tr><td/><td colspan=\"2\">York, Boston and Montreal in hopes of garnering money and influence</td></tr><tr><td/><td/><td>from the large Haitian diaspora.</td></tr><tr><td/><td>0.730</td><td>Still, with Republicans challenging every element of the new law, the</td></tr><tr><td/><td/><td>Obama administration is likely to be handcuffed in its efforts to expand</td></tr><tr><td/><td/><td>the revamping of the health care system.</td></tr><tr><td colspan=\"2\">C Notable Results</td><td/></tr><tr><td>Positive Example</td><td/><td/></tr><tr><td colspan=\"2\">\u2022 Event: Defeat</td><td/></tr><tr><td colspan=\"3\">\u2022 Definition: Defeat in a conflict or an election (but not a game-style competition)</td></tr><tr><td>Query Word</td><td/><td/></tr><tr><td colspan=\"3\">Most democratic activists and lawmakers rejected the deal as a sham and it was eventually</td></tr><tr><td colspan=\"3\">defeated in the city's legislatures after a botched walkout by pro-government legislatures.</td></tr><tr><td colspan=\"2\">Retrieved 
Sentence</td><td/></tr><tr><td colspan=\"2\">The White House and</td><td/></tr></table>", |
|
"type_str": "table", |
|
"text": "The White House is negotiating with Sen. Jon Kyl, R-Ariz., whose support is crucial to getting other Republican votes, to meet his price: more money to modernize the nuclear arsenal. Senate Democrats considered the amendment a treaty killer because any change to the text would require both countries to go back to the negotiating table. Cosine sim. of rejected and considered: 0.745", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF13": { |
|
"content": "<table><tr><td>Positive Example</td></tr><tr><td>\u2022 Event: Disable</td></tr><tr><td>\u2022 Definition: Impeding the expected functioning of an ORG, a mechanical device, or software, Ex.,</td></tr><tr><td>remove fuse from explosive</td></tr><tr><td>Query Word</td></tr><tr><td>Soldiers and personnel</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Treaty killer as a positive example of Defeat. have to be trained to be aware of the enemy's behaviors, to look for indicators of IEDs in their patrol areas and to use technology to dispose or disable them. Retrieved Sentence And he assured his audience that he had made clear to senior Pakistani military officials my strong desire to see more action taken against these places and to root out the terrorists. Cosine sim. of disable and root: 0.616", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF14": { |
|
"content": "<table><tr><td>Positive Example</td></tr><tr><td>\u2022 Event: Block Passage</td></tr><tr><td>\u2022 Definition:</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Rooting out terrorist organizations as a positive example of Disable. Preventing entry or exit from a location Query Word ...archipelagic defense would have the holders of islands adjoining straits and other narrow seas fortify those islands with mobile anti-ship and anti-air missiles while deploying surface, subsurface, and aerial assets to block passage through these seaways. In effect these forces string a barricade between geographic features-interdicting shipping and overflight while bringing economic and military pressure on adversaries. Retrieved Sentence Assad Ismail, a local council president in Sadiya, a village along the disputed territories northeast of Baghdad, said that only the Americans were able to settle a recent dispute that flared when Iraqi soldiers trying to restrict the movement of insurgents closed off local farmers' access to their date palms, tomatoes and peanuts. Cosine sim. of block and restrict: 0.637", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF15": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Restricting movement as a positive example of Block Passage.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF16": { |
|
"content": "<table><tr><td>Abstract Example</td></tr><tr><td>\u2022 Event: Block Passage</td></tr><tr><td>\u2022 Definition: (</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Wrecked the economy as an abstract example of Destroy. Physically) preventing entry or exit from a location Query Word ...archipelagic defense would have the holders of islands adjoining straits and other narrow seas fortify those islands with mobile anti-ship and anti-air missiles while deploying surface, subsurface, and aerial assets to block passage through these seaways. In effect these forces string a barricade between geographic features-interdicting shipping and overflight while bringing economic and military pressure on adversaries. Retrieved Sentence Even as Pakistan's army vows to take on militants spreading chaos and mayhem inside Pakistan, the intelligence service still sees the Afghan Taliban as a way to ensure influence on the other side of the border and keep India's influence at bay. Cosine sim. of block and keep: 0.649", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF17": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Keeping influence at bay as an abstract example of Block Passage.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF18": { |
|
"content": "<table><tr><td/><td/><td>-continued from previous page</td></tr><tr><td>Method</td><td>Cosine</td><td>Retrieved Result</td></tr><tr><td/><td>Sim</td><td/></tr><tr><td/><td>0.659</td><td/></tr></table>", |
|
"type_str": "table", |
|
"text": "That convinced a jury to find him guilty of aggravated sexual assault and send him to prison for 75 years. 0.881 A jury didn't believe him, and a judge sentenced him to eight years in prison. 0.863", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF19": { |
|
"content": "<table><tr><td>E Lookup Dictionaries</td></tr><tr><td>1. Sentences (32.4GB):</td></tr><tr><td>sent_id \u2192 (sentence)</td></tr><tr><td>2. Document Traceability (15.0GB):</td></tr><tr><td>sent_id \u2192 (doc_id)</td></tr><tr><td>3. Tokens (41.6GB):</td></tr><tr><td>sent_id \u2192 (sentence tokens)</td></tr><tr><td>4. Parts-of-Speech Tags (27.0GB):</td></tr><tr><td>sent_id \u2192 (sentence pos_tags)</td></tr><tr><td>5. Dependency Labels (33.9GB):</td></tr><tr><td>sent_id \u2192 (sentence dep_labels)</td></tr><tr><td>6. Words Trace (156.3 GB):</td></tr><tr><td>word_id \u2192 (word_id, word, (doc_id, sent_id))</td></tr></table>", |
|
"type_str": "table", |
|
"text": "0.814 They unanimously acquitted him on all counts. 0.812 But the U.N. court decided he was not directly involved and punished him with a light two years in prison.", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |