|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:51:38.356888Z" |
|
}, |
|
"title": "Torch-Struct: Deep Structured Prediction Library", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "arush@cornell.edu" |
|
}, |
|
{ |
|
"first": "Cornell", |
|
"middle": [], |
|
"last": "Tech", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "The literature on structured prediction for NLP describes a rich collection of distributions and algorithms over sequences, segmentations, alignments, and trees; however, these algorithms are difficult to utilize in deep learning frameworks. We introduce Torch-Struct, a library for structured prediction designed to take advantage of and integrate with vectorized, auto-differentiation based frameworks. Torch-Struct includes a broad collection of probabilistic structures accessed through a simple and flexible distribution-based API that connects to any deep learning model. The library utilizes batched, vectorized operations and exploits auto-differentiation to produce readable, fast, and testable code. Internally, we also include a number of general-purpose optimizations to provide cross-algorithm efficiency. Experiments show significant performance gains over fast baselines. Case studies demonstrate the benefits of the library. Torch-Struct is available at https://github.com/ harvardnlp/pytorch-struct.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "The literature on structured prediction for NLP describes a rich collection of distributions and algorithms over sequences, segmentations, alignments, and trees; however, these algorithms are difficult to utilize in deep learning frameworks. We introduce Torch-Struct, a library for structured prediction designed to take advantage of and integrate with vectorized, auto-differentiation based frameworks. Torch-Struct includes a broad collection of probabilistic structures accessed through a simple and flexible distribution-based API that connects to any deep learning model. The library utilizes batched, vectorized operations and exploits auto-differentiation to produce readable, fast, and testable code. Internally, we also include a number of general-purpose optimizations to provide cross-algorithm efficiency. Experiments show significant performance gains over fast baselines. Case studies demonstrate the benefits of the library. Torch-Struct is available at https://github.com/ harvardnlp/pytorch-struct.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Structured prediction is an area of machine learning focusing on representations of spaces with combinatorial structure, as well as algorithms for inference and parameter estimation over these structures. Core methods include both tractable exact approaches like dynamic programming and spanning tree algorithms as well as heuristic techniques such linear programming relaxations and greedy search.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Structured prediction has played a key role in the history of natural language processing. Example methods include techniques for sequence labeling and segmentation (Lafferty et al., 2001; Sarawagi and Cohen, 2005) , discriminative dependency and constituency parsing (Finkel et al., 2008; McDonald et al., 2005) , unsupervised learning for Figure 1 : Distribution of binary trees over an 1000token sequence. Coloring shows the marginal probabilities of every span. Torch-Struct is an optimized collection of common CRF distributions used in NLP that is designed to integrate with deep learning frameworks. labeling and alignment (Vogel et al., 1996; Goldwater and Griffiths, 2007) , approximate translation decoding with beam search (Tillmann and Ney, 2003) , among many others.", |
|
"cite_spans": [ |
|
{ |
|
"start": 165, |
|
"end": 188, |
|
"text": "(Lafferty et al., 2001;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 214, |
|
"text": "Sarawagi and Cohen, 2005)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 268, |
|
"end": 289, |
|
"text": "(Finkel et al., 2008;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 290, |
|
"end": 312, |
|
"text": "McDonald et al., 2005)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 630, |
|
"end": 650, |
|
"text": "(Vogel et al., 1996;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 651, |
|
"end": 681, |
|
"text": "Goldwater and Griffiths, 2007)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 734, |
|
"end": 758, |
|
"text": "(Tillmann and Ney, 2003)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 341, |
|
"end": 349, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In recent years, research into deep structured prediction has studied how these approaches can be integrated with neural networks and pretrained models. One line of work has utilized structured prediction as the final layer for deep models (Collobert et al., 2011; Durrett and Klein, 2015) . Another has incorporated structured prediction within deep learning models, exploring novel models for latentstructure learning, unsupervised learning, or model control (Johnson et al., 2016; Yogatama et al., 2016; Wiseman et al., 2018) . We aspire to make both of these use-cases as easy to use as standard neural networks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 240, |
|
"end": 264, |
|
"text": "(Collobert et al., 2011;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 265, |
|
"end": 289, |
|
"text": "Durrett and Klein, 2015)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 461, |
|
"end": 483, |
|
"text": "(Johnson et al., 2016;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 484, |
|
"end": 506, |
|
"text": "Yogatama et al., 2016;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 507, |
|
"end": 528, |
|
"text": "Wiseman et al., 2018)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{

"text": "Table 1: Models and algorithms implemented in Torch-Struct. Notation is developed in Section 5. Parts are described in terms of sequence lengths N, M, label size C, segment length K, and layers / grammar size L, G. Lines of code (LoC) is from the log-partition (A(ℓ)) implementation. T/S is the tokens per second of a batched computation, computed with batch 32, N = 25, C = 20, K = 5, L = 3 (K80 GPU run on Google Colab).\nName | Structure (Z) | Parts (P) | Algorithm (A(ℓ)) | LoC | T/S | Sample Reference\nLinear-Chain, HMM | Labeled Chain | Edges (NC^2) | Forward-Backward | 20 | 390k | (Lafferty et al., 2001)\nFactorial-HMM | Labeled Chains | Trans. (LC^2), Obs. (NC^L) | Factorial F-B | 20 | 25k | (Ghahramani and Jordan, 1996)\nAlignment | Alignment | Match (NM), Skips (2NM) | DTW,\nSimple CKY | Labeled Tree | Splits (CN^2) | 0-th order CKY | 30 | 118k | (Kasami, 1966)\nDependency | Proj. Tree | Arcs (N^2) | Eisner Alg. | 40 | 28k | (Eisner, 2000)\nDep (NP) | Non-Proj. Tree | Arcs (N^2) | Matrix-Tree, Chu-Liu (MAP) | 40 | 1.1m | (Koo et al., 2007; McDonald et al., 2005)\nAuto-Regressive | Sequence | Prefix (C^N) | Greedy Search, Beam Search | 60 | - | (Tillmann and Ney, 2003)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},
|
{ |
|
"text": "prediction is that many required algorithms are difficult to implement efficiently and correctly. Most projects reimplement custom versions of standard algorithms or focus particularly on a single welldefined model class. This research style makes it difficult to combine and try out new approaches, a problem that has compounded with the complexity of research in deep structured prediction. With this challenge in mind, we introduce Torch-Struct with three specific contributions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Modularity: models are represented as distributions with a standard flexible API integrated into a deep learning framework.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Completeness: a broad array of classical algorithms are implemented and new models can easily be added.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Efficiency: implementations target computational/memory efficiency for GPUs and the backend includes extensions for optimization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this system description, we first motivate the approach taken by the library, then present a technical description of the methods used, and finally present several example use cases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Several software libraries target structured prediction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Optimization tools, such as SVM-struct (Joachims, 2008) , focus on parameter estimation. Model libraries, such as CRFSuite (Okazaki, 2007) , CRF++ (Kudo, 2005) , or NCRF++ (Yang and Zhang, 2018) , implement inference for a fixed set of popular models, usually linear-chain CRFs. General-purpose inference libraries, such as PyStruct (M\u00fcller and Behnke, 2014) or Tur-boParser (Martins et al., 2010), utilize external solvers for (primarily MAP) inference such as integer linear programming solvers and ADMM. Probabilistic programming languages, for example languages that integrate with deep learning such as Pyro (Bingham et al., 2019) , allow for specification and inference over some discrete domains. Most ambitiously, inference libraries such as Dyna (Eisner et al., 2004) allow for declarative specifications of dynamic programming algorithms to support inference for generic algorithms. Torch-Struct takes a different approach and integrates a library of optimized structured distributions into a vectorized deep learning system. We begin by motivating this approach with a case study.", |
|
"cite_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 55, |
|
"text": "(Joachims, 2008)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 138, |
|
"text": "(Okazaki, 2007)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 147, |
|
"end": 159, |
|
"text": "(Kudo, 2005)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 172, |
|
"end": 194, |
|
"text": "(Yang and Zhang, 2018)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 613, |
|
"end": 635, |
|
"text": "(Bingham et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "While structured prediction is traditionally presented at the output layer, recent applications have deployed structured models broadly within neural networks (Johnson et al., 2016; Kim et al., 2017; Yogatama et al., 2016, inter alia) . Torch-Struct aims to encourage this general use case. To illustrate, we consider a latent tree model. ListOps (Nangia and Bowman, 2018 ) is a dataset of mathematical functions. Each input/output pair consists of a prefix expression x and its result y, e.g.", |
|
"cite_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 181, |
|
"text": "(Johnson et al., 2016;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 182, |
|
"end": 199, |
|
"text": "Kim et al., 2017;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 200, |
|
"end": 234, |
|
"text": "Yogatama et al., 2016, inter alia)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 347, |
|
"end": 371, |
|
"text": "(Nangia and Bowman, 2018", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivating Case Study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "x = [ MAX 2 9 [ MIN 4 7 ] 0 ] y = 9", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivating Case Study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Models such as a flat RNN will fail to capture the hierarchical structure of this task. However, if a model can induce an explicit latent z, the parse tree of the expression, then the task is easy to learn by a tree-RNN model p(y|x, z) (Yogatama et al., 2016; Havrylov et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 236, |
|
"end": 259, |
|
"text": "(Yogatama et al., 2016;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 260, |
|
"end": 282, |
|
"text": "Havrylov et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivating Case Study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Let us briefly summarize a latent-tree RL model for this task. The objective is to maximize the probability of the correct prediction under the expectation of a prior tree model, p(z|x; \u03c6),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivating Case Study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Obj = E z\u223cp(z|x;\u03c6) [log p(y | z, x)]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivating Case Study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Computing the expectation is intractable so policy gradient is used. First a tree is sampledz \u223c p(z|x; \u03c6), then the gradient with respect to \u03c6 is approximated as,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivating Case Study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2202 \u2202\u03c6 Obj \u2248 (log p(y |z, x) \u2212 b)( \u2202 \u2202\u03c6 p(z|x; \u03c6))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivating Case Study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "where b is a variance reduction baseline. A common choice is the self-critical baseline (Rennie et al., 2017),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivating Case Study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "b = log p(y | z * , x) with z * = arg max z p(z|x; \u03c6)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivating Case Study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Finally an entropy regularization term is added to the objective encourage exploration of different trees, Obj", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivating Case Study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "+ \u03bbH(p(z | x; \u03c6)).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivating Case Study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Even in this brief overview, we can see how complex a latent structured learning problem can be. To compute these terms, we need 5 different properties of the structured prior model p(z |x; \u03c6):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivating Case Study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Sampling Policy gradient,z \u223c p(z | x; \u03c6) Density Score policy samples, p(z | x; \u03c6) Gradient Backpropagation, \u2202 \u2202\u03c6 p(z | x; \u03c6) Argmax Self-critical, arg max z p(z | x; \u03c6) Entropy Objective regularizer, H(p(z | x; \u03c6))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivating Case Study", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "For structured models, each of these terms is nontrivial to compute. A goal of Torch-Struct is to make it seamless to deploy structured models for these complex settings. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivating Case Study", |
|
"sec_num": "3" |
|
}, |
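
{

"text": "As a sketch of how these five properties combine in the training loop above, the following PyTorch fragment assumes a generic structured distribution object dist exposing sample, log_prob, argmax, and entropy; the attribute and method names are illustrative rather than a fixed API, and the policy-gradient term uses the standard score-function surrogate (the gradient of the log-probability of the sampled tree).\n\ndef latent_tree_step(dist, log_p_y_given_z, lam=1e-2):\n    # dist represents the prior tree model p(z | x; phi); log_p_y_given_z maps a\n    # tree z to log p(y | z, x) under the downstream tree-RNN.\n    z_tilde = dist.sample()                       # sampled tree for the policy gradient\n    z_star = dist.argmax                          # best tree for the self-critical baseline\n    reward = log_p_y_given_z(z_tilde).detach()    # log p(y | z~, x)\n    baseline = log_p_y_given_z(z_star).detach()   # b = log p(y | z*, x)\n    surrogate = (reward - baseline) * dist.log_prob(z_tilde)\n    loss = -(surrogate + lam * dist.entropy)      # entropy term encourages exploration\n    loss.backward()                               # populates gradients w.r.t. phi\n    return loss",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Motivating Case Study",

"sec_num": "3"

},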
|
{ |
|
"text": "The library design of Torch-Struct follows the distributions API used by both TensorFlow and Py-Torch (Dillon et al., 2017) . For each structured model in the library, we define a conditional random field (CRF) distribution object. From a user's standpoint, this object provides all necessary distributional properties. Given log-potentials output from a deep network, the user can request samples z \u223c CRF( ), probabilities CRF(z; ), modes arg max z CRF( ), or other distributional properties such as H(CRF( )). The library is agnostic to how these are utilized, and when possible, they allow for backpropagation to update the input network. The same distributional object can be used for standard output prediction as for more complex operations like attention or reinforcement learning. Figure 2 demonstrates this API for a binary tree CRF over an ordered sequence, such as p(z | x; \u03c6) from the previous section. The distribution takes in log-potentials which score each possible span in the input. The distribution converts these to probabilities of a specific tree. This distribution can be queried for predicting over the set of trees, sampling a tree for model structure, or even computing entropy over all trees. Table 1 shows all of the structures and distributions implemented in Torch-Struct. While each is internally implemented using different specialized algorithms and optimizations, from the user's perspective they all utilize the same external distributional API, and pass a generic set of distributional tests. 1 This approach hides the internal complexity of the inference procedure, while giving the user full access to the model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 123, |
|
"text": "(Dillon et al., 2017)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 789, |
|
"end": 797, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 1220, |
|
"end": 1227, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Library Design", |
|
"sec_num": "4" |
|
}, |
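
{

"text": "A minimal usage sketch of this distribution API, in the spirit of Figure 2; the class and attribute names (TreeCRF, marginals, argmax, entropy, sample, log_prob), the tensor shapes, and the call signatures are assumptions based on the public pytorch-struct package and should be checked against its documentation.\n\nimport torch\nimport torch_struct\n\nbatch, N = 2, 8\n# Log-potentials from any network: one score per candidate span (i, j);\n# the trailing label dimension of size 1 is an assumed shape convention.\nlog_potentials = torch.randn(batch, N, N, 1, requires_grad=True)\n\ndist = torch_struct.TreeCRF(log_potentials)   # binary tree CRF over an ordered sequence\nspan_marginals = dist.marginals               # marginal probability of each span, as in Figure 1\nbest_tree = dist.argmax                       # MAP tree structure\ntree_entropy = dist.entropy                   # H(CRF(l)) over all trees\nsampled_tree = dist.sample(torch.Size([1]))   # z ~ CRF(l), e.g. for policy gradients\nscore = dist.log_prob(best_tree)              # CRF(z; l) for a particular tree",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Library Design",

"sec_num": "4"

},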
|
{ |
|
"text": "We now describe the technical approach underlying the library. To establish notation, first consider the implementation of a softmax categorical distribution, CAT( ), with one-hot categories z with z i = 1 from a set Z and probabilities given by the softmax over logits ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conditional Random Fields", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "CAT(z; ) = exp(z \u2022 ) z \u2208Z exp(z \u2022 ) = exp i K j=1 exp j", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conditional Random Fields", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Define the log-partition as A( ) = LSE( ), i.e. log of the denominator, where LSE is the log-sumexp operator. Computing probabilities or sampling from this distribution, requires enumerating Z to compute the log-partition A. A useful identity is that derivatives of A yield category probabilities,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conditional Random Fields", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "p(z i = 1) = exp i n j=1 exp j = \u2202 \u2202 i A( )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conditional Random Fields", |
|
"sec_num": "5.1" |
|
}, |
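
{

"text": "This identity is easy to check numerically; the fragment below is only a sanity check in plain PyTorch, not library code.\n\nimport torch\n\nlogits = torch.randn(5, requires_grad=True)        # l in R^K\nA = torch.logsumexp(logits, dim=0)                 # log-partition A(l) of the categorical\ngrad_A, = torch.autograd.grad(A, logits)           # dA / dl_i\nassert torch.allclose(grad_A, torch.softmax(logits, dim=0))  # equals p(z_i = 1)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Conditional Random Fields",

"sec_num": "5.1"

},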
|
{ |
|
"text": "Other distributional properties can be similarly extracted from variants of the log-partition. For instance, define A * ( ) = log max K j=1 exp j then 2 : I(z * i = 1) = \u2202 \u2202 i A * ( ). Conditional random fields, CRF( ), extend the softmax to combinatorial spaces where Z is exponentially sized. Each z, is now represented as a binary vector over polynomial-sized set of parts, P, i.e. Z \u2282 {0, 1} |P| . Similarly log-potentials are now defined over parts \u2208 R |P| . For instance, in Figure 2 each span is a part and the vector is shown in the top-left figure. Define the probability of a structure z as,", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 481, |
|
"end": 489, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conditional Random Fields", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "CRF(z; ) = exp z \u2022 z exp z \u2022 = exp p p z p z exp p p z p", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conditional Random Fields", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Computing probabilities or sampling from this distribution, requires computing the log-partition term A. In general, computing this term is now intractable, however for many core algorithms in NLP there are exist efficient combinatorial algorithms for this term (a list of examples is given in Table 1 ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 294, |
|
"end": 301, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conditional Random Fields", |
|
"sec_num": "5.1" |
|
}, |
|
|
{ |
|
"text": "2 This is a subgradient identity, but that deep learning libraries like PyTorch generally default to this value.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conditional Random Fields", |
|
"sec_num": "5.1" |
|
}, |
|
{

"text": "Table 2: Semirings and backpropagation steps for computing different terms of interest.\nName | Ops (⊕, ⊗) | Backprop | Gradients\nLog | LSE, + | ∆ | p(z_p = 1)\nMax | max, + | ∆ | arg max_z\nK-Max | k-max, + | ∆ | K-Argmax\nSample | LSE, + | ∼ | z ∼ CRF(ℓ)\nK-Sample | LSE, + | ∼ | K-Samples\nCount | Σ, ×",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Conditional Random Fields",

"sec_num": "5.1"

},
|
{ |
|
"text": "See (Li and Eisner, 2009) Exp. See (Li and Eisner, 2009 ) Sparsemax See (Mensch and Blondel, 2018) Derivatives of the log-partition again provide useful distributional properties. For instance, the marginal probabilities of parts are given by,", |
|
"cite_spans": [ |
|
{ |
|
"start": 4, |
|
"end": 25, |
|
"text": "(Li and Eisner, 2009)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 35, |
|
"end": 55, |
|
"text": "(Li and Eisner, 2009", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 72, |
|
"end": 98, |
|
"text": "(Mensch and Blondel, 2018)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Name", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "p(z p = 1) = exp z:zp=1 z \u2022 z \u2208 exp z \u2022 = \u2202 \u2202 p A( )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Name", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Similarly derivatives of A * correspond to whether a part appears in the argmax structure, I(z * p = 1) = \u2202 \u2202 p A * ( ). While these gradient identities are well-known (Eisner, 2016) , they are not commonly deployed in practice. Computing CRF properties is typically done through two-step specialized algorithms, such as forward-backward, inside-outside, or similar variants such as viterbi-backpointers (Jurafsky and Martin, 2014) . Common wisdom is that these approaches are more efficient implementations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 182, |
|
"text": "(Eisner, 2016)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 404, |
|
"end": 431, |
|
"text": "(Jurafsky and Martin, 2014)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Name", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "However, we observe that recent engineering of faster gradient computation for deep learning has made gradient-based calculations competitive with hand-written calculations. In our experiments, we found that using these identities with autodiffer-entiation was often faster, and much simpler, than custom two-pass approaches. Torch-Struct is thus designed around using gradients for distributional computations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Name", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Torch-Struct is a collection of generic algorithms for CRF inference. Each CRF distribution object, CRF( ), is constructed by providing \u2208 R |P| where the parts P are specific to the type of distribution. Internally, each distribution is implemented through a single function for computing the logpartition function A( ). From this function, the library uses autodifferentiation and the identities from the previous section, to define a complete distribution object. The core models implemented by the library are shown in Table 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 522, |
|
"end": 529, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dynamic Programming and Semirings", |
|
"sec_num": "5.2" |
|
}, |
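
{

"text": "The pattern can be sketched in a few lines of PyTorch: a distribution is specified only by a function that computes A(ℓ) from the log-potentials, and marginals follow from auto-differentiation. The class below is an illustration of the idea, not the library's actual internals.\n\nimport torch\n\nclass CRFFromLogPartition:\n    # Everything is derived from a single log-partition function A(l).\n    def __init__(self, log_partition, log_potentials):\n        self.log_partition = log_partition\n        self.log_potentials = log_potentials\n\n    @property\n    def partition(self):\n        return self.log_partition(self.log_potentials)\n\n    @property\n    def marginals(self):\n        # p(z_p = 1) = dA(l) / dl_p, the gradient identity from Section 5.1.\n        return torch.autograd.grad(self.partition, self.log_potentials,\n                                   create_graph=True)[0]\n\n    def log_prob(self, parts):\n        # CRF(z; l) = z . l - A(l), with z a binary indicator vector over parts.\n        return (parts * self.log_potentials).sum() - self.partition\n\n# The softmax categorical is the degenerate case: A(l) = LSE(l).\nlogits = torch.randn(4, requires_grad=True)\ncat = CRFFromLogPartition(lambda l: torch.logsumexp(l, dim=0), logits)\nassert torch.allclose(cat.marginals, torch.softmax(logits, dim=0))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dynamic Programming and Semirings",

"sec_num": "5.2"

},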
|
{ |
|
"text": "To make the approach concrete, we consider the example of the simplest structured model, a linear-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Programming and Semirings", |
|
"sec_num": "5.2" |
|
}, |
|
|
{ |
|
"text": "The model has C labels per node with a length N utilizing a first-order linear-chain (Markov) model. This model has N \u2212 1 \u00d7 C \u00d7 C parts corresponding to edges in the chain, and thus \u2208 R N \u22121\u00d7C\u00d7C logpotentials. The log-partition function A( ) factors into two reduce computations,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Programming and Semirings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "A( ) = log c 3 ,c 2 exp 2,c 2 ,c 3 c 1 exp 1,c 1 ,c 2 = LSE c 3 ,c 2 [ 2,c 2 ,c 3 + [LSE c 1 1,c 1 ,c 2 ]]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Programming and Semirings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Computing this function left-to-right using dynamic programming yields the standard forward algorithm for computing the log-partition of sequence models. As we have seen, the gradient with respect to produces marginals for each part, i.e. the probability of a specific labeled edge.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Programming and Semirings", |
|
"sec_num": "5.2" |
|
}, |
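
{

"text": "A concrete sketch of this forward computation for the (N − 1) × C × C log-potentials above, with edge marginals recovered by differentiating A(ℓ) and a brute-force check over all label sequences of a tiny chain; names and shapes are illustrative, not the library's implementation.\n\nimport itertools\nimport torch\n\ndef linear_chain_log_partition(lp):\n    # lp: (N-1, C, C); lp[n, i, j] scores label i at position n followed by label j.\n    alpha = torch.zeros(lp.shape[1])                  # forward scores over labels (log space)\n    for n in range(lp.shape[0]):\n        alpha = torch.logsumexp(alpha.unsqueeze(1) + lp[n], dim=0)\n    return torch.logsumexp(alpha, dim=0)              # A(l)\n\nN, C = 4, 3\nlp = torch.randn(N - 1, C, C, requires_grad=True)\nA = linear_chain_log_partition(lp)\nedge_marginals, = torch.autograd.grad(A, lp)          # p(z_n = i, z_{n+1} = j) per edge\n\n# Brute-force check: enumerate all C**N label sequences of the tiny chain.\nscores = torch.stack([sum(lp[n, seq[n], seq[n + 1]] for n in range(N - 1))\n                      for seq in itertools.product(range(C), repeat=N)])\nassert torch.allclose(A, torch.logsumexp(scores, dim=0))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dynamic Programming and Semirings",

"sec_num": "5.2"

},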
|
{ |
|
"text": "We can further extend the same function to support generic semiring dynamic programming (Goodman, 1999) . A semiring is defined by a pair (\u2295, \u2297) with commutative \u2295, distribution, and appropriate identities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 103, |
|
"text": "(Goodman, 1999)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Programming and Semirings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "A( ) = c 3 ,c 2 [ 2,c 2 ,c 3 \u2297 [ c 1 1,c 1 ,c 2 ]]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dynamic Programming and Semirings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The log-partition utilizes \u2295, \u2297 = (LSE, +), but we can substitute alternatives. For instance, utilizing the log-max semiring (max, +) in the forward algorithm yields the max score. As we have seen, its gradient with respect to is the argmax sequence, negating the need for a separate argmax (Viterbi) algorithm. Some distributional properties cannot be computed directly through gradient identities but still use a forward-backward style compute structure. For instance, sampling requires first computing the log-partition term and then sampling each part, (forward filtering / backward sampling). We can compute this value by overriding each backpropagation operation for the to instead compute a sample. Table 2 shows the set of semirings and backpropagation steps for computing different terms of interest. We note that many of the terms necessary in the case-study can be computed with variant semirings, negating the need for specialized algorithms.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 706, |
|
"end": 713, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dynamic Programming and Semirings", |
|
"sec_num": "5.2" |
|
}, |
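
{

"text": "A sketch of the semiring swap on the same forward recursion: replacing (LSE, +) with (max, +) turns A(ℓ) into the best (Viterbi) score, and its gradient marks the edges of the argmax sequence. This mirrors the library's semiring mechanism but is written as standalone PyTorch for illustration.\n\nimport torch\n\ndef chain_reduce(lp, reduce_fn):\n    # Generic forward pass; reduce_fn plays the role of the semiring sum (LSE or max).\n    alpha = torch.zeros(lp.shape[1])\n    for n in range(lp.shape[0]):\n        alpha = reduce_fn(alpha.unsqueeze(1) + lp[n], dim=0)\n    return reduce_fn(alpha, dim=0)\n\ndef lse(x, dim):\n    return torch.logsumexp(x, dim=dim)\n\ndef vmax(x, dim):\n    return torch.max(x, dim=dim).values\n\nlp = torch.randn(5, 4, 4, requires_grad=True)\nA = chain_reduce(lp, lse)                          # log-partition under (LSE, +)\nA_star = chain_reduce(lp, vmax)                    # max score under (max, +)\nargmax_edges, = torch.autograd.grad(A_star, lp)    # indicator of edges on the best path\nassert torch.allclose((argmax_edges * lp).sum(), A_star)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dynamic Programming and Semirings",

"sec_num": "5.2"

},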
|
{ |
|
"text": "Torch-Struct aims for computational and memory efficiency. Implemented naively, dynamic programming algorithms in Python are prohibitively slow. As such Torch-Struct provides key primitives to help batch and vectorize these algorithms to take advantage of GPU computation and to minimize the overhead of backpropagating through chart-based dynamic programmming. We discuss three optimizations: a) Parallel Scan, b) Vectorization, and c) Semiring Matrix Multiplications. Figure 3 shows the impact of these optimizations on the core algorithms.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 470, |
|
"end": 478, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Optimizations", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Parallel Scan Inference The commutative properties of semiring algorithms allow flexibility in the order in which we compute A( ). Typical implementations of dynamic programming algorithms are serial in the length of the sequence. On parallel hardware, an appealing approach is a parallel scan ordering (S\u00e4rkk\u00e4 and Garc\u00eda-Fern\u00e1ndez, 2019) , typically used for computing prefix sums. To compute, A( ) in this manner we first pad the sequence length N out to the nearest power of two, and then compute a balanced parallel tree over the parts, shown in Figure 4 . Concretely each node layer would compute a semiring matrix multiplication, e.g. c n,\u2022,c \u2297 n+1,c,\u2022 . Under this approach, assuming enough parallel cores, we only need O(log N ) steps in Python and can use parallel operations for the rest. Similar parallel approach can also be used for computing sequence alignment and semi-Markov models. Vectorization Computational complexity is even more of an issue for algorithms that cannot easily be parallelized. For example, parsing algorithms the generalize CKY are common in NLP. The CKY algorithm has a bottleneck that it must compute each width from 1 through N in serial; however internally each one of these steps can be vectorized. Assuming we have computed all inside spans of width less than d, computing the inside span of width d requires computing for all i,", |
|
"cite_spans": [ |
|
{ |
|
"start": 303, |
|
"end": 338, |
|
"text": "(S\u00e4rkk\u00e4 and Garc\u00eda-Fern\u00e1ndez, 2019)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 550, |
|
"end": 558, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Optimizations", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "C[i, i + d] = i+d\u22121 j=i C[i, j] \u2297 C[j + 1, i + d]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Optimizations", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In order to vectorize this loop over i, j, we need to reindex the chart. Instead of using a single chart C, we split it into two parts: one right- i, i+d] . After this reindexing, the update can be written.", |
|
"cite_spans": [ |
|
{ |
|
"start": 147, |
|
"end": 154, |
|
"text": "i, i+d]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Optimizations", |
|
"sec_num": "6" |
|
}, |
|
|
{ |
|
"text": "C r [i, d] = j\u22121 j=1 C r [i, j] \u2297 C l [i + d, N \u2212 d + j]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Optimizations", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Unlike the original, this formula can easily be computed as a vectorized semiring dot product. This allows use to compute C r [\u2022, d] in one operation. Variants of this same approach can be used for many more complex dynamic programs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 132, |
|
"text": "[\u2022, d]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Optimizations", |
|
"sec_num": "6" |
|
}, |
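
{

"text": "A sketch of this width-by-width vectorization for a span-scored (0-th order) CKY chart in plain PyTorch: the chart is stored by width so that every split point for a given width is reduced in a single batched logsumexp; the exact right-/left-facing memory layout used by the library is simplified here.\n\nimport torch\n\ndef tree_log_partition(span_pot):\n    # span_pot[i, j]: log-potential for span (i, j); only the upper triangle is used.\n    N = span_pot.shape[0]\n    # beta[w][i] = inside score of span (i, i + w); width-0 spans are the tokens.\n    beta = [torch.diagonal(span_pot, 0)]\n    for d in range(1, N):\n        # Left pieces of widths 0..d-1 starting at i, right pieces ending at i + d.\n        left = torch.stack([beta[k][: N - d] for k in range(d)])\n        right = torch.stack([beta[d - 1 - k][k + 1 : k + 1 + N - d] for k in range(d)])\n        beta.append(torch.diagonal(span_pot, d) + torch.logsumexp(left + right, dim=0))\n    return beta[N - 1][0]\n\npot = torch.randn(6, 6, requires_grad=True)\nA = tree_log_partition(pot)\nspan_marginals, = torch.autograd.grad(A, pot)   # marginal probability of every span",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Optimizations",

"sec_num": "6"

},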
|
{ |
|
"text": "The two previous optimizations reduce most of the cost to semiring matrix multiplication. In the specific case of the ( , \u00d7) semiring these can be computed very efficiently using matrix multiplication, which is highlytuned on GPU hardware. However, this semiring is not particularly useful and prone to underflow. For other semirings, such as log and max, these operations are either slow or very memory inefficient. For instance, for matrices T and U of sized N \u00d7 M and M \u00d7 O, we can broadcast with \u2297 to a tensor of size N \u00d7 M \u00d7 O and then reduce dim M by at a huge memory cost.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semiring Matrix Operations", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A( ) \u2297 \u2297 I 7,\u2022,\u2022 \u2297 6,\u2022,\u2022 5,\u2022,\u2022 \u2297 \u2297 4,\u2022,\u2022 3,\u2022,\u2022 \u2297 2,\u2022,\u2022 1,\u2022,\u2022", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semiring Matrix Operations", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To avoid this issue, we implement custom CUDA kernels targeting fast and memory efficient tensor operations. For log, this corresponds to computing, V m,o = log n exp(T m,n + U n,o \u2212 q) + q where q = max n T m,n + U n,o . To optimize this operation on GPU we utilize the TVM language (Chen et al., 2018) to layout the CUDA loops and tune it to hardware. This produces much faster operations, although still less efficient that matrix multiplication which is heavily customized to hardware.", |
|
"cite_spans": [ |
|
{ |
|
"start": 284, |
|
"end": 303, |
|
"text": "(Chen et al., 2018)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semiring Matrix Operations", |
|
"sec_num": null |
|
}, |
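
{

"text": "The trade-off can be sketched in PyTorch: a broadcast implementation of the log-semiring product is numerically safe but materializes the N × M × O intermediate, while a shifted ordinary matrix multiplication keeps memory at N × O but can underflow when all terms for an output entry sit far below the row and column maxima; the custom TVM kernel described here avoids both problems.\n\nimport torch\n\ndef logmm_broadcast(T, U):\n    # V[m, o] = log sum_n exp(T[m, n] + U[n, o]); stable, but O(N*M*O) memory.\n    return torch.logsumexp(T.unsqueeze(2) + U.unsqueeze(0), dim=1)\n\ndef logmm_shifted(T, U):\n    # Shift by row/column maxima, multiply in probability space, shift back.\n    t = T.max(dim=1, keepdim=True).values\n    u = U.max(dim=0, keepdim=True).values\n    return torch.log((T - t).exp() @ (U - u).exp()) + t + u\n\nT, U = torch.randn(3, 4), torch.randn(4, 5)\nassert torch.allclose(logmm_broadcast(T, U), logmm_shifted(T, U), atol=1e-5)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Semiring Matrix Operations",

"sec_num": null

},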
|
{ |
|
"text": "We present Torch-Struct, a library for deep structured prediction. The library achieves modularity through its adoption of a generic distributional API, completeness by utilizing CRFs and semirings to make it easy to add new algorithms, and efficiency through core optimizations to vectorize important dynamic programming steps. In addition to the problems discussed so far, Torch-Struct also includes several other example implementations including supervised dependency parsing with BERT, unsupervised tagging, structured attention, and connectionist temporal classification (CTC) for speech. Code demonstrates that the model is able to replicate standard deep learning results, although we focus here on the fidelity and implementation approach of the core library. The full library is available at https: //github.com/harvardnlp/pytorch-struct.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In the future, we hope to support research and production applications employing structured models. We also believe the library provides a strong foundation for building generic tools for interpretablity, control, and visualization through its probabilistic API. Finally, we hope to explore further optimizations to make core algorithms competitive with highly-optimized neural network components. These approaches provide a benchmark for improving autodifferentiation systems and extending their functionality to higher-order properties.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The test suite for each distribution enumerates over all", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank Yoon Kim, Xiang Lisa Li, Sebastian Gehrmann, Yuntian Deng, and Justin Chiu for discussion and feedback on the project. The project was supported by NSF CAREER 1845664, NSF 1901030, and research awards by Sony and AWS.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Statistical inference for probabilistic functions of finite state markov chains. The annals of mathematical statistics", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Leonard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ted", |
|
"middle": [], |
|
"last": "Baum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Petrie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1966, |
|
"venue": "", |
|
"volume": "37", |
|
"issue": "", |
|
"pages": "1554--1563", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leonard E Baum and Ted Petrie. 1966. Statistical inference for probabilistic functions of finite state markov chains. The annals of mathematical statis- tics, 37(6):1554-1563.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Pyro: Deep universal probabilistic programming", |
|
"authors": [ |
|
{ |
|
"first": "Eli", |
|
"middle": [], |
|
"last": "Bingham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Jankowiak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fritz", |
|
"middle": [], |
|
"last": "Obermeyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Neeraj", |
|
"middle": [], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Theofanis", |
|
"middle": [], |
|
"last": "Karaletsos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rohit", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Szerlip", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Horsfall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah D", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "The Journal of Machine Learning Research", |
|
"volume": "20", |
|
"issue": "1", |
|
"pages": "973--978", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eli Bingham, Jonathan P Chen, Martin Jankowiak, Fritz Obermeyer, Neeraj Pradhan, Theofanis Kar- aletsos, Rohit Singh, Paul Szerlip, Paul Horsfall, and Noah D Goodman. 2019. Pyro: Deep universal probabilistic programming. The Journal of Machine Learning Research, 20(1):973-978.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Tvm: end-to-end optimization stack for deep learning", |
|
"authors": [ |
|
{ |
|
"first": "Tianqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thierry", |
|
"middle": [], |
|
"last": "Moreau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ziheng", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haichen", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eddie", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leyuan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuwei", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luis", |
|
"middle": [], |
|
"last": "Ceze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arvind", |
|
"middle": [], |
|
"last": "Krishnamurthy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1802.04799" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianqi Chen, Thierry Moreau, Ziheng Jiang, Haichen Shen, Eddie Yan, Leyuan Wang, Yuwei Hu, Luis Ceze, Carlos Guestrin, and Arvind Krishnamurthy. 2018. Tvm: end-to-end optimization stack for deep learning. arXiv preprint arXiv:1802.04799.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Natural language processing (almost) from scratch", |
|
"authors": [ |
|
{ |
|
"first": "Ronan", |
|
"middle": [], |
|
"last": "Collobert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L\u00e9on", |
|
"middle": [], |
|
"last": "Bottou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Karlen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Koray", |
|
"middle": [], |
|
"last": "Kavukcuoglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Kuksa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of machine learning research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2493--2537", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronan Collobert, Jason Weston, L\u00e9on Bottou, Michael Karlen, Koray Kavukcuoglu, and Pavel Kuksa. 2011. Natural language processing (almost) from scratch. Journal of machine learning research, 12(Aug):2493-2537.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Neural crf parsing", |
|
"authors": [ |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Durrett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1507.03641" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Greg Durrett and Dan Klein. 2015. Neural crf parsing. arXiv preprint arXiv:1507.03641.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Bilexical grammars and their cubic-time parsing algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Eisner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Advances in probabilistic and other parsing technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "29--61", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Eisner. 2000. Bilexical grammars and their cubic-time parsing algorithms. In Advances in prob- abilistic and other parsing technologies, pages 29- 61. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Inside-outside and forwardbackward algorithms are just backprop (tutorial paper)", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Eisner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Workshop on Structured Prediction for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--17", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Eisner. 2016. Inside-outside and forward- backward algorithms are just backprop (tutorial pa- per). In Proceedings of the Workshop on Structured Prediction for NLP, pages 1-17.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Dyna: A declarative language for implementing dynamic programs", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Eisner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Goldlust", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah A", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the ACL 2004 on Interactive poster and demonstration sessions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Eisner, Eric Goldlust, and Noah A Smith. 2004. Dyna: A declarative language for implementing dy- namic programs. In Proceedings of the ACL 2004 on Interactive poster and demonstration sessions, pages 32-es.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Efficient, feature-based, conditional random field parsing", |
|
"authors": [ |
|
{ |
|
"first": "Jenny", |
|
"middle": [ |
|
"Rose" |
|
], |
|
"last": "Finkel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Kleeman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of ACL-08: HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "959--967", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jenny Rose Finkel, Alex Kleeman, and Christopher D Manning. 2008. Efficient, feature-based, condi- tional random field parsing. In Proceedings of ACL- 08: HLT, pages 959-967.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Factorial hidden markov models", |
|
"authors": [ |
|
{ |
|
"first": "Zoubin", |
|
"middle": [], |
|
"last": "Ghahramani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael I Jordan", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "472--478", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zoubin Ghahramani and Michael I Jordan. 1996. Fac- torial hidden markov models. In Advances in Neural Information Processing Systems, pages 472-478.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "A fully bayesian approach to unsupervised part-of-speech tagging", |
|
"authors": [ |
|
{ |
|
"first": "Sharon", |
|
"middle": [], |
|
"last": "Goldwater", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Griffiths", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th annual meeting of the association of computational linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "744--751", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sharon Goldwater and Tom Griffiths. 2007. A fully bayesian approach to unsupervised part-of-speech tagging. In Proceedings of the 45th annual meet- ing of the association of computational linguistics, pages 744-751.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Semiring parsing", |
|
"authors": [ |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Computational Linguistics", |
|
"volume": "25", |
|
"issue": "4", |
|
"pages": "573--605", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joshua Goodman. 1999. Semiring parsing. Computa- tional Linguistics, 25(4):573-605.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Cooperative learning of disjoint syntax and semantics", |
|
"authors": [ |
|
{ |
|
"first": "Serhii", |
|
"middle": [], |
|
"last": "Havrylov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Germ\u00e1n", |
|
"middle": [], |
|
"last": "Kruszewski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1902.09393" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Serhii Havrylov, Germ\u00e1n Kruszewski, and Armand Joulin. 2019. Cooperative learning of disjoint syntax and semantics. arXiv preprint arXiv:1902.09393.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Svmstruct: Support vector machine for complex outputs", |
|
"authors": [ |
|
{ |
|
"first": "Thorsten", |
|
"middle": [], |
|
"last": "Joachims", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thorsten Joachims. 2008. Svmstruct: Support vector machine for complex outputs.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Composing graphical models with neural networks for structured representations and fast inference", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Matthew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Duvenaud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wiltschko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Ryan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandeep R", |
|
"middle": [], |
|
"last": "Adams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Datta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2946--2954", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew J Johnson, David K Duvenaud, Alex Wiltschko, Ryan P Adams, and Sandeep R Datta. 2016. Composing graphical models with neural net- works for structured representations and fast infer- ence. In Advances in neural information processing systems, pages 2946-2954.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Speech and language processing", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "James", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Jurafsky and James H Martin. 2014. Speech and language processing. vol. 3.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "An efficient recognition and syntax-analysis algorithm for context-free languages", |
|
"authors": [ |
|
{ |
|
"first": "Tadao", |
|
"middle": [], |
|
"last": "Kasami", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1966, |
|
"venue": "Coordinated Science Laboratory Report", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tadao Kasami. 1966. An efficient recognition and syntax-analysis algorithm for context-free lan- guages. Coordinated Science Laboratory Report no. R-257.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Structured prediction models via the matrix-tree theorem", |
|
"authors": [ |
|
{ |
|
"first": "Terry", |
|
"middle": [], |
|
"last": "Koo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Globerson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xavier", |
|
"middle": [ |
|
"Carreras" |
|
], |
|
"last": "P\u00e9rez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "141--150", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Terry Koo, Amir Globerson, Xavier Carreras P\u00e9rez, and Michael Collins. 2007. Structured prediction models via the matrix-tree theorem. In Joint Con- ference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL), pages 141-150.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Crf++: Yet another crf toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taku Kudo. 2005. Crf++: Yet another crf toolkit. http://crfpp. sourceforge. net/.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando Cn", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Lafferty, Andrew McCallum, and Fernando CN Pereira. 2001. Conditional random fields: Prob- abilistic models for segmenting and labeling se- quence data.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "First-and secondorder expectation semirings with applications to minimum-risk training on translation forests", |
|
"authors": [ |
|
{ |
|
"first": "Zhifei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Eisner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "40--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhifei Li and Jason Eisner. 2009. First-and second- order expectation semirings with applications to minimum-risk training on translation forests. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing: Volume 1-Volume 1, pages 40-51. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Turbo parsers: Dependency parsing by approximate variational inference", |
|
"authors": [ |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [ |
|
"F", |
|
"T" |
|
], |
|
"last": "Martins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Xing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pedro", |
|
"middle": [ |
|
"M", |
|
"Q" |
|
], |
|
"last": "Aguiar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M\u00e1rio", |
|
"middle": [ |
|
"A", |
|
"T" |
|
], |
|
"last": "Figueiredo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "34--44", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andr\u00e9 FT Martins, Noah A Smith, Eric P Xing, Pe- dro MQ Aguiar, and M\u00e1rio AT Figueiredo. 2010. Turbo parsers: Dependency parsing by approximate variational inference. In Proceedings of the 2010 Conference on Empirical Methods in Natural Lan- guage Processing, pages 34-44. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Non-projective dependency parsing using spanning tree algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kiril", |
|
"middle": [], |
|
"last": "Ribarov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Haji\u010d", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the conference on Human Language Technology and Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "523--530", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan McDonald, Fernando Pereira, Kiril Ribarov, and Jan Haji\u010d. 2005. Non-projective dependency pars- ing using spanning tree algorithms. In Proceedings of the conference on Human Language Technology and Empirical Methods in Natural Language Pro- cessing, pages 523-530. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Differentiable dynamic programming for structured prediction and attention", |
|
"authors": [ |
|
{ |
|
"first": "Arthur", |
|
"middle": [], |
|
"last": "Mensch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mathieu", |
|
"middle": [], |
|
"last": "Blondel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1802.03676" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arthur Mensch and Mathieu Blondel. 2018. Dif- ferentiable dynamic programming for struc- tured prediction and attention. arXiv preprint arXiv:1802.03676.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Pystruct: learning structured prediction in python", |
|
"authors": [ |
|
{ |
|
"first": "Andreas", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sven", |
|
"middle": [], |
|
"last": "Behnke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "The Journal of Machine Learning Research", |
|
"volume": "15", |
|
"issue": "1", |
|
"pages": "2055--2060", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andreas C M\u00fcller and Sven Behnke. 2014. Pys- truct: learning structured prediction in python. The Journal of Machine Learning Research, 15(1):2055- 2060.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Listops: A diagnostic dataset for latent tree learning", |
|
"authors": [ |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Samuel R Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.06028" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikita Nangia and Samuel R Bowman. 2018. Listops: A diagnostic dataset for latent tree learning. arXiv preprint arXiv:1804.06028.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "A general method applicable to the search for similarities in the amino acid sequence of two proteins", |
|
"authors": [ |
|
{ |
|
"first": "Saul", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Needleman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Wunsch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1970, |
|
"venue": "Journal of molecular biology", |
|
"volume": "48", |
|
"issue": "3", |
|
"pages": "443--453", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saul B Needleman and Christian D Wunsch. 1970. A general method applicable to the search for simi- larities in the amino acid sequence of two proteins. Journal of molecular biology, 48(3):443-453.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Crfsuite: a fast implementation of conditional random fields (crfs)", |
|
"authors": [ |
|
{ |
|
"first": "Naoaki", |
|
"middle": [], |
|
"last": "Okazaki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Naoaki Okazaki. 2007. Crfsuite: a fast implementation of conditional random fields (crfs).", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Self-critical sequence training for image captioning", |
|
"authors": [ |
|
{ |
|
"first": "Steven", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Rennie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Etienne", |
|
"middle": [], |
|
"last": "Marcheret", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Youssef", |
|
"middle": [], |
|
"last": "Mroueh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jerret", |
|
"middle": [], |
|
"last": "Ross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vaibhava", |
|
"middle": [], |
|
"last": "Goel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7008--7024", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steven J Rennie, Etienne Marcheret, Youssef Mroueh, Jerret Ross, and Vaibhava Goel. 2017. Self-critical sequence training for image captioning. In Proceed- ings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7008-7024.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Semimarkov conditional random fields for information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Sunita", |
|
"middle": [], |
|
"last": "Sarawagi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "William W Cohen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1185--1192", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sunita Sarawagi and William W Cohen. 2005. Semi- markov conditional random fields for information extraction. In Advances in neural information pro- cessing systems, pages 1185-1192.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Temporal parallelization of bayesian filters and smoothers", |
|
"authors": [ |
|
{ |
|
"first": "Simo", |
|
"middle": [], |
|
"last": "S\u00e4rkk\u00e4", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Garc\u00eda-Fern\u00e1ndez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1905.13002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simo S\u00e4rkk\u00e4 and\u00c1ngel F Garc\u00eda-Fern\u00e1ndez. 2019. Temporal parallelization of bayesian filters and smoothers. arXiv preprint arXiv:1905.13002.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Word reordering and a dynamic programming beam search algorithm for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Christoph", |
|
"middle": [], |
|
"last": "Tillmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Computational linguistics", |
|
"volume": "29", |
|
"issue": "1", |
|
"pages": "97--133", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christoph Tillmann and Hermann Ney. 2003. Word re- ordering and a dynamic programming beam search algorithm for statistical machine translation. Com- putational linguistics, 29(1):97-133.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Hmm-based word alignment in statistical translation", |
|
"authors": [ |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Vogel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christoph", |
|
"middle": [], |
|
"last": "Tillmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Proceedings of the 16th conference on Computational linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "836--841", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephan Vogel, Hermann Ney, and Christoph Tillmann. 1996. Hmm-based word alignment in statistical translation. In Proceedings of the 16th conference on Computational linguistics-Volume 2, pages 836- 841. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Learning neural templates for text generation", |
|
"authors": [ |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Wiseman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stuart", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Shieber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1808.10122" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sam Wiseman, Stuart M Shieber, and Alexander M Rush. 2018. Learning neural templates for text gen- eration. arXiv preprint arXiv:1808.10122.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Ncrf++: An opensource neural sequence labeling toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jie Yang and Yue Zhang. 2018. Ncrf++: An open- source neural sequence labeling toolkit. In Proceed- ings of the 56th Annual Meeting of the Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Learning to compose words into sentences with reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Dani", |
|
"middle": [], |
|
"last": "Yogatama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Grefenstette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wang", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1611.09100" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dani Yogatama, Phil Blunsom, Chris Dyer, Edward Grefenstette, and Wang Ling. 2016. Learning to compose words into sentences with reinforcement learning. arXiv preprint arXiv:1611.09100.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Latent Tree CRF example where each cell represents a span (i, j). Torch-Struct can be used to compute many different properties of a structured distribution. (a) Log-potentials for each part/span. (b) Marginals for CRF( ) computed by backpropagation. (c) A single argmax tree arg max z CRF(z; ). (d) A single sampled tree z \u223c CRF( )." |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Speed impact of optimizations. Time is given in seconds for 10 runs with batch 16. (a) Speed of a linearchain forward with 20 classes for lengths up to 500. Compares left-to-right ordering to parallel scan. (b) Speed of CKY inside with lengths up to 80. Compares inner loop versus vectorization. (c) Speed of linear-chain forward of length 20 with up to 100 classes. Compares broadcast-reduction versus CUDA semiring kernel. (Baseline memory is exhausted after 100 classes.)" |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Parallel scan implementation of the linearchain CRF inference algorithm (parallel forward). Here \u2297 represents a semiring matrix operation and I is padding to produce a balanced tree." |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |