|
{ |
|
"paper_id": "I11-1003", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:32:30.388309Z" |
|
}, |
|
"title": "Learning Logical Structures of Paragraphs in Legal Articles", |
|
"authors": [ |
|
{

"first": "Ngo",

"middle": [

"Xuan"

],

"last": "Bach",

"suffix": "",

"affiliation": {

"laboratory": "",

"institution": "Japan Advanced Institute of Science and Technology",

"location": {

"addrLine": "1-1 Asahidai",

"postCode": "923-1292",

"settlement": "Nomi, Ishikawa",

"country": "Japan"

}

},

"email": "bachnx@jaist.ac.jp"

},

{

"first": "Nguyen",

"middle": [

"Le"

],

"last": "Minh",

"suffix": "",

"affiliation": {

"laboratory": "",

"institution": "Japan Advanced Institute of Science and Technology",

"location": {

"addrLine": "1-1 Asahidai",

"postCode": "923-1292",

"settlement": "Nomi, Ishikawa",

"country": "Japan"

}

},

"email": ""

},

{

"first": "Tran",

"middle": [

"Thi"

],

"last": "Oanh",

"suffix": "",

"affiliation": {

"laboratory": "",

"institution": "Japan Advanced Institute of Science and Technology",

"location": {

"addrLine": "1-1 Asahidai",

"postCode": "923-1292",

"settlement": "Nomi, Ishikawa",

"country": "Japan"

}

},

"email": "oanhtt@jaist.ac.jp"

},

{

"first": "Akira",

"middle": [],

"last": "Shimazu",

"suffix": "",

"affiliation": {

"laboratory": "",

"institution": "Japan Advanced Institute of Science and Technology",

"location": {

"addrLine": "1-1 Asahidai",

"postCode": "923-1292",

"settlement": "Nomi, Ishikawa",

"country": "Japan"

}

},

"email": "shimazu@jaist.ac.jp"

}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper presents a new task, learning logical structures of paragraphs in legal articles, which is studied in research on Legal Engineering (Katayama, 2007). The goals of this task are recognizing logical parts of law sentences in a paragraph, and then grouping related logical parts into some logical structures of formulas, which describe logical relations between logical parts. We present a two-phase framework to learn logical structures of paragraphs in legal articles. In the first phase, we model the problem of recognizing logical parts in law sentences as a multi-layer sequence learning problem, and present a CRF-based model to recognize them. In the second phase, we propose a graph-based method to group logical parts into logical structures. We consider the problem of finding a subset of complete sub-graphs in a weighted-edge complete graph, where each node corresponds to a logical part, and a complete sub-graph corresponds to a logical structure. We also present an integer linear programming formulation for this optimization problem. Our models achieve 74.37% in recognizing logical parts, 79.59% in recognizing logical structures, and 55.73% in the whole task on the Japanese National Pension Law corpus.", |
|
"pdf_parse": { |
|
"paper_id": "I11-1003", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper presents a new task, learning logical structures of paragraphs in legal articles, which is studied in research on Legal Engineering (Katayama, 2007). The goals of this task are recognizing logical parts of law sentences in a paragraph, and then grouping related logical parts into some logical structures of formulas, which describe logical relations between logical parts. We present a two-phase framework to learn logical structures of paragraphs in legal articles. In the first phase, we model the problem of recognizing logical parts in law sentences as a multi-layer sequence learning problem, and present a CRF-based model to recognize them. In the second phase, we propose a graph-based method to group logical parts into logical structures. We consider the problem of finding a subset of complete sub-graphs in a weighted-edge complete graph, where each node corresponds to a logical part, and a complete sub-graph corresponds to a logical structure. We also present an integer linear programming formulation for this optimization problem. Our models achieve 74.37% in recognizing logical parts, 79.59% in recognizing logical structures, and 55.73% in the whole task on the Japanese National Pension Law corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Legal Engineering (Katayama, 2007) is a new research field which aims to achieve a trustworthy electronic society. Legal Engineering regards that laws are a kind of software for our society. Specifically, laws such as pension law are specifications for information systems such as pension systems.", |
|
"cite_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 34, |
|
"text": "(Katayama, 2007)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To achieve a trustworthy society, laws need to be verified about their consistency and contradiction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Legal texts have some specific characteristics that make them different from other kinds of documents. One of the most important characteristics is that legal texts usually have some specific structures at both sentence and paragraph levels. At the sentence level, a law sentence can roughly be divided into two logical parts: requisite part and effectuation part (Bach, 2011a; Bach et al., 2011b; Tanaka eta al., 1993) . At the paragraph level, a paragraph usually contains a main sentence 1 and one or more subordinate sentences (Takano et al., 2010) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 364, |
|
"end": 377, |
|
"text": "(Bach, 2011a;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 378, |
|
"end": 397, |
|
"text": "Bach et al., 2011b;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 398, |
|
"end": 419, |
|
"text": "Tanaka eta al., 1993)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 531, |
|
"end": 552, |
|
"text": "(Takano et al., 2010)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Analyzing logical structures of legal texts is an important task in Legal Engineering. The outputs of this task will be beneficial to people in understanding legal texts. They can easily understand 1) what does a law sentence say? 2) what cases in which the law sentence can be applied? and 3) what subjects are related to the provision described in the law sentence? This task is the preliminary step, which supports other tasks in legal text processing (translating legal articles into logical and formal representations, legal text summarization, legal text translation, question answering in legal domains, etc) and serves legal text verification, an important goal of Legal Engineering.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There have been some studies analyzing logical structures of legal texts. (Bach et al., 2011b) presents the RRE task 2 , which recognizes the logical structure of law sentences. (Bach et al., 2010) describes an investigation on contributions of words to the RRE task. (Kimura et al., 2009) focuses on dealing with legal sentences including itemized and referential expressions. These works, however, only analyze logical structures of legal texts at the sentence level. At the paragraph level, (Takano et al., 2010) classifies a legal paragraph into one of six predefined categories: A, B, C, D, E, and F . Among six types, Type A, B, and C correspond to cases in which the main sentence is the first sentence, and subordinate sentences are other sentences. In paragraphs of Type D, E, and F , the main sentence is the first or the second sentence, and a subordinate sentence is an embedded sentence in parentheses within the main sentence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 94, |
|
"text": "(Bach et al., 2011b)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 197, |
|
"text": "(Bach et al., 2010)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 268, |
|
"end": 289, |
|
"text": "(Kimura et al., 2009)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 494, |
|
"end": 515, |
|
"text": "(Takano et al., 2010)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we present a task of learning logical structures of legal articles at the paragraph level. We propose a two-phase framework to complete the task. We also describe experimental results on real legal data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our main contributions can be summarized in the following points:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Introducing a new task to legal text processing, learning logical structures of paragraphs in legal articles.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Presenting an annotated corpus for the task, the Japanese National Pension Law corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Proposing a two-phase framework and providing solutions to solve the task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Evaluating our framework on the real annotated corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of this paper is organized as follows. Section 2 describes our task and its two sub-tasks: recognition of logical parts and recognition of logical structures. In Section 3, we present our framework and proposed solutions. Experimental results on real legal articles are described in Section 4. Finally, Section 5 gives some conclusions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Learning logical structures of paragraphs in legal articles is the task of recognition of logical structures between logical parts in law sentences. A logical structure is usually formed from a pair of a requisite part and an effectuation part. These two parts are built from other kinds of logical parts such as topic parts, antecedent parts, consequent parts, and so on (Bach, 2011a; Bach et al., 2011b) 3 . Usually, consequent parts describes a law provision, antecedent parts describes cases in which the law provision can be applied, and topic parts describe subjects which are related to the law provision. In this paper, a logical structure can be defined as a set of some related logical parts. Figure 1 shows two cases of the inputs and outputs of the task. In the first case, the input is a paragraph of two sentences, and the outputs are four logical parts, which are grouped into two logical structures. In the second case, the input is a paragraph consisting of four sentences, and the outputs are four logical parts, which are grouped into three logical structures. An example in natural language 4 is presented in Figure 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 372, |
|
"end": 385, |
|
"text": "(Bach, 2011a;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 386, |
|
"end": 405, |
|
"text": "Bach et al., 2011b)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 703, |
|
"end": 711, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 1129, |
|
"end": 1137, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Formulation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Let s be a law sentence in the law sentence space S, then s can be represented by a sequence of words s = [w 1 w 2 . . . w n ]. A legal paragraph x in the legal paragraph space X is a sequence of law sentences", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 1: Recognition of Logical Parts", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "x = [s 1 s 2 . . . s l ], where s i \u2208 S, \u2200i = 1, 2, . . . , l.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 1: Recognition of Logical Parts", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "For each paragraph x, we denote a log-ical part p by a quad-tuple p = (b, e, k, c) where b, e, and k are three integers which indicate position of the beginning word, position of the end word, and sentence position of p, and c is a logical part category in the set of predefined categories C. Formally, the set P of all possible logical parts defined in a paragraph x can be described as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 1: Recognition of Logical Parts", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "P = {(b, e, k, c)|1 \u2264 k \u2264 l, 1 \u2264 b \u2264 e \u2264 len(k), c \u2208 C}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 1: Recognition of Logical Parts", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In the above definition, l is the number of sentences in the paragraph x, and len(k) is the length of the k th sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 1: Recognition of Logical Parts", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In this sub-task, we want to recognize some non-overlapping (but possibly embedded) logical parts in an input paragraph. A solution for this task is a subset y \u2286 P which does not violate the overlapping relationship. We say that two logical parts p 1 and p 2 are overlapping if and only if they are in the same sentence (k 1 = k 2 ) and b 1 < b 2 \u2264 e 1 < e 2 or b 2 < b 1 \u2264 e 2 < e 1 . We denote the overlapping relationship by \u223c. We also say that p 1 is embedded in p 2 if and only if they are in the same sentence (k 1 = k 2 ) and b 2 \u2264 b 1 \u2264 e 1 \u2264 e 2 , and denote the embedded relationship by \u227a. Formally, the solution space can be described as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 1: Recognition of Logical Parts", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Y = {y \u2286 P |\u2200u, v \u2208 y, u \u223c v}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 1: Recognition of Logical Parts", |
|
"sec_num": "2.1" |
|
}, |
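
{

"text": "As an illustration, the following minimal Python sketch (our own naming, not code from the paper) encodes the quad-tuple representation of logical parts and the overlapping and embedded relations defined above.\n\nfrom collections import namedtuple\n\n# A logical part p = (b, e, k, c): begin word, end word, sentence index, and category.\nPart = namedtuple('Part', ['b', 'e', 'k', 'c'])\n\ndef overlaps(p1, p2):\n    # p1 ~ p2: same sentence and the two spans cross without one containing the other.\n    return p1.k == p2.k and (p1.b < p2.b <= p1.e < p2.e or p2.b < p1.b <= p2.e < p1.e)\n\ndef embedded(p1, p2):\n    # p1 is embedded in p2: same sentence and the span of p1 lies inside the span of p2.\n    return p1.k == p2.k and p2.b <= p1.b <= p1.e <= p2.e\n\ndef is_valid_solution(parts):\n    # A solution y is admissible when no two chosen parts overlap (embedding is allowed).\n    return all(not overlaps(u, v) for u in parts for v in parts if u is not v)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Sub-Task 1: Recognition of Logical Parts",

"sec_num": "2.1"

},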
|
{ |
|
"text": "The learning problem in this sub-task is to learn a function R : X \u2192 Y from a set of m training samples", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 1: Recognition of Logical Parts", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "{(x i , y i )|x i \u2208 X, y i \u2208 Y, \u2200i = 1, 2, . . . , m}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 1: Recognition of Logical Parts", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In our task, we consider the following types of logical parts:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 1: Recognition of Logical Parts", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "1. An antecedent part is denoted by A 2. A consequent part is denoted by C 3. A topic part which depends on the antecedent part is denoted by T 1 4. A topic part which depends on the consequent part is denoted by T 2 5. A topic part which depends on both the antecedent part and the consequent part is denoted by T 3 6. The left part of an equivalent statement is denoted by EL 7. The right part of an equivalent statement is denoted by ER 8. An object part, whose meaning is defined differently in different cases, is denoted by Ob 9. An original replacement part, which will be replaced by other replacement parts (denoted by RepR) in specific cases, is denoted by RepO.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 1: Recognition of Logical Parts", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Compared with previous works (Bach et al., 2011b) , we introduce three new kinds of logical parts: Ob, RepO, and RepR.", |
|
"cite_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 49, |
|
"text": "(Bach et al., 2011b)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 1: Recognition of Logical Parts", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In the second sub-task, the goal is to recognize a set of logical structures given a set of logical parts. Let G =< V, E > be a complete undirected graph with the vertex set V and the edge set E. A real value function f is defined on E as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 2: Recognition of Logical Structures", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "f : E \u2192 R, e \u2208 E \u2192 f (e) \u2208 R. In this sub-task, each vertex of the graph corresponds to a logical part, and a complete sub-graph corresponds to a logical structure. The value on an edge connecting two vertices expresses the degree that the two vertices belong to one logical structure. The positive (negative) value means that two vertices are likely (not likely) to belong to one logical structure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 2: Recognition of Logical Structures", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Let G s be a complete sub-graph of G, then v(G s ) and e(G s ) are the set of vertices and the set of edges of G s , respectively. We define the total value of a sub-graph as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 2: Recognition of Logical Structures", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "f (G s ) = f (e(G s )) = e\u2208e(Gs) f (e).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 2: Recognition of Logical Structures", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Let \u2126 be the set of all complete sub-graphs of G. The problem becomes determining a subset \u03a8 \u2286 \u2126 that satisfies the following constraints:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 2: Recognition of Logical Structures", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "1. \u2200g \u2208 \u03a8, |v(g)| \u2265 2, 2. \u222a g\u2208\u03a8 v(g) = V , 3. \u2200g 1 , g 2 \u2208 \u03a8|v(g 1 ) \u2286 v(g 2 ) \u21d2 v(g 1 ) = v(g 2 ), 4. \u2200g \u2208 \u03a8, \u222a h\u2208\u03a8,h =g v(h) = V , and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 2: Recognition of Logical Structures", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "5. g\u2208\u03a8 f (g) \u2192 maximize. Constraint 1), minimal constraint, says that each logical structure must contain at least two logical parts. There is the case that a logical structure contains only a consequent part. Due to the characteristics of Japanese law sentences, however, our corpus does not contain such cases. A logical structure which contains a consequent part will also contain a topic part or an antecedent part or both of them. So a logical structure contains at least two logical parts. Constraint 2), complete constraint, says that each logical part must belong to at least one logical structure. Constraint 3), maximal constraint, says that we cannot have two different logical structures such that the set of logical parts in one logical structure contains the set of logical parts in the other logical structure. Constraint 4), significant constraint, says that if we remove any logical structure from the solution, Constraint 2) will be violated. Although Constraint 3) is guaranteed by Constraint 4), we introduce it because of its importance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 2: Recognition of Logical Structures", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "3 Proposed Solutions", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sub-Task 2: Recognition of Logical Structures", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "This sub-section presents our model for recognizing logical parts. We consider the recognition problem as a multi-layer sequence learning problem. First, we give some related notions. Let s be a law sentence, and P be the set of logical parts of s, P = {p 1 , p 2 , . . . , p m }. Layer 1 (s) (outer most layer) is defined as a set of logical parts in P , which are not embedded in any other part. Layer i (s) is defined as a set of logical parts in P \\ \u222a i\u22121 k=1 Layer k (s), which are not embedded in any other part in P \\\u222a i\u22121 k=1 Layer k (s). Formally, we have:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-layer Sequence Learning for Logical Part Recognition", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Layer 1 (s) = {p|p \u2208 P, p \u227a q, \u2200q \u2208 P, q = p}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-layer Sequence Learning for Logical Part Recognition", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Layer Figure 3 illustrates a law sentence with four logical parts in three layers: Part 1 and Part 2 in Layer 1 , Part 3 in Layer 2 , and Part 4 in Layer 3 . Let K be the number of layers in a law sentence s, our model will recognize logical parts in K steps. In the k th step we recognize logical parts in Layer k . In each layer, we model the recognition problem as a sequence labeling task in which each word is an element. Logical parts in Layer i\u22121 will be used as input sequence in the i th step (in the first step, we use original sentence as input). Figure 4 gives an example of labeling for an input sentence. The sentence consists of three logical parts in two layers. In our model, we use IOE tag setting: the last element of a part is tagged with E, the other elements of a part are tagged with I, and an element not included in any part is tagged with O.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 14, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 558, |
|
"end": 566, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Multi-layer Sequence Learning for Logical Part Recognition", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "i (s) = {p|p \u2208 Q i , p \u227a q, \u2200q \u2208 Q i , q = p}, where Q i = P \\ \u222a i\u22121 k=1 Layer k (s)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-layer Sequence Learning for Logical Part Recognition", |
|
"sec_num": "3.1" |
|
}, |
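
{

"text": "To make the IOE tag setting concrete, the following small Python sketch (our own illustration, reusing the Part tuple defined in Section 2.1; b and e are 1-based word positions) converts the logical parts of one layer into a tag sequence.\n\ndef to_ioe(n_tokens, layer_parts):\n    # last word of a part -> E-<category>, its other words -> I-<category>, everything else -> O\n    tags = ['O'] * n_tokens\n    for p in layer_parts:              # parts within a single layer never overlap\n        for i in range(p.b - 1, p.e):  # word positions b..e are 1-based and inclusive\n            tags[i] = 'I-' + p.c\n        tags[p.e - 1] = 'E-' + p.c\n    return tags",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Multi-layer Sequence Learning for Logical Part Recognition",

"sec_num": "3.1"

},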
|
{ |
|
"text": "Let K * be the maximum number of layers in all law sentences in training data. We learn K * models, in which the k th model is learned from logical parts in the Layer k of training data, using Conditional random fields (Lafferty et al., 2001; Kudo, CRF toolkit) . In the testing phase, we first apply the first model to the input law sentence, and then apply the i th model to the predicted logical parts in Layer i\u22121 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 219, |
|
"end": 242, |
|
"text": "(Lafferty et al., 2001;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 243, |
|
"end": 261, |
|
"text": "Kudo, CRF toolkit)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-layer Sequence Learning for Logical Part Recognition", |
|
"sec_num": "3.1" |
|
}, |
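
{

"text": "The cascaded decoding described above can be sketched as follows (a simplified Python sketch under assumed interfaces: crf_models[k].predict returns a tag sequence for layer k+1, and ioe_to_parts is an assumed inverse of the IOE encoding; for brevity, part positions are kept relative to the sub-sequence in which they are found rather than mapped back to the original sentence).\n\ndef recognize_parts(sentence_tokens, crf_models, k_layers):\n    parts = []\n    inputs = [sentence_tokens]                 # token sequences to label at the current layer\n    for k in range(k_layers):\n        next_inputs = []\n        for seq in inputs:\n            tags = crf_models[k].predict(seq)  # assumed CRF wrapper interface\n            layer_parts = ioe_to_parts(tags)   # assumed helper: IOE tags -> Part tuples\n            parts.extend(layer_parts)\n            # parts predicted at this layer become the input sequences of the next layer\n            next_inputs.extend(seq[p.b - 1:p.e] for p in layer_parts)\n        inputs = next_inputs\n    return parts",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Multi-layer Sequence Learning for Logical Part Recognition",

"sec_num": "3.1"

},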
|
{ |
|
"text": "Suppose that G is a sub-graph of G such that G contains all the vertices of G and the degree of each vertex in G is greater than zero, then the set of all the maximal complete sub-graphs (or cliques) of G will satisfy all the minimal, complete, maximal, and significant constraints. We also note that, a set of cliques that satisfies all these four constraints will form a sub-graph that has two properties like properties of G .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ILP for Recognizing Logical Structures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Let \u039b be the set of all such sub-graphs G of G, the sub-task now consists of two steps:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ILP for Recognizing Logical Structures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "1. Finding G = argmax G \u2208\u039b f (G ), and 2. Finding all cliques of G .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ILP for Recognizing Logical Structures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Each clique found in the second step will correspond to a logical structure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ILP for Recognizing Logical Structures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Recently, some researches have shown that integer linear programming (ILP) formulations is an effective way to solve many NLP problems such as semantic role labeling (Punyakanok, 2004) , coreference resolution (Denis and Baldridge, 2007) , summarization (Clarke and Lapata, 2008) , dependency parsing (Martins et al., 2009) , and so on. The advantage of ILP formulations is that we can incorporate non-local features or global constraints easily, which are difficult in traditional algorithms. Although solving an ILP is NP-hard in general, some fast algorithms and available tools 5 make it a practical solution for many NLP problems (Martins et al., 2009) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 166, |
|
"end": 184, |
|
"text": "(Punyakanok, 2004)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 210, |
|
"end": 237, |
|
"text": "(Denis and Baldridge, 2007)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 254, |
|
"end": 279, |
|
"text": "(Clarke and Lapata, 2008)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 301, |
|
"end": 323, |
|
"text": "(Martins et al., 2009)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 635, |
|
"end": 657, |
|
"text": "(Martins et al., 2009)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ILP for Recognizing Logical Structures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In this work, we exploit ILP to solve the first step. Let N be the number of vertices of G, we introduce a set of integer variables {x ij } 1\u2264i<j\u2264N . The values of {x ij } are set as follows. If (i, j) \u2208 e(G ) then x ij = 1, otherwise x ij = 0. ILP formulations for the first step can be described as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ILP for Recognizing Logical Structures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "//--------Objective function --------// M aximize : 1\u2264i<j\u2264N f (i, j) * x ij (1) //----------Constraints ----------// Integer : {x ij } 1\u2264i<j\u2264N . (2) 0 \u2264 x ij \u2264 1, (1 \u2264 i < j \u2264 N ). (3) j\u22121 i=1 x ij + N k=j+1 x jk \u2265 1, (1 \u2264 j \u2264 N ). (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ILP for Recognizing Logical Structures", |
|
"sec_num": "3.2" |
|
}, |
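
{

"text": "As a concrete illustration of formulation (1)-(4), the following Python sketch solves it with the PuLP library, our substitute for the lp-solve tool mentioned in the paper; variable and function names are ours. Declaring the x_ij as binary variables covers constraints (2) and (3) directly.\n\nimport pulp\n\ndef best_subgraph(n, f):\n    # n: number of logical parts (vertices); f: dict {(i, j): edge value} for 0 <= i < j < n\n    prob = pulp.LpProblem('subgraph_selection', pulp.LpMaximize)\n    x = {(i, j): pulp.LpVariable('x_{}_{}'.format(i, j), cat='Binary')\n         for i in range(n) for j in range(i + 1, n)}\n    # objective (1): total value of the selected edges\n    prob += pulp.lpSum(f[i, j] * x[i, j] for (i, j) in x)\n    # constraint (4): every vertex keeps at least one selected incident edge\n    for v in range(n):\n        prob += pulp.lpSum(x[min(u, v), max(u, v)] for u in range(n) if u != v) >= 1\n    prob.solve(pulp.PULP_CBC_CMD(msg=False))\n    return [(i, j) for (i, j) in x if x[i, j].value() == 1]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "ILP for Recognizing Logical Structures",

"sec_num": "3.2"

},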
|
{ |
|
"text": "The last constraint guarantees that there is at least one edge connecting to each vertex in G .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ILP for Recognizing Logical Structures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The second step, finding all cliques of an undirected graph, is a famous problem in graph theory. Many algorithms have been proposed to solve this problem efficiently. In this work, we exploit the Bron-Kerbosch algorithm, a backtracking algorithm. The main idea of the Bron-Kerbosch algorithm is using a branch-and-bound technique to stop searching on branches that cannot lead to a clique (Bron and Kerbosch, 1973) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 390, |
|
"end": 415, |
|
"text": "(Bron and Kerbosch, 1973)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ILP for Recognizing Logical Structures", |
|
"sec_num": "3.2" |
|
}, |
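
{

"text": "For reference, a compact Python sketch of the basic Bron-Kerbosch recursion (the textbook form without pivoting; adj maps each vertex to the set of its neighbours in the selected sub-graph). Calling it with r and x as empty sets and p as the full vertex set collects every maximal clique, i.e. every candidate logical structure.\n\ndef bron_kerbosch(r, p, x, adj, cliques):\n    # r: current clique, p: candidate vertices, x: already-processed vertices\n    if not p and not x:\n        cliques.append(frozenset(r))   # r is a maximal clique\n        return\n    for v in list(p):\n        bron_kerbosch(r | {v}, p & adj[v], x & adj[v], adj, cliques)\n        p = p - {v}\n        x = x | {v}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "ILP for Recognizing Logical Structures",

"sec_num": "3.2"

},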
|
{ |
|
"text": "The remaining problem is how to define the value function f . Our solution is that, first we learn a binary classifier C using maximum entropy model. This classifier takes a pair of logical parts as the input, and outputs +1 if two logical parts belong to one logical structure, otherwise it will output \u22121. Then, we define the value function f for two logical parts as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ILP for Recognizing Logical Structures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "f (p 1 , p 2 ) = P rob(C(p 1 , p 2 ) = +1) \u2212 0.5. Function f will receive a value from \u22120.5 to +0.5, and it equals to zero in the case that the classifier assigns the same probability to +1 and \u22121.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ILP for Recognizing Logical Structures", |
|
"sec_num": "3.2" |
|
}, |
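
{

"text": "A minimal sketch of this value function, using the logistic regression of scikit-learn as a generic maximum entropy classifier; the paper itself uses the implementation of Tsuruoka, and train_pair_features, train_pair_labels and the per-pair feature extraction are assumed to exist elsewhere.\n\nfrom sklearn.linear_model import LogisticRegression\n\nclf = LogisticRegression(max_iter=1000)\nclf.fit(train_pair_features, train_pair_labels)   # labels: +1 same structure, -1 otherwise\n\ndef edge_value(pair_features):\n    # f(p1, p2) = Prob(C(p1, p2) = +1) - 0.5, so the value lies in [-0.5, +0.5]\n    positive_index = list(clf.classes_).index(1)\n    return clf.predict_proba([pair_features])[0][positive_index] - 0.5",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "ILP for Recognizing Logical Structures",

"sec_num": "3.2"

},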
|
{ |
|
"text": "We have built a corpus, Japanese National Pension Law (JNPL) corpus, which consists of 83 legal articles 6 of Japanese national pension law. The architecture of JNPL is shown in Figure 5 . The law 6 Because building corpus is an expensive and timeconsuming task, we only annotate a part of JNPL. consists of articles, articles consist of paragraphs, and paragraphs contain sentences. A sentence may belong to items, sub-items, or sub-sub-items of a paragraph. Figure 6 illustrates the relationship between a law sentence and logical parts. A law sentence may contain some logical parts, and a logical part may be embedded in another one. In our corpus, a logical part is annotated with information about its type (kind of part) and formula-id (logical parts with the same id will be- Figure 7 : An annotated sentence in the JNPL corpus. The sentence contains two logical structures with four logical parts. long to one logical structure). An example of annotated sentence in the JNPL corpus is shown in Figure 7 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 197, |
|
"end": 198, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 186, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF4" |
|
}, |
|
{ |
|
"start": 460, |
|
"end": 468, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF5" |
|
}, |
|
{ |
|
"start": 784, |
|
"end": 792, |
|
"text": "Figure 7", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1003, |
|
"end": 1011, |
|
"text": "Figure 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Corpus", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We employed two people in a data-making company, who analyzed and annotated our corpus. The corpus consists of 83 legal articles, which contain 119 paragraphs with 426 sentences. On average, each paragraph consists of 3.6 sentences. The total number of logical parts is 807, and the number of logical structures is 351. On average, each paragraph consists of 6.8 logical parts and 3 logical structures. Table 1 shows some statistics on the number of logical parts of each type. Main types of parts are A(35.4%), C(30.7%), T 2 (14.1%), ER(7.1%), and EL(6.8%). Five main types of parts make up more than 94% of all types.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 403, |
|
"end": 410, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Corpus", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We divided the JNLP corpus into 10 sets, and conducted 10-fold cross-validation tests. For the first sub-task, we evaluated the performance of our system by precision, recall, and F 1 scores as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methods", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "precision = |correct parts| |predicted parts| , recall = |correct parts| |actual parts| , F1 = 2 * precision * recall precision+recall .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methods", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "For the second sub-task, we used MUC precision, recall, and F 1 scores as described in (Vilain et al., 1995) . We summarize them here for clarity.", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 108, |
|
"text": "(Vilain et al., 1995)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methods", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Let P 1 , P 2 , . . . , P n be n predicted logical structures, and G 1 , G 2 , . . . , G m be the correct answers or gold logical structures. To calculate recall, for each gold logical structure G i (i = 1, 2, . . . , m), let k(G i ) be the smallest number such that there exist k(G i ) predicted structures", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methods", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "P i 1 , P i 2 , . . . , P i k(G i ) which satisfy G i \u2286 \u222a k(G i ) j=1 P i j : recall = m i=1 (|G i |\u2212k(G i )) m i=1 (|G i |\u22121)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methods", |
|
"sec_num": "4.2" |
|
}, |
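
{

"text": "A small Python sketch of this MUC-style recall (our own code; gold and predicted are lists of sets of part identifiers, and the brute-force search for k(G_i) is acceptable because a paragraph contains only a handful of structures; the sketch assumes every gold part is covered by some predicted structure, as the complete constraint guarantees in the gold-input setting).\n\nfrom itertools import combinations\n\ndef smallest_cover(g, predicted):\n    # k(G_i): the smallest number of predicted structures whose union contains g\n    for size in range(1, len(predicted) + 1):\n        for combo in combinations(predicted, size):\n            if g <= set().union(*combo):\n                return size\n    return len(g)   # fallback; not reached when every part is covered\n\ndef muc_recall(gold, predicted):\n    numerator = sum(len(g) - smallest_cover(g, predicted) for g in gold)\n    denominator = sum(len(g) - 1 for g in gold)\n    return numerator / denominator\n\n# Precision is obtained by switching the roles of gold and predicted structures.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation Methods",

"sec_num": "4.2"

},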
|
{ |
|
"text": ". To calculate precision, we switch the roles of predicted structures and gold structures. Finally, F 1 score is computed in a similar manner as in the first sub-task. We chose the Filter-Ranking (FR) Perceptron algorithm proposed by (Carreras and Marquez, 2005; Carreras et al., 2002) as our baseline model because of its effectiveness on phrase recognition problems, especially on problems that accept the embedded relationship 7 . We use FR-perceptron algorithm to recognize logical parts in law sentences one by one in an input paragraph. For beginning/end predictors, we got features of words, POS tags, and Bunsetsu 8 tags in a window size 2. Moreover, with beginning predictor, we used a feature for checking whether this position is the beginning of the sentence or not. Similarly, with end predictor, we use a feature for checking whether this position is the end of the sentence or not.", |
|
"cite_spans": [ |
|
{ |
|
"start": 234, |
|
"end": 262, |
|
"text": "(Carreras and Marquez, 2005;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 263, |
|
"end": 285, |
|
"text": "Carreras et al., 2002)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methods", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "With each logical part candidate, we extract following kinds of features:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments on", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "1. Length of the part 2. Internal structure: this feature is the concatenation of the top logical parts, punctuation marks, parenthesis, and quotes inside the candidate. An example about internal structure may be (A+, +C + .) (plus is used to concatenate items)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments on", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "3. Word (POS) uni-gram, word (POS) bi-gram, and word (POS) tri-gram.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments on", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "In our experiments, we focus on paragraphs in Type A, B, and C defined in (Takano et al., 2010) . In these types, the first sentence is the main sentence, which usually contains more logical parts than other sentences. The other sentences often have a few logical parts, and in most cases these logical parts only appear in one layer. The first sentences usually contain logical parts in two layers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 95, |
|
"text": "(Takano et al., 2010)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.3.2" |
|
}, |
|
{ |
|
"text": "We divided sentences into two groups. The first group consists of the first sentences in paragraphs, and the second group consists of other sentences. We set the number of layers k to 2 for sentences in the first group, and to 1 for sentences in the second group. To learn sequence labeling models, we used CRFs (Lafferty et al., 2001; Kudo, CRF toolkit) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 312, |
|
"end": 335, |
|
"text": "(Lafferty et al., 2001;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 336, |
|
"end": 354, |
|
"text": "Kudo, CRF toolkit)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.3.2" |
|
}, |
|
{ |
|
"text": "Experimental results on the JNPL corpus are described in Table 2 . We conducted experiments with four feature sets: words; words and POS tags; words and Bunsetsu tags; and words, POS tags, and Bunsetsu tags. To extract features from source sentences, we used the CaboCha tool (Kudo, Cabocha) , a Japanese morphological and syntactic analyzer. The best model (word and Bunsetsu tag features) achieved 74.37% in F 1 score. It improves 11.04% in F 1 score (30.11% in error rate) compared with the baseline model. Table 3 shows experimental results of our best model in more detail. Our model got good results on most main parts: C(78.98%), A(80.42%), and T 2 (82.14%). The model got low results on the other types of parts. It is understandable because three types of logical parts C, A, and T 2 make up more than 80%, while six other types only make up 20% of all types.", |
|
"cite_spans": [ |
|
{ |
|
"start": 276, |
|
"end": 291, |
|
"text": "(Kudo, Cabocha)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 64, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 510, |
|
"end": 517, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.3.2" |
|
}, |
|
{ |
|
"text": "Our baseline is a heuristic algorithm to solve this sub-task on graphs. This is an approximate algorithm which satisfies minimal, complete, maximal, and significant constraints. The main idea of our algorithm is picking up as many positive edges as possible, and as few negative edges as possible. We consider two cases: 1) There is no positive value edge on the input graph; and 2) There are some positive value edges on the input graph.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline: a Heuristic Algorithm", |
|
"sec_num": "4.4.1" |
|
}, |
|
{ |
|
"text": "In the first case, because all the edges have negative values, we build logical structures with as few logical parts as possible. In this case, each logical structure contains exactly two logical parts. So we gradually choose two nodes in the graph with the maximum value on the edge connecting them. An example of the first case is illustrated in Figure 8 . The maximum value on an edge is \u22120.1, so the first logical structure will contain node 1 and node 3. The second logical structure contains node 2 and node 4 9 . In the second case, we first consider the subgraph which only contains non-negative value edges. In this sub-graph, we repeatedly build logical structures with as many logical parts as possi- ble. After building successfully a logical structure, we remove all the nodes and the edges according to it on the graph. When have no positive edge, we will build logical structures with exactly two logical parts.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 348, |
|
"end": 356, |
|
"text": "Figure 8", |
|
"ref_id": "FIGREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Baseline: a Heuristic Algorithm", |
|
"sec_num": "4.4.1" |
|
}, |
|
{ |
|
"text": "An example of the second case is illustrated in Figure 9 . First, we consider the subgraph with positive edges. This sub-graph consists of five nodes {1, 2, 3, 4, 5} and four edges {(1, 2), (1, 3), (2, 3), (2, 4)}. First, we have a logical structure with three nodes {1, 2, 3}. We remove these nodes and the positive edges connecting to these nodes. We have two nodes {4, 5} with no positive edges. Now we build logical structures with exactly two nodes. We consider node 4. Among edges connecting to node 4, the edge (2, 4) has maximal value. So we have the second logical structure with two nodes {2, 4}. Next, we consider node 5, and we have the third logical structure with two nodes {1, 5}.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 56, |
|
"text": "Figure 9", |
|
"ref_id": "FIGREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Baseline: a Heuristic Algorithm", |
|
"sec_num": "4.4.1" |
|
}, |
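
{

"text": "One possible reading of this heuristic, as a simplified Python sketch (our own code and naming; nodes is a list of integer part identifiers, f is a symmetric dict of edge values, and the exact tie-breaking and processing order of the paper may differ).\n\ndef heuristic_structures(nodes, f):\n    def value(u, v):\n        return f[min(u, v), max(u, v)]\n    structures, uncovered = [], set(nodes)\n    # Phase 1: greedily grow cliques over positive edges among uncovered nodes.\n    while True:\n        positive = [(value(u, v), u, v) for u in uncovered for v in uncovered\n                    if u < v and value(u, v) > 0]\n        if not positive:\n            break\n        _, u, v = max(positive)\n        clique = {u, v}\n        grew = True\n        while grew:\n            grew = False\n            for w in sorted(uncovered - clique):\n                if all(value(w, c) > 0 for c in clique):\n                    clique.add(w)\n                    grew = True\n        structures.append(clique)\n        uncovered -= clique\n    # Phase 2: pair every remaining node with the partner of maximal edge value.\n    while uncovered:\n        u, partner = max(((a, b) for a in uncovered for b in nodes if b != a),\n                         key=lambda pair: value(*pair))\n        structures.append({u, partner})\n        uncovered.discard(u)\n        uncovered.discard(partner)\n    return structures",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Baseline: a Heuristic Algorithm",

"sec_num": "4.4.1"

},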
|
{ |
|
"text": "In our experiments, to learn a maximum entropy binary classification we used the implementation of Tsuruoka (Tsuruoka, MEM) . With a pair of logical parts, we extracted the following features (and combinations of them):", |
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 123, |
|
"text": "(Tsuruoka, MEM)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Categories of two parts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Layers of two parts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.4.2" |
|
}, |
|
{ |
|
"text": "\u2022 The positions of the sentences that contain two parts (the first sentence or not).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Categories of other parts in the input paragraph.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.4.2" |
|
}, |
|
{ |
|
"text": "We conducted experiments on this sub-task in two settings. In the first setting, we used annotated logical parts (gold inputs) as the inputs to the system. The purpose of this experiment is to evaluate the performance of the graph-based method on Sub-task 2. In the second setting, predicted logical parts (end-to-end) outputted by the Sub-task 1 were used as the inputs to the system. The purpose of this experiment is to evaluate the performance of our framework on the whole task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.4.2" |
|
}, |
|
{ |
|
"text": "In the second setting, end-to-end setting, because input logical parts may differ from the correct logical parts, we need to modify the MUC scores. Let P 1 , P 2 , . . . , P n be n predicted logical structures, and G 1 , G 2 , . . . , G m be the gold logical structures. For each gold logical structure G i (i = 1, 2, . . . , m), let D i be the set of logical parts in G i which are not included in the set of input logical parts. D i = {p \u2208 G i |p / \u2208 \u222a n j=1 P j }. Let k(G i ) be the smallest number such that there exist k(G i ) predicted structures P i 1 , P i 2 , . . . , P i k(G i ) which satisfy", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.4.2" |
|
}, |
|
{ |
|
"text": "G i \u2286 (\u222a k(G i ) j=1 P i j ) \u222a D i . recall = m i=1 (|G i |\u2212|D i |\u2212k(G i )) m i=1 (|G i |\u22121)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.4.2" |
|
}, |
|
{ |
|
"text": ". To calculate the precision, we switch the roles of predicted structures and gold structures. Table 4 shows experimental results on the second sub-task. The ILP model outperformed the baseline model in both settings. It improved 3.70% in the F 1 score (15.35% in error rate) in the gold-input setting, and 4.61% in the F 1 score (9.43% in error rate) in the end-to-end setting compared with the baseline model (heuristic algorithm).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 102, |
|
"text": "Table 4", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4.4.2" |
|
}, |
|
{ |
|
"text": "We have introduced the task of learning logical structures of paragraphs in legal articles, a new task which has been studied in research on Legal Engineering. We presented the Japanese National Pension Law corpus, an annotated corpus of real legal articles for the task. We also described a two-phase framework with multi-layer sequence learning model and ILP formulation to complete the task. Our results provide a baseline for further researches on this interesting task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In the future, we will continue to improve this task. On the other hand, we also investigate the task of translating legal articles into logical and formal representations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Usually, the first sentence is the main sentence.2 The task of Recognition of Requisite part and Effectuation part in law sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We only recognize logical structures (a set of related logical parts). The task of translating legal articles into logical and formal representations is not covered in this paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Because law sentences are very long and complicated, we use toy sentences to illustrate the task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We used lp-solve from http://lpsolve.sourceforge.net/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We re-implement the FR-perceptron algorithm by ourself.8 In Japanese, a Bunsetsu is an unit of sentence which is similar to a chunk in English.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "If the number of nodes is odd, the final logical structure will consist of the final node and another node, so that the edge connecting them has the maximal value.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was partly supported by the 21st Century COE program 'Verifiable and Evolvable e-Society', Grant-in-Aid for Scientific Research, Education and Research Center for Trustworthy e-Society, and JAIST Overseas Training Program for 3D Program Students.We would like to give special thanks to Kenji Takano and Yoshiko Oyama, who analyzed law sentences and built the corpus, and the reviewers, who gave us valuable comments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A Study on Recognition of Requisite Part and Effectuation Part in Law Sentences", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"X" |
|
], |
|
"last": "Bach", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "School of Information Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "N.X. Bach. 2011a. A Study on Recognition of Requisite Part and Effectuation Part in Law Sen- tences. Master Thesis, School of Information Sci- ence, Japan Advanced Institute of Science and Tech- nology.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "RRE Task: The Task of Recognition of Requisite Part and Effectuation Part in Law Sentences", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"X" |
|
], |
|
"last": "Bach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Minh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Shimazu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "In International Journal of Computer Processing Of Languages (IJCPOL)", |
|
"volume": "23", |
|
"issue": "2", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "N.X. Bach, N.L. Minh, A. Shimazu. 2011b. RRE Task: The Task of Recognition of Requisite Part and Effectuation Part in Law Sentences. In Inter- national Journal of Computer Processing Of Lan- guages (IJCPOL), Volume 23, Number 2.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Exploring Contributions of Words to Recognition of Requisite Part and Effectuation Part in Law Sentences", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"X" |
|
], |
|
"last": "Bach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Minh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Shimazu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of JURISIN", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "121--132", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "N.X. Bach, N.L. Minh, A. Shimazu. 2010. Exploring Contributions of Words to Recognition of Requisite Part and Effectuation Part in Law Sentences. In Pro- ceedings of JURISIN, pp. 121-132.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Algorithm 457: Finding All Cliques of an Undirected Graph", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Bron", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Kerbosch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1973, |
|
"venue": "Communications of the ACM", |
|
"volume": "16", |
|
"issue": "", |
|
"pages": "575--577", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Bron and J. Kerbosch. 1973. Algorithm 457: Find- ing All Cliques of an Undirected Graph. In Commu- nications of the ACM, Volume 16, Issue 9, pp. 575- 577.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Filtering-Ranking Perceptron Learning for Partial Parsing", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Carreras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Marquez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Machine Learning", |
|
"volume": "60", |
|
"issue": "", |
|
"pages": "41--71", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "X. Carreras and L. Marquez. 2005. Filtering-Ranking Perceptron Learning for Partial Parsing. In Machine Learning, Volume 60, Issue 1-3, pp. 41-71.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Learning and Inference for Clause Identification", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Carreras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Marquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Punyakanok", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of ECML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "35--47", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "X. Carreras, L. Marquez, V. Punyakanok, D. Roth. 2002. Learning and Inference for Clause Identifi- cation. In Proceedings of ECML, pp. 35-47.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Global Inference for Sentence Compression: An Integer Linear Programming Approach", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Clarke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "In Journal of Artificial Intelligence Research", |
|
"volume": "31", |
|
"issue": "", |
|
"pages": "399--429", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Clarke and M. Lapata. 2008. Global Inference for Sentence Compression: An Integer Linear Program- ming Approach. In Journal of Artificial Intelligence Research (JAIR), Volume 31, pp. 399-429.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Joint Determination of Anaphoricity and Coreference Resolution Using Integer Programming", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Denis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Baldridge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of HLT-NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "236--243", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Denis and J. Baldridge. 2007. Joint Determination of Anaphoricity and Coreference Resolution Us- ing Integer Programming. In Proceedings of HLT- NAACL, pp. 236-243.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Legal Engineering -An Engineering Approach to Laws in e-Society Age", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Katayama", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of JURISIN", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Katayama. 2007. Legal Engineering -An Engineer- ing Approach to Laws in e-Society Age. In Proceed- ings of JURISIN.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Treatment of Legal Sentences Including Itemized and Referential Expressions -Towards Translation into Logical Forms", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Kimura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Nakamura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Shimazu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "New Frontiers in Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "242--253", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y. Kimura, M. Nakamura, A. Shimazu. Treatment of Legal Sentences Including Itemized and Referen- tial Expressions -Towards Translation into Logical Forms. New Frontiers in Artificial Intelligence, vol- ume 5447 of LNAI, pp.242-253.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Yet Another Japanese Dependency Structure Analyzer", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Kudo. Yet Another Japanese Depen- dency Structure Analyzer. http://chasen.org/", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "CRF++: Yet Another CRF toolkit", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Kudo. CRF++: Yet Another CRF toolkit. http://crfpp.sourceforge.net/.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Conditional Random Fields: Probabilistic Models for Segmenting and Labeling Sequence Data", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "282--289", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Lafferty, A. McCallum, F. Pereira. 2001. Condi- tional Random Fields: Probabilistic Models for Seg- menting and Labeling Sequence Data. In Proceed- ings of ICML, pp.282-289.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Concise Integer Linear Programming Formulations for Dependency Parsing", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"F T" |
|
], |
|
"last": "Martins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Xing", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "342--350", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A.F.T. Martins, N.A. Smith, E.P. Xing. 2009. Concise Integer Linear Programming Formulations for De- pendency Parsing. In Proceedings of ACL, pp.342- 350.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Towards Translation of Legal Sentences into Logical Forms", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Nakamura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Nobuoka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Shimazu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of JURISIN", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Nakamura, S. Nobuoka, A. Shimazu. 2007. To- wards Translation of Legal Sentences into Logical Forms. In Proceedings of JURISIN.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Semantic Role Labeling Via Integer Linear Programming Inference", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Punyakanok", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Zimak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1346--1352", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V. Punyakanok, D. Roth, W. Yih, D. Zimak. 2004. Se- mantic Role Labeling Via Integer Linear Program- ming Inference. In Proceedings of COLING, pp. 1346-1352.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Semantic Analysis of Paragraphs Consisting of Multiple Sentences -Towards Development of a Logical Formulation System", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Takano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Nakamura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Oyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Shimazu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of JU-RIX", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "117--126", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. Takano, M. Nakamura, Y. Oyama, A. Shimazu. 2010. Semantic Analysis of Paragraphs Consisting of Multiple Sentences -Towards Development of a Logical Formulation System. In Proceedings of JU- RIX, pp. 117-126.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Standard Structure of Legal Provisions -for the Legal Knowledge Processing by Natural Language", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Tanaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Kawazoe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Narita", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "IPSJ Research Report on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "79--86", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. Tanaka, I. Kawazoe, H. Narita. 1993 Stan- dard Structure of Legal Provisions -for the Legal Knowledge Processing by Natural Language -(in Japanese). In IPSJ Research Report on Natural Lan- guage Processing, pp. 79-86.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A simple C++ library for maximum entropy classification", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Tsuruoka", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y. Tsuruoka. A simple C++ library for maxi- mum entropy classification. http://www-tsujii.is.s.u- tokyo.ac.jp/ tsuruoka/maxent/.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A Model-Theoretic Coreference Scoring Scheme", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Vilain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Burger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Aberdeen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Connolly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Hirschman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Proceedings of MUC-6", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "45--52", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Vilain, J. Burger, J. Aberdeen, D. Connolly, L. Hirschman. 1995. A Model-Theoretic Coreference Scoring Scheme. In Proceedings of MUC-6, pp. 45- 52.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Two cases of inputs and outputs of the task." |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "An example in natural language (E means Effectuation part, R means Requisite part, and LS means Logical Structure)." |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "A law sentence with logical parts in three layers." |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "An example of labeling in the multilayer model." |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "The architecture of JNPL." |
|
}, |
|
"FIGREF5": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Relationship between a sentence and logical parts." |
|
}, |
|
"FIGREF7": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "An example of the first case." |
|
}, |
|
"FIGREF8": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "An example of the second case." |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Logical Part</td><td>C</td><td>A</td><td colspan=\"3\">T 1 T 2 T 3 EL ER Ob RepO RepR</td></tr><tr><td>Number</td><td colspan=\"3\">248 286 0 114 12 55 57</td><td>9</td><td>12</td><td>14</td></tr></table>", |
|
"text": "Statistics on logical parts of the JNPL corpus", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"4\">: Experimental results for Sub-task 1 on the</td></tr><tr><td colspan=\"4\">JNLP corpus(W:Word; P: POS tag; B: Bunsetsu</td></tr><tr><td>tag)</td><td/><td/><td/></tr><tr><td>Model</td><td colspan=\"3\">Prec(%) Recall(%) F 1 (%)</td></tr><tr><td>Baseline</td><td>79.70</td><td>52.54</td><td>63.33</td></tr><tr><td>W</td><td>79.18</td><td>69.27</td><td>73.89</td></tr><tr><td>W+P</td><td>77.62</td><td>68.77</td><td>72.93</td></tr><tr><td>W+B W+P+B</td><td>79.63 77.89</td><td>69.76 69.39</td><td>74.37 73.39</td></tr></table>", |
|
"text": "", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"4\">Logical Part Prec(%) Recall(%) F 1 (%)</td></tr><tr><td>C EL</td><td>83.41 76.74</td><td>75.00 60.00</td><td>78.98 67.35</td></tr><tr><td>ER</td><td>41.94</td><td>22.81</td><td>29.55</td></tr><tr><td>Ob</td><td>0.00</td><td>0.00</td><td>0.00</td></tr><tr><td>A RepO</td><td>80.42 100</td><td>80.42 16.67</td><td>80.42 28.57</td></tr><tr><td>RepR</td><td>100</td><td>28.57</td><td>44.44</td></tr><tr><td>T 2 T 3 Overall</td><td>83.64 60.00 79.63</td><td>80.70 25.00 69.76</td><td>82.14 35.29 74.37</td></tr></table>", |
|
"text": "Experimental results in more details", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td colspan=\"3\">Experiments on Sub-task 2</td></tr><tr><td/><td colspan=\"3\">Gold Input Setting</td></tr><tr><td>Model Heuristic ILP</td><td colspan=\"3\">Prec(%) Recall(%) F 1 (%) 71.19 75.89 81.24 76.56 82.87 79.59</td></tr><tr><td/><td colspan=\"3\">End-to-End Setting</td></tr><tr><td>Model</td><td colspan=\"3\">Prec(%) Recall(%) F 1 (%)</td></tr><tr><td>Heuristic</td><td>54.88</td><td>47.84</td><td>51.12</td></tr><tr><td>ILP</td><td>57.51</td><td>54.06</td><td>55.73</td></tr></table>", |
|
"text": "", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |