applied-ai-018
committed on
Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- llmeval-env/lib/python3.10/site-packages/nltk/classify/api.py +195 -0
- llmeval-env/lib/python3.10/site-packages/nltk/classify/decisiontree.py +349 -0
- llmeval-env/lib/python3.10/site-packages/nltk/classify/megam.py +184 -0
- llmeval-env/lib/python3.10/site-packages/nltk/classify/naivebayes.py +260 -0
- llmeval-env/lib/python3.10/site-packages/nltk/classify/positivenaivebayes.py +180 -0
- llmeval-env/lib/python3.10/site-packages/nltk/classify/rte_classify.py +183 -0
- llmeval-env/lib/python3.10/site-packages/nltk/classify/scikitlearn.py +143 -0
- llmeval-env/lib/python3.10/site-packages/nltk/classify/senna.py +176 -0
- llmeval-env/lib/python3.10/site-packages/nltk/classify/textcat.py +197 -0
- llmeval-env/lib/python3.10/site-packages/nltk/classify/util.py +346 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/all.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/childes_fixt.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/classify_fixt.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/conftest.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/gensim_fixt.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/gluesemantics_malt_fixt.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/portuguese_en_fixt.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/probability_fixt.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/setup_fixt.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/collocations.doctest +307 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/conftest.py +33 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/dependency.doctest +241 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/discourse.doctest +552 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/featgram.doctest +610 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/gensim_fixt.py +4 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/gluesemantics.doctest +383 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/gluesemantics_malt_fixt.py +9 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/grammar.doctest +69 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/index.doctest +100 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/inference.doctest +536 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/japanese.doctest +48 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/meteor.doctest +54 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/nonmonotonic.doctest +293 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/paice.doctest +35 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/parse.doctest +933 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/portuguese_en.doctest +568 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/semantics.doctest +667 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/treetransforms.doctest +154 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_bllip.py +42 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_collocations.py +120 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_corpus_views.py +48 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_freqdist.py +7 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_json2csv_corpus.py +210 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_json_serialization.py +95 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_rte_classify.py +94 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_seekable_unicode_stream_reader.py +86 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_tokenize.py +867 -0
- llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_wordnet.py +240 -0
- llmeval-env/lib/python3.10/site-packages/nltk/twitter/__pycache__/__init__.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/nltk/classify/api.py
ADDED
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit: Classifier Interface
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Edward Loper <edloper@gmail.com>
|
5 |
+
# Steven Bird <stevenbird1@gmail.com> (minor additions)
|
6 |
+
# URL: <https://www.nltk.org/>
|
7 |
+
# For license information, see LICENSE.TXT
|
8 |
+
|
9 |
+
"""
|
10 |
+
Interfaces for labeling tokens with category labels (or "class labels").
|
11 |
+
|
12 |
+
``ClassifierI`` is a standard interface for "single-category
|
13 |
+
classification", in which the set of categories is known, the number
|
14 |
+
of categories is finite, and each text belongs to exactly one
|
15 |
+
category.
|
16 |
+
|
17 |
+
``MultiClassifierI`` is a standard interface for "multi-category
|
18 |
+
classification", which is like single-category classification except
|
19 |
+
that each text belongs to zero or more categories.
|
20 |
+
"""
|
21 |
+
from nltk.internals import overridden
|
22 |
+
|
23 |
+
##//////////////////////////////////////////////////////
|
24 |
+
# { Classification Interfaces
|
25 |
+
##//////////////////////////////////////////////////////
|
26 |
+
|
27 |
+
|
28 |
+
class ClassifierI:
    """
    A processing interface for assigning a single category label (or
    "class") to a token.  Labels are usually strs or ints, though any
    immutable type is acceptable.  The pool of labels a classifier may
    choose from must be fixed and finite.

    Subclasses must define:
      - ``labels()``
      - either ``classify()`` or ``classify_many()`` (or both)

    Subclasses may define:
      - either ``prob_classify()`` or ``prob_classify_many()`` (or both)
    """

    def labels(self):
        """
        :return: the list of category labels used by this classifier.
        :rtype: list of (immutable)
        """
        raise NotImplementedError()

    def classify(self, featureset):
        """
        :return: the most appropriate label for the given featureset.
        :rtype: label
        """
        # Delegate to the batch entry point when a subclass supplied
        # one; otherwise neither entry point was implemented.
        if not overridden(self.classify_many):
            raise NotImplementedError()
        return self.classify_many([featureset])[0]

    def prob_classify(self, featureset):
        """
        :return: a probability distribution over labels for the given
            featureset.
        :rtype: ProbDistI
        """
        # Mirror classify(): fall back on the batch method if present.
        if not overridden(self.prob_classify_many):
            raise NotImplementedError()
        return self.prob_classify_many([featureset])[0]

    def classify_many(self, featuresets):
        """
        Apply ``self.classify()`` to every element of ``featuresets``,
        i.e. ``[self.classify(fs) for fs in featuresets]``.

        :rtype: list(label)
        """
        return [self.classify(fs) for fs in featuresets]

    def prob_classify_many(self, featuresets):
        """
        Apply ``self.prob_classify()`` to every element of
        ``featuresets``, i.e.
        ``[self.prob_classify(fs) for fs in featuresets]``.

        :rtype: list(ProbDistI)
        """
        return [self.prob_classify(fs) for fs in featuresets]
|
90 |
+
|
91 |
+
|
92 |
+
class MultiClassifierI:
    """
    A processing interface for assigning zero or more category labels
    to a token.  Labels are usually strs or ints, though any immutable
    type is acceptable.  The pool of labels a multi-classifier may
    choose from must be fixed and finite.

    Subclasses must define:
      - ``labels()``
      - either ``classify()`` or ``classify_many()`` (or both)

    Subclasses may define:
      - either ``prob_classify()`` or ``prob_classify_many()`` (or both)
    """

    def labels(self):
        """
        :return: the list of category labels used by this classifier.
        :rtype: list of (immutable)
        """
        raise NotImplementedError()

    def classify(self, featureset):
        """
        :return: the most appropriate set of labels for the given featureset.
        :rtype: set(label)
        """
        # Delegate to the batch entry point when a subclass supplied
        # one; otherwise neither entry point was implemented.
        if not overridden(self.classify_many):
            raise NotImplementedError()
        return self.classify_many([featureset])[0]

    def prob_classify(self, featureset):
        """
        :return: a probability distribution over sets of labels for the
            given featureset.
        :rtype: ProbDistI
        """
        # Mirror classify(): fall back on the batch method if present.
        if not overridden(self.prob_classify_many):
            raise NotImplementedError()
        return self.prob_classify_many([featureset])[0]

    def classify_many(self, featuresets):
        """
        Apply ``self.classify()`` to every element of ``featuresets``,
        i.e. ``[self.classify(fs) for fs in featuresets]``.

        :rtype: list(set(label))
        """
        return [self.classify(fs) for fs in featuresets]

    def prob_classify_many(self, featuresets):
        """
        Apply ``self.prob_classify()`` to every element of
        ``featuresets``, i.e.
        ``[self.prob_classify(fs) for fs in featuresets]``.

        :rtype: list(ProbDistI)
        """
        return [self.prob_classify(fs) for fs in featuresets]
|
154 |
+
|
155 |
+
|
156 |
+
# # [XX] IN PROGRESS:
|
157 |
+
# class SequenceClassifierI:
|
158 |
+
# """
|
159 |
+
# A processing interface for labeling sequences of tokens with a
|
160 |
+
# single category label (or "class"). Labels are typically
|
161 |
+
# strs or ints, but can be any immutable type. The set
|
162 |
+
# of labels that the classifier chooses from must be fixed and
|
163 |
+
# finite.
|
164 |
+
# """
|
165 |
+
# def labels(self):
|
166 |
+
# """
|
167 |
+
# :return: the list of category labels used by this classifier.
|
168 |
+
# :rtype: list of (immutable)
|
169 |
+
# """
|
170 |
+
# raise NotImplementedError()
|
171 |
+
|
172 |
+
# def prob_classify(self, featureset):
|
173 |
+
# """
|
174 |
+
# Return a probability distribution over labels for the given
|
175 |
+
# featureset.
|
176 |
+
|
177 |
+
# If ``featureset`` is a list of featuresets, then return a
|
178 |
+
# corresponding list containing the probability distribution
|
179 |
+
# over labels for each of the given featuresets, where the
|
180 |
+
# *i*\ th element of this list is the most appropriate label for
|
181 |
+
# the *i*\ th element of ``featuresets``.
|
182 |
+
# """
|
183 |
+
# raise NotImplementedError()
|
184 |
+
|
185 |
+
# def classify(self, featureset):
|
186 |
+
# """
|
187 |
+
# Return the most appropriate label for the given featureset.
|
188 |
+
|
189 |
+
# If ``featureset`` is a list of featuresets, then return a
|
190 |
+
# corresponding list containing the most appropriate label for
|
191 |
+
# each of the given featuresets, where the *i*\ th element of
|
192 |
+
# this list is the most appropriate label for the *i*\ th element
|
193 |
+
# of ``featuresets``.
|
194 |
+
# """
|
195 |
+
# raise NotImplementedError()
|
llmeval-env/lib/python3.10/site-packages/nltk/classify/decisiontree.py
ADDED
@@ -0,0 +1,349 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit: Decision Tree Classifiers
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Edward Loper <edloper@gmail.com>
|
5 |
+
# URL: <https://www.nltk.org/>
|
6 |
+
# For license information, see LICENSE.TXT
|
7 |
+
|
8 |
+
"""
|
9 |
+
A classifier model that decides which label to assign to a token on
|
10 |
+
the basis of a tree structure, where branches correspond to conditions
|
11 |
+
on feature values, and leaves correspond to label assignments.
|
12 |
+
"""
|
13 |
+
|
14 |
+
from collections import defaultdict
|
15 |
+
|
16 |
+
from nltk.classify.api import ClassifierI
|
17 |
+
from nltk.probability import FreqDist, MLEProbDist, entropy
|
18 |
+
|
19 |
+
|
20 |
+
class DecisionTreeClassifier(ClassifierI):
    """
    A classifier model that assigns a label to a token by descending a
    tree structure: internal nodes test the value of a single feature,
    and leaves carry label assignments.
    """

    def __init__(self, label, feature_name=None, decisions=None, default=None):
        """
        :param label: The most likely label for tokens that reach
            this node in the decision tree.  If this decision tree
            has no children, then this label will be assigned to
            any token that reaches this decision tree.
        :param feature_name: The name of the feature that this
            decision tree selects for.
        :param decisions: A dictionary mapping from feature values
            for the feature identified by ``feature_name`` to
            child decision trees.
        :param default: The child that will be used if the value of
            feature ``feature_name`` does not match any of the keys in
            ``decisions``.  This is used when constructing binary
            decision trees.
        """
        self._label = label
        self._fname = feature_name
        self._decisions = decisions
        self._default = default

    def labels(self):
        """
        :return: the de-duplicated list of labels used anywhere in this
            tree (this node, its branches, and its default child).
        """
        labels = [self._label]
        if self._decisions is not None:
            for dt in self._decisions.values():
                labels.extend(dt.labels())
        if self._default is not None:
            labels.extend(self._default.labels())
        return list(set(labels))

    def classify(self, featureset):
        """
        :return: the label assigned to ``featureset`` by descending this
            tree; a leaf returns its own label, and an unmatched feature
            value falls back on the default child (if any) or on this
            node's label.
        """
        # Decision leaf:
        if self._fname is None:
            return self._label

        # Decision tree:
        fval = featureset.get(self._fname)
        if fval in self._decisions:
            return self._decisions[fval].classify(featureset)
        elif self._default is not None:
            return self._default.classify(featureset)
        else:
            # No matching branch and no default child: fall back on the
            # most likely label seen at this node during training.
            return self._label

    def error(self, labeled_featuresets):
        """
        :return: the fraction of ``labeled_featuresets`` that this tree
            misclassifies.

        NOTE(review): raises ZeroDivisionError if ``labeled_featuresets``
        is empty — callers are expected to pass non-empty training data.
        """
        errors = 0
        for featureset, label in labeled_featuresets:
            if self.classify(featureset) != label:
                errors += 1
        return errors / len(labeled_featuresets)

    def pretty_format(self, width=70, prefix="", depth=4):
        """
        Return a string containing a pretty-printed version of this
        decision tree.  Each line in this string corresponds to a
        single decision tree node or leaf, and indentation is used to
        display the structure of the decision tree.
        """
        # [xx] display default!!
        if self._fname is None:
            n = width - len(prefix) - 15
            return "{}{} {}\n".format(prefix, "." * n, self._label)
        s = ""
        # Sort branches so that None/False/True values sort after all
        # other values, which sort case-insensitively by string form.
        for i, (fval, result) in enumerate(
            sorted(
                self._decisions.items(),
                key=lambda item: (item[0] in [None, False, True], str(item[0]).lower()),
            )
        ):
            hdr = f"{prefix}{self._fname}={fval}? "
            n = width - 15 - len(hdr)
            s += "{}{} {}\n".format(hdr, "." * (n), result._label)
            if result._fname is not None and depth > 1:
                s += result.pretty_format(width, prefix + " ", depth - 1)
        if self._default is not None:
            n = width - len(prefix) - 21
            s += "{}else: {} {}\n".format(prefix, "." * n, self._default._label)
            if self._default._fname is not None and depth > 1:
                s += self._default.pretty_format(width, prefix + " ", depth - 1)
        return s

    def pseudocode(self, prefix="", depth=4):
        """
        Return a string representation of this decision tree that
        expresses the decisions it makes as a nested set of pseudocode
        if statements.
        """
        if self._fname is None:
            return f"{prefix}return {self._label!r}\n"
        s = ""
        # Same branch ordering as pretty_format().
        for (fval, result) in sorted(
            self._decisions.items(),
            key=lambda item: (item[0] in [None, False, True], str(item[0]).lower()),
        ):
            s += f"{prefix}if {self._fname} == {fval!r}: "
            if result._fname is not None and depth > 1:
                s += "\n" + result.pseudocode(prefix + " ", depth - 1)
            else:
                s += f"return {result._label!r}\n"
        if self._default is not None:
            # A binary stump has a single decision, so phrase the
            # default branch as "!=" rather than "else".
            if len(self._decisions) == 1:
                s += "{}if {} != {!r}: ".format(
                    prefix, self._fname, list(self._decisions.keys())[0]
                )
            else:
                s += f"{prefix}else: "
            if self._default._fname is not None and depth > 1:
                s += "\n" + self._default.pseudocode(prefix + " ", depth - 1)
            else:
                s += f"return {self._default._label!r}\n"
        return s

    def __str__(self):
        return self.pretty_format()

    @staticmethod
    def train(
        labeled_featuresets,
        entropy_cutoff=0.05,
        depth_cutoff=100,
        support_cutoff=10,
        binary=False,
        feature_values=None,
        verbose=False,
    ):
        """
        Build a decision tree: pick the best single-feature stump, then
        recursively refine its branches.

        :param binary: If true, then treat all feature/value pairs as
            individual binary features, rather than using a single n-way
            branch for each feature.
        """
        # Collect a list of all feature names.
        feature_names = set()
        for featureset, label in labeled_featuresets:
            for fname in featureset:
                feature_names.add(fname)

        # Collect a list of the values each feature can take.
        if feature_values is None and binary:
            feature_values = defaultdict(set)
            for featureset, label in labeled_featuresets:
                for fname, fval in featureset.items():
                    feature_values[fname].add(fval)

        # Start with a stump.
        if not binary:
            tree = DecisionTreeClassifier.best_stump(
                feature_names, labeled_featuresets, verbose
            )
        else:
            tree = DecisionTreeClassifier.best_binary_stump(
                feature_names, labeled_featuresets, feature_values, verbose
            )

        # Refine the stump.
        tree.refine(
            labeled_featuresets,
            entropy_cutoff,
            depth_cutoff - 1,
            support_cutoff,
            binary,
            feature_values,
            verbose,
        )

        # Return it
        return tree

    @staticmethod
    def leaf(labeled_featuresets):
        # A leaf simply predicts the most frequent label in the data.
        label = FreqDist(label for (featureset, label) in labeled_featuresets).max()
        return DecisionTreeClassifier(label)

    @staticmethod
    def stump(feature_name, labeled_featuresets):
        # One n-way branch on feature_name; each branch predicts the
        # most frequent label among tokens with that feature value.
        label = FreqDist(label for (featureset, label) in labeled_featuresets).max()

        # Find the best label for each value.
        freqs = defaultdict(FreqDist)  # freq(label|value)
        for featureset, label in labeled_featuresets:
            feature_value = featureset.get(feature_name)
            freqs[feature_value][label] += 1

        decisions = {val: DecisionTreeClassifier(freqs[val].max()) for val in freqs}
        return DecisionTreeClassifier(label, feature_name, decisions)

    def refine(
        self,
        labeled_featuresets,
        entropy_cutoff,
        depth_cutoff,
        support_cutoff,
        binary=False,
        feature_values=None,
        verbose=False,
    ):
        """
        Recursively replace each branch (and the default child) whose
        data is still impure (label entropy above ``entropy_cutoff``)
        with a subtree trained on just that branch's data.  Stops when
        too few examples remain, this node is a leaf, or the depth
        budget is exhausted.
        """
        if len(labeled_featuresets) <= support_cutoff:
            return
        if self._fname is None:
            return
        if depth_cutoff <= 0:
            return
        for fval in self._decisions:
            fval_featuresets = [
                (featureset, label)
                for (featureset, label) in labeled_featuresets
                if featureset.get(self._fname) == fval
            ]

            label_freqs = FreqDist(label for (featureset, label) in fval_featuresets)
            if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:
                self._decisions[fval] = DecisionTreeClassifier.train(
                    fval_featuresets,
                    entropy_cutoff,
                    depth_cutoff,
                    support_cutoff,
                    binary,
                    feature_values,
                    verbose,
                )
        if self._default is not None:
            default_featuresets = [
                (featureset, label)
                for (featureset, label) in labeled_featuresets
                if featureset.get(self._fname) not in self._decisions
            ]
            label_freqs = FreqDist(label for (featureset, label) in default_featuresets)
            if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:
                self._default = DecisionTreeClassifier.train(
                    default_featuresets,
                    entropy_cutoff,
                    depth_cutoff,
                    support_cutoff,
                    binary,
                    feature_values,
                    verbose,
                )

    @staticmethod
    def best_stump(feature_names, labeled_featuresets, verbose=False):
        # Start from a plain leaf and keep any stump that lowers the
        # training error.
        best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)
        best_error = best_stump.error(labeled_featuresets)
        for fname in feature_names:
            stump = DecisionTreeClassifier.stump(fname, labeled_featuresets)
            stump_error = stump.error(labeled_featuresets)
            if stump_error < best_error:
                best_error = stump_error
                best_stump = stump
        if verbose:
            # BUGFIX: if no stump beat the leaf, best_stump._fname is
            # None, and "{:20}".format(None) raises TypeError (object.
            # __format__ rejects a non-empty format spec).  Use the same
            # "(default)" fallback as best_binary_stump().
            descr = best_stump._fname if best_stump._fname is not None else "(default)"
            print(
                "best stump for {:6d} toks uses {:20} err={:6.4f}".format(
                    len(labeled_featuresets), descr, best_error
                )
            )
        return best_stump

    @staticmethod
    def binary_stump(feature_name, feature_value, labeled_featuresets):
        # A single yes/no test: does featureset[feature_name] equal
        # feature_value?  The "no" side becomes the default child.
        label = FreqDist(label for (featureset, label) in labeled_featuresets).max()

        # Find the best label for each value.
        pos_fdist = FreqDist()
        neg_fdist = FreqDist()
        for featureset, label in labeled_featuresets:
            if featureset.get(feature_name) == feature_value:
                pos_fdist[label] += 1
            else:
                neg_fdist[label] += 1

        decisions = {}
        default = label
        # But hopefully we have observations!
        if pos_fdist.N() > 0:
            decisions = {feature_value: DecisionTreeClassifier(pos_fdist.max())}
        if neg_fdist.N() > 0:
            default = DecisionTreeClassifier(neg_fdist.max())

        return DecisionTreeClassifier(label, feature_name, decisions, default)

    @staticmethod
    def best_binary_stump(
        feature_names, labeled_featuresets, feature_values, verbose=False
    ):
        # Like best_stump(), but considers every (feature, value) pair
        # as a candidate binary test.
        best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)
        best_error = best_stump.error(labeled_featuresets)
        for fname in feature_names:
            for fval in feature_values[fname]:
                stump = DecisionTreeClassifier.binary_stump(
                    fname, fval, labeled_featuresets
                )
                stump_error = stump.error(labeled_featuresets)
                if stump_error < best_error:
                    best_error = stump_error
                    best_stump = stump
        if verbose:
            if best_stump._decisions:
                descr = "{}={}".format(
                    best_stump._fname, list(best_stump._decisions.keys())[0]
                )
            else:
                descr = "(default)"
            print(
                "best stump for {:6d} toks uses {:20} err={:6.4f}".format(
                    len(labeled_featuresets), descr, best_error
                )
            )
        return best_stump
|
327 |
+
|
328 |
+
|
329 |
+
##//////////////////////////////////////////////////////
|
330 |
+
## Demo
|
331 |
+
##//////////////////////////////////////////////////////
|
332 |
+
|
333 |
+
|
334 |
+
def f(x):
    # Named wrapper used by demo(): trains a binary decision tree with
    # verbose output, with the keyword arguments fixed in advance.
    return DecisionTreeClassifier.train(x, binary=True, verbose=True)
|
336 |
+
|
337 |
+
|
338 |
+
def demo():
    """Train a binary decision tree via the names demo and print both its
    pretty-printed form and its pseudocode rendering."""
    from nltk.classify.util import binary_names_demo_features, names_demo

    # f wraps DecisionTreeClassifier.train with binary=True, verbose=True.
    classifier = names_demo(
        f, binary_names_demo_features  # DecisionTreeClassifier.train,
    )
    print(classifier.pretty_format(depth=7))
    print(classifier.pseudocode(depth=7))
|
346 |
+
|
347 |
+
|
348 |
+
# Run the demonstration when this module is executed as a script.
if __name__ == "__main__":
    demo()
|
llmeval-env/lib/python3.10/site-packages/nltk/classify/megam.py
ADDED
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit: Interface to Megam Classifier
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Edward Loper <edloper@gmail.com>
|
5 |
+
# URL: <https://www.nltk.org/>
|
6 |
+
# For license information, see LICENSE.TXT
|
7 |
+
|
8 |
+
"""
|
9 |
+
A set of functions used to interface with the external megam_ maxent
|
10 |
+
optimization package. Before megam can be used, you should tell NLTK where it
|
11 |
+
can find the megam binary, using the ``config_megam()`` function. Typical
|
12 |
+
usage:
|
13 |
+
|
14 |
+
>>> from nltk.classify import megam
|
15 |
+
>>> megam.config_megam() # pass path to megam if not found in PATH # doctest: +SKIP
|
16 |
+
[Found megam: ...]
|
17 |
+
|
18 |
+
Use with MaxentClassifier. Example below, see MaxentClassifier documentation
|
19 |
+
for details.
|
20 |
+
|
21 |
+
nltk.classify.MaxentClassifier.train(corpus, 'megam')
|
22 |
+
|
23 |
+
.. _megam: https://www.umiacs.umd.edu/~hal/megam/index.html
|
24 |
+
"""
|
25 |
+
import subprocess
|
26 |
+
|
27 |
+
from nltk.internals import find_binary
|
28 |
+
|
29 |
+
try:
|
30 |
+
import numpy
|
31 |
+
except ImportError:
|
32 |
+
numpy = None
|
33 |
+
|
34 |
+
######################################################################
|
35 |
+
# { Configuration
|
36 |
+
######################################################################
|
37 |
+
|
38 |
+
_megam_bin = None
|
39 |
+
|
40 |
+
|
41 |
+
def config_megam(bin=None):
    """
    Configure NLTK's interface to the ``megam`` maxent optimization
    package.

    :param bin: The full path to the ``megam`` binary.  If not specified,
        then nltk will search the system for a ``megam`` binary; and if
        one is not found, it will raise a ``LookupError`` exception.
    :type bin: str
    """
    global _megam_bin
    # Cache the located binary path in the module-level _megam_bin so
    # that call_megam() can use it without re-searching the system.
    _megam_bin = find_binary(
        "megam",
        bin,
        env_vars=["MEGAM"],
        binary_names=["megam.opt", "megam", "megam_686", "megam_i686.opt"],
        url="https://www.umiacs.umd.edu/~hal/megam/index.html",
    )
|
59 |
+
|
60 |
+
|
61 |
+
######################################################################
|
62 |
+
# { Megam Interface Functions
|
63 |
+
######################################################################
|
64 |
+
|
65 |
+
|
66 |
+
def write_megam_file(train_toks, encoding, stream, bernoulli=True, explicit=True):
    """
    Write a ``megam`` input file describing the given corpus of
    classified tokens, one line per training instance.

    :type train_toks: list(tuple(dict, str))
    :param train_toks: Training data: a list of (feature dictionary,
        classification label) pairs.

    :type encoding: MaxentFeatureEncodingI
    :param encoding: A feature encoding used to turn featuresets into
        feature vectors.  If it provides a ``cost()`` method, per-label
        costs are written instead of label indices.

    :type stream: stream
    :param stream: The destination stream for the megam input file.

    :param bernoulli: If true, use megam's 'bernoulli' format: every
        joint feature is binary and is listed iff it is true.  If false,
        feature values are written explicitly, and megam must be invoked
        with ``-fvals``.

    :param explicit: If true, use megam's 'explicit' format: for each
        token, list the features that would fire under every candidate
        label.  When true, megam must be invoked with ``-explicit``.
    """
    # Map each label to its index, as megam identifies labels by number.
    labels = encoding.labels()
    labelnum = {label: i for (i, label) in enumerate(labels)}

    # cost() availability does not change per token, so check it once.
    has_costs = hasattr(encoding, "cost")

    for featureset, label in train_toks:
        # Instance header: either a colon-separated cost per candidate
        # label (weighted multiclass), or the index of the true label.
        if has_costs:
            stream.write(
                ":".join(str(encoding.cost(featureset, label, l)) for l in labels)
            )
        else:
            stream.write("%d" % labelnum[label])

        if explicit:
            # Explicit format: one "#"-prefixed feature list per
            # candidate label.
            for l in labels:
                stream.write(" #")
                _write_megam_features(encoding.encode(featureset, l), stream, bernoulli)
        else:
            # Implicit format: only the features firing for the true
            # label.
            _write_megam_features(encoding.encode(featureset, label), stream, bernoulli)

        # One instance per line.
        stream.write("\n")
|
124 |
+
|
125 |
+
|
126 |
+
def parse_megam_weights(s, features_count, explicit=True):
    """
    Parse the stdout produced by ``megam`` while training a model and
    return the learned weight vector as a ``numpy`` array of length
    ``features_count``.  Bias features are not currently handled.
    """
    if numpy is None:
        raise ValueError("This function requires that numpy be installed")
    assert explicit, "non-explicit not supported yet"
    weights = numpy.zeros(features_count, "d")
    # Each non-blank line has the form "<feature-id> <weight>".
    for entry in s.strip().split("\n"):
        if not entry.strip():
            continue
        index, value = entry.split()
        weights[int(index)] = float(value)
    return weights
|
142 |
+
|
143 |
+
|
144 |
+
def _write_megam_features(vector, stream, bernoulli):
|
145 |
+
if not vector:
|
146 |
+
raise ValueError(
|
147 |
+
"MEGAM classifier requires the use of an " "always-on feature."
|
148 |
+
)
|
149 |
+
for (fid, fval) in vector:
|
150 |
+
if bernoulli:
|
151 |
+
if fval == 1:
|
152 |
+
stream.write(" %s" % fid)
|
153 |
+
elif fval != 0:
|
154 |
+
raise ValueError(
|
155 |
+
"If bernoulli=True, then all" "features must be binary."
|
156 |
+
)
|
157 |
+
else:
|
158 |
+
stream.write(f" {fid} {fval}")
|
159 |
+
|
160 |
+
|
161 |
+
def call_megam(args):
    """
    Call the ``megam`` binary with the given arguments.

    :param args: command-line arguments to pass to ``megam``.
    :type args: list(str)
    :return: megam's standard output, decoded to ``str``.
    :raises TypeError: if ``args`` is a single string rather than a list.
    :raises OSError: if the ``megam`` command exits with a nonzero status.
    """
    if isinstance(args, str):
        raise TypeError("args should be a list of strings")
    if _megam_bin is None:
        config_megam()

    # Call megam via a subprocess.  Capture stderr as well as stdout:
    # previously stderr was not piped, so ``communicate()`` returned
    # ``stderr=None`` and the failure branch below printed ``None``
    # instead of megam's diagnostic output.
    cmd = [_megam_bin] + args
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()

    # Check the return code.
    if p.returncode != 0:
        print()
        print(stderr)
        raise OSError("megam command failed!")

    if isinstance(stdout, str):
        return stdout
    else:
        return stdout.decode("utf-8")
|
llmeval-env/lib/python3.10/site-packages/nltk/classify/naivebayes.py
ADDED
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit: Naive Bayes Classifiers
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Edward Loper <edloper@gmail.com>
|
5 |
+
# URL: <https://www.nltk.org/>
|
6 |
+
# For license information, see LICENSE.TXT
|
7 |
+
|
8 |
+
"""
|
9 |
+
A classifier based on the Naive Bayes algorithm. In order to find the
|
10 |
+
probability for a label, this algorithm first uses the Bayes rule to
|
11 |
+
express P(label|features) in terms of P(label) and P(features|label):
|
12 |
+
|
13 |
+
| P(label) * P(features|label)
|
14 |
+
| P(label|features) = ------------------------------
|
15 |
+
| P(features)
|
16 |
+
|
17 |
+
The algorithm then makes the 'naive' assumption that all features are
|
18 |
+
independent, given the label:
|
19 |
+
|
20 |
+
| P(label) * P(f1|label) * ... * P(fn|label)
|
21 |
+
| P(label|features) = --------------------------------------------
|
22 |
+
| P(features)
|
23 |
+
|
24 |
+
Rather than computing P(features) explicitly, the algorithm just
|
25 |
+
calculates the numerator for each label, and normalizes them so they
|
26 |
+
sum to one:
|
27 |
+
|
28 |
+
| P(label) * P(f1|label) * ... * P(fn|label)
|
29 |
+
| P(label|features) = --------------------------------------------
|
30 |
+
| SUM[l]( P(l) * P(f1|l) * ... * P(fn|l) )
|
31 |
+
"""
|
32 |
+
|
33 |
+
from collections import defaultdict
|
34 |
+
|
35 |
+
from nltk.classify.api import ClassifierI
|
36 |
+
from nltk.probability import DictionaryProbDist, ELEProbDist, FreqDist, sum_logs
|
37 |
+
|
38 |
+
##//////////////////////////////////////////////////////
|
39 |
+
## Naive Bayes Classifier
|
40 |
+
##//////////////////////////////////////////////////////
|
41 |
+
|
42 |
+
|
43 |
+
class NaiveBayesClassifier(ClassifierI):
    """
    A Naive Bayes classifier.  Naive Bayes classifiers are
    paramaterized by two probability distributions:

    - P(label) gives the probability that an input will receive each
      label, given no information about the input's features.

    - P(fname=fval|label) gives the probability that a given feature
      (fname) will receive a given value (fval), given that the
      label (label).

    If the classifier encounters an input with a feature that has
    never been seen with any label, then rather than assigning a
    probability of 0 to all labels, it will ignore that feature.

    The feature value 'None' is reserved for unseen feature values;
    you generally should not use 'None' as a feature value for one of
    your own features.
    """

    def __init__(self, label_probdist, feature_probdist):
        """
        :param label_probdist: P(label), the probability distribution
            over labels.  It is expressed as a ``ProbDistI`` whose
            samples are labels.  I.e., P(label) =
            ``label_probdist.prob(label)``.

        :param feature_probdist: P(fname=fval|label), the probability
            distribution for feature values, given labels.  It is
            expressed as a dictionary whose keys are ``(label, fname)``
            pairs and whose values are ``ProbDistI`` objects over feature
            values.  I.e., P(fname=fval|label) =
            ``feature_probdist[label,fname].prob(fval)``.  If a given
            ``(label,fname)`` is not a key in ``feature_probdist``, then
            it is assumed that the corresponding P(fname=fval|label)
            is 0 for all values of ``fval``.
        """
        self._label_probdist = label_probdist
        self._feature_probdist = feature_probdist
        # Cache the label list once so it need not be recomputed on
        # every classification call.
        self._labels = list(label_probdist.samples())

    def labels(self):
        """Return the list of labels this classifier may assign."""
        return self._labels

    def classify(self, featureset):
        """Return the single most probable label for ``featureset``."""
        return self.prob_classify(featureset).max()

    def prob_classify(self, featureset):
        """
        Return a ``DictionaryProbDist`` giving P(label|featureset) for
        every known label.  Feature names never seen with any label are
        dropped first, rather than zeroing out every label.
        """
        # Discard any feature names that we've never seen before.
        # Otherwise, we'll just assign a probability of 0 to
        # everything.
        featureset = featureset.copy()
        for fname in list(featureset.keys()):
            # for/else: the else runs only if no label knows this fname.
            for label in self._labels:
                if (label, fname) in self._feature_probdist:
                    break
            else:
                # print('Ignoring unseen feature %s' % fname)
                del featureset[fname]

        # Find the log probability of each label, given the features.
        # Start with the log probability of the label itself.
        logprob = {}
        for label in self._labels:
            logprob[label] = self._label_probdist.logprob(label)

        # Then add in the log probability of features given labels.
        for label in self._labels:
            for (fname, fval) in featureset.items():
                if (label, fname) in self._feature_probdist:
                    feature_probs = self._feature_probdist[label, fname]
                    logprob[label] += feature_probs.logprob(fval)
                else:
                    # nb: This case will never come up if the
                    # classifier was created by
                    # NaiveBayesClassifier.train().
                    logprob[label] += sum_logs([])  # = -INF.

        return DictionaryProbDist(logprob, normalize=True, log=True)

    def show_most_informative_features(self, n=10):
        """Print a table of the ``n`` most informative features, with
        the ratio between the most- and least-likely label for each."""
        # Determine the most relevant features, and display them.
        cpdist = self._feature_probdist
        print("Most Informative Features")

        for (fname, fval) in self.most_informative_features(n):

            def labelprob(l):
                return cpdist[l, fname].prob(fval)

            # Sorted so that labels[0] has the LOWEST probability for
            # this feature value (key is (-prob, label) with
            # reverse=True, i.e. ascending probability).
            labels = sorted(
                (l for l in self._labels if fval in cpdist[l, fname].samples()),
                key=lambda element: (-labelprob(element), element),
                reverse=True,
            )
            # A feature seen with only one label has no ratio to show.
            if len(labels) == 1:
                continue
            l0 = labels[0]
            l1 = labels[-1]
            if cpdist[l0, fname].prob(fval) == 0:
                ratio = "INF"
            else:
                ratio = "%8.1f" % (
                    cpdist[l1, fname].prob(fval) / cpdist[l0, fname].prob(fval)
                )
            print(
                "%24s = %-14r %6s : %-6s = %s : 1.0"
                % (fname, fval, ("%s" % l1)[:6], ("%s" % l0)[:6], ratio)
            )

    def most_informative_features(self, n=100):
        """
        Return a list of the 'most informative' features used by this
        classifier.  For the purpose of this function, the
        informativeness of a feature ``(fname,fval)`` is equal to the
        highest value of P(fname=fval|label), for any label, divided by
        the lowest value of P(fname=fval|label), for any label:

        |  max[ P(fname=fval|label1) / P(fname=fval|label2) ]
        """
        if hasattr(self, "_most_informative_features"):
            # Reuse the cached, pre-sorted feature list.
            return self._most_informative_features[:n]
        else:
            # The set of (fname, fval) pairs used by this classifier.
            features = set()
            # The max & min probability associated w/ each (fname, fval)
            # pair.  Maps (fname,fval) -> float.
            maxprob = defaultdict(lambda: 0.0)
            minprob = defaultdict(lambda: 1.0)

            for (label, fname), probdist in self._feature_probdist.items():
                for fval in probdist.samples():
                    feature = (fname, fval)
                    features.add(feature)
                    p = probdist.prob(fval)
                    maxprob[feature] = max(p, maxprob[feature])
                    minprob[feature] = min(p, minprob[feature])
                    # A zero minimum would make the ratio infinite, so
                    # such features are dropped.
                    if minprob[feature] == 0:
                        features.discard(feature)

            # Convert features to a list, & sort it by how informative
            # features are.  Smallest min/max ratio (most informative)
            # first; remaining key parts are deterministic tiebreakers.
            self._most_informative_features = sorted(
                features,
                key=lambda feature_: (
                    minprob[feature_] / maxprob[feature_],
                    feature_[0],
                    feature_[1] in [None, False, True],
                    str(feature_[1]).lower(),
                ),
            )
            return self._most_informative_features[:n]

    @classmethod
    def train(cls, labeled_featuresets, estimator=ELEProbDist):
        """
        :param labeled_featuresets: A list of classified featuresets,
            i.e., a list of tuples ``(featureset, label)``.
        :param estimator: a ``ProbDistI`` factory used to smooth the
            raw frequency counts (expected-likelihood by default).
        """
        label_freqdist = FreqDist()
        feature_freqdist = defaultdict(FreqDist)
        feature_values = defaultdict(set)
        fnames = set()

        # Count up how many times each feature value occurred, given
        # the label and featurename.
        for featureset, label in labeled_featuresets:
            label_freqdist[label] += 1
            for fname, fval in featureset.items():
                # Increment freq(fval|label, fname)
                feature_freqdist[label, fname][fval] += 1
                # Record that fname can take the value fval.
                feature_values[fname].add(fval)
                # Keep a list of all feature names.
                fnames.add(fname)

        # If a feature didn't have a value given for an instance, then
        # we assume that it gets the implicit value 'None.'  This loop
        # counts up the number of 'missing' feature values for each
        # (label,fname) pair, and increments the count of the fval
        # 'None' by that amount.
        for label in label_freqdist:
            num_samples = label_freqdist[label]
            for fname in fnames:
                count = feature_freqdist[label, fname].N()
                # Only add a None key when necessary, i.e. if there are
                # any samples with feature 'fname' missing.
                if num_samples - count > 0:
                    feature_freqdist[label, fname][None] += num_samples - count
                    feature_values[fname].add(None)

        # Create the P(label) distribution
        label_probdist = estimator(label_freqdist)

        # Create the P(fval|label, fname) distribution
        feature_probdist = {}
        for ((label, fname), freqdist) in feature_freqdist.items():
            probdist = estimator(freqdist, bins=len(feature_values[fname]))
            feature_probdist[label, fname] = probdist

        return cls(label_probdist, feature_probdist)
|
245 |
+
|
246 |
+
|
247 |
+
##//////////////////////////////////////////////////////
|
248 |
+
## Demo
|
249 |
+
##//////////////////////////////////////////////////////
|
250 |
+
|
251 |
+
|
252 |
+
def demo():
    """Train a NaiveBayesClassifier on the names corpus and display
    its most informative features."""
    from nltk.classify.util import names_demo

    trained = names_demo(NaiveBayesClassifier.train)
    trained.show_most_informative_features()
|
257 |
+
|
258 |
+
|
259 |
+
# Allow running this module directly to exercise the demo.
if __name__ == "__main__":
    demo()
|
llmeval-env/lib/python3.10/site-packages/nltk/classify/positivenaivebayes.py
ADDED
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit: Positive Naive Bayes Classifier
|
2 |
+
#
|
3 |
+
# Copyright (C) 2012 NLTK Project
|
4 |
+
# Author: Alessandro Presta <alessandro.presta@gmail.com>
|
5 |
+
# URL: <https://www.nltk.org/>
|
6 |
+
# For license information, see LICENSE.TXT
|
7 |
+
|
8 |
+
"""
|
9 |
+
A variant of the Naive Bayes Classifier that performs binary classification with
|
10 |
+
partially-labeled training sets. In other words, assume we want to build a classifier
|
11 |
+
that assigns each example to one of two complementary classes (e.g., male names and
|
12 |
+
female names).
|
13 |
+
If we have a training set with labeled examples for both classes, we can use a
|
14 |
+
standard Naive Bayes Classifier. However, consider the case when we only have labeled
|
15 |
+
examples for one of the classes, and other, unlabeled, examples.
|
16 |
+
Then, assuming a prior distribution on the two labels, we can use the unlabeled set
|
17 |
+
to estimate the frequencies of the various features.
|
18 |
+
|
19 |
+
Let the two possible labels be 1 and 0, and let's say we only have examples labeled 1
|
20 |
+
and unlabeled examples. We are also given an estimate of P(1).
|
21 |
+
|
22 |
+
We compute P(feature|1) exactly as in the standard case.
|
23 |
+
|
24 |
+
To compute P(feature|0), we first estimate P(feature) from the unlabeled set (we are
|
25 |
+
assuming that the unlabeled examples are drawn according to the given prior distribution)
|
26 |
+
and then express the conditional probability as:
|
27 |
+
|
28 |
+
| P(feature) - P(feature|1) * P(1)
|
29 |
+
| P(feature|0) = ----------------------------------
|
30 |
+
| P(0)
|
31 |
+
|
32 |
+
Example:
|
33 |
+
|
34 |
+
>>> from nltk.classify import PositiveNaiveBayesClassifier
|
35 |
+
|
36 |
+
Some sentences about sports:
|
37 |
+
|
38 |
+
>>> sports_sentences = [ 'The team dominated the game',
|
39 |
+
... 'They lost the ball',
|
40 |
+
... 'The game was intense',
|
41 |
+
... 'The goalkeeper catched the ball',
|
42 |
+
... 'The other team controlled the ball' ]
|
43 |
+
|
44 |
+
Mixed topics, including sports:
|
45 |
+
|
46 |
+
>>> various_sentences = [ 'The President did not comment',
|
47 |
+
... 'I lost the keys',
|
48 |
+
... 'The team won the game',
|
49 |
+
... 'Sara has two kids',
|
50 |
+
... 'The ball went off the court',
|
51 |
+
... 'They had the ball for the whole game',
|
52 |
+
... 'The show is over' ]
|
53 |
+
|
54 |
+
The features of a sentence are simply the words it contains:
|
55 |
+
|
56 |
+
>>> def features(sentence):
|
57 |
+
... words = sentence.lower().split()
|
58 |
+
... return dict(('contains(%s)' % w, True) for w in words)
|
59 |
+
|
60 |
+
We use the sports sentences as positive examples, the mixed ones ad unlabeled examples:
|
61 |
+
|
62 |
+
>>> positive_featuresets = map(features, sports_sentences)
|
63 |
+
>>> unlabeled_featuresets = map(features, various_sentences)
|
64 |
+
>>> classifier = PositiveNaiveBayesClassifier.train(positive_featuresets,
|
65 |
+
... unlabeled_featuresets)
|
66 |
+
|
67 |
+
Is the following sentence about sports?
|
68 |
+
|
69 |
+
>>> classifier.classify(features('The cat is on the table'))
|
70 |
+
False
|
71 |
+
|
72 |
+
What about this one?
|
73 |
+
|
74 |
+
>>> classifier.classify(features('My team lost the game'))
|
75 |
+
True
|
76 |
+
"""
|
77 |
+
|
78 |
+
from collections import defaultdict
|
79 |
+
|
80 |
+
from nltk.classify.naivebayes import NaiveBayesClassifier
|
81 |
+
from nltk.probability import DictionaryProbDist, ELEProbDist, FreqDist
|
82 |
+
|
83 |
+
##//////////////////////////////////////////////////////
|
84 |
+
## Positive Naive Bayes Classifier
|
85 |
+
##//////////////////////////////////////////////////////
|
86 |
+
|
87 |
+
|
88 |
+
class PositiveNaiveBayesClassifier(NaiveBayesClassifier):
    """Naive Bayes variant trained from positive plus unlabeled
    examples; see the module docstring for the derivation."""

    @staticmethod
    def train(
        positive_featuresets,
        unlabeled_featuresets,
        positive_prob_prior=0.5,
        estimator=ELEProbDist,
    ):
        """
        :param positive_featuresets: An iterable of featuresets that are known as positive
            examples (i.e., their label is ``True``).

        :param unlabeled_featuresets: An iterable of featuresets whose label is unknown.

        :param positive_prob_prior: A prior estimate of the probability of the label
            ``True`` (default 0.5).

        :param estimator: a ``ProbDistI`` factory used to smooth the raw
            frequency counts.
        :return: a trained ``PositiveNaiveBayesClassifier``.
        """
        positive_feature_freqdist = defaultdict(FreqDist)
        unlabeled_feature_freqdist = defaultdict(FreqDist)
        feature_values = defaultdict(set)
        fnames = set()

        # Count up how many times each feature value occurred in positive examples.
        num_positive_examples = 0
        for featureset in positive_featuresets:
            for fname, fval in featureset.items():
                positive_feature_freqdist[fname][fval] += 1
                feature_values[fname].add(fval)
                fnames.add(fname)
            num_positive_examples += 1

        # Count up how many times each feature value occurred in unlabeled examples.
        num_unlabeled_examples = 0
        for featureset in unlabeled_featuresets:
            for fname, fval in featureset.items():
                unlabeled_feature_freqdist[fname][fval] += 1
                feature_values[fname].add(fval)
                fnames.add(fname)
            num_unlabeled_examples += 1

        # If a feature didn't have a value given for an instance, then we assume that
        # it gets the implicit value 'None'.
        for fname in fnames:
            count = positive_feature_freqdist[fname].N()
            positive_feature_freqdist[fname][None] += num_positive_examples - count
            feature_values[fname].add(None)

        for fname in fnames:
            count = unlabeled_feature_freqdist[fname].N()
            unlabeled_feature_freqdist[fname][None] += num_unlabeled_examples - count
            feature_values[fname].add(None)

        negative_prob_prior = 1.0 - positive_prob_prior

        # Create the P(label) distribution.
        label_probdist = DictionaryProbDist(
            {True: positive_prob_prior, False: negative_prob_prior}
        )

        # Create the P(fval|label, fname) distribution.
        feature_probdist = {}
        for fname, freqdist in positive_feature_freqdist.items():
            probdist = estimator(freqdist, bins=len(feature_values[fname]))
            feature_probdist[True, fname] = probdist

        for fname, freqdist in unlabeled_feature_freqdist.items():
            global_probdist = estimator(freqdist, bins=len(feature_values[fname]))
            negative_feature_probs = {}
            for fval in feature_values[fname]:
                # Solve P(f) = P(f|1)P(1) + P(f|0)P(0) for P(f|0):
                #   P(f|0) = (P(f) - P(f|1) * P(1)) / P(0)
                prob = (
                    global_probdist.prob(fval)
                    - positive_prob_prior * feature_probdist[True, fname].prob(fval)
                ) / negative_prob_prior
                # TODO: We need to add some kind of smoothing here, instead of
                # setting negative probabilities to zero and normalizing.
                negative_feature_probs[fval] = max(prob, 0.0)
            feature_probdist[False, fname] = DictionaryProbDist(
                negative_feature_probs, normalize=True
            )

        return PositiveNaiveBayesClassifier(label_probdist, feature_probdist)
|
169 |
+
|
170 |
+
|
171 |
+
##//////////////////////////////////////////////////////
|
172 |
+
## Demo
|
173 |
+
##//////////////////////////////////////////////////////
|
174 |
+
|
175 |
+
|
176 |
+
def demo():
    """Run the partial-names demo and print the classifier's most
    informative features."""
    from nltk.classify.util import partial_names_demo

    trained = partial_names_demo(PositiveNaiveBayesClassifier.train)
    trained.show_most_informative_features()
|
llmeval-env/lib/python3.10/site-packages/nltk/classify/rte_classify.py
ADDED
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit: RTE Classifier
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Ewan Klein <ewan@inf.ed.ac.uk>
|
5 |
+
# URL: <https://www.nltk.org/>
|
6 |
+
# For license information, see LICENSE.TXT
|
7 |
+
|
8 |
+
"""
|
9 |
+
Simple classifier for RTE corpus.
|
10 |
+
|
11 |
+
It calculates the overlap in words and named entities between text and
|
12 |
+
hypothesis, and also whether there are words / named entities in the
|
13 |
+
hypothesis which fail to occur in the text, since this is an indicator that
|
14 |
+
the hypothesis is more informative than (i.e not entailed by) the text.
|
15 |
+
|
16 |
+
TO DO: better Named Entity classification
|
17 |
+
TO DO: add lemmatization
|
18 |
+
"""
|
19 |
+
|
20 |
+
from nltk.classify.maxent import MaxentClassifier
|
21 |
+
from nltk.classify.util import accuracy
|
22 |
+
from nltk.tokenize import RegexpTokenizer
|
23 |
+
|
24 |
+
|
25 |
+
class RTEFeatureExtractor:
    """
    This builds a bag of words for both the text and the hypothesis after
    throwing away some stopwords, then calculates overlap and difference.
    """

    def __init__(self, rtepair, stop=True, use_lemmatize=False):
        """
        :param rtepair: a ``RTEPair`` from which features should be extracted
        :param stop: if ``True``, stopwords are thrown away.
        :type stop: bool
        :param use_lemmatize: if ``True``, reduce tokens to WordNet verb
            base forms before comparing the two word sets.
        :type use_lemmatize: bool
        """
        self.stop = stop
        # Small hand-picked stopword list (deliberately not the full
        # NLTK stopword corpus).
        self.stopwords = {
            "a",
            "the",
            "it",
            "they",
            "of",
            "in",
            "to",
            "is",
            "have",
            "are",
            "were",
            "and",
            "very",
            ".",
            ",",
        }

        # Negation markers, used by rte_features for neg_txt / neg_hyp.
        self.negwords = {"no", "not", "never", "failed", "rejected", "denied"}
        # Try to tokenize so that abbreviations, monetary amounts, email
        # addresses, URLs are single tokens.
        tokenizer = RegexpTokenizer(r"[\w.@:/]+|\w+|\$[\d.]+")

        # Get the set of word types for text and hypothesis
        self.text_tokens = tokenizer.tokenize(rtepair.text)
        self.hyp_tokens = tokenizer.tokenize(rtepair.hyp)
        self.text_words = set(self.text_tokens)
        self.hyp_words = set(self.hyp_tokens)

        if use_lemmatize:
            self.text_words = {self._lemmatize(token) for token in self.text_tokens}
            self.hyp_words = {self._lemmatize(token) for token in self.hyp_tokens}

        if self.stop:
            self.text_words = self.text_words - self.stopwords
            self.hyp_words = self.hyp_words - self.stopwords

        # Precompute the shared words and the words unique to each side.
        self._overlap = self.hyp_words & self.text_words
        self._hyp_extra = self.hyp_words - self.text_words
        self._txt_extra = self.text_words - self.hyp_words

    def overlap(self, toktype, debug=False):
        """
        Compute the overlap between text and hypothesis.

        :param toktype: distinguish Named Entities from ordinary words
        :type toktype: 'ne' or 'word'
        :raises ValueError: if ``toktype`` is neither 'ne' nor 'word'.
        """
        ne_overlap = {token for token in self._overlap if self._ne(token)}
        if toktype == "ne":
            if debug:
                print("ne overlap", ne_overlap)
            return ne_overlap
        elif toktype == "word":
            if debug:
                print("word overlap", self._overlap - ne_overlap)
            return self._overlap - ne_overlap
        else:
            raise ValueError("Type not recognized:'%s'" % toktype)

    def hyp_extra(self, toktype, debug=True):
        """
        Compute the extraneous material in the hypothesis.

        :param toktype: distinguish Named Entities from ordinary words
        :type toktype: 'ne' or 'word'
        :raises ValueError: if ``toktype`` is neither 'ne' nor 'word'.
        """
        ne_extra = {token for token in self._hyp_extra if self._ne(token)}
        if toktype == "ne":
            return ne_extra
        elif toktype == "word":
            return self._hyp_extra - ne_extra
        else:
            raise ValueError("Type not recognized: '%s'" % toktype)

    @staticmethod
    def _ne(token):
        """
        This just assumes that words in all caps or titles are
        named entities.

        :type token: str
        """
        if token.istitle() or token.isupper():
            return True
        return False

    @staticmethod
    def _lemmatize(word):
        """
        Use morphy from WordNet to find the base form of verbs.
        Falls back to the original word when no lemma is found.
        """
        from nltk.corpus import wordnet as wn

        lemma = wn.morphy(word, pos=wn.VERB)
        if lemma is not None:
            return lemma
        return word
|
136 |
+
|
137 |
+
|
138 |
+
def rte_features(rtepair):
    """Build the feature dictionary for one RTE text/hypothesis pair."""
    extractor = RTEFeatureExtractor(rtepair)
    return {
        "alwayson": True,
        "word_overlap": len(extractor.overlap("word")),
        "word_hyp_extra": len(extractor.hyp_extra("word")),
        "ne_overlap": len(extractor.overlap("ne")),
        "ne_hyp_extra": len(extractor.hyp_extra("ne")),
        "neg_txt": len(extractor.negwords & extractor.text_words),
        "neg_hyp": len(extractor.negwords & extractor.hyp_words),
    }
|
149 |
+
|
150 |
+
|
151 |
+
def rte_featurize(rte_pairs):
    """Convert RTE pairs into (featureset, label) training tuples."""
    featurized = []
    for pair in rte_pairs:
        featurized.append((rte_features(pair), pair.value))
    return featurized
|
153 |
+
|
154 |
+
|
155 |
+
def rte_classifier(algorithm, sample_N=None):
    """
    Train and evaluate a MaxEnt-based RTE classifier on the RTE 1-3
    dev/test sets.

    :param algorithm: one of ``'megam'``, ``'GIS'`` or ``'IIS'``,
        passed straight through to ``MaxentClassifier.train``.
    :param sample_N: if given, truncate both the training and test
        sets to their first ``sample_N`` pairs (useful for quick runs).
    :return: the trained classifier.
    :raises Exception: if ``algorithm`` is not one of the supported names.
    """
    from nltk.corpus import rte as rte_corpus

    train_set = rte_corpus.pairs(["rte1_dev.xml", "rte2_dev.xml", "rte3_dev.xml"])
    test_set = rte_corpus.pairs(["rte1_test.xml", "rte2_test.xml", "rte3_test.xml"])

    if sample_N is not None:
        train_set = train_set[:sample_N]
        test_set = test_set[:sample_N]

    featurized_train_set = rte_featurize(train_set)
    featurized_test_set = rte_featurize(test_set)

    # Train the classifier.  Every supported algorithm name ('megam',
    # 'GIS', 'IIS') is dispatched to the same MaxentClassifier.train
    # call, so validate the name once instead of duplicating the call
    # in separate identical branches.
    print("Training classifier...")
    if algorithm not in ("megam", "GIS", "IIS"):
        err_msg = str(
            "RTEClassifier only supports these algorithms:\n "
            "'megam', 'GIS', 'IIS'.\n"
        )
        raise Exception(err_msg)
    clf = MaxentClassifier.train(featurized_train_set, algorithm)
    print("Testing classifier...")
    acc = accuracy(clf, featurized_test_set)
    print("Accuracy: %6.4f" % acc)
    return clf
|
llmeval-env/lib/python3.10/site-packages/nltk/classify/scikitlearn.py
ADDED
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit: Interface to scikit-learn classifiers
|
2 |
+
#
|
3 |
+
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
|
4 |
+
# URL: <https://www.nltk.org/>
|
5 |
+
# For license information, see LICENSE.TXT
|
6 |
+
"""
|
7 |
+
scikit-learn (https://scikit-learn.org) is a machine learning library for
|
8 |
+
Python. It supports many classification algorithms, including SVMs,
|
9 |
+
Naive Bayes, logistic regression (MaxEnt) and decision trees.
|
10 |
+
|
11 |
+
This package implements a wrapper around scikit-learn classifiers. To use this
|
12 |
+
wrapper, construct a scikit-learn estimator object, then use that to construct
|
13 |
+
a SklearnClassifier. E.g., to wrap a linear SVM with default settings:
|
14 |
+
|
15 |
+
>>> from sklearn.svm import LinearSVC
|
16 |
+
>>> from nltk.classify.scikitlearn import SklearnClassifier
|
17 |
+
>>> classif = SklearnClassifier(LinearSVC())
|
18 |
+
|
19 |
+
A scikit-learn classifier may include preprocessing steps when it's wrapped
|
20 |
+
in a Pipeline object. The following constructs and wraps a Naive Bayes text
|
21 |
+
classifier with tf-idf weighting and chi-square feature selection to get the
|
22 |
+
best 1000 features:
|
23 |
+
|
24 |
+
>>> from sklearn.feature_extraction.text import TfidfTransformer
|
25 |
+
>>> from sklearn.feature_selection import SelectKBest, chi2
|
26 |
+
>>> from sklearn.naive_bayes import MultinomialNB
|
27 |
+
>>> from sklearn.pipeline import Pipeline
|
28 |
+
>>> pipeline = Pipeline([('tfidf', TfidfTransformer()),
|
29 |
+
... ('chi2', SelectKBest(chi2, k=1000)),
|
30 |
+
... ('nb', MultinomialNB())])
|
31 |
+
>>> classif = SklearnClassifier(pipeline)
|
32 |
+
"""
|
33 |
+
|
34 |
+
from nltk.classify.api import ClassifierI
|
35 |
+
from nltk.probability import DictionaryProbDist
|
36 |
+
|
37 |
+
try:
|
38 |
+
from sklearn.feature_extraction import DictVectorizer
|
39 |
+
from sklearn.preprocessing import LabelEncoder
|
40 |
+
except ImportError:
|
41 |
+
pass
|
42 |
+
|
43 |
+
__all__ = ["SklearnClassifier"]
|
44 |
+
|
45 |
+
|
46 |
+
class SklearnClassifier(ClassifierI):
    """Adapter exposing a scikit-learn estimator through the NLTK
    ``ClassifierI`` interface.

    Feature dicts are turned into numeric vectors by a ``DictVectorizer``
    and labels are mapped to integer ids by a ``LabelEncoder`` before
    being handed to the wrapped estimator.
    """

    def __init__(self, estimator, dtype=float, sparse=True):
        """
        :param estimator: scikit-learn classifier object.
        :param dtype: data type used when building the feature array.
            scikit-learn estimators work exclusively on numeric data;
            the default is fine for almost all situations.
        :param sparse: whether to use sparse matrices internally. The
            estimator must support them; not all scikit-learn classifiers
            do. Defaults to True since most NLP problems involve sparse
            feature sets; setting this to False may take a great amount
            of memory.
        :type sparse: boolean.
        """
        self._clf = estimator
        self._encoder = LabelEncoder()
        self._vectorizer = DictVectorizer(dtype=dtype, sparse=sparse)

    def __repr__(self):
        return "<SklearnClassifier(%r)>" % self._clf

    def classify_many(self, featuresets):
        """Classify a batch of samples.

        :param featuresets: An iterable over featuresets, each a dict
            mapping strings to either numbers, booleans or strings.
        :return: The predicted class label for each input sample.
        :rtype: list
        """
        vectors = self._vectorizer.transform(featuresets)
        labels = self._encoder.classes_
        return [labels[index] for index in self._clf.predict(vectors)]

    def prob_classify_many(self, featuresets):
        """Compute per-class probabilities for a batch of samples.

        :param featuresets: An iterable over featuresets, each a dict
            mapping strings to either numbers, booleans or strings.
        :rtype: list of ``ProbDistI``
        """
        vectors = self._vectorizer.transform(featuresets)
        rows = self._clf.predict_proba(vectors)
        return [self._make_probdist(row) for row in rows]

    def labels(self):
        """The class labels used by this classifier.

        :rtype: list
        """
        return list(self._encoder.classes_)

    def train(self, labeled_featuresets):
        """Train (fit) the scikit-learn estimator.

        :param labeled_featuresets: A list of ``(featureset, label)``
            where each ``featureset`` is a dict mapping strings to either
            numbers, booleans or strings.
        :return: self, so calls can be chained.
        """
        featuresets, labels = zip(*labeled_featuresets)
        vectors = self._vectorizer.fit_transform(featuresets)
        targets = self._encoder.fit_transform(labels)
        self._clf.fit(vectors, targets)
        return self

    def _make_probdist(self, y_proba):
        # Pair each probability with the label the encoder assigned to
        # that column index.
        classes = self._encoder.classes_
        return DictionaryProbDist({classes[i]: prob for i, prob in enumerate(y_proba)})
|
121 |
+
|
122 |
+
|
123 |
+
if __name__ == "__main__":
    # Demo: train and evaluate two wrapped scikit-learn classifiers on the
    # NLTK names corpus (gender classification) via names_demo().
    from sklearn.linear_model import LogisticRegression
    from sklearn.naive_bayes import BernoulliNB

    from nltk.classify.util import names_demo, names_demo_features

    # Bernoulli Naive Bayes is designed for binary classification. We set the
    # binarize option to False since we know we're passing boolean features.
    print("scikit-learn Naive Bayes:")
    names_demo(
        SklearnClassifier(BernoulliNB(binarize=False)).train,
        features=names_demo_features,
    )

    # The C parameter on logistic regression (MaxEnt) controls regularization.
    # The higher it's set, the less regularized the classifier is.
    print("\n\nscikit-learn logistic regression:")
    names_demo(
        SklearnClassifier(LogisticRegression(C=1000)).train,
        features=names_demo_features,
    )
|
llmeval-env/lib/python3.10/site-packages/nltk/classify/senna.py
ADDED
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit: Senna Interface
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Rami Al-Rfou' <ralrfou@cs.stonybrook.edu>
|
5 |
+
# URL: <https://www.nltk.org/>
|
6 |
+
# For license information, see LICENSE.TXT
|
7 |
+
|
8 |
+
"""
|
9 |
+
A general interface to the SENNA pipeline that supports any of the
|
10 |
+
operations specified in SUPPORTED_OPERATIONS.
|
11 |
+
|
12 |
+
Applying multiple operations at once has the speed advantage. For example,
|
13 |
+
Senna will automatically determine POS tags if you are extracting named
|
14 |
+
entities. Applying both of the operations will cost only the time of
|
15 |
+
extracting the named entities.
|
16 |
+
|
17 |
+
The SENNA pipeline has a fixed maximum size of the sentences that it can read.
|
18 |
+
By default it is 1024 token/sentence. If you have larger sentences, changing
|
19 |
+
the MAX_SENTENCE_SIZE value in SENNA_main.c should be considered and your
|
20 |
+
system specific binary should be rebuilt. Otherwise this could introduce
|
21 |
+
misalignment errors.
|
22 |
+
|
23 |
+
The input is:
|
24 |
+
|
25 |
+
- path to the directory that contains SENNA executables. If the path is incorrect,
|
26 |
+
Senna will automatically search for executable file specified in SENNA environment variable
|
27 |
+
- List of the operations needed to be performed.
|
28 |
+
- (optionally) the encoding of the input data (default:utf-8)
|
29 |
+
|
30 |
+
Note: Unit tests for this module can be found in test/unit/test_senna.py
|
31 |
+
|
32 |
+
>>> from nltk.classify import Senna
|
33 |
+
>>> pipeline = Senna('/usr/share/senna-v3.0', ['pos', 'chk', 'ner']) # doctest: +SKIP
|
34 |
+
>>> sent = 'Dusseldorf is an international business center'.split()
|
35 |
+
>>> [(token['word'], token['chk'], token['ner'], token['pos']) for token in pipeline.tag(sent)] # doctest: +SKIP
|
36 |
+
[('Dusseldorf', 'B-NP', 'B-LOC', 'NNP'), ('is', 'B-VP', 'O', 'VBZ'), ('an', 'B-NP', 'O', 'DT'),
|
37 |
+
('international', 'I-NP', 'O', 'JJ'), ('business', 'I-NP', 'O', 'NN'), ('center', 'I-NP', 'O', 'NN')]
|
38 |
+
"""
|
39 |
+
|
40 |
+
from os import environ, path, sep
|
41 |
+
from platform import architecture, system
|
42 |
+
from subprocess import PIPE, Popen
|
43 |
+
|
44 |
+
from nltk.tag.api import TaggerI
|
45 |
+
|
46 |
+
|
47 |
+
class Senna(TaggerI):
    """Tagger backed by an external SENNA pipeline executable.

    The requested operations (any subset of ``SUPPORTED_OPERATIONS``) are
    performed in a single subprocess invocation; see the module docstring
    for installation details and usage examples.
    """

    # Operations this wrapper can request from SENNA. The order here also
    # fixes the order of the output columns (see _map()).
    SUPPORTED_OPERATIONS = ["pos", "chk", "ner"]

    def __init__(self, senna_path, operations, encoding="utf-8"):
        """
        :param senna_path: directory containing the SENNA executables.
        :param operations: list of operations to apply (subset of
            ``SUPPORTED_OPERATIONS``).
        :param encoding: encoding used when communicating with the SENNA
            subprocess (default: utf-8).
        :raises LookupError: if no executable is found at ``senna_path``
            and the ``SENNA`` environment variable points nowhere useful.
        """
        self._encoding = encoding
        self._path = path.normpath(senna_path) + sep

        # Verify the existence of the executable on self._path first.
        exe_file_1 = self.executable(self._path)
        if not path.isfile(exe_file_1):
            # Fall back to the SENNA environment variable.
            if "SENNA" in environ:
                self._path = path.normpath(environ["SENNA"]) + sep
                exe_file_2 = self.executable(self._path)
                if not path.isfile(exe_file_2):
                    raise LookupError(
                        "Senna executable expected at %s or %s but not found"
                        % (exe_file_1, exe_file_2)
                    )
        # NOTE(review): if the executable is missing and $SENNA is unset,
        # no error is raised here; the failure only surfaces later in
        # tag_sents(), which re-checks the path.

        self.operations = operations

    def executable(self, base_path):
        """Return the path of the platform-specific SENNA binary under
        *base_path*. Falls back to the generic ``senna`` binary when the
        platform is not recognized.
        """
        os_name = system()
        if os_name == "Linux":
            bits = architecture()[0]
            if bits == "64bit":
                return path.join(base_path, "senna-linux64")
            return path.join(base_path, "senna-linux32")
        if os_name == "Windows":
            return path.join(base_path, "senna-win32.exe")
        if os_name == "Darwin":
            return path.join(base_path, "senna-osx")
        return path.join(base_path, "senna")

    def _map(self):
        """Return a dict mapping each requested operation to the index of
        the output column SENNA will write its tag into.

        Column 0 is the token itself; the requested operations occupy the
        following columns in ``SUPPORTED_OPERATIONS`` order.
        """
        _map = {}
        i = 1
        for operation in Senna.SUPPORTED_OPERATIONS:
            if operation in self.operations:
                _map[operation] = i
                i += 1
        return _map

    def tag(self, tokens):
        """
        Applies the specified operation(s) on a list of tokens.
        """
        return self.tag_sents([tokens])[0]

    def tag_sents(self, sentences):
        """Apply the configured operations to a list of tokenized sentences.

        Returns one list per input sentence; each element is a dict holding
        the token under ``"word"`` plus one entry per requested operation.

        :raises LookupError: if the SENNA executable cannot be found.
        :raises RuntimeError: if the SENNA subprocess exits with an error.
        :raises IndexError: if SENNA's output cannot be aligned with the
            input tokens (e.g. a sentence exceeded SENNA's maximum size).
        """
        encoding = self._encoding

        if not path.isfile(self.executable(self._path)):
            raise LookupError(
                "Senna executable expected at %s but not found"
                % self.executable(self._path)
            )

        # Build the senna command to run the tagger
        _senna_cmd = [
            self.executable(self._path),
            "-path",
            self._path,
            "-usrtokens",
            "-iobtags",
        ]
        _senna_cmd.extend(["-" + op for op in self.operations])

        # Serialize the actual sentences to a temporary string:
        # one space-joined sentence per line.
        _input = "\n".join(" ".join(x) for x in sentences) + "\n"
        if isinstance(_input, str) and encoding:
            _input = _input.encode(encoding)

        # Run the tagger and get the output
        p = Popen(_senna_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        (stdout, stderr) = p.communicate(input=_input)
        senna_output = stdout

        # Check the return code.
        if p.returncode != 0:
            raise RuntimeError("Senna command failed! Details: %s" % stderr)

        if encoding:
            senna_output = stdout.decode(encoding)

        # Parse SENNA's output back into per-sentence lists of tag dicts.
        # Sentences are separated by blank lines; each token line holds
        # tab-separated columns in the order computed by _map().
        map_ = self._map()
        tagged_sentences = [[]]
        sentence_index = 0
        token_index = 0
        for tagged_word in senna_output.strip().split("\n"):
            if not tagged_word:
                # Blank line: start of the next sentence.
                tagged_sentences.append([])
                sentence_index += 1
                token_index = 0
                continue
            tags = tagged_word.split("\t")
            result = {}
            for tag in map_:
                result[tag] = tags[map_[tag]].strip()
            try:
                # Re-attach the original token; IndexError here means
                # SENNA's output is misaligned with the input.
                result["word"] = sentences[sentence_index][token_index]
            except IndexError as e:
                raise IndexError(
                    "Misalignment error occurred at sentence number %d. Possible reason"
                    " is that the sentence size exceeded the maximum size. Check the "
                    "documentation of Senna class for more information."
                    % sentence_index
                ) from e
            tagged_sentences[-1].append(result)
            token_index += 1
        return tagged_sentences
|
llmeval-env/lib/python3.10/site-packages/nltk/classify/textcat.py
ADDED
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit: Language ID module using TextCat algorithm
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Avital Pekker <avital.pekker@utoronto.ca>
|
5 |
+
#
|
6 |
+
# URL: <https://www.nltk.org/>
|
7 |
+
# For license information, see LICENSE.TXT
|
8 |
+
|
9 |
+
"""
|
10 |
+
A module for language identification using the TextCat algorithm.
|
11 |
+
An implementation of the text categorization algorithm
|
12 |
+
presented in Cavnar, W. B. and J. M. Trenkle,
|
13 |
+
"N-Gram-Based Text Categorization".
|
14 |
+
|
15 |
+
The algorithm takes advantage of Zipf's law and uses
|
16 |
+
n-gram frequencies to profile languages and text-yet to
|
17 |
+
be identified-then compares using a distance measure.
|
18 |
+
|
19 |
+
Language n-grams are provided by the "An Crubadan"
|
20 |
+
project. A corpus reader was created separately to read
|
21 |
+
those files.
|
22 |
+
|
23 |
+
For details regarding the algorithm, see:
|
24 |
+
https://www.let.rug.nl/~vannoord/TextCat/textcat.pdf
|
25 |
+
|
26 |
+
For details about An Crubadan, see:
|
27 |
+
https://borel.slu.edu/crubadan/index.html
|
28 |
+
"""
|
29 |
+
|
30 |
+
from sys import maxsize
|
31 |
+
|
32 |
+
from nltk.util import trigrams
|
33 |
+
|
34 |
+
# Note: this is NOT "re" you're likely used to. The regex module
|
35 |
+
# is an alternative to the standard re module that supports
|
36 |
+
# Unicode codepoint properties with the \p{} syntax.
|
37 |
+
# You may have to "pip install regex"
|
38 |
+
try:
|
39 |
+
import regex as re
|
40 |
+
except ImportError:
|
41 |
+
re = None
|
42 |
+
######################################################################
|
43 |
+
## Language identification using TextCat
|
44 |
+
######################################################################
|
45 |
+
|
46 |
+
|
47 |
+
class TextCat:
    """Language identifier implementing the Cavnar & Trenkle
    "out-of-place" trigram distance, using language profiles from the
    An Crubadan corpus (see the module docstring for references)."""

    # Crubadan corpus reader supplying per-language trigram frequencies.
    _corpus = None
    # Cache of trigram fingerprints (not consulted by the methods below).
    fingerprints = {}
    # Markers wrapped around each token before extracting trigrams, so
    # word-initial and word-final trigrams are distinguishable.
    _START_CHAR = "<"
    _END_CHAR = ">"

    # Per-language distances computed by the most recent
    # guess_language() call.
    last_distances = {}

    def __init__(self):
        # remove_punctuation() needs the third-party `regex` module for
        # Unicode property classes (\p{...}); fail early if missing.
        if not re:
            raise OSError(
                "classify.textcat requires the regex module that "
                "supports unicode. Try '$ pip install regex' and "
                "see https://pypi.python.org/pypi/regex for "
                "further details."
            )

        from nltk.corpus import crubadan

        self._corpus = crubadan
        # Load all language ngrams into cache
        for lang in self._corpus.langs():
            self._corpus.lang_freq(lang)

    def remove_punctuation(self, text):
        """Get rid of punctuation except apostrophes"""
        return re.sub(r"[^\P{P}\']+", "", text)

    def profile(self, text):
        """Create FreqDist of trigrams within text"""
        from nltk import FreqDist, word_tokenize

        clean_text = self.remove_punctuation(text)
        tokens = word_tokenize(clean_text)

        fingerprint = FreqDist()
        for t in tokens:
            # Bracket the token so edge trigrams carry word boundaries.
            token_trigram_tuples = trigrams(self._START_CHAR + t + self._END_CHAR)
            token_trigrams = ["".join(tri) for tri in token_trigram_tuples]

            for cur_trigram in token_trigrams:
                if cur_trigram in fingerprint:
                    fingerprint[cur_trigram] += 1
                else:
                    fingerprint[cur_trigram] = 1

        return fingerprint

    def calc_dist(self, lang, trigram, text_profile):
        """Calculate the "out-of-place" measure between the
        text and language profile for a single trigram"""

        lang_fd = self._corpus.lang_freq(lang)
        dist = 0

        if trigram in lang_fd:
            # Difference of the trigram's position in the two profiles'
            # key order. NOTE(review): list(...).index() is O(n) per
            # trigram; a precomputed position map would be faster.
            idx_lang_profile = list(lang_fd.keys()).index(trigram)
            idx_text = list(text_profile.keys()).index(trigram)

            dist = abs(idx_lang_profile - idx_text)
        else:
            # Arbitrary but should be larger than
            # any possible trigram file length
            # in terms of total lines
            dist = maxsize

        return dist

    def lang_dists(self, text):
        """Calculate the "out-of-place" measure between
        the text and all languages"""

        distances = {}
        profile = self.profile(text)
        # For all the languages
        for lang in self._corpus._all_lang_freq.keys():
            # Calculate distance metric for every trigram in
            # input text to be identified
            lang_dist = 0
            for trigram in profile:
                lang_dist += self.calc_dist(lang, trigram, profile)

            distances[lang] = lang_dist

        return distances

    def guess_language(self, text):
        """Find the language with the min distance
        to the text and return its ISO 639-3 code"""
        # Keep the full distance table around for callers that want it.
        self.last_distances = self.lang_dists(text)

        return min(self.last_distances, key=self.last_distances.get)
|
142 |
+
|
143 |
+
|
144 |
+
def demo():
    """Run TextCat over UDHR samples in several languages and print the
    detected ISO 639-3 code (with a friendly name) for each."""
    from nltk.corpus import udhr

    langs = [
        "Kurdish-UTF8",
        "Abkhaz-UTF8",
        "Farsi_Persian-UTF8",
        "Hindi-UTF8",
        "Hawaiian-UTF8",
        "Russian-UTF8",
        "Vietnamese-UTF8",
        "Serbian_Srpski-UTF8",
        "Esperanto-UTF8",
    ]

    friendly = {
        "kmr": "Northern Kurdish",
        "abk": "Abkhazian",
        "pes": "Iranian Persian",
        "hin": "Hindi",
        "haw": "Hawaiian",
        "rus": "Russian",
        "vie": "Vietnamese",
        "srp": "Serbian",
        "epo": "Esperanto",
    }

    detector = TextCat()

    for language in langs:
        # Get raw data from the UDHR corpus and flatten all but the last
        # sentence into a single space-separated sample string.
        sentences = udhr.sents(language)
        sample = ""
        for index in range(len(sentences) - 1):
            sample += "".join(" " + token for token in sentences[index])

        # Try to detect what it is
        print("Language snippet: " + sample[0:140] + "...")
        guess = detector.guess_language(sample)
        print(f"Language detection: {guess} ({friendly[guess]})")
        print("#" * 140)
|
194 |
+
|
195 |
+
|
196 |
+
if __name__ == "__main__":
    # Run the language-identification demo when executed as a script.
    demo()
|
llmeval-env/lib/python3.10/site-packages/nltk/classify/util.py
ADDED
@@ -0,0 +1,346 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit: Classifier Utility Functions
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Edward Loper <edloper@gmail.com>
|
5 |
+
# Steven Bird <stevenbird1@gmail.com> (minor additions)
|
6 |
+
# URL: <https://www.nltk.org/>
|
7 |
+
# For license information, see LICENSE.TXT
|
8 |
+
|
9 |
+
"""
|
10 |
+
Utility functions and classes for classifiers.
|
11 |
+
"""
|
12 |
+
|
13 |
+
import math
|
14 |
+
|
15 |
+
# from nltk.util import Deprecated
|
16 |
+
import nltk.classify.util # for accuracy & log_likelihood
|
17 |
+
from nltk.util import LazyMap
|
18 |
+
|
19 |
+
######################################################################
|
20 |
+
# { Helper Functions
|
21 |
+
######################################################################
|
22 |
+
|
23 |
+
# alternative name possibility: 'map_featurefunc()'?
|
24 |
+
# alternative name possibility: 'detect_features()'?
|
25 |
+
# alternative name possibility: 'map_featuredetect()'?
|
26 |
+
# or.. just have users use LazyMap directly?
|
27 |
+
def apply_features(feature_func, toks, labeled=None):
    """
    Lazily map ``feature_func`` over ``toks`` using ``LazyMap``,
    avoiding the memory overhead of materializing every featureset up
    front (especially valuable when ``toks`` is itself a lazy corpus
    view).

    With ``labeled=False`` the returned list-like object's values equal::

        [feature_func(tok) for tok in toks]

    With ``labeled=True`` they equal::

        [(feature_func(tok), label) for (tok, label) in toks]

    :param feature_func: The function applied to each token. It should
        return a featureset -- i.e., a dict mapping feature names to
        feature values.
    :param toks: The list of tokens. If ``labeled=True``, elements are
        ``(tok, label)`` tuples and only ``tok`` is passed to
        ``feature_func()``; otherwise elements are passed directly.
    :param labeled: If true, ``toks`` contains labeled tokens.
        (Default: auto-detect based on types.)
    """
    if labeled is None:
        # Heuristic: a non-empty list of tuples/lists is taken as labeled.
        labeled = toks and isinstance(toks[0], (tuple, list))
    if not labeled:
        return LazyMap(feature_func, toks)

    def _featurize_labeled(pair):
        # Featurize the token, carry the label through untouched.
        return (feature_func(pair[0]), pair[1])

    return LazyMap(_featurize_labeled, toks)
|
70 |
+
|
71 |
+
|
72 |
+
def attested_labels(tokens):
    """
    :return: A list of all labels that are attested in the given list
        of tokens.
    :rtype: list of (immutable)
    :param tokens: The list of classified tokens from which to extract
        labels. A classified token has the form ``(token, label)``.
    :type tokens: list
    """
    # Deduplicate via a set, then freeze into a tuple.
    seen = set()
    for _, label in tokens:
        seen.add(label)
    return tuple(seen)
|
82 |
+
|
83 |
+
|
84 |
+
def log_likelihood(classifier, gold):
    """Return the log of the mean probability the classifier assigns to
    the gold label of each ``(featureset, label)`` pair in ``gold``."""
    featuresets = [example[0] for example in gold]
    dists = classifier.prob_classify_many(featuresets)
    total = 0
    count = 0
    for (_, label), dist in zip(gold, dists):
        total += dist.prob(label)
        count += 1
    return math.log(total / count)
|
88 |
+
|
89 |
+
|
90 |
+
def accuracy(classifier, gold):
    """Return the fraction of ``(featureset, label)`` pairs in ``gold``
    that the classifier labels correctly (0 when ``gold`` is empty)."""
    predictions = classifier.classify_many([fs for fs, _ in gold])
    matches = [expected == predicted for (_, expected), predicted in zip(gold, predictions)]
    if not matches:
        return 0
    return sum(matches) / len(matches)
|
97 |
+
|
98 |
+
|
99 |
+
class CutoffChecker:
    """
    A helper class that implements cutoff checks based on number of
    iterations and log likelihood.

    Accuracy cutoffs are also implemented, but they're almost never
    a good idea to use.
    """

    def __init__(self, cutoffs):
        """
        :param cutoffs: dict of cutoff settings; recognized keys are
            ``max_iter``, ``min_ll``, ``min_lldelta``, ``max_acc`` and
            ``min_accdelta``.
        """
        # Work on a private copy so the caller's dict is never mutated.
        # (Bug fix: the normalizations below previously modified the
        # caller's dict *after* copying, so the stored copy never saw
        # them and the caller's dict was clobbered.)
        self.cutoffs = cutoffs.copy()
        if "min_ll" in self.cutoffs:
            # Log likelihood is always <= 0, so normalize to negative.
            self.cutoffs["min_ll"] = -abs(self.cutoffs["min_ll"])
        if "min_lldelta" in self.cutoffs:
            self.cutoffs["min_lldelta"] = abs(self.cutoffs["min_lldelta"])
        self.ll = None      # log likelihood from the previous check
        self.acc = None     # accuracy from the previous check
        self.iter = 1       # number of iterations seen so far

    def check(self, classifier, train_toks):
        """Return True if any configured cutoff has been reached for
        ``classifier`` on ``train_toks``; otherwise record the current
        log likelihood / accuracy and return False."""
        cutoffs = self.cutoffs
        self.iter += 1
        if "max_iter" in cutoffs and self.iter >= cutoffs["max_iter"]:
            return True  # iteration cutoff.

        new_ll = nltk.classify.util.log_likelihood(classifier, train_toks)
        if math.isnan(new_ll):
            return True

        if "min_ll" in cutoffs or "min_lldelta" in cutoffs:
            if "min_ll" in cutoffs and new_ll >= cutoffs["min_ll"]:
                return True  # log likelihood cutoff
            if (
                "min_lldelta" in cutoffs
                and self.ll
                and ((new_ll - self.ll) <= abs(cutoffs["min_lldelta"]))
            ):
                return True  # log likelihood delta cutoff
            self.ll = new_ll

        if "max_acc" in cutoffs or "min_accdelta" in cutoffs:
            # Bug fix: this previously recomputed log_likelihood instead
            # of accuracy, so accuracy cutoffs never saw an accuracy.
            new_acc = nltk.classify.util.accuracy(classifier, train_toks)
            if "max_acc" in cutoffs and new_acc >= cutoffs["max_acc"]:
                return True  # accuracy cutoff
            if (
                "min_accdelta" in cutoffs
                and self.acc
                and ((new_acc - self.acc) <= abs(cutoffs["min_accdelta"]))
            ):
                return True  # accuracy delta cutoff
            self.acc = new_acc

        return False  # no cutoff reached.
|
152 |
+
|
153 |
+
|
154 |
+
######################################################################
|
155 |
+
# { Demos
|
156 |
+
######################################################################
|
157 |
+
|
158 |
+
|
159 |
+
def names_demo_features(name):
    """Extract a demo feature set from a name: first/last letter plus a
    per-letter count and presence flag for every lowercase ASCII letter."""
    lowered = name.lower()
    features = {
        "alwayson": True,
        "startswith": lowered[0],
        "endswith": lowered[-1],
    }
    for letter in "abcdefghijklmnopqrstuvwxyz":
        features[f"count({letter})"] = lowered.count(letter)
        features[f"has({letter})"] = letter in lowered
    return features
|
168 |
+
|
169 |
+
|
170 |
+
def binary_names_demo_features(name):
    """Extract a purely boolean/numeric demo feature set from a name:
    vowel start/end flags plus per-letter count, presence and
    start/end-letter flags for every lowercase ASCII letter."""
    lowered = name.lower()
    first, last = lowered[0], lowered[-1]
    features = {
        "alwayson": True,
        "startswith(vowel)": first in "aeiouy",
        "endswith(vowel)": last in "aeiouy",
    }
    for letter in "abcdefghijklmnopqrstuvwxyz":
        features[f"count({letter})"] = lowered.count(letter)
        features[f"has({letter})"] = letter in lowered
        features[f"startswith({letter})"] = letter == first
        features[f"endswith({letter})"] = letter == last
    return features
|
181 |
+
|
182 |
+
|
183 |
+
def names_demo(trainer, features=names_demo_features):
    """Train a gender classifier on the NLTK names corpus with ``trainer``,
    print its test accuracy (and, when supported, per-name probability
    estimates), and return the trained classifier.

    :param trainer: callable taking a list of ``(featureset, label)``
        pairs and returning a classifier.
    :param features: feature-extraction function applied to each name.
    """
    import random

    from nltk.corpus import names

    # Construct a list of classified names, using the names corpus:
    # all male names first, then all female names.
    namelist = [(name, "male") for name in names.words("male.txt")]
    namelist += [(name, "female") for name in names.words("female.txt")]

    # Deterministically shuffle the names and split into train & test.
    random.seed(123456)
    random.shuffle(namelist)
    train, test = namelist[:5000], namelist[5000:5500]

    # Train up a classifier.
    print("Training classifier...")
    classifier = trainer([(features(n), g) for (n, g) in train])

    # Run the classifier on the test data.
    print("Testing classifier...")
    acc = accuracy(classifier, [(features(n), g) for (n, g) in test])
    print("Accuracy: %6.4f" % acc)

    # For classifiers that can find probabilities, show the log
    # likelihood and some sample probability distributions.
    try:
        pdists = classifier.prob_classify_many([features(n) for (n, g) in test])
        ll = [pdist.logprob(gold) for ((name, gold), pdist) in zip(test, pdists)]
        print("Avg. log likelihood: %6.4f" % (sum(ll) / len(test)))
        print()
        print("Unseen Names P(Male) P(Female)\n" + "-" * 40)
        for (name, gender), pdist in list(zip(test, pdists))[:5]:
            # Star the probability of the gold label.
            if gender == "male":
                fmt = " %-15s *%6.4f %6.4f"
            else:
                fmt = " %-15s %6.4f *%6.4f"
            print(fmt % (name, pdist.prob("male"), pdist.prob("female")))
    except NotImplementedError:
        pass

    # Return the classifier
    return classifier
|
228 |
+
|
229 |
+
|
230 |
+
def partial_names_demo(trainer, features=names_demo_features):
    """Train and evaluate a semi-supervised (positive/unlabeled) gender
    classifier on the NLTK names corpus.

    :param trainer: callable taking ``(positive_featuresets,
        unlabeled_featuresets)`` and returning a classifier (e.g.
        ``PositiveNaiveBayesClassifier.train``).
    :param features: feature-extraction function applied to each name.
    :return: the trained classifier.
    """
    import random

    from nltk.corpus import names

    male_names = names.words("male.txt")
    female_names = names.words("female.txt")

    # NOTE(review): names.words() returns a corpus view; shuffling it in
    # place assumes the view supports item assignment -- confirm.
    random.seed(654321)
    random.shuffle(male_names)
    random.shuffle(female_names)

    # Create a list of male names to be used as positive-labeled examples for training
    positive = map(features, male_names[:2000])

    # Create a list of male and female names to be used as unlabeled examples
    unlabeled = map(features, male_names[2000:2500] + female_names[:500])

    # Create a test set with correctly-labeled male and female names:
    # True = male, False = female.
    test = [(name, True) for name in male_names[2500:2750]] + [
        (name, False) for name in female_names[500:750]
    ]

    random.shuffle(test)

    # Train up a classifier.
    print("Training classifier...")
    classifier = trainer(positive, unlabeled)

    # Run the classifier on the test data.
    print("Testing classifier...")
    acc = accuracy(classifier, [(features(n), m) for (n, m) in test])
    print("Accuracy: %6.4f" % acc)

    # For classifiers that can find probabilities, show the log
    # likelihood and some sample probability distributions.
    try:
        test_featuresets = [features(n) for (n, m) in test]
        pdists = classifier.prob_classify_many(test_featuresets)
        ll = [pdist.logprob(gold) for ((name, gold), pdist) in zip(test, pdists)]
        print("Avg. log likelihood: %6.4f" % (sum(ll) / len(test)))
        print()
        print("Unseen Names P(Male) P(Female)\n" + "-" * 40)
        # Bug fix: zip() returns an iterator in Python 3 and cannot be
        # sliced directly; materialize it first.
        for ((name, is_male), pdist) in list(zip(test, pdists))[:5]:
            if is_male:
                fmt = " %-15s *%6.4f %6.4f"
            else:
                fmt = " %-15s %6.4f *%6.4f"
            print(fmt % (name, pdist.prob(True), pdist.prob(False)))
    except NotImplementedError:
        pass

    # Return the classifier
    return classifier
|
284 |
+
|
285 |
+
|
286 |
+
# Module-level cache of senseval instances keyed by target word, so that
# repeated wsd_demo() calls don't re-read the corpus.
_inst_cache = {}
|
287 |
+
|
288 |
+
|
289 |
+
def wsd_demo(trainer, word, features, n=1000):
    """
    Train and test a word-sense-disambiguation classifier on senseval
    instances for ``word``, printing accuracy and (when supported) the
    average log likelihood on the test set.

    :param trainer: callable taking a list of (featureset, label) pairs
        and returning a classifier.
    :param word: senseval corpus item to load instances for.
    :param features: callable mapping a senseval instance to a featureset.
    :param n: maximum number of instances to use (capped at the number
        available).
    :return: the trained classifier.
    """
    import random

    from nltk.corpus import senseval

    # Get the instances, reusing the module-level cache when possible.
    print("Reading data...")
    global _inst_cache
    if word not in _inst_cache:
        # Label each instance with its first listed sense.
        _inst_cache[word] = [(i, i.senses[0]) for i in senseval.instances(word)]
    instances = _inst_cache[word][:]
    if n > len(instances):
        n = len(instances)
    senses = list({l for (i, l) in instances})
    print("  Senses: " + " ".join(senses))

    # Randomly split the instances into a test & train set (80/20).
    # Fixed seed keeps the demo reproducible across runs.
    print("Splitting into test & train...")
    random.seed(123456)
    random.shuffle(instances)
    train = instances[: int(0.8 * n)]
    test = instances[int(0.8 * n) : n]

    # Train up a classifier.
    print("Training classifier...")
    classifier = trainer([(features(i), l) for (i, l) in train])

    # Run the classifier on the test data.
    print("Testing classifier...")
    acc = accuracy(classifier, [(features(i), l) for (i, l) in test])
    print("Accuracy: %6.4f" % acc)

    # For classifiers that can find probabilities, show the log
    # likelihood and some sample probability distributions.
    try:
        # NOTE: the loop variable was renamed from ``n`` (which shadowed
        # the function parameter) to ``label``.
        test_featuresets = [features(i) for (i, label) in test]
        pdists = classifier.prob_classify_many(test_featuresets)
        ll = [pdist.logprob(gold) for ((inst, gold), pdist) in zip(test, pdists)]
        print("Avg. log likelihood: %6.4f" % (sum(ll) / len(test)))
    except NotImplementedError:
        pass

    # Return the classifier
    return classifier
|
333 |
+
|
334 |
+
|
335 |
+
def check_megam_config():
    """
    Check whether the MEGAM binary is configured.

    :raises NameError: if the module-level ``_megam_bin`` has not been set
        (i.e. ``nltk.config_megam()`` was never called); the original
        NameError is chained as the cause.
    """
    try:
        _megam_bin
    except NameError as e:
        # Removed a redundant str(...) wrapper: the parenthesized literal
        # concatenation is already a str.
        err_msg = (
            "Please configure your megam binary first, e.g.\n"
            ">>> nltk.config_megam('/usr/bin/local/megam')"
        )
        raise NameError(err_msg) from e
|
llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (461 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/all.cpython-310.pyc
ADDED
Binary file (1.1 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/childes_fixt.cpython-310.pyc
ADDED
Binary file (610 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/classify_fixt.cpython-310.pyc
ADDED
Binary file (342 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/conftest.cpython-310.pyc
ADDED
Binary file (1.03 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/gensim_fixt.cpython-310.pyc
ADDED
Binary file (341 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/gluesemantics_malt_fixt.cpython-310.pyc
ADDED
Binary file (526 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/portuguese_en_fixt.cpython-310.pyc
ADDED
Binary file (401 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/probability_fixt.cpython-310.pyc
ADDED
Binary file (345 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/nltk/test/__pycache__/setup_fixt.cpython-310.pyc
ADDED
Binary file (1.24 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/nltk/test/collocations.doctest
ADDED
@@ -0,0 +1,307 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
==============
|
5 |
+
Collocations
|
6 |
+
==============
|
7 |
+
|
8 |
+
Overview
|
9 |
+
~~~~~~~~
|
10 |
+
|
11 |
+
Collocations are expressions of multiple words which commonly co-occur. For
|
12 |
+
example, the top ten bigram collocations in Genesis are listed below, as
|
13 |
+
measured using Pointwise Mutual Information.
|
14 |
+
|
15 |
+
>>> import nltk
|
16 |
+
>>> from nltk.collocations import *
|
17 |
+
>>> bigram_measures = nltk.collocations.BigramAssocMeasures()
|
18 |
+
>>> trigram_measures = nltk.collocations.TrigramAssocMeasures()
|
19 |
+
>>> fourgram_measures = nltk.collocations.QuadgramAssocMeasures()
|
20 |
+
>>> finder = BigramCollocationFinder.from_words(
|
21 |
+
... nltk.corpus.genesis.words('english-web.txt'))
|
22 |
+
>>> finder.nbest(bigram_measures.pmi, 10)
|
23 |
+
[('Allon', 'Bacuth'), ('Ashteroth', 'Karnaim'), ('Ben', 'Ammi'),
|
24 |
+
('En', 'Mishpat'), ('Jegar', 'Sahadutha'), ('Salt', 'Sea'),
|
25 |
+
('Whoever', 'sheds'), ('appoint', 'overseers'), ('aromatic', 'resin'),
|
26 |
+
('cutting', 'instrument')]
|
27 |
+
|
28 |
+
While these words are highly collocated, the expressions are also very
|
29 |
+
infrequent. Therefore it is useful to apply filters, such as ignoring all
|
30 |
+
bigrams which occur less than three times in the corpus:
|
31 |
+
|
32 |
+
>>> finder.apply_freq_filter(3)
|
33 |
+
>>> finder.nbest(bigram_measures.pmi, 10)
|
34 |
+
[('Beer', 'Lahai'), ('Lahai', 'Roi'), ('gray', 'hairs'),
|
35 |
+
('ewe', 'lambs'), ('Most', 'High'), ('many', 'colors'),
|
36 |
+
('burnt', 'offering'), ('Paddan', 'Aram'), ('east', 'wind'),
|
37 |
+
('living', 'creature')]
|
38 |
+
|
39 |
+
We may similarly find collocations among tagged words:
|
40 |
+
|
41 |
+
>>> finder = BigramCollocationFinder.from_words(
|
42 |
+
... nltk.corpus.brown.tagged_words('ca01', tagset='universal'))
|
43 |
+
>>> finder.nbest(bigram_measures.pmi, 5)
|
44 |
+
[(('1,119', 'NUM'), ('votes', 'NOUN')),
|
45 |
+
(('1962', 'NUM'), ("governor's", 'NOUN')),
|
46 |
+
(('637', 'NUM'), ('E.', 'NOUN')),
|
47 |
+
(('Alpharetta', 'NOUN'), ('prison', 'NOUN')),
|
48 |
+
(('Bar', 'NOUN'), ('Association', 'NOUN'))]
|
49 |
+
|
50 |
+
Or tags alone:
|
51 |
+
|
52 |
+
>>> finder = BigramCollocationFinder.from_words(t for w, t in
|
53 |
+
... nltk.corpus.brown.tagged_words('ca01', tagset='universal'))
|
54 |
+
>>> finder.nbest(bigram_measures.pmi, 10)
|
55 |
+
[('PRT', 'VERB'), ('PRON', 'VERB'), ('ADP', 'DET'), ('.', 'PRON'), ('DET', 'ADJ'),
|
56 |
+
('CONJ', 'PRON'), ('ADP', 'NUM'), ('NUM', '.'), ('ADV', 'ADV'), ('VERB', 'ADV')]
|
57 |
+
|
58 |
+
Or spanning intervening words:
|
59 |
+
|
60 |
+
>>> finder = BigramCollocationFinder.from_words(
|
61 |
+
... nltk.corpus.genesis.words('english-web.txt'),
|
62 |
+
... window_size = 20)
|
63 |
+
>>> finder.apply_freq_filter(2)
|
64 |
+
>>> ignored_words = nltk.corpus.stopwords.words('english')
|
65 |
+
>>> finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
|
66 |
+
>>> finder.nbest(bigram_measures.likelihood_ratio, 10)
|
67 |
+
[('chief', 'chief'), ('became', 'father'), ('years', 'became'),
|
68 |
+
('hundred', 'years'), ('lived', 'became'), ('king', 'king'),
|
69 |
+
('lived', 'years'), ('became', 'became'), ('chief', 'chiefs'),
|
70 |
+
('hundred', 'became')]
|
71 |
+
|
72 |
+
Finders
|
73 |
+
~~~~~~~
|
74 |
+
|
75 |
+
The collocations package provides collocation finders which by default
|
76 |
+
consider all ngrams in a text as candidate collocations:
|
77 |
+
|
78 |
+
>>> text = "I do not like green eggs and ham, I do not like them Sam I am!"
|
79 |
+
>>> tokens = nltk.wordpunct_tokenize(text)
|
80 |
+
>>> finder = BigramCollocationFinder.from_words(tokens)
|
81 |
+
>>> scored = finder.score_ngrams(bigram_measures.raw_freq)
|
82 |
+
>>> sorted(bigram for bigram, score in scored)
|
83 |
+
[(',', 'I'), ('I', 'am'), ('I', 'do'), ('Sam', 'I'), ('am', '!'),
|
84 |
+
('and', 'ham'), ('do', 'not'), ('eggs', 'and'), ('green', 'eggs'),
|
85 |
+
('ham', ','), ('like', 'green'), ('like', 'them'), ('not', 'like'),
|
86 |
+
('them', 'Sam')]
|
87 |
+
|
88 |
+
We could otherwise construct the collocation finder from manually-derived
|
89 |
+
FreqDists:
|
90 |
+
|
91 |
+
>>> word_fd = nltk.FreqDist(tokens)
|
92 |
+
>>> bigram_fd = nltk.FreqDist(nltk.bigrams(tokens))
|
93 |
+
>>> finder = BigramCollocationFinder(word_fd, bigram_fd)
|
94 |
+
>>> scored == finder.score_ngrams(bigram_measures.raw_freq)
|
95 |
+
True
|
96 |
+
|
97 |
+
A similar interface is provided for trigrams:
|
98 |
+
|
99 |
+
>>> finder = TrigramCollocationFinder.from_words(tokens)
|
100 |
+
>>> scored = finder.score_ngrams(trigram_measures.raw_freq)
|
101 |
+
>>> set(trigram for trigram, score in scored) == set(nltk.trigrams(tokens))
|
102 |
+
True
|
103 |
+
|
104 |
+
We may want to select only the top n results:
|
105 |
+
|
106 |
+
>>> sorted(finder.nbest(trigram_measures.raw_freq, 2))
|
107 |
+
[('I', 'do', 'not'), ('do', 'not', 'like')]
|
108 |
+
|
109 |
+
Alternatively, we can select those above a minimum score value:
|
110 |
+
|
111 |
+
>>> sorted(finder.above_score(trigram_measures.raw_freq,
|
112 |
+
... 1.0 / len(tuple(nltk.trigrams(tokens)))))
|
113 |
+
[('I', 'do', 'not'), ('do', 'not', 'like')]
|
114 |
+
|
115 |
+
Now spanning intervening words:
|
116 |
+
|
117 |
+
>>> finder = TrigramCollocationFinder.from_words(tokens)
|
118 |
+
>>> finder = TrigramCollocationFinder.from_words(tokens, window_size=4)
|
119 |
+
>>> sorted(finder.nbest(trigram_measures.raw_freq, 4))
|
120 |
+
[('I', 'do', 'like'), ('I', 'do', 'not'), ('I', 'not', 'like'), ('do', 'not', 'like')]
|
121 |
+
|
122 |
+
A closer look at the finder's ngram frequencies:
|
123 |
+
|
124 |
+
>>> sorted(finder.ngram_fd.items(), key=lambda t: (-t[1], t[0]))[:10]
|
125 |
+
[(('I', 'do', 'like'), 2), (('I', 'do', 'not'), 2), (('I', 'not', 'like'), 2),
|
126 |
+
(('do', 'not', 'like'), 2), ((',', 'I', 'do'), 1), ((',', 'I', 'not'), 1),
|
127 |
+
((',', 'do', 'not'), 1), (('I', 'am', '!'), 1), (('Sam', 'I', '!'), 1),
|
128 |
+
(('Sam', 'I', 'am'), 1)]
|
129 |
+
|
130 |
+
A similar interface is provided for fourgrams:
|
131 |
+
|
132 |
+
>>> finder_4grams = QuadgramCollocationFinder.from_words(tokens)
|
133 |
+
>>> scored_4grams = finder_4grams.score_ngrams(fourgram_measures.raw_freq)
|
134 |
+
>>> set(fourgram for fourgram, score in scored_4grams) == set(nltk.ngrams(tokens, n=4))
|
135 |
+
True
|
136 |
+
|
137 |
+
Filtering candidates
|
138 |
+
~~~~~~~~~~~~~~~~~~~~
|
139 |
+
|
140 |
+
All the ngrams in a text are often too many to be useful when finding
|
141 |
+
collocations. It is generally useful to remove some words or punctuation,
|
142 |
+
and to require a minimum frequency for candidate collocations.
|
143 |
+
|
144 |
+
Given our sample text above, if we remove all trigrams containing personal
|
145 |
+
pronouns from candidature, score_ngrams should return 6 less results, and
|
146 |
+
'do not like' will be the only candidate which occurs more than once:
|
147 |
+
|
148 |
+
>>> finder = TrigramCollocationFinder.from_words(tokens)
|
149 |
+
>>> len(finder.score_ngrams(trigram_measures.raw_freq))
|
150 |
+
14
|
151 |
+
>>> finder.apply_word_filter(lambda w: w in ('I', 'me'))
|
152 |
+
>>> len(finder.score_ngrams(trigram_measures.raw_freq))
|
153 |
+
8
|
154 |
+
>>> sorted(finder.above_score(trigram_measures.raw_freq,
|
155 |
+
... 1.0 / len(tuple(nltk.trigrams(tokens)))))
|
156 |
+
[('do', 'not', 'like')]
|
157 |
+
|
158 |
+
Sometimes a filter is a function on the whole ngram, rather than each word,
|
159 |
+
such as if we may permit 'and' to appear in the middle of a trigram, but
|
160 |
+
not on either edge:
|
161 |
+
|
162 |
+
>>> finder.apply_ngram_filter(lambda w1, w2, w3: 'and' in (w1, w3))
|
163 |
+
>>> len(finder.score_ngrams(trigram_measures.raw_freq))
|
164 |
+
6
|
165 |
+
|
166 |
+
Finally, it is often important to remove low frequency candidates, as we
|
167 |
+
lack sufficient evidence about their significance as collocations:
|
168 |
+
|
169 |
+
>>> finder.apply_freq_filter(2)
|
170 |
+
>>> len(finder.score_ngrams(trigram_measures.raw_freq))
|
171 |
+
1
|
172 |
+
|
173 |
+
Association measures
|
174 |
+
~~~~~~~~~~~~~~~~~~~~
|
175 |
+
|
176 |
+
A number of measures are available to score collocations or other associations.
|
177 |
+
The arguments to measure functions are marginals of a contingency table, in the
|
178 |
+
bigram case (n_ii, (n_ix, n_xi), n_xx)::
|
179 |
+
|
180 |
+
w1 ~w1
|
181 |
+
------ ------
|
182 |
+
w2 | n_ii | n_oi | = n_xi
|
183 |
+
------ ------
|
184 |
+
~w2 | n_io | n_oo |
|
185 |
+
------ ------
|
186 |
+
= n_ix TOTAL = n_xx
|
187 |
+
|
188 |
+
We test their calculation using some known values presented in Manning and
|
189 |
+
Schutze's text and other papers.
|
190 |
+
|
191 |
+
Student's t: examples from Manning and Schutze 5.3.2
|
192 |
+
|
193 |
+
>>> print('%0.4f' % bigram_measures.student_t(8, (15828, 4675), 14307668))
|
194 |
+
0.9999
|
195 |
+
>>> print('%0.4f' % bigram_measures.student_t(20, (42, 20), 14307668))
|
196 |
+
4.4721
|
197 |
+
|
198 |
+
Chi-square: examples from Manning and Schutze 5.3.3
|
199 |
+
|
200 |
+
>>> print('%0.2f' % bigram_measures.chi_sq(8, (15828, 4675), 14307668))
|
201 |
+
1.55
|
202 |
+
>>> print('%0.0f' % bigram_measures.chi_sq(59, (67, 65), 571007))
|
203 |
+
456400
|
204 |
+
|
205 |
+
Likelihood ratios: examples from Dunning, CL, 1993
|
206 |
+
|
207 |
+
>>> print('%0.2f' % bigram_measures.likelihood_ratio(110, (2552, 221), 31777))
|
208 |
+
270.72
|
209 |
+
>>> print('%0.2f' % bigram_measures.likelihood_ratio(8, (13, 32), 31777))
|
210 |
+
95.29
|
211 |
+
|
212 |
+
Pointwise Mutual Information: examples from Manning and Schutze 5.4
|
213 |
+
|
214 |
+
>>> print('%0.2f' % bigram_measures.pmi(20, (42, 20), 14307668))
|
215 |
+
18.38
|
216 |
+
>>> print('%0.2f' % bigram_measures.pmi(20, (15019, 15629), 14307668))
|
217 |
+
0.29
|
218 |
+
|
219 |
+
TODO: Find authoritative results for trigrams.
|
220 |
+
|
221 |
+
Using contingency table values
|
222 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
223 |
+
|
224 |
+
While frequency counts make marginals readily available for collocation
|
225 |
+
finding, it is common to find published contingency table values. The
|
226 |
+
collocations package therefore provides a wrapper, ContingencyMeasures, which
|
227 |
+
wraps an association measures class, providing association measures which
|
228 |
+
take contingency values as arguments, (n_ii, n_io, n_oi, n_oo) in the
|
229 |
+
bigram case.
|
230 |
+
|
231 |
+
>>> from nltk.metrics import ContingencyMeasures
|
232 |
+
>>> cont_bigram_measures = ContingencyMeasures(bigram_measures)
|
233 |
+
>>> print('%0.2f' % cont_bigram_measures.likelihood_ratio(8, 5, 24, 31740))
|
234 |
+
95.29
|
235 |
+
>>> print('%0.2f' % cont_bigram_measures.chi_sq(8, 15820, 4667, 14287173))
|
236 |
+
1.55
|
237 |
+
|
238 |
+
Ranking and correlation
|
239 |
+
~~~~~~~~~~~~~~~~~~~~~~~
|
240 |
+
|
241 |
+
It is useful to consider the results of finding collocations as a ranking, and
|
242 |
+
the rankings output using different association measures can be compared using
|
243 |
+
the Spearman correlation coefficient.
|
244 |
+
|
245 |
+
Ranks can be assigned to a sorted list of results trivially by assigning
|
246 |
+
strictly increasing ranks to each result:
|
247 |
+
|
248 |
+
>>> from nltk.metrics.spearman import *
|
249 |
+
>>> results_list = ['item1', 'item2', 'item3', 'item4', 'item5']
|
250 |
+
>>> print(list(ranks_from_sequence(results_list)))
|
251 |
+
[('item1', 0), ('item2', 1), ('item3', 2), ('item4', 3), ('item5', 4)]
|
252 |
+
|
253 |
+
If scores are available for each result, we may allow sufficiently similar
|
254 |
+
results (differing by no more than rank_gap) to be assigned the same rank:
|
255 |
+
|
256 |
+
>>> results_scored = [('item1', 50.0), ('item2', 40.0), ('item3', 38.0),
|
257 |
+
... ('item4', 35.0), ('item5', 14.0)]
|
258 |
+
>>> print(list(ranks_from_scores(results_scored, rank_gap=5)))
|
259 |
+
[('item1', 0), ('item2', 1), ('item3', 1), ('item4', 1), ('item5', 4)]
|
260 |
+
|
261 |
+
The Spearman correlation coefficient gives a number from -1.0 to 1.0 comparing
|
262 |
+
two rankings. A coefficient of 1.0 indicates identical rankings; -1.0 indicates
|
263 |
+
exact opposite rankings.
|
264 |
+
|
265 |
+
>>> print('%0.1f' % spearman_correlation(
|
266 |
+
... ranks_from_sequence(results_list),
|
267 |
+
... ranks_from_sequence(results_list)))
|
268 |
+
1.0
|
269 |
+
>>> print('%0.1f' % spearman_correlation(
|
270 |
+
... ranks_from_sequence(reversed(results_list)),
|
271 |
+
... ranks_from_sequence(results_list)))
|
272 |
+
-1.0
|
273 |
+
>>> results_list2 = ['item2', 'item3', 'item1', 'item5', 'item4']
|
274 |
+
>>> print('%0.1f' % spearman_correlation(
|
275 |
+
... ranks_from_sequence(results_list),
|
276 |
+
... ranks_from_sequence(results_list2)))
|
277 |
+
0.6
|
278 |
+
>>> print('%0.1f' % spearman_correlation(
|
279 |
+
... ranks_from_sequence(reversed(results_list)),
|
280 |
+
... ranks_from_sequence(results_list2)))
|
281 |
+
-0.6
|
282 |
+
|
283 |
+
Keywords
|
284 |
+
~~~~~~~~
|
285 |
+
|
286 |
+
Bigram association metrics can also be used to perform keyword analysis. For example, this finds the keywords
|
287 |
+
associated with the "romance" section of the Brown corpus as measured by likelihood ratio:
|
288 |
+
|
289 |
+
>>> romance = nltk.FreqDist(w.lower() for w in nltk.corpus.brown.words(categories='romance') if w.isalpha())
|
290 |
+
>>> freq = nltk.FreqDist(w.lower() for w in nltk.corpus.brown.words() if w.isalpha())
|
291 |
+
|
292 |
+
>>> key = nltk.FreqDist()
|
293 |
+
>>> for w in romance:
|
294 |
+
... key[w] = bigram_measures.likelihood_ratio(romance[w], (freq[w], romance.N()), freq.N())
|
295 |
+
|
296 |
+
>>> for k,v in key.most_common(10):
|
297 |
+
... print(f'{k:10s} {v:9.3f}')
|
298 |
+
she 1163.325
|
299 |
+
i 995.961
|
300 |
+
her 930.528
|
301 |
+
you 513.149
|
302 |
+
of 501.891
|
303 |
+
is 463.386
|
304 |
+
had 421.615
|
305 |
+
he 411.000
|
306 |
+
the 347.632
|
307 |
+
said 300.811
|
llmeval-env/lib/python3.10/site-packages/nltk/test/conftest.py
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
|
3 |
+
from nltk.corpus.reader import CorpusReader
|
4 |
+
|
5 |
+
|
6 |
+
@pytest.fixture(autouse=True)
def mock_plot(mocker):
    """Disable matplotlib plotting in test code"""
    # autouse=True: applied implicitly to every test collected under this
    # conftest, so no doctest or unit test can open a plot window.
    # NOTE(review): ``mocker`` is presumably the pytest-mock fixture --
    # confirm pytest-mock is a declared test dependency.

    try:
        import matplotlib.pyplot as plt

        # Replace the two pyplot entry points with mocks; pytest-mock
        # undoes the patches automatically when each test finishes.
        mocker.patch.object(plt, "gca")
        mocker.patch.object(plt, "show")
    except ImportError:
        # matplotlib is an optional dependency; nothing to patch without it.
        pass
|
17 |
+
|
18 |
+
|
19 |
+
@pytest.fixture(scope="module", autouse=True)
def teardown_loaded_corpora():
    """
    After each test session ends (either doctest or unit test),
    unload any loaded corpora
    """
    # scope="module" + autouse: runs once around every test module without
    # tests having to request it.

    yield  # first, wait for the test to end

    # Imported here rather than at module top so the corpus machinery is
    # only touched during teardown.
    import nltk.corpus

    # Walk every attribute of nltk.corpus and drop cached corpus data, so
    # corpora loaded by one test module don't keep memory for the rest of
    # the session.  The hasattr guard skips readers without an _unload hook.
    for name in dir(nltk.corpus):
        obj = getattr(nltk.corpus, name, None)
        if isinstance(obj, CorpusReader) and hasattr(obj, "_unload"):
            obj._unload()
|
llmeval-env/lib/python3.10/site-packages/nltk/test/dependency.doctest
ADDED
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
===================
|
5 |
+
Dependency Grammars
|
6 |
+
===================
|
7 |
+
|
8 |
+
>>> from nltk.grammar import DependencyGrammar
|
9 |
+
>>> from nltk.parse import (
|
10 |
+
... DependencyGraph,
|
11 |
+
... ProjectiveDependencyParser,
|
12 |
+
... NonprojectiveDependencyParser,
|
13 |
+
... )
|
14 |
+
|
15 |
+
CoNLL Data
|
16 |
+
----------
|
17 |
+
|
18 |
+
>>> treebank_data = """Pierre NNP 2 NMOD
|
19 |
+
... Vinken NNP 8 SUB
|
20 |
+
... , , 2 P
|
21 |
+
... 61 CD 5 NMOD
|
22 |
+
... years NNS 6 AMOD
|
23 |
+
... old JJ 2 NMOD
|
24 |
+
... , , 2 P
|
25 |
+
... will MD 0 ROOT
|
26 |
+
... join VB 8 VC
|
27 |
+
... the DT 11 NMOD
|
28 |
+
... board NN 9 OBJ
|
29 |
+
... as IN 9 VMOD
|
30 |
+
... a DT 15 NMOD
|
31 |
+
... nonexecutive JJ 15 NMOD
|
32 |
+
... director NN 12 PMOD
|
33 |
+
... Nov. NNP 9 VMOD
|
34 |
+
... 29 CD 16 NMOD
|
35 |
+
... . . 9 VMOD
|
36 |
+
... """
|
37 |
+
|
38 |
+
>>> dg = DependencyGraph(treebank_data)
|
39 |
+
>>> dg.tree().pprint()
|
40 |
+
(will
|
41 |
+
(Vinken Pierre , (old (years 61)) ,)
|
42 |
+
(join (board the) (as (director a nonexecutive)) (Nov. 29) .))
|
43 |
+
>>> for head, rel, dep in dg.triples():
|
44 |
+
... print(
|
45 |
+
... '({h[0]}, {h[1]}), {r}, ({d[0]}, {d[1]})'
|
46 |
+
... .format(h=head, r=rel, d=dep)
|
47 |
+
... )
|
48 |
+
(will, MD), SUB, (Vinken, NNP)
|
49 |
+
(Vinken, NNP), NMOD, (Pierre, NNP)
|
50 |
+
(Vinken, NNP), P, (,, ,)
|
51 |
+
(Vinken, NNP), NMOD, (old, JJ)
|
52 |
+
(old, JJ), AMOD, (years, NNS)
|
53 |
+
(years, NNS), NMOD, (61, CD)
|
54 |
+
(Vinken, NNP), P, (,, ,)
|
55 |
+
(will, MD), VC, (join, VB)
|
56 |
+
(join, VB), OBJ, (board, NN)
|
57 |
+
(board, NN), NMOD, (the, DT)
|
58 |
+
(join, VB), VMOD, (as, IN)
|
59 |
+
(as, IN), PMOD, (director, NN)
|
60 |
+
(director, NN), NMOD, (a, DT)
|
61 |
+
(director, NN), NMOD, (nonexecutive, JJ)
|
62 |
+
(join, VB), VMOD, (Nov., NNP)
|
63 |
+
(Nov., NNP), NMOD, (29, CD)
|
64 |
+
(join, VB), VMOD, (., .)
|
65 |
+
|
66 |
+
Using a custom cell extractor.
|
67 |
+
|
68 |
+
>>> def custom_extractor(cells):
|
69 |
+
... _, tag, head, rel = cells
|
70 |
+
... return 'spam', 'spam', tag, tag, '', head, rel
|
71 |
+
>>> dg = DependencyGraph(treebank_data, cell_extractor=custom_extractor)
|
72 |
+
>>> dg.tree().pprint()
|
73 |
+
(spam
|
74 |
+
(spam spam spam (spam (spam spam)) spam)
|
75 |
+
(spam (spam spam) (spam (spam spam spam)) (spam spam) spam))
|
76 |
+
|
77 |
+
Custom cell extractors can take in and return an index.
|
78 |
+
|
79 |
+
>>> def custom_extractor(cells, index):
|
80 |
+
... word, tag, head, rel = cells
|
81 |
+
... return (index, '{}-{}'.format(word, index), word,
|
82 |
+
... tag, tag, '', head, rel)
|
83 |
+
>>> dg = DependencyGraph(treebank_data, cell_extractor=custom_extractor)
|
84 |
+
>>> dg.tree().pprint()
|
85 |
+
(will-8
|
86 |
+
(Vinken-2 Pierre-1 ,-3 (old-6 (years-5 61-4)) ,-7)
|
87 |
+
(join-9
|
88 |
+
(board-11 the-10)
|
89 |
+
(as-12 (director-15 a-13 nonexecutive-14))
|
90 |
+
(Nov.-16 29-17)
|
91 |
+
.-18))
|
92 |
+
|
93 |
+
Using the dependency-parsed version of the Penn Treebank corpus sample.
|
94 |
+
|
95 |
+
>>> from nltk.corpus import dependency_treebank
|
96 |
+
>>> t = dependency_treebank.parsed_sents()[0]
|
97 |
+
>>> print(t.to_conll(3))
|
98 |
+
Pierre NNP 2
|
99 |
+
Vinken NNP 8
|
100 |
+
, , 2
|
101 |
+
61 CD 5
|
102 |
+
years NNS 6
|
103 |
+
old JJ 2
|
104 |
+
, , 2
|
105 |
+
will MD 0
|
106 |
+
join VB 8
|
107 |
+
the DT 11
|
108 |
+
board NN 9
|
109 |
+
as IN 9
|
110 |
+
a DT 15
|
111 |
+
nonexecutive JJ 15
|
112 |
+
director NN 12
|
113 |
+
Nov. NNP 9
|
114 |
+
29 CD 16
|
115 |
+
. . 8
|
116 |
+
|
117 |
+
Using the output of zpar (like Malt-TAB but with zero-based indexing)
|
118 |
+
|
119 |
+
>>> zpar_data = """
|
120 |
+
... Pierre NNP 1 NMOD
|
121 |
+
... Vinken NNP 7 SUB
|
122 |
+
... , , 1 P
|
123 |
+
... 61 CD 4 NMOD
|
124 |
+
... years NNS 5 AMOD
|
125 |
+
... old JJ 1 NMOD
|
126 |
+
... , , 1 P
|
127 |
+
... will MD -1 ROOT
|
128 |
+
... join VB 7 VC
|
129 |
+
... the DT 10 NMOD
|
130 |
+
... board NN 8 OBJ
|
131 |
+
... as IN 8 VMOD
|
132 |
+
... a DT 14 NMOD
|
133 |
+
... nonexecutive JJ 14 NMOD
|
134 |
+
... director NN 11 PMOD
|
135 |
+
... Nov. NNP 8 VMOD
|
136 |
+
... 29 CD 15 NMOD
|
137 |
+
... . . 7 P
|
138 |
+
... """
|
139 |
+
|
140 |
+
>>> zdg = DependencyGraph(zpar_data, zero_based=True)
|
141 |
+
>>> print(zdg.tree())
|
142 |
+
(will
|
143 |
+
(Vinken Pierre , (old (years 61)) ,)
|
144 |
+
(join (board the) (as (director a nonexecutive)) (Nov. 29))
|
145 |
+
.)
|
146 |
+
|
147 |
+
|
148 |
+
Projective Dependency Parsing
|
149 |
+
-----------------------------
|
150 |
+
|
151 |
+
>>> grammar = DependencyGrammar.fromstring("""
|
152 |
+
... 'fell' -> 'price' | 'stock'
|
153 |
+
... 'price' -> 'of' 'the'
|
154 |
+
... 'of' -> 'stock'
|
155 |
+
... 'stock' -> 'the'
|
156 |
+
... """)
|
157 |
+
>>> print(grammar)
|
158 |
+
Dependency grammar with 5 productions
|
159 |
+
'fell' -> 'price'
|
160 |
+
'fell' -> 'stock'
|
161 |
+
'price' -> 'of' 'the'
|
162 |
+
'of' -> 'stock'
|
163 |
+
'stock' -> 'the'
|
164 |
+
|
165 |
+
>>> dp = ProjectiveDependencyParser(grammar)
|
166 |
+
>>> for t in sorted(dp.parse(['the', 'price', 'of', 'the', 'stock', 'fell'])):
|
167 |
+
... print(t)
|
168 |
+
(fell (price the (of (stock the))))
|
169 |
+
(fell (price the of) (stock the))
|
170 |
+
(fell (price the of the) stock)
|
171 |
+
|
172 |
+
Non-Projective Dependency Parsing
|
173 |
+
---------------------------------
|
174 |
+
|
175 |
+
>>> grammar = DependencyGrammar.fromstring("""
|
176 |
+
... 'taught' -> 'play' | 'man'
|
177 |
+
... 'man' -> 'the'
|
178 |
+
... 'play' -> 'golf' | 'dog' | 'to'
|
179 |
+
... 'dog' -> 'his'
|
180 |
+
... """)
|
181 |
+
>>> print(grammar)
|
182 |
+
Dependency grammar with 7 productions
|
183 |
+
'taught' -> 'play'
|
184 |
+
'taught' -> 'man'
|
185 |
+
'man' -> 'the'
|
186 |
+
'play' -> 'golf'
|
187 |
+
'play' -> 'dog'
|
188 |
+
'play' -> 'to'
|
189 |
+
'dog' -> 'his'
|
190 |
+
|
191 |
+
>>> dp = NonprojectiveDependencyParser(grammar)
|
192 |
+
>>> g, = dp.parse(['the', 'man', 'taught', 'his', 'dog', 'to', 'play', 'golf'])
|
193 |
+
|
194 |
+
>>> print(g.root['word'])
|
195 |
+
taught
|
196 |
+
|
197 |
+
>>> for _, node in sorted(g.nodes.items()):
|
198 |
+
... if node['word'] is not None:
|
199 |
+
... print('{address} {word}: {d}'.format(d=node['deps'][''], **node))
|
200 |
+
1 the: []
|
201 |
+
2 man: [1]
|
202 |
+
3 taught: [2, 7]
|
203 |
+
4 his: []
|
204 |
+
5 dog: [4]
|
205 |
+
6 to: []
|
206 |
+
7 play: [5, 6, 8]
|
207 |
+
8 golf: []
|
208 |
+
|
209 |
+
>>> print(g.tree())
|
210 |
+
(taught (man the) (play (dog his) to golf))
|
211 |
+
|
212 |
+
Integration with MALT parser
|
213 |
+
============================
|
214 |
+
|
215 |
+
In case the top relation is different from the default, we can set it. In case
|
216 |
+
of MALT parser, it's set to `'null'`.
|
217 |
+
|
218 |
+
>>> dg_str = """1 I _ NN NN _ 2 nn _ _
|
219 |
+
... 2 shot _ NN NN _ 0 null _ _
|
220 |
+
... 3 an _ AT AT _ 2 dep _ _
|
221 |
+
... 4 elephant _ NN NN _ 7 nn _ _
|
222 |
+
... 5 in _ NN NN _ 7 nn _ _
|
223 |
+
... 6 my _ NN NN _ 7 nn _ _
|
224 |
+
... 7 pajamas _ NNS NNS _ 3 dobj _ _
|
225 |
+
... """
|
226 |
+
>>> dg = DependencyGraph(dg_str, top_relation_label='null')
|
227 |
+
|
228 |
+
>>> len(dg.nodes)
|
229 |
+
8
|
230 |
+
|
231 |
+
>>> dg.root['word'], dg.root['address']
|
232 |
+
('shot', 2)
|
233 |
+
|
234 |
+
>>> print(dg.to_conll(10))
|
235 |
+
1 I _ NN NN _ 2 nn _ _
|
236 |
+
2 shot _ NN NN _ 0 null _ _
|
237 |
+
3 an _ AT AT _ 2 dep _ _
|
238 |
+
4 elephant _ NN NN _ 7 nn _ _
|
239 |
+
5 in _ NN NN _ 7 nn _ _
|
240 |
+
6 my _ NN NN _ 7 nn _ _
|
241 |
+
7 pajamas _ NNS NNS _ 3 dobj _ _
|
llmeval-env/lib/python3.10/site-packages/nltk/test/discourse.doctest
ADDED
@@ -0,0 +1,552 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
==================
|
5 |
+
Discourse Checking
|
6 |
+
==================
|
7 |
+
|
8 |
+
>>> from nltk import *
|
9 |
+
>>> from nltk.sem import logic
|
10 |
+
>>> logic._counter._value = 0
|
11 |
+
|
12 |
+
Setup
|
13 |
+
=====
|
14 |
+
|
15 |
+
>>> from nltk.test.childes_fixt import setup_module
|
16 |
+
>>> setup_module()
|
17 |
+
|
18 |
+
Introduction
|
19 |
+
============
|
20 |
+
|
21 |
+
The NLTK discourse module makes it possible to test consistency and
|
22 |
+
redundancy of simple discourses, using theorem-proving and
|
23 |
+
model-building from `nltk.inference`.
|
24 |
+
|
25 |
+
The ``DiscourseTester`` constructor takes a list of sentences as a
|
26 |
+
parameter.
|
27 |
+
|
28 |
+
>>> dt = DiscourseTester(['a boxer walks', 'every boxer chases a girl'])
|
29 |
+
|
30 |
+
The ``DiscourseTester`` parses each sentence into a list of logical
|
31 |
+
forms. Once we have created ``DiscourseTester`` object, we can
|
32 |
+
inspect various properties of the discourse. First off, we might want
|
33 |
+
to double-check what sentences are currently stored as the discourse.
|
34 |
+
|
35 |
+
>>> dt.sentences()
|
36 |
+
s0: a boxer walks
|
37 |
+
s1: every boxer chases a girl
|
38 |
+
|
39 |
+
As you will see, each sentence receives an identifier `s`\ :subscript:`i`.
|
40 |
+
We might also want to check what grammar the ``DiscourseTester`` is
|
41 |
+
using (by default, ``book_grammars/discourse.fcfg``):
|
42 |
+
|
43 |
+
>>> dt.grammar()
|
44 |
+
% start S
|
45 |
+
# Grammar Rules
|
46 |
+
S[SEM = <app(?subj,?vp)>] -> NP[NUM=?n,SEM=?subj] VP[NUM=?n,SEM=?vp]
|
47 |
+
NP[NUM=?n,SEM=<app(?det,?nom)> ] -> Det[NUM=?n,SEM=?det] Nom[NUM=?n,SEM=?nom]
|
48 |
+
NP[LOC=?l,NUM=?n,SEM=?np] -> PropN[LOC=?l,NUM=?n,SEM=?np]
|
49 |
+
...
|
50 |
+
|
51 |
+
A different grammar can be invoked by using the optional ``gramfile``
|
52 |
+
parameter when a ``DiscourseTester`` object is created.
|
53 |
+
|
54 |
+
Readings and Threads
|
55 |
+
====================
|
56 |
+
|
57 |
+
Depending on
|
58 |
+
the grammar used, we may find some sentences have more than one
|
59 |
+
logical form. To check this, use the ``readings()`` method. Given a
|
60 |
+
sentence identifier of the form `s`\ :subscript:`i`, each reading of
|
61 |
+
that sentence is given an identifier `s`\ :sub:`i`-`r`\ :sub:`j`.
|
62 |
+
|
63 |
+
|
64 |
+
>>> dt.readings()
|
65 |
+
<BLANKLINE>
|
66 |
+
s0 readings:
|
67 |
+
<BLANKLINE>
|
68 |
+
s0-r0: exists z1.(boxer(z1) & walk(z1))
|
69 |
+
s0-r1: exists z1.(boxerdog(z1) & walk(z1))
|
70 |
+
<BLANKLINE>
|
71 |
+
s1 readings:
|
72 |
+
<BLANKLINE>
|
73 |
+
s1-r0: all z2.(boxer(z2) -> exists z3.(girl(z3) & chase(z2,z3)))
|
74 |
+
s1-r1: all z1.(boxerdog(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
|
75 |
+
|
76 |
+
|
77 |
+
In this case, the only source of ambiguity lies in the word *boxer*,
|
78 |
+
which receives two translations: ``boxer`` and ``boxerdog``. The
|
79 |
+
intention is that one of these corresponds to the ``person`` sense and
|
80 |
+
one to the ``dog`` sense. In principle, we would also expect to see a
|
81 |
+
quantifier scope ambiguity in ``s1``. However, the simple grammar we
|
82 |
+
are using, namely `sem4.fcfg <sem4.fcfg>`_, doesn't support quantifier
|
83 |
+
scope ambiguity.
|
84 |
+
|
85 |
+
We can also investigate the readings of a specific sentence:
|
86 |
+
|
87 |
+
>>> dt.readings('a boxer walks')
|
88 |
+
The sentence 'a boxer walks' has these readings:
|
89 |
+
exists x.(boxer(x) & walk(x))
|
90 |
+
exists x.(boxerdog(x) & walk(x))
|
91 |
+
|
92 |
+
Given that each sentence is two-ways ambiguous, we potentially have
|
93 |
+
four different discourse 'threads', taking all combinations of
|
94 |
+
readings. To see these, specify the ``threaded=True`` parameter on
|
95 |
+
the ``readings()`` method. Again, each thread is assigned an
|
96 |
+
identifier of the form `d`\ :sub:`i`. Following the identifier is a
|
97 |
+
list of the readings that constitute that thread.
|
98 |
+
|
99 |
+
>>> dt.readings(threaded=True)
|
100 |
+
d0: ['s0-r0', 's1-r0']
|
101 |
+
d1: ['s0-r0', 's1-r1']
|
102 |
+
d2: ['s0-r1', 's1-r0']
|
103 |
+
d3: ['s0-r1', 's1-r1']
|
104 |
+
|
105 |
+
Of course, this simple-minded approach doesn't scale: a discourse with, say, three
|
106 |
+
sentences, each of which has 3 readings, will generate 27 different
|
107 |
+
threads. It is an interesting exercise to consider how to manage
|
108 |
+
discourse ambiguity more efficiently.
|
109 |
+
|
110 |
+
Checking Consistency
|
111 |
+
====================
|
112 |
+
|
113 |
+
Now, we can check whether some or all of the discourse threads are
|
114 |
+
consistent, using the ``models()`` method. With no parameter, this
|
115 |
+
method will try to find a model for every discourse thread in the
|
116 |
+
current discourse. However, we can also specify just one thread, say ``d1``.
|
117 |
+
|
118 |
+
>>> dt.models('d1')
|
119 |
+
--------------------------------------------------------------------------------
|
120 |
+
Model for Discourse Thread d1
|
121 |
+
--------------------------------------------------------------------------------
|
122 |
+
% number = 1
|
123 |
+
% seconds = 0
|
124 |
+
<BLANKLINE>
|
125 |
+
% Interpretation of size 2
|
126 |
+
<BLANKLINE>
|
127 |
+
c1 = 0.
|
128 |
+
<BLANKLINE>
|
129 |
+
f1(0) = 0.
|
130 |
+
f1(1) = 0.
|
131 |
+
<BLANKLINE>
|
132 |
+
boxer(0).
|
133 |
+
- boxer(1).
|
134 |
+
<BLANKLINE>
|
135 |
+
- boxerdog(0).
|
136 |
+
- boxerdog(1).
|
137 |
+
<BLANKLINE>
|
138 |
+
- girl(0).
|
139 |
+
- girl(1).
|
140 |
+
<BLANKLINE>
|
141 |
+
walk(0).
|
142 |
+
- walk(1).
|
143 |
+
<BLANKLINE>
|
144 |
+
- chase(0,0).
|
145 |
+
- chase(0,1).
|
146 |
+
- chase(1,0).
|
147 |
+
- chase(1,1).
|
148 |
+
<BLANKLINE>
|
149 |
+
Consistent discourse: d1 ['s0-r0', 's1-r1']:
|
150 |
+
s0-r0: exists z1.(boxer(z1) & walk(z1))
|
151 |
+
s1-r1: all z1.(boxerdog(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
|
152 |
+
<BLANKLINE>
|
153 |
+
|
154 |
+
There are various formats for rendering **Mace4** models --- here,
|
155 |
+
we have used the 'cooked' format (which is intended to be
|
156 |
+
human-readable). There are a number of points to note.
|
157 |
+
|
158 |
+
#. The entities in the domain are all treated as non-negative
|
159 |
+
integers. In this case, there are only two entities, ``0`` and
|
160 |
+
``1``.
|
161 |
+
|
162 |
+
#. The ``-`` symbol indicates negation. So ``0`` is the only
|
163 |
+
``boxerdog`` and the only thing that ``walk``\ s. Nothing is a
|
164 |
+
``boxer``, or a ``girl`` or in the ``chase`` relation. Thus the
|
165 |
+
universal sentence is vacuously true.
|
166 |
+
|
167 |
+
#. ``c1`` is an introduced constant that denotes ``0``.
|
168 |
+
|
169 |
+
#. ``f1`` is a Skolem function, but it plays no significant role in
|
170 |
+
this model.
|
171 |
+
|
172 |
+
|
173 |
+
We might want to now add another sentence to the discourse, and there
|
174 |
+
is a method ``add_sentence()`` for doing just this.
|
175 |
+
|
176 |
+
>>> dt.add_sentence('John is a boxer')
|
177 |
+
>>> dt.sentences()
|
178 |
+
s0: a boxer walks
|
179 |
+
s1: every boxer chases a girl
|
180 |
+
s2: John is a boxer
|
181 |
+
|
182 |
+
We can now test all the properties as before; here, we just show a
|
183 |
+
couple of them.
|
184 |
+
|
185 |
+
>>> dt.readings()
|
186 |
+
<BLANKLINE>
|
187 |
+
s0 readings:
|
188 |
+
<BLANKLINE>
|
189 |
+
s0-r0: exists z1.(boxer(z1) & walk(z1))
|
190 |
+
s0-r1: exists z1.(boxerdog(z1) & walk(z1))
|
191 |
+
<BLANKLINE>
|
192 |
+
s1 readings:
|
193 |
+
<BLANKLINE>
|
194 |
+
s1-r0: all z1.(boxer(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
|
195 |
+
s1-r1: all z1.(boxerdog(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
|
196 |
+
<BLANKLINE>
|
197 |
+
s2 readings:
|
198 |
+
<BLANKLINE>
|
199 |
+
s2-r0: boxer(John)
|
200 |
+
s2-r1: boxerdog(John)
|
201 |
+
>>> dt.readings(threaded=True)
|
202 |
+
d0: ['s0-r0', 's1-r0', 's2-r0']
|
203 |
+
d1: ['s0-r0', 's1-r0', 's2-r1']
|
204 |
+
d2: ['s0-r0', 's1-r1', 's2-r0']
|
205 |
+
d3: ['s0-r0', 's1-r1', 's2-r1']
|
206 |
+
d4: ['s0-r1', 's1-r0', 's2-r0']
|
207 |
+
d5: ['s0-r1', 's1-r0', 's2-r1']
|
208 |
+
d6: ['s0-r1', 's1-r1', 's2-r0']
|
209 |
+
d7: ['s0-r1', 's1-r1', 's2-r1']
|
210 |
+
|
211 |
+
If you are interested in a particular thread, the ``expand_threads()``
|
212 |
+
method will remind you of what readings it consists of:
|
213 |
+
|
214 |
+
>>> thread = dt.expand_threads('d1')
|
215 |
+
>>> for rid, reading in thread:
|
216 |
+
... print(rid, str(reading.normalize()))
|
217 |
+
s0-r0 exists z1.(boxer(z1) & walk(z1))
|
218 |
+
s1-r0 all z1.(boxer(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
|
219 |
+
s2-r1 boxerdog(John)
|
220 |
+
|
221 |
+
Suppose we have already defined a discourse, as follows:
|
222 |
+
|
223 |
+
>>> dt = DiscourseTester(['A student dances', 'Every student is a person'])
|
224 |
+
|
225 |
+
Now, when we add a new sentence, is it consistent with what we already
|
226 |
+
have? The ``consistchk=True`` parameter of ``add_sentence()`` allows
|
227 |
+
us to check:
|
228 |
+
|
229 |
+
>>> dt.add_sentence('No person dances', consistchk=True)
|
230 |
+
Inconsistent discourse: d0 ['s0-r0', 's1-r0', 's2-r0']:
|
231 |
+
s0-r0: exists z1.(student(z1) & dance(z1))
|
232 |
+
s1-r0: all z1.(student(z1) -> person(z1))
|
233 |
+
s2-r0: -exists z1.(person(z1) & dance(z1))
|
234 |
+
<BLANKLINE>
|
235 |
+
>>> dt.readings()
|
236 |
+
<BLANKLINE>
|
237 |
+
s0 readings:
|
238 |
+
<BLANKLINE>
|
239 |
+
s0-r0: exists z1.(student(z1) & dance(z1))
|
240 |
+
<BLANKLINE>
|
241 |
+
s1 readings:
|
242 |
+
<BLANKLINE>
|
243 |
+
s1-r0: all z1.(student(z1) -> person(z1))
|
244 |
+
<BLANKLINE>
|
245 |
+
s2 readings:
|
246 |
+
<BLANKLINE>
|
247 |
+
s2-r0: -exists z1.(person(z1) & dance(z1))
|
248 |
+
|
249 |
+
So let's retract the inconsistent sentence:
|
250 |
+
|
251 |
+
>>> dt.retract_sentence('No person dances', verbose=True)
|
252 |
+
Current sentences are
|
253 |
+
s0: A student dances
|
254 |
+
s1: Every student is a person
|
255 |
+
|
256 |
+
We can now verify that the result is consistent.
|
257 |
+
|
258 |
+
>>> dt.models()
|
259 |
+
--------------------------------------------------------------------------------
|
260 |
+
Model for Discourse Thread d0
|
261 |
+
--------------------------------------------------------------------------------
|
262 |
+
% number = 1
|
263 |
+
% seconds = 0
|
264 |
+
<BLANKLINE>
|
265 |
+
% Interpretation of size 2
|
266 |
+
<BLANKLINE>
|
267 |
+
c1 = 0.
|
268 |
+
<BLANKLINE>
|
269 |
+
dance(0).
|
270 |
+
- dance(1).
|
271 |
+
<BLANKLINE>
|
272 |
+
person(0).
|
273 |
+
- person(1).
|
274 |
+
<BLANKLINE>
|
275 |
+
student(0).
|
276 |
+
- student(1).
|
277 |
+
<BLANKLINE>
|
278 |
+
Consistent discourse: d0 ['s0-r0', 's1-r0']:
|
279 |
+
s0-r0: exists z1.(student(z1) & dance(z1))
|
280 |
+
s1-r0: all z1.(student(z1) -> person(z1))
|
281 |
+
<BLANKLINE>
|
282 |
+
|
283 |
+
Checking Informativity
|
284 |
+
======================
|
285 |
+
|
286 |
+
Let's assume that we are still trying to extend the discourse *A
|
287 |
+
student dances.* *Every student is a person.* We add a new sentence,
|
288 |
+
but this time, we check whether it is informative with respect to what
|
289 |
+
has gone before.
|
290 |
+
|
291 |
+
>>> dt.add_sentence('A person dances', informchk=True)
|
292 |
+
Sentence 'A person dances' under reading 'exists x.(person(x) & dance(x))':
|
293 |
+
Not informative relative to thread 'd0'
|
294 |
+
|
295 |
+
In fact, we are just checking whether the new sentence is entailed by
|
296 |
+
the preceding discourse.
|
297 |
+
|
298 |
+
>>> dt.models()
|
299 |
+
--------------------------------------------------------------------------------
|
300 |
+
Model for Discourse Thread d0
|
301 |
+
--------------------------------------------------------------------------------
|
302 |
+
% number = 1
|
303 |
+
% seconds = 0
|
304 |
+
<BLANKLINE>
|
305 |
+
% Interpretation of size 2
|
306 |
+
<BLANKLINE>
|
307 |
+
c1 = 0.
|
308 |
+
<BLANKLINE>
|
309 |
+
c2 = 0.
|
310 |
+
<BLANKLINE>
|
311 |
+
dance(0).
|
312 |
+
- dance(1).
|
313 |
+
<BLANKLINE>
|
314 |
+
person(0).
|
315 |
+
- person(1).
|
316 |
+
<BLANKLINE>
|
317 |
+
student(0).
|
318 |
+
- student(1).
|
319 |
+
<BLANKLINE>
|
320 |
+
Consistent discourse: d0 ['s0-r0', 's1-r0', 's2-r0']:
|
321 |
+
s0-r0: exists z1.(student(z1) & dance(z1))
|
322 |
+
s1-r0: all z1.(student(z1) -> person(z1))
|
323 |
+
s2-r0: exists z1.(person(z1) & dance(z1))
|
324 |
+
<BLANKLINE>
|
325 |
+
|
326 |
+
|
327 |
+
|
328 |
+
Adding Background Knowledge
|
329 |
+
===========================
|
330 |
+
|
331 |
+
Let's build a new discourse, and look at the readings of the component sentences:
|
332 |
+
|
333 |
+
>>> dt = DiscourseTester(['Vincent is a boxer', 'Fido is a boxer', 'Vincent is married', 'Fido barks'])
|
334 |
+
>>> dt.readings()
|
335 |
+
<BLANKLINE>
|
336 |
+
s0 readings:
|
337 |
+
<BLANKLINE>
|
338 |
+
s0-r0: boxer(Vincent)
|
339 |
+
s0-r1: boxerdog(Vincent)
|
340 |
+
<BLANKLINE>
|
341 |
+
s1 readings:
|
342 |
+
<BLANKLINE>
|
343 |
+
s1-r0: boxer(Fido)
|
344 |
+
s1-r1: boxerdog(Fido)
|
345 |
+
<BLANKLINE>
|
346 |
+
s2 readings:
|
347 |
+
<BLANKLINE>
|
348 |
+
s2-r0: married(Vincent)
|
349 |
+
<BLANKLINE>
|
350 |
+
s3 readings:
|
351 |
+
<BLANKLINE>
|
352 |
+
s3-r0: bark(Fido)
|
353 |
+
|
354 |
+
This gives us a lot of threads:
|
355 |
+
|
356 |
+
>>> dt.readings(threaded=True)
|
357 |
+
d0: ['s0-r0', 's1-r0', 's2-r0', 's3-r0']
|
358 |
+
d1: ['s0-r0', 's1-r1', 's2-r0', 's3-r0']
|
359 |
+
d2: ['s0-r1', 's1-r0', 's2-r0', 's3-r0']
|
360 |
+
d3: ['s0-r1', 's1-r1', 's2-r0', 's3-r0']
|
361 |
+
|
362 |
+
|
363 |
+
We can eliminate some of the readings, and hence some of the threads,
|
364 |
+
by adding background information.
|
365 |
+
|
366 |
+
>>> import nltk.data
|
367 |
+
>>> bg = nltk.data.load('grammars/book_grammars/background.fol')
|
368 |
+
>>> dt.add_background(bg)
|
369 |
+
>>> dt.background()
|
370 |
+
all x.(boxerdog(x) -> dog(x))
|
371 |
+
all x.(boxer(x) -> person(x))
|
372 |
+
all x.-(dog(x) & person(x))
|
373 |
+
all x.(married(x) <-> exists y.marry(x,y))
|
374 |
+
all x.(bark(x) -> dog(x))
|
375 |
+
all x y.(marry(x,y) -> (person(x) & person(y)))
|
376 |
+
-(Vincent = Mia)
|
377 |
+
-(Vincent = Fido)
|
378 |
+
-(Mia = Fido)
|
379 |
+
|
380 |
+
The background information allows us to reject three of the threads as
|
381 |
+
inconsistent. To see what remains, use the ``filter=True`` parameter
|
382 |
+
on ``readings()``.
|
383 |
+
|
384 |
+
>>> dt.readings(filter=True)
|
385 |
+
d1: ['s0-r0', 's1-r1', 's2-r0', 's3-r0']
|
386 |
+
|
387 |
+
The ``models()`` method gives us more information about the surviving thread.
|
388 |
+
|
389 |
+
>>> dt.models()
|
390 |
+
--------------------------------------------------------------------------------
|
391 |
+
Model for Discourse Thread d0
|
392 |
+
--------------------------------------------------------------------------------
|
393 |
+
No model found!
|
394 |
+
<BLANKLINE>
|
395 |
+
--------------------------------------------------------------------------------
|
396 |
+
Model for Discourse Thread d1
|
397 |
+
--------------------------------------------------------------------------------
|
398 |
+
% number = 1
|
399 |
+
% seconds = 0
|
400 |
+
<BLANKLINE>
|
401 |
+
% Interpretation of size 3
|
402 |
+
<BLANKLINE>
|
403 |
+
Fido = 0.
|
404 |
+
<BLANKLINE>
|
405 |
+
Mia = 1.
|
406 |
+
<BLANKLINE>
|
407 |
+
Vincent = 2.
|
408 |
+
<BLANKLINE>
|
409 |
+
f1(0) = 0.
|
410 |
+
f1(1) = 0.
|
411 |
+
f1(2) = 2.
|
412 |
+
<BLANKLINE>
|
413 |
+
bark(0).
|
414 |
+
- bark(1).
|
415 |
+
- bark(2).
|
416 |
+
<BLANKLINE>
|
417 |
+
- boxer(0).
|
418 |
+
- boxer(1).
|
419 |
+
boxer(2).
|
420 |
+
<BLANKLINE>
|
421 |
+
boxerdog(0).
|
422 |
+
- boxerdog(1).
|
423 |
+
- boxerdog(2).
|
424 |
+
<BLANKLINE>
|
425 |
+
dog(0).
|
426 |
+
- dog(1).
|
427 |
+
- dog(2).
|
428 |
+
<BLANKLINE>
|
429 |
+
- married(0).
|
430 |
+
- married(1).
|
431 |
+
married(2).
|
432 |
+
<BLANKLINE>
|
433 |
+
- person(0).
|
434 |
+
- person(1).
|
435 |
+
person(2).
|
436 |
+
<BLANKLINE>
|
437 |
+
- marry(0,0).
|
438 |
+
- marry(0,1).
|
439 |
+
- marry(0,2).
|
440 |
+
- marry(1,0).
|
441 |
+
- marry(1,1).
|
442 |
+
- marry(1,2).
|
443 |
+
- marry(2,0).
|
444 |
+
- marry(2,1).
|
445 |
+
marry(2,2).
|
446 |
+
<BLANKLINE>
|
447 |
+
--------------------------------------------------------------------------------
|
448 |
+
Model for Discourse Thread d2
|
449 |
+
--------------------------------------------------------------------------------
|
450 |
+
No model found!
|
451 |
+
<BLANKLINE>
|
452 |
+
--------------------------------------------------------------------------------
|
453 |
+
Model for Discourse Thread d3
|
454 |
+
--------------------------------------------------------------------------------
|
455 |
+
No model found!
|
456 |
+
<BLANKLINE>
|
457 |
+
Inconsistent discourse: d0 ['s0-r0', 's1-r0', 's2-r0', 's3-r0']:
|
458 |
+
s0-r0: boxer(Vincent)
|
459 |
+
s1-r0: boxer(Fido)
|
460 |
+
s2-r0: married(Vincent)
|
461 |
+
s3-r0: bark(Fido)
|
462 |
+
<BLANKLINE>
|
463 |
+
Consistent discourse: d1 ['s0-r0', 's1-r1', 's2-r0', 's3-r0']:
|
464 |
+
s0-r0: boxer(Vincent)
|
465 |
+
s1-r1: boxerdog(Fido)
|
466 |
+
s2-r0: married(Vincent)
|
467 |
+
s3-r0: bark(Fido)
|
468 |
+
<BLANKLINE>
|
469 |
+
Inconsistent discourse: d2 ['s0-r1', 's1-r0', 's2-r0', 's3-r0']:
|
470 |
+
s0-r1: boxerdog(Vincent)
|
471 |
+
s1-r0: boxer(Fido)
|
472 |
+
s2-r0: married(Vincent)
|
473 |
+
s3-r0: bark(Fido)
|
474 |
+
<BLANKLINE>
|
475 |
+
Inconsistent discourse: d3 ['s0-r1', 's1-r1', 's2-r0', 's3-r0']:
|
476 |
+
s0-r1: boxerdog(Vincent)
|
477 |
+
s1-r1: boxerdog(Fido)
|
478 |
+
s2-r0: married(Vincent)
|
479 |
+
s3-r0: bark(Fido)
|
480 |
+
<BLANKLINE>
|
481 |
+
|
482 |
+
|
483 |
+
.. This will not be visible in the html output: create a tempdir to
|
484 |
+
play in.
|
485 |
+
>>> import tempfile, os
|
486 |
+
>>> tempdir = tempfile.mkdtemp()
|
487 |
+
>>> old_dir = os.path.abspath('.')
|
488 |
+
>>> os.chdir(tempdir)
|
489 |
+
|
490 |
+
In order to play around with your own version of background knowledge,
|
491 |
+
you might want to start off with a local copy of ``background.fol``:
|
492 |
+
|
493 |
+
>>> nltk.data.retrieve('grammars/book_grammars/background.fol')
|
494 |
+
Retrieving 'nltk:grammars/book_grammars/background.fol', saving to 'background.fol'
|
495 |
+
|
496 |
+
After you have modified the file, the ``load_fol()`` function will parse
|
497 |
+
the strings in the file into expressions of ``nltk.sem.logic``.
|
498 |
+
|
499 |
+
>>> from nltk.inference.discourse import load_fol
|
500 |
+
>>> mybg = load_fol(open('background.fol').read())
|
501 |
+
|
502 |
+
The result can be loaded as an argument of ``add_background()`` in the
|
503 |
+
manner shown earlier.
|
504 |
+
|
505 |
+
.. This will not be visible in the html output: clean up the tempdir.
|
506 |
+
>>> os.chdir(old_dir)
|
507 |
+
>>> for f in os.listdir(tempdir):
|
508 |
+
... os.remove(os.path.join(tempdir, f))
|
509 |
+
>>> os.rmdir(tempdir)
|
510 |
+
>>> nltk.data.clear_cache()
|
511 |
+
|
512 |
+
|
513 |
+
Regression Testing from book
|
514 |
+
============================
|
515 |
+
|
516 |
+
>>> logic._counter._value = 0
|
517 |
+
|
518 |
+
>>> from nltk.tag import RegexpTagger
|
519 |
+
>>> tagger = RegexpTagger(
|
520 |
+
... [('^(chases|runs)$', 'VB'),
|
521 |
+
... ('^(a)$', 'ex_quant'),
|
522 |
+
... ('^(every)$', 'univ_quant'),
|
523 |
+
... ('^(dog|boy)$', 'NN'),
|
524 |
+
... ('^(He)$', 'PRP')
|
525 |
+
... ])
|
526 |
+
>>> rc = DrtGlueReadingCommand(depparser=MaltParser(tagger=tagger))
|
527 |
+
>>> dt = DiscourseTester(map(str.split, ['Every dog chases a boy', 'He runs']), rc)
|
528 |
+
>>> dt.readings()
|
529 |
+
<BLANKLINE>
|
530 |
+
s0 readings:
|
531 |
+
<BLANKLINE>
|
532 |
+
s0-r0: ([z2],[boy(z2), (([z5],[dog(z5)]) -> ([],[chases(z5,z2)]))])
|
533 |
+
s0-r1: ([],[(([z1],[dog(z1)]) -> ([z2],[boy(z2), chases(z1,z2)]))])
|
534 |
+
<BLANKLINE>
|
535 |
+
s1 readings:
|
536 |
+
<BLANKLINE>
|
537 |
+
s1-r0: ([z1],[PRO(z1), runs(z1)])
|
538 |
+
>>> dt.readings(show_thread_readings=True)
|
539 |
+
d0: ['s0-r0', 's1-r0'] : ([z1,z2],[boy(z1), (([z3],[dog(z3)]) -> ([],[chases(z3,z1)])), (z2 = z1), runs(z2)])
|
540 |
+
d1: ['s0-r1', 's1-r0'] : INVALID: AnaphoraResolutionException
|
541 |
+
>>> dt.readings(filter=True, show_thread_readings=True)
|
542 |
+
d0: ['s0-r0', 's1-r0'] : ([z1,z3],[boy(z1), (([z2],[dog(z2)]) -> ([],[chases(z2,z1)])), (z3 = z1), runs(z3)])
|
543 |
+
|
544 |
+
>>> logic._counter._value = 0
|
545 |
+
|
546 |
+
>>> from nltk.parse import FeatureEarleyChartParser
|
547 |
+
>>> from nltk.sem.drt import DrtParser
|
548 |
+
>>> grammar = nltk.data.load('grammars/book_grammars/drt.fcfg', logic_parser=DrtParser())
|
549 |
+
>>> parser = FeatureEarleyChartParser(grammar, trace=0)
|
550 |
+
>>> trees = parser.parse('Angus owns a dog'.split())
|
551 |
+
>>> print(list(trees)[0].label()['SEM'].simplify().normalize())
|
552 |
+
([z1,z2],[Angus(z1), dog(z2), own(z1,z2)])
|
llmeval-env/lib/python3.10/site-packages/nltk/test/featgram.doctest
ADDED
@@ -0,0 +1,610 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
=========================
|
5 |
+
Feature Grammar Parsing
|
6 |
+
=========================
|
7 |
+
|
8 |
+
.. definitions from nltk_book/definitions.rst
|
9 |
+
|
10 |
+
.. role:: feat
|
11 |
+
:class: feature
|
12 |
+
.. role:: fval
|
13 |
+
:class: fval
|
14 |
+
.. |rarr| unicode:: U+2192 .. right arrow
|
15 |
+
.. |dot| unicode:: U+2022 .. bullet
|
16 |
+
.. |pi| unicode:: U+03C0
|
17 |
+
|
18 |
+
Grammars can be parsed from strings.
|
19 |
+
|
20 |
+
>>> import nltk
|
21 |
+
>>> from nltk import grammar, parse
|
22 |
+
>>> g = """
|
23 |
+
... % start DP
|
24 |
+
... DP[AGR=?a] -> D[AGR=?a] N[AGR=?a]
|
25 |
+
... D[AGR=[NUM='sg', PERS=3]] -> 'this' | 'that'
|
26 |
+
... D[AGR=[NUM='pl', PERS=3]] -> 'these' | 'those'
|
27 |
+
... D[AGR=[NUM='pl', PERS=1]] -> 'we'
|
28 |
+
... D[AGR=[PERS=2]] -> 'you'
|
29 |
+
... N[AGR=[NUM='sg', GND='m']] -> 'boy'
|
30 |
+
... N[AGR=[NUM='pl', GND='m']] -> 'boys'
|
31 |
+
... N[AGR=[NUM='sg', GND='f']] -> 'girl'
|
32 |
+
... N[AGR=[NUM='pl', GND='f']] -> 'girls'
|
33 |
+
... N[AGR=[NUM='sg']] -> 'student'
|
34 |
+
... N[AGR=[NUM='pl']] -> 'students'
|
35 |
+
... """
|
36 |
+
>>> grammar = grammar.FeatureGrammar.fromstring(g)
|
37 |
+
>>> tokens = 'these girls'.split()
|
38 |
+
>>> parser = parse.FeatureEarleyChartParser(grammar)
|
39 |
+
>>> trees = parser.parse(tokens)
|
40 |
+
>>> for tree in trees: print(tree)
|
41 |
+
(DP[AGR=[GND='f', NUM='pl', PERS=3]]
|
42 |
+
(D[AGR=[NUM='pl', PERS=3]] these)
|
43 |
+
(N[AGR=[GND='f', NUM='pl']] girls))
|
44 |
+
|
45 |
+
In general, when we are trying to develop even a very small grammar,
|
46 |
+
it is convenient to put the rules in a file where they can be edited,
|
47 |
+
tested and revised. Let's assume that we have saved feat0cfg as a file named
|
48 |
+
``'feat0.fcfg'`` and placed it in the NLTK ``data`` directory. We can
|
49 |
+
inspect it as follows:
|
50 |
+
|
51 |
+
>>> nltk.data.show_cfg('grammars/book_grammars/feat0.fcfg')
|
52 |
+
% start S
|
53 |
+
# ###################
|
54 |
+
# Grammar Productions
|
55 |
+
# ###################
|
56 |
+
# S expansion productions
|
57 |
+
S -> NP[NUM=?n] VP[NUM=?n]
|
58 |
+
# NP expansion productions
|
59 |
+
NP[NUM=?n] -> N[NUM=?n]
|
60 |
+
NP[NUM=?n] -> PropN[NUM=?n]
|
61 |
+
NP[NUM=?n] -> Det[NUM=?n] N[NUM=?n]
|
62 |
+
NP[NUM=pl] -> N[NUM=pl]
|
63 |
+
# VP expansion productions
|
64 |
+
VP[TENSE=?t, NUM=?n] -> IV[TENSE=?t, NUM=?n]
|
65 |
+
VP[TENSE=?t, NUM=?n] -> TV[TENSE=?t, NUM=?n] NP
|
66 |
+
# ###################
|
67 |
+
# Lexical Productions
|
68 |
+
# ###################
|
69 |
+
Det[NUM=sg] -> 'this' | 'every'
|
70 |
+
Det[NUM=pl] -> 'these' | 'all'
|
71 |
+
Det -> 'the' | 'some' | 'several'
|
72 |
+
PropN[NUM=sg]-> 'Kim' | 'Jody'
|
73 |
+
N[NUM=sg] -> 'dog' | 'girl' | 'car' | 'child'
|
74 |
+
N[NUM=pl] -> 'dogs' | 'girls' | 'cars' | 'children'
|
75 |
+
IV[TENSE=pres, NUM=sg] -> 'disappears' | 'walks'
|
76 |
+
TV[TENSE=pres, NUM=sg] -> 'sees' | 'likes'
|
77 |
+
IV[TENSE=pres, NUM=pl] -> 'disappear' | 'walk'
|
78 |
+
TV[TENSE=pres, NUM=pl] -> 'see' | 'like'
|
79 |
+
IV[TENSE=past] -> 'disappeared' | 'walked'
|
80 |
+
TV[TENSE=past] -> 'saw' | 'liked'
|
81 |
+
|
82 |
+
Assuming we have saved feat0cfg as a file named
|
83 |
+
``'feat0.fcfg'``, the function ``parse.load_parser`` allows us to
|
84 |
+
read the grammar into NLTK, ready for use in parsing.
|
85 |
+
|
86 |
+
|
87 |
+
>>> cp = parse.load_parser('grammars/book_grammars/feat0.fcfg', trace=1)
|
88 |
+
>>> sent = 'Kim likes children'
|
89 |
+
>>> tokens = sent.split()
|
90 |
+
>>> tokens
|
91 |
+
['Kim', 'likes', 'children']
|
92 |
+
>>> trees = cp.parse(tokens)
|
93 |
+
|.Kim .like.chil.|
|
94 |
+
|[----] . .| [0:1] 'Kim'
|
95 |
+
|. [----] .| [1:2] 'likes'
|
96 |
+
|. . [----]| [2:3] 'children'
|
97 |
+
|[----] . .| [0:1] PropN[NUM='sg'] -> 'Kim' *
|
98 |
+
|[----] . .| [0:1] NP[NUM='sg'] -> PropN[NUM='sg'] *
|
99 |
+
|[----> . .| [0:1] S[] -> NP[NUM=?n] * VP[NUM=?n] {?n: 'sg'}
|
100 |
+
|. [----] .| [1:2] TV[NUM='sg', TENSE='pres'] -> 'likes' *
|
101 |
+
|. [----> .| [1:2] VP[NUM=?n, TENSE=?t] -> TV[NUM=?n, TENSE=?t] * NP[] {?n: 'sg', ?t: 'pres'}
|
102 |
+
|. . [----]| [2:3] N[NUM='pl'] -> 'children' *
|
103 |
+
|. . [----]| [2:3] NP[NUM='pl'] -> N[NUM='pl'] *
|
104 |
+
|. . [---->| [2:3] S[] -> NP[NUM=?n] * VP[NUM=?n] {?n: 'pl'}
|
105 |
+
|. [---------]| [1:3] VP[NUM='sg', TENSE='pres'] -> TV[NUM='sg', TENSE='pres'] NP[] *
|
106 |
+
|[==============]| [0:3] S[] -> NP[NUM='sg'] VP[NUM='sg'] *
|
107 |
+
>>> for tree in trees: print(tree)
|
108 |
+
(S[]
|
109 |
+
(NP[NUM='sg'] (PropN[NUM='sg'] Kim))
|
110 |
+
(VP[NUM='sg', TENSE='pres']
|
111 |
+
(TV[NUM='sg', TENSE='pres'] likes)
|
112 |
+
(NP[NUM='pl'] (N[NUM='pl'] children))))
|
113 |
+
|
114 |
+
The parser works directly with
|
115 |
+
the underspecified productions given by the grammar. That is, the
|
116 |
+
Predictor rule does not attempt to compile out all admissible feature
|
117 |
+
combinations before trying to expand the non-terminals on the left hand
|
118 |
+
side of a production. However, when the Scanner matches an input word
|
119 |
+
against a lexical production that has been predicted, the new edge will
|
120 |
+
typically contain fully specified features; e.g., the edge
|
121 |
+
[PropN[`num`:feat: = `sg`:fval:] |rarr| 'Kim', (0, 1)]. Recall from
|
122 |
+
Chapter 8 that the Fundamental (or Completer) Rule in
|
123 |
+
standard CFGs is used to combine an incomplete edge that's expecting a
|
124 |
+
nonterminal *B* with a following, complete edge whose left hand side
|
125 |
+
matches *B*. In our current setting, rather than checking for a
|
126 |
+
complete match, we test whether the expected category *B* will
|
127 |
+
unify with the left hand side *B'* of a following complete
|
128 |
+
edge. We will explain in more detail in Section 9.2 how
|
129 |
+
unification works; for the moment, it is enough to know that as a
|
130 |
+
result of unification, any variable values of features in *B* will be
|
131 |
+
instantiated by constant values in the corresponding feature structure
|
132 |
+
in *B'*, and these instantiated values will be used in the new edge
|
133 |
+
added by the Completer. This instantiation can be seen, for example,
|
134 |
+
in the edge
|
135 |
+
[NP [`num`:feat:\ =\ `sg`:fval:] |rarr| PropN[`num`:feat:\ =\ `sg`:fval:] |dot|, (0, 1)]
|
136 |
+
in Example 9.2, where the feature `num`:feat: has been assigned the value `sg`:fval:.
|
137 |
+
|
138 |
+
Feature structures in NLTK are ... Atomic feature values can be strings or
|
139 |
+
integers.
|
140 |
+
|
141 |
+
>>> fs1 = nltk.FeatStruct(TENSE='past', NUM='sg')
|
142 |
+
>>> print(fs1)
|
143 |
+
[ NUM = 'sg' ]
|
144 |
+
[ TENSE = 'past' ]
|
145 |
+
|
146 |
+
We can think of a feature structure as being like a Python dictionary,
|
147 |
+
and access its values by indexing in the usual way.
|
148 |
+
|
149 |
+
>>> fs1 = nltk.FeatStruct(PER=3, NUM='pl', GND='fem')
|
150 |
+
>>> print(fs1['GND'])
|
151 |
+
fem
|
152 |
+
|
153 |
+
We can also define feature structures which have complex values, as
|
154 |
+
discussed earlier.
|
155 |
+
|
156 |
+
>>> fs2 = nltk.FeatStruct(POS='N', AGR=fs1)
|
157 |
+
>>> print(fs2)
|
158 |
+
[ [ GND = 'fem' ] ]
|
159 |
+
[ AGR = [ NUM = 'pl' ] ]
|
160 |
+
[ [ PER = 3 ] ]
|
161 |
+
[ ]
|
162 |
+
[ POS = 'N' ]
|
163 |
+
>>> print(fs2['AGR'])
|
164 |
+
[ GND = 'fem' ]
|
165 |
+
[ NUM = 'pl' ]
|
166 |
+
[ PER = 3 ]
|
167 |
+
>>> print(fs2['AGR']['PER'])
|
168 |
+
3
|
169 |
+
|
170 |
+
Feature structures can also be constructed using the ``parse()``
|
171 |
+
method of the ``nltk.FeatStruct`` class. Note that in this case, atomic
|
172 |
+
feature values do not need to be enclosed in quotes.
|
173 |
+
|
174 |
+
>>> f1 = nltk.FeatStruct("[NUMBER = sg]")
|
175 |
+
>>> f2 = nltk.FeatStruct("[PERSON = 3]")
|
176 |
+
>>> print(nltk.unify(f1, f2))
|
177 |
+
[ NUMBER = 'sg' ]
|
178 |
+
[ PERSON = 3 ]
|
179 |
+
|
180 |
+
>>> f1 = nltk.FeatStruct("[A = [B = b, D = d]]")
|
181 |
+
>>> f2 = nltk.FeatStruct("[A = [C = c, D = d]]")
|
182 |
+
>>> print(nltk.unify(f1, f2))
|
183 |
+
[ [ B = 'b' ] ]
|
184 |
+
[ A = [ C = 'c' ] ]
|
185 |
+
[ [ D = 'd' ] ]
|
186 |
+
|
187 |
+
|
188 |
+
Feature Structures as Graphs
|
189 |
+
----------------------------
|
190 |
+
|
191 |
+
Feature structures are not inherently tied to linguistic objects; they are
|
192 |
+
general purpose structures for representing knowledge. For example, we
|
193 |
+
could encode information about a person in a feature structure:
|
194 |
+
|
195 |
+
>>> person01 = nltk.FeatStruct("[NAME=Lee, TELNO='01 27 86 42 96',AGE=33]")
|
196 |
+
>>> print(person01)
|
197 |
+
[ AGE = 33 ]
|
198 |
+
[ NAME = 'Lee' ]
|
199 |
+
[ TELNO = '01 27 86 42 96' ]
|
200 |
+
|
201 |
+
There are a number of notations for representing reentrancy in
|
202 |
+
matrix-style representations of feature structures. In NLTK, we adopt
|
203 |
+
the following convention: the first occurrence of a shared feature structure
|
204 |
+
is prefixed with an integer in parentheses, such as ``(1)``, and any
|
205 |
+
subsequent reference to that structure uses the notation
|
206 |
+
``->(1)``, as shown below.
|
207 |
+
|
208 |
+
|
209 |
+
>>> fs = nltk.FeatStruct("""[NAME=Lee, ADDRESS=(1)[NUMBER=74, STREET='rue Pascal'],
|
210 |
+
... SPOUSE=[NAME=Kim, ADDRESS->(1)]]""")
|
211 |
+
>>> print(fs)
|
212 |
+
[ ADDRESS = (1) [ NUMBER = 74 ] ]
|
213 |
+
[ [ STREET = 'rue Pascal' ] ]
|
214 |
+
[ ]
|
215 |
+
[ NAME = 'Lee' ]
|
216 |
+
[ ]
|
217 |
+
[ SPOUSE = [ ADDRESS -> (1) ] ]
|
218 |
+
[ [ NAME = 'Kim' ] ]
|
219 |
+
|
220 |
+
There can be any number of tags within a single feature structure.
|
221 |
+
|
222 |
+
>>> fs3 = nltk.FeatStruct("[A=(1)[B=b], C=(2)[], D->(1), E->(2)]")
|
223 |
+
>>> print(fs3)
|
224 |
+
[ A = (1) [ B = 'b' ] ]
|
225 |
+
[ ]
|
226 |
+
[ C = (2) [] ]
|
227 |
+
[ ]
|
228 |
+
[ D -> (1) ]
|
229 |
+
[ E -> (2) ]
|
230 |
+
>>> fs1 = nltk.FeatStruct(NUMBER=74, STREET='rue Pascal')
|
231 |
+
>>> fs2 = nltk.FeatStruct(CITY='Paris')
|
232 |
+
>>> print(nltk.unify(fs1, fs2))
|
233 |
+
[ CITY = 'Paris' ]
|
234 |
+
[ NUMBER = 74 ]
|
235 |
+
[ STREET = 'rue Pascal' ]
|
236 |
+
|
237 |
+
Unification is symmetric:
|
238 |
+
|
239 |
+
>>> nltk.unify(fs1, fs2) == nltk.unify(fs2, fs1)
|
240 |
+
True
|
241 |
+
|
242 |
+
Unification is commutative:
|
243 |
+
|
244 |
+
>>> fs3 = nltk.FeatStruct(TELNO='01 27 86 42 96')
|
245 |
+
>>> nltk.unify(nltk.unify(fs1, fs2), fs3) == nltk.unify(fs1, nltk.unify(fs2, fs3))
|
246 |
+
True
|
247 |
+
|
248 |
+
Unification between *FS*:math:`_0` and *FS*:math:`_1` will fail if the
|
249 |
+
two feature structures share a path |pi|,
|
250 |
+
but the value of |pi| in *FS*:math:`_0` is a distinct
|
251 |
+
atom from the value of |pi| in *FS*:math:`_1`. In NLTK,
|
252 |
+
this is implemented by setting the result of unification to be
|
253 |
+
``None``.
|
254 |
+
|
255 |
+
>>> fs0 = nltk.FeatStruct(A='a')
|
256 |
+
>>> fs1 = nltk.FeatStruct(A='b')
|
257 |
+
>>> print(nltk.unify(fs0, fs1))
|
258 |
+
None
|
259 |
+
|
260 |
+
Now, if we look at how unification interacts with structure-sharing,
|
261 |
+
things become really interesting.
|
262 |
+
|
263 |
+
|
264 |
+
|
265 |
+
>>> fs0 = nltk.FeatStruct("""[NAME=Lee,
|
266 |
+
... ADDRESS=[NUMBER=74,
|
267 |
+
... STREET='rue Pascal'],
|
268 |
+
... SPOUSE= [NAME=Kim,
|
269 |
+
... ADDRESS=[NUMBER=74,
|
270 |
+
... STREET='rue Pascal']]]""")
|
271 |
+
>>> print(fs0)
|
272 |
+
[ ADDRESS = [ NUMBER = 74 ] ]
|
273 |
+
[ [ STREET = 'rue Pascal' ] ]
|
274 |
+
[ ]
|
275 |
+
[ NAME = 'Lee' ]
|
276 |
+
[ ]
|
277 |
+
[ [ ADDRESS = [ NUMBER = 74 ] ] ]
|
278 |
+
[ SPOUSE = [ [ STREET = 'rue Pascal' ] ] ]
|
279 |
+
[ [ ] ]
|
280 |
+
[ [ NAME = 'Kim' ] ]
|
281 |
+
|
282 |
+
|
283 |
+
>>> fs1 = nltk.FeatStruct("[SPOUSE=[ADDRESS=[CITY=Paris]]]")
|
284 |
+
>>> print(nltk.unify(fs0, fs1))
|
285 |
+
[ ADDRESS = [ NUMBER = 74 ] ]
|
286 |
+
[ [ STREET = 'rue Pascal' ] ]
|
287 |
+
[ ]
|
288 |
+
[ NAME = 'Lee' ]
|
289 |
+
[ ]
|
290 |
+
[ [ [ CITY = 'Paris' ] ] ]
|
291 |
+
[ [ ADDRESS = [ NUMBER = 74 ] ] ]
|
292 |
+
[ SPOUSE = [ [ STREET = 'rue Pascal' ] ] ]
|
293 |
+
[ [ ] ]
|
294 |
+
[ [ NAME = 'Kim' ] ]
|
295 |
+
|
296 |
+
>>> fs2 = nltk.FeatStruct("""[NAME=Lee, ADDRESS=(1)[NUMBER=74, STREET='rue Pascal'],
|
297 |
+
... SPOUSE=[NAME=Kim, ADDRESS->(1)]]""")
|
298 |
+
|
299 |
+
|
300 |
+
>>> print(fs2)
|
301 |
+
[ ADDRESS = (1) [ NUMBER = 74 ] ]
|
302 |
+
[ [ STREET = 'rue Pascal' ] ]
|
303 |
+
[ ]
|
304 |
+
[ NAME = 'Lee' ]
|
305 |
+
[ ]
|
306 |
+
[ SPOUSE = [ ADDRESS -> (1) ] ]
|
307 |
+
[ [ NAME = 'Kim' ] ]
|
308 |
+
|
309 |
+
|
310 |
+
>>> print(nltk.unify(fs2, fs1))
|
311 |
+
[ [ CITY = 'Paris' ] ]
|
312 |
+
[ ADDRESS = (1) [ NUMBER = 74 ] ]
|
313 |
+
[ [ STREET = 'rue Pascal' ] ]
|
314 |
+
[ ]
|
315 |
+
[ NAME = 'Lee' ]
|
316 |
+
[ ]
|
317 |
+
[ SPOUSE = [ ADDRESS -> (1) ] ]
|
318 |
+
[ [ NAME = 'Kim' ] ]
|
319 |
+
|
320 |
+
|
321 |
+
>>> fs1 = nltk.FeatStruct("[ADDRESS1=[NUMBER=74, STREET='rue Pascal']]")
|
322 |
+
>>> fs2 = nltk.FeatStruct("[ADDRESS1=?x, ADDRESS2=?x]")
|
323 |
+
>>> print(fs2)
|
324 |
+
[ ADDRESS1 = ?x ]
|
325 |
+
[ ADDRESS2 = ?x ]
|
326 |
+
>>> print(nltk.unify(fs1, fs2))
|
327 |
+
[ ADDRESS1 = (1) [ NUMBER = 74 ] ]
|
328 |
+
[ [ STREET = 'rue Pascal' ] ]
|
329 |
+
[ ]
|
330 |
+
[ ADDRESS2 -> (1) ]
|
331 |
+
|
332 |
+
|
333 |
+
|
334 |
+
|
335 |
+
>>> sent = 'who do you claim that you like'
|
336 |
+
>>> tokens = sent.split()
|
337 |
+
>>> cp = parse.load_parser('grammars/book_grammars/feat1.fcfg', trace=1)
|
338 |
+
>>> trees = cp.parse(tokens)
|
339 |
+
|.w.d.y.c.t.y.l.|
|
340 |
+
|[-] . . . . . .| [0:1] 'who'
|
341 |
+
|. [-] . . . . .| [1:2] 'do'
|
342 |
+
|. . [-] . . . .| [2:3] 'you'
|
343 |
+
|. . . [-] . . .| [3:4] 'claim'
|
344 |
+
|. . . . [-] . .| [4:5] 'that'
|
345 |
+
|. . . . . [-] .| [5:6] 'you'
|
346 |
+
|. . . . . . [-]| [6:7] 'like'
|
347 |
+
|# . . . . . . .| [0:0] NP[]/NP[] -> *
|
348 |
+
|. # . . . . . .| [1:1] NP[]/NP[] -> *
|
349 |
+
|. . # . . . . .| [2:2] NP[]/NP[] -> *
|
350 |
+
|. . . # . . . .| [3:3] NP[]/NP[] -> *
|
351 |
+
|. . . . # . . .| [4:4] NP[]/NP[] -> *
|
352 |
+
|. . . . . # . .| [5:5] NP[]/NP[] -> *
|
353 |
+
|. . . . . . # .| [6:6] NP[]/NP[] -> *
|
354 |
+
|. . . . . . . #| [7:7] NP[]/NP[] -> *
|
355 |
+
|[-] . . . . . .| [0:1] NP[+WH] -> 'who' *
|
356 |
+
|[-> . . . . . .| [0:1] S[-INV] -> NP[] * VP[] {}
|
357 |
+
|[-> . . . . . .| [0:1] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
|
358 |
+
|[-> . . . . . .| [0:1] S[-INV] -> NP[] * S[]/NP[] {}
|
359 |
+
|. [-] . . . . .| [1:2] V[+AUX] -> 'do' *
|
360 |
+
|. [-> . . . . .| [1:2] S[+INV] -> V[+AUX] * NP[] VP[] {}
|
361 |
+
|. [-> . . . . .| [1:2] S[+INV]/?x[] -> V[+AUX] * NP[] VP[]/?x[] {}
|
362 |
+
|. [-> . . . . .| [1:2] VP[] -> V[+AUX] * VP[] {}
|
363 |
+
|. [-> . . . . .| [1:2] VP[]/?x[] -> V[+AUX] * VP[]/?x[] {}
|
364 |
+
|. . [-] . . . .| [2:3] NP[-WH] -> 'you' *
|
365 |
+
|. . [-> . . . .| [2:3] S[-INV] -> NP[] * VP[] {}
|
366 |
+
|. . [-> . . . .| [2:3] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
|
367 |
+
|. . [-> . . . .| [2:3] S[-INV] -> NP[] * S[]/NP[] {}
|
368 |
+
|. [---> . . . .| [1:3] S[+INV] -> V[+AUX] NP[] * VP[] {}
|
369 |
+
|. [---> . . . .| [1:3] S[+INV]/?x[] -> V[+AUX] NP[] * VP[]/?x[] {}
|
370 |
+
|. . . [-] . . .| [3:4] V[-AUX, SUBCAT='clause'] -> 'claim' *
|
371 |
+
|. . . [-> . . .| [3:4] VP[] -> V[-AUX, SUBCAT='clause'] * SBar[] {}
|
372 |
+
|. . . [-> . . .| [3:4] VP[]/?x[] -> V[-AUX, SUBCAT='clause'] * SBar[]/?x[] {}
|
373 |
+
|. . . . [-] . .| [4:5] Comp[] -> 'that' *
|
374 |
+
|. . . . [-> . .| [4:5] SBar[] -> Comp[] * S[-INV] {}
|
375 |
+
|. . . . [-> . .| [4:5] SBar[]/?x[] -> Comp[] * S[-INV]/?x[] {}
|
376 |
+
|. . . . . [-] .| [5:6] NP[-WH] -> 'you' *
|
377 |
+
|. . . . . [-> .| [5:6] S[-INV] -> NP[] * VP[] {}
|
378 |
+
|. . . . . [-> .| [5:6] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
|
379 |
+
|. . . . . [-> .| [5:6] S[-INV] -> NP[] * S[]/NP[] {}
|
380 |
+
|. . . . . . [-]| [6:7] V[-AUX, SUBCAT='trans'] -> 'like' *
|
381 |
+
|. . . . . . [->| [6:7] VP[] -> V[-AUX, SUBCAT='trans'] * NP[] {}
|
382 |
+
|. . . . . . [->| [6:7] VP[]/?x[] -> V[-AUX, SUBCAT='trans'] * NP[]/?x[] {}
|
383 |
+
|. . . . . . [-]| [6:7] VP[]/NP[] -> V[-AUX, SUBCAT='trans'] NP[]/NP[] *
|
384 |
+
|. . . . . [---]| [5:7] S[-INV]/NP[] -> NP[] VP[]/NP[] *
|
385 |
+
|. . . . [-----]| [4:7] SBar[]/NP[] -> Comp[] S[-INV]/NP[] *
|
386 |
+
|. . . [-------]| [3:7] VP[]/NP[] -> V[-AUX, SUBCAT='clause'] SBar[]/NP[] *
|
387 |
+
|. . [---------]| [2:7] S[-INV]/NP[] -> NP[] VP[]/NP[] *
|
388 |
+
|. [-----------]| [1:7] S[+INV]/NP[] -> V[+AUX] NP[] VP[]/NP[] *
|
389 |
+
|[=============]| [0:7] S[-INV] -> NP[] S[]/NP[] *
|
390 |
+
|
391 |
+
>>> trees = list(trees)
|
392 |
+
>>> for tree in trees: print(tree)
|
393 |
+
(S[-INV]
|
394 |
+
(NP[+WH] who)
|
395 |
+
(S[+INV]/NP[]
|
396 |
+
(V[+AUX] do)
|
397 |
+
(NP[-WH] you)
|
398 |
+
(VP[]/NP[]
|
399 |
+
(V[-AUX, SUBCAT='clause'] claim)
|
400 |
+
(SBar[]/NP[]
|
401 |
+
(Comp[] that)
|
402 |
+
(S[-INV]/NP[]
|
403 |
+
(NP[-WH] you)
|
404 |
+
(VP[]/NP[] (V[-AUX, SUBCAT='trans'] like) (NP[]/NP[] )))))))
|
405 |
+
|
406 |
+
A different parser should give the same parse trees, but perhaps in a different order:
|
407 |
+
|
408 |
+
>>> cp2 = parse.load_parser('grammars/book_grammars/feat1.fcfg', trace=1,
|
409 |
+
... parser=parse.FeatureEarleyChartParser)
|
410 |
+
>>> trees2 = cp2.parse(tokens)
|
411 |
+
|.w.d.y.c.t.y.l.|
|
412 |
+
|[-] . . . . . .| [0:1] 'who'
|
413 |
+
|. [-] . . . . .| [1:2] 'do'
|
414 |
+
|. . [-] . . . .| [2:3] 'you'
|
415 |
+
|. . . [-] . . .| [3:4] 'claim'
|
416 |
+
|. . . . [-] . .| [4:5] 'that'
|
417 |
+
|. . . . . [-] .| [5:6] 'you'
|
418 |
+
|. . . . . . [-]| [6:7] 'like'
|
419 |
+
|> . . . . . . .| [0:0] S[-INV] -> * NP[] VP[] {}
|
420 |
+
|> . . . . . . .| [0:0] S[-INV]/?x[] -> * NP[] VP[]/?x[] {}
|
421 |
+
|> . . . . . . .| [0:0] S[-INV] -> * NP[] S[]/NP[] {}
|
422 |
+
|> . . . . . . .| [0:0] S[-INV] -> * Adv[+NEG] S[+INV] {}
|
423 |
+
|> . . . . . . .| [0:0] S[+INV] -> * V[+AUX] NP[] VP[] {}
|
424 |
+
|> . . . . . . .| [0:0] S[+INV]/?x[] -> * V[+AUX] NP[] VP[]/?x[] {}
|
425 |
+
|> . . . . . . .| [0:0] NP[+WH] -> * 'who' {}
|
426 |
+
|[-] . . . . . .| [0:1] NP[+WH] -> 'who' *
|
427 |
+
|[-> . . . . . .| [0:1] S[-INV] -> NP[] * VP[] {}
|
428 |
+
|[-> . . . . . .| [0:1] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
|
429 |
+
|[-> . . . . . .| [0:1] S[-INV] -> NP[] * S[]/NP[] {}
|
430 |
+
|. > . . . . . .| [1:1] S[-INV]/?x[] -> * NP[] VP[]/?x[] {}
|
431 |
+
|. > . . . . . .| [1:1] S[+INV]/?x[] -> * V[+AUX] NP[] VP[]/?x[] {}
|
432 |
+
|. > . . . . . .| [1:1] V[+AUX] -> * 'do' {}
|
433 |
+
|. > . . . . . .| [1:1] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
|
434 |
+
|. > . . . . . .| [1:1] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
|
435 |
+
|. > . . . . . .| [1:1] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
|
436 |
+
|. > . . . . . .| [1:1] VP[] -> * V[-AUX, SUBCAT='intrans'] {}
|
437 |
+
|. > . . . . . .| [1:1] VP[] -> * V[-AUX, SUBCAT='trans'] NP[] {}
|
438 |
+
|. > . . . . . .| [1:1] VP[] -> * V[-AUX, SUBCAT='clause'] SBar[] {}
|
439 |
+
|. > . . . . . .| [1:1] VP[] -> * V[+AUX] VP[] {}
|
440 |
+
|. [-] . . . . .| [1:2] V[+AUX] -> 'do' *
|
441 |
+
|. [-> . . . . .| [1:2] S[+INV]/?x[] -> V[+AUX] * NP[] VP[]/?x[] {}
|
442 |
+
|. [-> . . . . .| [1:2] VP[]/?x[] -> V[+AUX] * VP[]/?x[] {}
|
443 |
+
|. [-> . . . . .| [1:2] VP[] -> V[+AUX] * VP[] {}
|
444 |
+
|. . > . . . . .| [2:2] VP[] -> * V[-AUX, SUBCAT='intrans'] {}
|
445 |
+
|. . > . . . . .| [2:2] VP[] -> * V[-AUX, SUBCAT='trans'] NP[] {}
|
446 |
+
|. . > . . . . .| [2:2] VP[] -> * V[-AUX, SUBCAT='clause'] SBar[] {}
|
447 |
+
|. . > . . . . .| [2:2] VP[] -> * V[+AUX] VP[] {}
|
448 |
+
|. . > . . . . .| [2:2] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
|
449 |
+
|. . > . . . . .| [2:2] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
|
450 |
+
|. . > . . . . .| [2:2] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
|
451 |
+
|. . > . . . . .| [2:2] NP[-WH] -> * 'you' {}
|
452 |
+
|. . [-] . . . .| [2:3] NP[-WH] -> 'you' *
|
453 |
+
|. [---> . . . .| [1:3] S[+INV]/?x[] -> V[+AUX] NP[] * VP[]/?x[] {}
|
454 |
+
|. . . > . . . .| [3:3] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
|
455 |
+
|. . . > . . . .| [3:3] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
|
456 |
+
|. . . > . . . .| [3:3] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
|
457 |
+
|. . . > . . . .| [3:3] V[-AUX, SUBCAT='clause'] -> * 'claim' {}
|
458 |
+
|. . . [-] . . .| [3:4] V[-AUX, SUBCAT='clause'] -> 'claim' *
|
459 |
+
|. . . [-> . . .| [3:4] VP[]/?x[] -> V[-AUX, SUBCAT='clause'] * SBar[]/?x[] {}
|
460 |
+
|. . . . > . . .| [4:4] SBar[]/?x[] -> * Comp[] S[-INV]/?x[] {}
|
461 |
+
|. . . . > . . .| [4:4] Comp[] -> * 'that' {}
|
462 |
+
|. . . . [-] . .| [4:5] Comp[] -> 'that' *
|
463 |
+
|. . . . [-> . .| [4:5] SBar[]/?x[] -> Comp[] * S[-INV]/?x[] {}
|
464 |
+
|. . . . . > . .| [5:5] S[-INV]/?x[] -> * NP[] VP[]/?x[] {}
|
465 |
+
|. . . . . > . .| [5:5] NP[-WH] -> * 'you' {}
|
466 |
+
|. . . . . [-] .| [5:6] NP[-WH] -> 'you' *
|
467 |
+
|. . . . . [-> .| [5:6] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
|
468 |
+
|. . . . . . > .| [6:6] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
|
469 |
+
|. . . . . . > .| [6:6] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
|
470 |
+
|. . . . . . > .| [6:6] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
|
471 |
+
|. . . . . . > .| [6:6] V[-AUX, SUBCAT='trans'] -> * 'like' {}
|
472 |
+
|. . . . . . [-]| [6:7] V[-AUX, SUBCAT='trans'] -> 'like' *
|
473 |
+
|. . . . . . [->| [6:7] VP[]/?x[] -> V[-AUX, SUBCAT='trans'] * NP[]/?x[] {}
|
474 |
+
|. . . . . . . #| [7:7] NP[]/NP[] -> *
|
475 |
+
|. . . . . . [-]| [6:7] VP[]/NP[] -> V[-AUX, SUBCAT='trans'] NP[]/NP[] *
|
476 |
+
|. . . . . [---]| [5:7] S[-INV]/NP[] -> NP[] VP[]/NP[] *
|
477 |
+
|. . . . [-----]| [4:7] SBar[]/NP[] -> Comp[] S[-INV]/NP[] *
|
478 |
+
|. . . [-------]| [3:7] VP[]/NP[] -> V[-AUX, SUBCAT='clause'] SBar[]/NP[] *
|
479 |
+
|. [-----------]| [1:7] S[+INV]/NP[] -> V[+AUX] NP[] VP[]/NP[] *
|
480 |
+
|[=============]| [0:7] S[-INV] -> NP[] S[]/NP[] *
|
481 |
+
|
482 |
+
>>> sorted(trees) == sorted(trees2)
|
483 |
+
True
|
484 |
+
|
485 |
+
|
486 |
+
Let's load a German grammar:
|
487 |
+
|
488 |
+
>>> cp = parse.load_parser('grammars/book_grammars/german.fcfg', trace=0)
|
489 |
+
>>> sent = 'die Katze sieht den Hund'
|
490 |
+
>>> tokens = sent.split()
|
491 |
+
>>> trees = cp.parse(tokens)
|
492 |
+
>>> for tree in trees: print(tree)
|
493 |
+
(S[]
|
494 |
+
(NP[AGR=[GND='fem', NUM='sg', PER=3], CASE='nom']
|
495 |
+
(Det[AGR=[GND='fem', NUM='sg', PER=3], CASE='nom'] die)
|
496 |
+
(N[AGR=[GND='fem', NUM='sg', PER=3]] Katze))
|
497 |
+
(VP[AGR=[NUM='sg', PER=3]]
|
498 |
+
(TV[AGR=[NUM='sg', PER=3], OBJCASE='acc'] sieht)
|
499 |
+
(NP[AGR=[GND='masc', NUM='sg', PER=3], CASE='acc']
|
500 |
+
(Det[AGR=[GND='masc', NUM='sg', PER=3], CASE='acc'] den)
|
501 |
+
(N[AGR=[GND='masc', NUM='sg', PER=3]] Hund))))
|
502 |
+
|
503 |
+
Grammar with Binding Operators
|
504 |
+
------------------------------
|
505 |
+
The bindop.fcfg grammar is a semantic grammar that uses lambda
|
506 |
+
calculus. Each element has a core semantics, which is a single lambda
|
507 |
+
calculus expression; and a set of binding operators, which bind
|
508 |
+
variables.
|
509 |
+
|
510 |
+
In order to make the binding operators work right, they need to
|
511 |
+
instantiate their bound variable every time they are added to the
|
512 |
+
chart. To do this, we use a special subclass of `Chart`, called
|
513 |
+
`InstantiateVarsChart`.
|
514 |
+
|
515 |
+
>>> from nltk.parse.featurechart import InstantiateVarsChart
|
516 |
+
>>> cp = parse.load_parser('grammars/sample_grammars/bindop.fcfg', trace=1,
|
517 |
+
... chart_class=InstantiateVarsChart)
|
518 |
+
>>> print(cp.grammar())
|
519 |
+
Grammar with 15 productions (start state = S[])
|
520 |
+
S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] VP[SEM=[BO=?b2, CORE=?vp]]
|
521 |
+
VP[SEM=[BO={?b1+?b2}, CORE=<?v(?obj)>]] -> TV[SEM=[BO=?b1, CORE=?v]] NP[SEM=[BO=?b2, CORE=?obj]]
|
522 |
+
VP[SEM=?s] -> IV[SEM=?s]
|
523 |
+
NP[SEM=[BO={?b1+?b2+{bo(?det(?n),@x)}}, CORE=<@x>]] -> Det[SEM=[BO=?b1, CORE=?det]] N[SEM=[BO=?b2, CORE=?n]]
|
524 |
+
Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] -> 'a'
|
525 |
+
N[SEM=[BO={/}, CORE=<dog>]] -> 'dog'
|
526 |
+
N[SEM=[BO={/}, CORE=<dog>]] -> 'cat'
|
527 |
+
N[SEM=[BO={/}, CORE=<dog>]] -> 'mouse'
|
528 |
+
IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'barks'
|
529 |
+
IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'eats'
|
530 |
+
IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'walks'
|
531 |
+
TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] -> 'feeds'
|
532 |
+
TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] -> 'walks'
|
533 |
+
NP[SEM=[BO={bo(\P.P(John),@x)}, CORE=<@x>]] -> 'john'
|
534 |
+
NP[SEM=[BO={bo(\P.P(John),@x)}, CORE=<@x>]] -> 'alex'
|
535 |
+
|
536 |
+
A simple intransitive sentence:
|
537 |
+
|
538 |
+
>>> from nltk.sem import logic
|
539 |
+
>>> logic._counter._value = 100
|
540 |
+
|
541 |
+
>>> trees = cp.parse('john barks'.split())
|
542 |
+
|. john.barks.|
|
543 |
+
|[-----] .| [0:1] 'john'
|
544 |
+
|. [-----]| [1:2] 'barks'
|
545 |
+
|[-----] .| [0:1] NP[SEM=[BO={bo(\P.P(John),z101)}, CORE=<z101>]] -> 'john' *
|
546 |
+
|[-----> .| [0:1] S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] * VP[SEM=[BO=?b2, CORE=?vp]] {?b1: {bo(\P.P(John),z2)}, ?subj: <IndividualVariableExpression z2>}
|
547 |
+
|. [-----]| [1:2] IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'barks' *
|
548 |
+
|. [-----]| [1:2] VP[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] *
|
549 |
+
|[===========]| [0:2] S[SEM=[BO={bo(\P.P(John),z2)}, CORE=<bark(z2)>]] -> NP[SEM=[BO={bo(\P.P(John),z2)}, CORE=<z2>]] VP[SEM=[BO={/}, CORE=<\x.bark(x)>]] *
|
550 |
+
>>> for tree in trees: print(tree)
|
551 |
+
(S[SEM=[BO={bo(\P.P(John),z2)}, CORE=<bark(z2)>]]
|
552 |
+
(NP[SEM=[BO={bo(\P.P(John),z101)}, CORE=<z101>]] john)
|
553 |
+
(VP[SEM=[BO={/}, CORE=<\x.bark(x)>]]
|
554 |
+
(IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] barks)))
|
555 |
+
|
556 |
+
A transitive sentence:
|
557 |
+
|
558 |
+
>>> trees = cp.parse('john feeds a dog'.split())
|
559 |
+
|.joh.fee. a .dog.|
|
560 |
+
|[---] . . .| [0:1] 'john'
|
561 |
+
|. [---] . .| [1:2] 'feeds'
|
562 |
+
|. . [---] .| [2:3] 'a'
|
563 |
+
|. . . [---]| [3:4] 'dog'
|
564 |
+
|[---] . . .| [0:1] NP[SEM=[BO={bo(\P.P(John),z102)}, CORE=<z102>]] -> 'john' *
|
565 |
+
|[---> . . .| [0:1] S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] * VP[SEM=[BO=?b2, CORE=?vp]] {?b1: {bo(\P.P(John),z2)}, ?subj: <IndividualVariableExpression z2>}
|
566 |
+
|. [---] . .| [1:2] TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] -> 'feeds' *
|
567 |
+
|. [---> . .| [1:2] VP[SEM=[BO={?b1+?b2}, CORE=<?v(?obj)>]] -> TV[SEM=[BO=?b1, CORE=?v]] * NP[SEM=[BO=?b2, CORE=?obj]] {?b1: {/}, ?v: <LambdaExpression \x y.feed(y,x)>}
|
568 |
+
|. . [---] .| [2:3] Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] -> 'a' *
|
569 |
+
|. . [---> .| [2:3] NP[SEM=[BO={?b1+?b2+{bo(?det(?n),@x)}}, CORE=<@x>]] -> Det[SEM=[BO=?b1, CORE=?det]] * N[SEM=[BO=?b2, CORE=?n]] {?b1: {/}, ?det: <LambdaExpression \Q P.exists x.(Q(x) & P(x))>}
|
570 |
+
|. . . [---]| [3:4] N[SEM=[BO={/}, CORE=<dog>]] -> 'dog' *
|
571 |
+
|. . [-------]| [2:4] NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z103)}, CORE=<z103>]] -> Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] N[SEM=[BO={/}, CORE=<dog>]] *
|
572 |
+
|. . [------->| [2:4] S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] * VP[SEM=[BO=?b2, CORE=?vp]] {?b1: {bo(\P.exists x.(dog(x) & P(x)),z2)}, ?subj: <IndividualVariableExpression z2>}
|
573 |
+
|. [-----------]| [1:4] VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<\y.feed(y,z2)>]] -> TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<z2>]] *
|
574 |
+
|[===============]| [0:4] S[SEM=[BO={bo(\P.P(John),z2), bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<feed(z2,z3)>]] -> NP[SEM=[BO={bo(\P.P(John),z2)}, CORE=<z2>]] VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<\y.feed(y,z3)>]] *
|
575 |
+
|
576 |
+
>>> for tree in trees: print(tree)
|
577 |
+
(S[SEM=[BO={bo(\P.P(John),z2), bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<feed(z2,z3)>]]
|
578 |
+
(NP[SEM=[BO={bo(\P.P(John),z102)}, CORE=<z102>]] john)
|
579 |
+
(VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<\y.feed(y,z2)>]]
|
580 |
+
(TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] feeds)
|
581 |
+
(NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z103)}, CORE=<z103>]]
|
582 |
+
(Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] a)
|
583 |
+
(N[SEM=[BO={/}, CORE=<dog>]] dog))))
|
584 |
+
|
585 |
+
Turn down the verbosity:
|
586 |
+
|
587 |
+
>>> cp = parse.load_parser('grammars/sample_grammars/bindop.fcfg', trace=0,
|
588 |
+
... chart_class=InstantiateVarsChart)
|
589 |
+
|
590 |
+
Reuse the same lexical item twice:
|
591 |
+
|
592 |
+
>>> trees = cp.parse('john feeds john'.split())
|
593 |
+
>>> for tree in trees: print(tree)
|
594 |
+
(S[SEM=[BO={bo(\P.P(John),z2), bo(\P.P(John),z3)}, CORE=<feed(z2,z3)>]]
|
595 |
+
(NP[SEM=[BO={bo(\P.P(John),z104)}, CORE=<z104>]] john)
|
596 |
+
(VP[SEM=[BO={bo(\P.P(John),z2)}, CORE=<\y.feed(y,z2)>]]
|
597 |
+
(TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] feeds)
|
598 |
+
(NP[SEM=[BO={bo(\P.P(John),z105)}, CORE=<z105>]] john)))
|
599 |
+
|
600 |
+
>>> trees = cp.parse('a dog feeds a dog'.split())
|
601 |
+
>>> for tree in trees: print(tree)
|
602 |
+
(S[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2), bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<feed(z2,z3)>]]
|
603 |
+
(NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z106)}, CORE=<z106>]]
|
604 |
+
(Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] a)
|
605 |
+
(N[SEM=[BO={/}, CORE=<dog>]] dog))
|
606 |
+
(VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<\y.feed(y,z2)>]]
|
607 |
+
(TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] feeds)
|
608 |
+
(NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z107)}, CORE=<z107>]]
|
609 |
+
(Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] a)
|
610 |
+
(N[SEM=[BO={/}, CORE=<dog>]] dog))))
|
llmeval-env/lib/python3.10/site-packages/nltk/test/gensim_fixt.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
def setup_module():
    """Module-level pytest fixture hook.

    Skips every test in this module when the optional ``gensim``
    package cannot be imported (``pytest.importorskip`` raises a
    Skipped outcome instead of an ImportError).
    """
    import pytest

    pytest.importorskip("gensim")
|
llmeval-env/lib/python3.10/site-packages/nltk/test/gluesemantics.doctest
ADDED
@@ -0,0 +1,383 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
==============================================================================
|
5 |
+
Glue Semantics
|
6 |
+
==============================================================================
|
7 |
+
|
8 |
+
|
9 |
+
|
10 |
+
======================
|
11 |
+
Linear logic
|
12 |
+
======================
|
13 |
+
|
14 |
+
>>> from nltk.sem import logic
|
15 |
+
>>> from nltk.sem.glue import *
|
16 |
+
>>> from nltk.sem.linearlogic import *
|
17 |
+
|
18 |
+
>>> from nltk.sem.linearlogic import Expression
|
19 |
+
>>> read_expr = Expression.fromstring
|
20 |
+
|
21 |
+
Parser
|
22 |
+
|
23 |
+
>>> print(read_expr(r'f'))
|
24 |
+
f
|
25 |
+
>>> print(read_expr(r'(g -o f)'))
|
26 |
+
(g -o f)
|
27 |
+
>>> print(read_expr(r'(g -o (h -o f))'))
|
28 |
+
(g -o (h -o f))
|
29 |
+
>>> print(read_expr(r'((g -o G) -o G)'))
|
30 |
+
((g -o G) -o G)
|
31 |
+
>>> print(read_expr(r'(g -o f)(g)'))
|
32 |
+
(g -o f)(g)
|
33 |
+
>>> print(read_expr(r'((g -o G) -o G)((g -o f))'))
|
34 |
+
((g -o G) -o G)((g -o f))
|
35 |
+
|
36 |
+
Simplify
|
37 |
+
|
38 |
+
>>> print(read_expr(r'f').simplify())
|
39 |
+
f
|
40 |
+
>>> print(read_expr(r'(g -o f)').simplify())
|
41 |
+
(g -o f)
|
42 |
+
>>> print(read_expr(r'((g -o G) -o G)').simplify())
|
43 |
+
((g -o G) -o G)
|
44 |
+
>>> print(read_expr(r'(g -o f)(g)').simplify())
|
45 |
+
f
|
46 |
+
>>> try: read_expr(r'(g -o f)(f)').simplify()
|
47 |
+
... except LinearLogicApplicationException as e: print(e)
|
48 |
+
...
|
49 |
+
Cannot apply (g -o f) to f. Cannot unify g with f given {}
|
50 |
+
>>> print(read_expr(r'(G -o f)(g)').simplify())
|
51 |
+
f
|
52 |
+
>>> print(read_expr(r'((g -o G) -o G)((g -o f))').simplify())
|
53 |
+
f
|
54 |
+
|
55 |
+
Test BindingDict
|
56 |
+
|
57 |
+
>>> h = ConstantExpression('h')
|
58 |
+
>>> g = ConstantExpression('g')
|
59 |
+
>>> f = ConstantExpression('f')
|
60 |
+
|
61 |
+
>>> H = VariableExpression('H')
|
62 |
+
>>> G = VariableExpression('G')
|
63 |
+
>>> F = VariableExpression('F')
|
64 |
+
|
65 |
+
>>> d1 = BindingDict({H: h})
|
66 |
+
>>> d2 = BindingDict({F: f, G: F})
|
67 |
+
>>> d12 = d1 + d2
|
68 |
+
>>> all12 = ['%s: %s' % (v, d12[v]) for v in d12.d]
|
69 |
+
>>> all12.sort()
|
70 |
+
>>> print(all12)
|
71 |
+
['F: f', 'G: f', 'H: h']
|
72 |
+
|
73 |
+
>>> BindingDict([(F,f),(G,g),(H,h)]) == BindingDict({F:f, G:g, H:h})
|
74 |
+
True
|
75 |
+
|
76 |
+
>>> d4 = BindingDict({F: f})
|
77 |
+
>>> try: d4[F] = g
|
78 |
+
... except VariableBindingException as e: print(e)
|
79 |
+
Variable F already bound to another value
|
80 |
+
|
81 |
+
Test Unify
|
82 |
+
|
83 |
+
>>> try: f.unify(g, BindingDict())
|
84 |
+
... except UnificationException as e: print(e)
|
85 |
+
...
|
86 |
+
Cannot unify f with g given {}
|
87 |
+
|
88 |
+
>>> f.unify(G, BindingDict()) == BindingDict({G: f})
|
89 |
+
True
|
90 |
+
>>> try: f.unify(G, BindingDict({G: h}))
|
91 |
+
... except UnificationException as e: print(e)
|
92 |
+
...
|
93 |
+
Cannot unify f with G given {G: h}
|
94 |
+
>>> f.unify(G, BindingDict({G: f})) == BindingDict({G: f})
|
95 |
+
True
|
96 |
+
>>> f.unify(G, BindingDict({H: f})) == BindingDict({G: f, H: f})
|
97 |
+
True
|
98 |
+
|
99 |
+
>>> G.unify(f, BindingDict()) == BindingDict({G: f})
|
100 |
+
True
|
101 |
+
>>> try: G.unify(f, BindingDict({G: h}))
|
102 |
+
... except UnificationException as e: print(e)
|
103 |
+
...
|
104 |
+
Cannot unify G with f given {G: h}
|
105 |
+
>>> G.unify(f, BindingDict({G: f})) == BindingDict({G: f})
|
106 |
+
True
|
107 |
+
>>> G.unify(f, BindingDict({H: f})) == BindingDict({G: f, H: f})
|
108 |
+
True
|
109 |
+
|
110 |
+
>>> G.unify(F, BindingDict()) == BindingDict({G: F})
|
111 |
+
True
|
112 |
+
>>> try: G.unify(F, BindingDict({G: H}))
|
113 |
+
... except UnificationException as e: print(e)
|
114 |
+
...
|
115 |
+
Cannot unify G with F given {G: H}
|
116 |
+
>>> G.unify(F, BindingDict({G: F})) == BindingDict({G: F})
|
117 |
+
True
|
118 |
+
>>> G.unify(F, BindingDict({H: F})) == BindingDict({G: F, H: F})
|
119 |
+
True
|
120 |
+
|
121 |
+
Test Compile
|
122 |
+
|
123 |
+
>>> print(read_expr('g').compile_pos(Counter(), GlueFormula))
|
124 |
+
(<ConstantExpression g>, [])
|
125 |
+
>>> print(read_expr('(g -o f)').compile_pos(Counter(), GlueFormula))
|
126 |
+
(<ImpExpression (g -o f)>, [])
|
127 |
+
>>> print(read_expr('(g -o (h -o f))').compile_pos(Counter(), GlueFormula))
|
128 |
+
(<ImpExpression (g -o (h -o f))>, [])
|
129 |
+
|
130 |
+
|
131 |
+
======================
|
132 |
+
Glue
|
133 |
+
======================
|
134 |
+
|
135 |
+
Demo of "John walks"
|
136 |
+
--------------------
|
137 |
+
|
138 |
+
>>> john = GlueFormula("John", "g")
|
139 |
+
>>> print(john)
|
140 |
+
John : g
|
141 |
+
>>> walks = GlueFormula(r"\x.walks(x)", "(g -o f)")
|
142 |
+
>>> print(walks)
|
143 |
+
\x.walks(x) : (g -o f)
|
144 |
+
>>> print(walks.applyto(john))
|
145 |
+
\x.walks(x)(John) : (g -o f)(g)
|
146 |
+
>>> print(walks.applyto(john).simplify())
|
147 |
+
walks(John) : f
|
148 |
+
|
149 |
+
|
150 |
+
Demo of "A dog walks"
|
151 |
+
---------------------
|
152 |
+
|
153 |
+
>>> a = GlueFormula("\\P Q.some x.(P(x) and Q(x))", "((gv -o gr) -o ((g -o G) -o G))")
|
154 |
+
>>> print(a)
|
155 |
+
\P Q.exists x.(P(x) & Q(x)) : ((gv -o gr) -o ((g -o G) -o G))
|
156 |
+
>>> man = GlueFormula(r"\x.man(x)", "(gv -o gr)")
|
157 |
+
>>> print(man)
|
158 |
+
\x.man(x) : (gv -o gr)
|
159 |
+
>>> walks = GlueFormula(r"\x.walks(x)", "(g -o f)")
|
160 |
+
>>> print(walks)
|
161 |
+
\x.walks(x) : (g -o f)
|
162 |
+
>>> a_man = a.applyto(man)
|
163 |
+
>>> print(a_man.simplify())
|
164 |
+
\Q.exists x.(man(x) & Q(x)) : ((g -o G) -o G)
|
165 |
+
>>> a_man_walks = a_man.applyto(walks)
|
166 |
+
>>> print(a_man_walks.simplify())
|
167 |
+
exists x.(man(x) & walks(x)) : f
|
168 |
+
|
169 |
+
|
170 |
+
Demo of 'every girl chases a dog'
|
171 |
+
---------------------------------
|
172 |
+
|
173 |
+
Individual words:
|
174 |
+
|
175 |
+
>>> every = GlueFormula("\\P Q.all x.(P(x) -> Q(x))", "((gv -o gr) -o ((g -o G) -o G))")
|
176 |
+
>>> print(every)
|
177 |
+
\P Q.all x.(P(x) -> Q(x)) : ((gv -o gr) -o ((g -o G) -o G))
|
178 |
+
>>> girl = GlueFormula(r"\x.girl(x)", "(gv -o gr)")
|
179 |
+
>>> print(girl)
|
180 |
+
\x.girl(x) : (gv -o gr)
|
181 |
+
>>> chases = GlueFormula(r"\x y.chases(x,y)", "(g -o (h -o f))")
|
182 |
+
>>> print(chases)
|
183 |
+
\x y.chases(x,y) : (g -o (h -o f))
|
184 |
+
>>> a = GlueFormula("\\P Q.some x.(P(x) and Q(x))", "((hv -o hr) -o ((h -o H) -o H))")
|
185 |
+
>>> print(a)
|
186 |
+
\P Q.exists x.(P(x) & Q(x)) : ((hv -o hr) -o ((h -o H) -o H))
|
187 |
+
>>> dog = GlueFormula(r"\x.dog(x)", "(hv -o hr)")
|
188 |
+
>>> print(dog)
|
189 |
+
\x.dog(x) : (hv -o hr)
|
190 |
+
|
191 |
+
Noun Quantification can only be done one way:
|
192 |
+
|
193 |
+
>>> every_girl = every.applyto(girl)
|
194 |
+
>>> print(every_girl.simplify())
|
195 |
+
\Q.all x.(girl(x) -> Q(x)) : ((g -o G) -o G)
|
196 |
+
>>> a_dog = a.applyto(dog)
|
197 |
+
>>> print(a_dog.simplify())
|
198 |
+
\Q.exists x.(dog(x) & Q(x)) : ((h -o H) -o H)
|
199 |
+
|
200 |
+
The first reading is achieved by combining 'chases' with 'a dog' first.
|
201 |
+
Since 'a dog' requires something of the form '(h -o H)' we must
|
202 |
+
get rid of the 'g' in the glue of 'chases'. We will do this with
|
203 |
+
the '-o elimination' rule. So, x1 will be our subject placeholder.
|
204 |
+
|
205 |
+
>>> xPrime = GlueFormula("x1", "g")
|
206 |
+
>>> print(xPrime)
|
207 |
+
x1 : g
|
208 |
+
>>> xPrime_chases = chases.applyto(xPrime)
|
209 |
+
>>> print(xPrime_chases.simplify())
|
210 |
+
\y.chases(x1,y) : (h -o f)
|
211 |
+
>>> xPrime_chases_a_dog = a_dog.applyto(xPrime_chases)
|
212 |
+
>>> print(xPrime_chases_a_dog.simplify())
|
213 |
+
exists x.(dog(x) & chases(x1,x)) : f
|
214 |
+
|
215 |
+
Now we can retract our subject placeholder using lambda-abstraction and
|
216 |
+
combine with the true subject.
|
217 |
+
|
218 |
+
>>> chases_a_dog = xPrime_chases_a_dog.lambda_abstract(xPrime)
|
219 |
+
>>> print(chases_a_dog.simplify())
|
220 |
+
\x1.exists x.(dog(x) & chases(x1,x)) : (g -o f)
|
221 |
+
>>> every_girl_chases_a_dog = every_girl.applyto(chases_a_dog)
|
222 |
+
>>> r1 = every_girl_chases_a_dog.simplify()
|
223 |
+
>>> r2 = GlueFormula(r'all x.(girl(x) -> exists z1.(dog(z1) & chases(x,z1)))', 'f')
|
224 |
+
>>> r1 == r2
|
225 |
+
True
|
226 |
+
|
227 |
+
The second reading is achieved by combining 'every girl' with 'chases' first.
|
228 |
+
|
229 |
+
>>> xPrime = GlueFormula("x1", "g")
|
230 |
+
>>> print(xPrime)
|
231 |
+
x1 : g
|
232 |
+
>>> xPrime_chases = chases.applyto(xPrime)
|
233 |
+
>>> print(xPrime_chases.simplify())
|
234 |
+
\y.chases(x1,y) : (h -o f)
|
235 |
+
>>> yPrime = GlueFormula("x2", "h")
|
236 |
+
>>> print(yPrime)
|
237 |
+
x2 : h
|
238 |
+
>>> xPrime_chases_yPrime = xPrime_chases.applyto(yPrime)
|
239 |
+
>>> print(xPrime_chases_yPrime.simplify())
|
240 |
+
chases(x1,x2) : f
|
241 |
+
>>> chases_yPrime = xPrime_chases_yPrime.lambda_abstract(xPrime)
|
242 |
+
>>> print(chases_yPrime.simplify())
|
243 |
+
\x1.chases(x1,x2) : (g -o f)
|
244 |
+
>>> every_girl_chases_yPrime = every_girl.applyto(chases_yPrime)
|
245 |
+
>>> print(every_girl_chases_yPrime.simplify())
|
246 |
+
all x.(girl(x) -> chases(x,x2)) : f
|
247 |
+
>>> every_girl_chases = every_girl_chases_yPrime.lambda_abstract(yPrime)
|
248 |
+
>>> print(every_girl_chases.simplify())
|
249 |
+
\x2.all x.(girl(x) -> chases(x,x2)) : (h -o f)
|
250 |
+
>>> every_girl_chases_a_dog = a_dog.applyto(every_girl_chases)
|
251 |
+
>>> r1 = every_girl_chases_a_dog.simplify()
|
252 |
+
>>> r2 = GlueFormula(r'exists x.(dog(x) & all z2.(girl(z2) -> chases(z2,x)))', 'f')
|
253 |
+
>>> r1 == r2
|
254 |
+
True
|
255 |
+
|
256 |
+
|
257 |
+
Compilation
|
258 |
+
-----------
|
259 |
+
|
260 |
+
>>> for cp in GlueFormula('m', '(b -o a)').compile(Counter()): print(cp)
|
261 |
+
m : (b -o a) : {1}
|
262 |
+
>>> for cp in GlueFormula('m', '((c -o b) -o a)').compile(Counter()): print(cp)
|
263 |
+
v1 : c : {1}
|
264 |
+
m : (b[1] -o a) : {2}
|
265 |
+
>>> for cp in GlueFormula('m', '((d -o (c -o b)) -o a)').compile(Counter()): print(cp)
|
266 |
+
v1 : c : {1}
|
267 |
+
v2 : d : {2}
|
268 |
+
m : (b[1, 2] -o a) : {3}
|
269 |
+
>>> for cp in GlueFormula('m', '((d -o e) -o ((c -o b) -o a))').compile(Counter()): print(cp)
|
270 |
+
v1 : d : {1}
|
271 |
+
v2 : c : {2}
|
272 |
+
m : (e[1] -o (b[2] -o a)) : {3}
|
273 |
+
>>> for cp in GlueFormula('m', '(((d -o c) -o b) -o a)').compile(Counter()): print(cp)
|
274 |
+
v1 : (d -o c) : {1}
|
275 |
+
m : (b[1] -o a) : {2}
|
276 |
+
>>> for cp in GlueFormula('m', '((((e -o d) -o c) -o b) -o a)').compile(Counter()): print(cp)
|
277 |
+
v1 : e : {1}
|
278 |
+
v2 : (d[1] -o c) : {2}
|
279 |
+
m : (b[2] -o a) : {3}
|
280 |
+
|
281 |
+
|
282 |
+
Demo of 'a man walks' using Compilation
|
283 |
+
---------------------------------------
|
284 |
+
|
285 |
+
Premises
|
286 |
+
|
287 |
+
>>> a = GlueFormula('\\P Q.some x.(P(x) and Q(x))', '((gv -o gr) -o ((g -o G) -o G))')
|
288 |
+
>>> print(a)
|
289 |
+
\P Q.exists x.(P(x) & Q(x)) : ((gv -o gr) -o ((g -o G) -o G))
|
290 |
+
|
291 |
+
>>> man = GlueFormula('\\x.man(x)', '(gv -o gr)')
|
292 |
+
>>> print(man)
|
293 |
+
\x.man(x) : (gv -o gr)
|
294 |
+
|
295 |
+
>>> walks = GlueFormula('\\x.walks(x)', '(g -o f)')
|
296 |
+
>>> print(walks)
|
297 |
+
\x.walks(x) : (g -o f)
|
298 |
+
|
299 |
+
Compiled Premises:
|
300 |
+
|
301 |
+
>>> counter = Counter()
|
302 |
+
>>> ahc = a.compile(counter)
|
303 |
+
>>> g1 = ahc[0]
|
304 |
+
>>> print(g1)
|
305 |
+
v1 : gv : {1}
|
306 |
+
>>> g2 = ahc[1]
|
307 |
+
>>> print(g2)
|
308 |
+
v2 : g : {2}
|
309 |
+
>>> g3 = ahc[2]
|
310 |
+
>>> print(g3)
|
311 |
+
\P Q.exists x.(P(x) & Q(x)) : (gr[1] -o (G[2] -o G)) : {3}
|
312 |
+
>>> g4 = man.compile(counter)[0]
|
313 |
+
>>> print(g4)
|
314 |
+
\x.man(x) : (gv -o gr) : {4}
|
315 |
+
>>> g5 = walks.compile(counter)[0]
|
316 |
+
>>> print(g5)
|
317 |
+
\x.walks(x) : (g -o f) : {5}
|
318 |
+
|
319 |
+
Derivation:
|
320 |
+
|
321 |
+
>>> g14 = g4.applyto(g1)
|
322 |
+
>>> print(g14.simplify())
|
323 |
+
man(v1) : gr : {1, 4}
|
324 |
+
>>> g134 = g3.applyto(g14)
|
325 |
+
>>> print(g134.simplify())
|
326 |
+
\Q.exists x.(man(x) & Q(x)) : (G[2] -o G) : {1, 3, 4}
|
327 |
+
>>> g25 = g5.applyto(g2)
|
328 |
+
>>> print(g25.simplify())
|
329 |
+
walks(v2) : f : {2, 5}
|
330 |
+
>>> g12345 = g134.applyto(g25)
|
331 |
+
>>> print(g12345.simplify())
|
332 |
+
exists x.(man(x) & walks(x)) : f : {1, 2, 3, 4, 5}
|
333 |
+
|
334 |
+
---------------------------------
|
335 |
+
Dependency Graph to Glue Formulas
|
336 |
+
---------------------------------
|
337 |
+
>>> from nltk.corpus.reader.dependency import DependencyGraph
|
338 |
+
|
339 |
+
>>> depgraph = DependencyGraph("""1 John _ NNP NNP _ 2 SUBJ _ _
|
340 |
+
... 2 sees _ VB VB _ 0 ROOT _ _
|
341 |
+
... 3 a _ ex_quant ex_quant _ 4 SPEC _ _
|
342 |
+
... 4 dog _ NN NN _ 2 OBJ _ _
|
343 |
+
... """)
|
344 |
+
>>> gfl = GlueDict('nltk:grammars/sample_grammars/glue.semtype').to_glueformula_list(depgraph)
|
345 |
+
>>> print(gfl) # doctest: +SKIP
|
346 |
+
[\x y.sees(x,y) : (f -o (i -o g)),
|
347 |
+
\x.dog(x) : (iv -o ir),
|
348 |
+
\P Q.exists x.(P(x) & Q(x)) : ((iv -o ir) -o ((i -o I3) -o I3)),
|
349 |
+
\P Q.exists x.(P(x) & Q(x)) : ((fv -o fr) -o ((f -o F4) -o F4)),
|
350 |
+
\x.John(x) : (fv -o fr)]
|
351 |
+
>>> glue = Glue()
|
352 |
+
>>> for r in sorted([r.simplify().normalize() for r in glue.get_readings(glue.gfl_to_compiled(gfl))], key=str):
|
353 |
+
... print(r)
|
354 |
+
exists z1.(John(z1) & exists z2.(dog(z2) & sees(z1,z2)))
|
355 |
+
exists z1.(dog(z1) & exists z2.(John(z2) & sees(z2,z1)))
|
356 |
+
|
357 |
+
-----------------------------------
|
358 |
+
Dependency Graph to LFG f-structure
|
359 |
+
-----------------------------------
|
360 |
+
>>> from nltk.sem.lfg import FStructure
|
361 |
+
|
362 |
+
>>> fstruct = FStructure.read_depgraph(depgraph)
|
363 |
+
|
364 |
+
>>> print(fstruct) # doctest: +SKIP
|
365 |
+
f:[pred 'sees'
|
366 |
+
obj h:[pred 'dog'
|
367 |
+
spec 'a']
|
368 |
+
subj g:[pred 'John']]
|
369 |
+
|
370 |
+
>>> fstruct.to_depgraph().tree().pprint()
|
371 |
+
(sees (dog a) John)
|
372 |
+
|
373 |
+
---------------------------------
|
374 |
+
LFG f-structure to Glue
|
375 |
+
---------------------------------
|
376 |
+
>>> fstruct.to_glueformula_list(GlueDict('nltk:grammars/sample_grammars/glue.semtype')) # doctest: +SKIP
|
377 |
+
[\x y.sees(x,y) : (i -o (g -o f)),
|
378 |
+
\x.dog(x) : (gv -o gr),
|
379 |
+
\P Q.exists x.(P(x) & Q(x)) : ((gv -o gr) -o ((g -o G3) -o G3)),
|
380 |
+
\P Q.exists x.(P(x) & Q(x)) : ((iv -o ir) -o ((i -o I4) -o I4)),
|
381 |
+
\x.John(x) : (iv -o ir)]
|
382 |
+
|
383 |
+
.. see gluesemantics_malt.doctest for more
|
llmeval-env/lib/python3.10/site-packages/nltk/test/gluesemantics_malt_fixt.py
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
def setup_module():
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
from nltk.parse.malt import MaltParser
|
5 |
+
|
6 |
+
try:
|
7 |
+
depparser = MaltParser()
|
8 |
+
except (AssertionError, LookupError) as e:
|
9 |
+
pytest.skip("MaltParser is not available")
|
llmeval-env/lib/python3.10/site-packages/nltk/test/grammar.doctest
ADDED
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
===============
|
5 |
+
Grammar Parsing
|
6 |
+
===============
|
7 |
+
|
8 |
+
Grammars can be parsed from strings:
|
9 |
+
|
10 |
+
>>> from nltk import CFG
|
11 |
+
>>> grammar = CFG.fromstring("""
|
12 |
+
... S -> NP VP
|
13 |
+
... PP -> P NP
|
14 |
+
... NP -> Det N | NP PP
|
15 |
+
... VP -> V NP | VP PP
|
16 |
+
... Det -> 'a' | 'the'
|
17 |
+
... N -> 'dog' | 'cat'
|
18 |
+
... V -> 'chased' | 'sat'
|
19 |
+
... P -> 'on' | 'in'
|
20 |
+
... """)
|
21 |
+
>>> grammar
|
22 |
+
<Grammar with 14 productions>
|
23 |
+
>>> grammar.start()
|
24 |
+
S
|
25 |
+
>>> grammar.productions()
|
26 |
+
[S -> NP VP, PP -> P NP, NP -> Det N, NP -> NP PP, VP -> V NP, VP -> VP PP,
|
27 |
+
Det -> 'a', Det -> 'the', N -> 'dog', N -> 'cat', V -> 'chased', V -> 'sat',
|
28 |
+
P -> 'on', P -> 'in']
|
29 |
+
|
30 |
+
Probabilistic CFGs:
|
31 |
+
|
32 |
+
>>> from nltk import PCFG
|
33 |
+
>>> toy_pcfg1 = PCFG.fromstring("""
|
34 |
+
... S -> NP VP [1.0]
|
35 |
+
... NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
|
36 |
+
... Det -> 'the' [0.8] | 'my' [0.2]
|
37 |
+
... N -> 'man' [0.5] | 'telescope' [0.5]
|
38 |
+
... VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
|
39 |
+
... V -> 'ate' [0.35] | 'saw' [0.65]
|
40 |
+
... PP -> P NP [1.0]
|
41 |
+
... P -> 'with' [0.61] | 'under' [0.39]
|
42 |
+
... """)
|
43 |
+
|
44 |
+
Chomsky Normal Form grammar (Test for bug 474)
|
45 |
+
|
46 |
+
>>> g = CFG.fromstring("VP^<TOP> -> VBP NP^<VP-TOP>")
|
47 |
+
>>> g.productions()[0].lhs()
|
48 |
+
VP^<TOP>
|
49 |
+
|
50 |
+
Grammars can contain both empty strings and empty productions:
|
51 |
+
|
52 |
+
>>> from nltk.grammar import CFG
|
53 |
+
>>> from nltk.parse.generate import generate
|
54 |
+
>>> grammar = CFG.fromstring("""
|
55 |
+
... S -> A B
|
56 |
+
... A -> 'a'
|
57 |
+
... # An empty string:
|
58 |
+
... B -> 'b' | ''
|
59 |
+
... """)
|
60 |
+
>>> list(generate(grammar))
|
61 |
+
[['a', 'b'], ['a', '']]
|
62 |
+
>>> grammar = CFG.fromstring("""
|
63 |
+
... S -> A B
|
64 |
+
... A -> 'a'
|
65 |
+
... # An empty production:
|
66 |
+
... B -> 'b' |
|
67 |
+
... """)
|
68 |
+
>>> list(generate(grammar))
|
69 |
+
[['a', 'b'], ['a']]
|
llmeval-env/lib/python3.10/site-packages/nltk/test/index.doctest
ADDED
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
.. _align howto: align.html
|
5 |
+
.. _ccg howto: ccg.html
|
6 |
+
.. _chat80 howto: chat80.html
|
7 |
+
.. _childes howto: childes.html
|
8 |
+
.. _chunk howto: chunk.html
|
9 |
+
.. _classify howto: classify.html
|
10 |
+
.. _collocations howto: collocations.html
|
11 |
+
.. _compat howto: compat.html
|
12 |
+
.. _corpus howto: corpus.html
|
13 |
+
.. _data howto: data.html
|
14 |
+
.. _dependency howto: dependency.html
|
15 |
+
.. _discourse howto: discourse.html
|
16 |
+
.. _drt howto: drt.html
|
17 |
+
.. _featgram howto: featgram.html
|
18 |
+
.. _featstruct howto: featstruct.html
|
19 |
+
.. _framenet howto: framenet.html
|
20 |
+
.. _generate howto: generate.html
|
21 |
+
.. _gluesemantics howto: gluesemantics.html
|
22 |
+
.. _gluesemantics_malt howto: gluesemantics_malt.html
|
23 |
+
.. _grammar howto: grammar.html
|
24 |
+
.. _grammartestsuites howto: grammartestsuites.html
|
25 |
+
.. _index howto: index.html
|
26 |
+
.. _inference howto: inference.html
|
27 |
+
.. _internals howto: internals.html
|
28 |
+
.. _japanese howto: japanese.html
|
29 |
+
.. _logic howto: logic.html
|
30 |
+
.. _metrics howto: metrics.html
|
31 |
+
.. _misc howto: misc.html
|
32 |
+
.. _nonmonotonic howto: nonmonotonic.html
|
33 |
+
.. _parse howto: parse.html
|
34 |
+
.. _portuguese_en howto: portuguese_en.html
|
35 |
+
.. _probability howto: probability.html
|
36 |
+
.. _propbank howto: propbank.html
|
37 |
+
.. _relextract howto: relextract.html
|
38 |
+
.. _resolution howto: resolution.html
|
39 |
+
.. _semantics howto: semantics.html
|
40 |
+
.. _simple howto: simple.html
|
41 |
+
.. _stem howto: stem.html
|
42 |
+
.. _tag howto: tag.html
|
43 |
+
.. _tokenize howto: tokenize.html
|
44 |
+
.. _toolbox howto: toolbox.html
|
45 |
+
.. _tree howto: tree.html
|
46 |
+
.. _treetransforms howto: treetransforms.html
|
47 |
+
.. _util howto: util.html
|
48 |
+
.. _wordnet howto: wordnet.html
|
49 |
+
.. _wordnet_lch howto: wordnet_lch.html
|
50 |
+
|
51 |
+
===========
|
52 |
+
NLTK HOWTOs
|
53 |
+
===========
|
54 |
+
|
55 |
+
* `align HOWTO`_
|
56 |
+
* `ccg HOWTO`_
|
57 |
+
* `chat80 HOWTO`_
|
58 |
+
* `childes HOWTO`_
|
59 |
+
* `chunk HOWTO`_
|
60 |
+
* `classify HOWTO`_
|
61 |
+
* `collocations HOWTO`_
|
62 |
+
* `compat HOWTO`_
|
63 |
+
* `corpus HOWTO`_
|
64 |
+
* `data HOWTO`_
|
65 |
+
* `dependency HOWTO`_
|
66 |
+
* `discourse HOWTO`_
|
67 |
+
* `drt HOWTO`_
|
68 |
+
* `featgram HOWTO`_
|
69 |
+
* `featstruct HOWTO`_
|
70 |
+
* `framenet HOWTO`_
|
71 |
+
* `generate HOWTO`_
|
72 |
+
* `gluesemantics HOWTO`_
|
73 |
+
* `gluesemantics_malt HOWTO`_
|
74 |
+
* `grammar HOWTO`_
|
75 |
+
* `grammartestsuites HOWTO`_
|
76 |
+
* `index HOWTO`_
|
77 |
+
* `inference HOWTO`_
|
78 |
+
* `internals HOWTO`_
|
79 |
+
* `japanese HOWTO`_
|
80 |
+
* `logic HOWTO`_
|
81 |
+
* `metrics HOWTO`_
|
82 |
+
* `misc HOWTO`_
|
83 |
+
* `nonmonotonic HOWTO`_
|
84 |
+
* `parse HOWTO`_
|
85 |
+
* `portuguese_en HOWTO`_
|
86 |
+
* `probability HOWTO`_
|
87 |
+
* `propbank HOWTO`_
|
88 |
+
* `relextract HOWTO`_
|
89 |
+
* `resolution HOWTO`_
|
90 |
+
* `semantics HOWTO`_
|
91 |
+
* `simple HOWTO`_
|
92 |
+
* `stem HOWTO`_
|
93 |
+
* `tag HOWTO`_
|
94 |
+
* `tokenize HOWTO`_
|
95 |
+
* `toolbox HOWTO`_
|
96 |
+
* `tree HOWTO`_
|
97 |
+
* `treetransforms HOWTO`_
|
98 |
+
* `util HOWTO`_
|
99 |
+
* `wordnet HOWTO`_
|
100 |
+
* `wordnet_lch HOWTO`_
|
llmeval-env/lib/python3.10/site-packages/nltk/test/inference.doctest
ADDED
@@ -0,0 +1,536 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
====================================
|
5 |
+
Logical Inference and Model Building
|
6 |
+
====================================
|
7 |
+
|
8 |
+
>>> from nltk.test.setup_fixt import check_binary
|
9 |
+
>>> check_binary('mace4')
|
10 |
+
|
11 |
+
>>> from nltk import *
|
12 |
+
>>> from nltk.sem.drt import DrtParser
|
13 |
+
>>> from nltk.sem import logic
|
14 |
+
>>> logic._counter._value = 0
|
15 |
+
|
16 |
+
------------
|
17 |
+
Introduction
|
18 |
+
------------
|
19 |
+
|
20 |
+
Within the area of automated reasoning, first order theorem proving
|
21 |
+
and model building (or model generation) have both received much
|
22 |
+
attention, and have given rise to highly sophisticated techniques. We
|
23 |
+
focus therefore on providing an NLTK interface to third party tools
|
24 |
+
for these tasks. In particular, the module ``nltk.inference`` can be
|
25 |
+
used to access both theorem provers and model builders.
|
26 |
+
|
27 |
+
---------------------------------
|
28 |
+
NLTK Interface to Theorem Provers
|
29 |
+
---------------------------------
|
30 |
+
|
31 |
+
The main class used to interface with a theorem prover is the ``Prover``
|
32 |
+
class, found in ``nltk.api``. The ``prove()`` method takes three optional
|
33 |
+
arguments: a goal, a list of assumptions, and a ``verbose`` boolean to
|
34 |
+
indicate whether the proof should be printed to the console. The proof goal
|
35 |
+
and any assumptions need to be instances of the ``Expression`` class
|
36 |
+
specified by ``nltk.sem.logic``. There are currently three theorem provers
|
37 |
+
included with NLTK: ``Prover9``, ``TableauProver``, and
|
38 |
+
``ResolutionProver``. The first is an off-the-shelf prover, while the other
|
39 |
+
two are written in Python and included in the ``nltk.inference`` package.
|
40 |
+
|
41 |
+
>>> from nltk.sem import Expression
|
42 |
+
>>> read_expr = Expression.fromstring
|
43 |
+
>>> p1 = read_expr('man(socrates)')
|
44 |
+
>>> p2 = read_expr('all x.(man(x) -> mortal(x))')
|
45 |
+
>>> c = read_expr('mortal(socrates)')
|
46 |
+
>>> Prover9().prove(c, [p1,p2])
|
47 |
+
True
|
48 |
+
>>> TableauProver().prove(c, [p1,p2])
|
49 |
+
True
|
50 |
+
>>> ResolutionProver().prove(c, [p1,p2], verbose=True)
|
51 |
+
[1] {-mortal(socrates)} A
|
52 |
+
[2] {man(socrates)} A
|
53 |
+
[3] {-man(z2), mortal(z2)} A
|
54 |
+
[4] {-man(socrates)} (1, 3)
|
55 |
+
[5] {mortal(socrates)} (2, 3)
|
56 |
+
[6] {} (1, 5)
|
57 |
+
<BLANKLINE>
|
58 |
+
True
|
59 |
+
|
60 |
+
---------------------
|
61 |
+
The ``ProverCommand``
|
62 |
+
---------------------
|
63 |
+
|
64 |
+
A ``ProverCommand`` is a stateful holder for a theorem
|
65 |
+
prover. The command stores a theorem prover instance (of type ``Prover``),
|
66 |
+
a goal, a list of assumptions, the result of the proof, and a string version
|
67 |
+
of the entire proof. Corresponding to the three included ``Prover``
|
68 |
+
implementations, there are three ``ProverCommand`` implementations:
|
69 |
+
``Prover9Command``, ``TableauProverCommand``, and
|
70 |
+
``ResolutionProverCommand``.
|
71 |
+
|
72 |
+
The ``ProverCommand``'s constructor takes its goal and assumptions. The
|
73 |
+
``prove()`` command executes the ``Prover`` and ``proof()``
|
74 |
+
returns a String form of the proof
|
75 |
+
If the ``prove()`` method has not been called,
|
76 |
+
then the prover command will be unable to display a proof.
|
77 |
+
|
78 |
+
>>> prover = ResolutionProverCommand(c, [p1,p2])
|
79 |
+
>>> print(prover.proof())
|
80 |
+
Traceback (most recent call last):
|
81 |
+
File "...", line 1212, in __run
|
82 |
+
compileflags, 1) in test.globs
|
83 |
+
File "<doctest nltk/test/inference.doctest[10]>", line 1, in <module>
|
84 |
+
File "...", line ..., in proof
|
85 |
+
raise LookupError("You have to call prove() first to get a proof!")
|
86 |
+
LookupError: You have to call prove() first to get a proof!
|
87 |
+
>>> prover.prove()
|
88 |
+
True
|
89 |
+
>>> print(prover.proof())
|
90 |
+
[1] {-mortal(socrates)} A
|
91 |
+
[2] {man(socrates)} A
|
92 |
+
[3] {-man(z4), mortal(z4)} A
|
93 |
+
[4] {-man(socrates)} (1, 3)
|
94 |
+
[5] {mortal(socrates)} (2, 3)
|
95 |
+
[6] {} (1, 5)
|
96 |
+
<BLANKLINE>
|
97 |
+
|
98 |
+
The prover command stores the result of proving so that if ``prove()`` is
|
99 |
+
called again, then the command can return the result without executing the
|
100 |
+
prover again. This allows the user to access the result of the proof without
|
101 |
+
wasting time re-computing what it already knows.
|
102 |
+
|
103 |
+
>>> prover.prove()
|
104 |
+
True
|
105 |
+
>>> prover.prove()
|
106 |
+
True
|
107 |
+
|
108 |
+
The assumptions and goal may be accessed using the ``assumptions()`` and
|
109 |
+
``goal()`` methods, respectively.
|
110 |
+
|
111 |
+
>>> prover.assumptions()
|
112 |
+
[<ApplicationExpression man(socrates)>, <AllExpression all x.(man(x) -> mortal(x))>]
|
113 |
+
>>> prover.goal()
|
114 |
+
<ApplicationExpression mortal(socrates)>
|
115 |
+
|
116 |
+
The assumptions list may be modified using the ``add_assumptions()`` and
|
117 |
+
``retract_assumptions()`` methods. Both methods take a list of ``Expression``
|
118 |
+
objects. Since adding or removing assumptions may change the result of the
|
119 |
+
proof, the stored result is cleared when either of these methods are called.
|
120 |
+
That means that ``proof()`` will be unavailable until ``prove()`` is called and
|
121 |
+
a call to ``prove()`` will execute the theorem prover.
|
122 |
+
|
123 |
+
>>> prover.retract_assumptions([read_expr('man(socrates)')])
|
124 |
+
>>> print(prover.proof())
|
125 |
+
Traceback (most recent call last):
|
126 |
+
File "...", line 1212, in __run
|
127 |
+
compileflags, 1) in test.globs
|
128 |
+
File "<doctest nltk/test/inference.doctest[10]>", line 1, in <module>
|
129 |
+
File "...", line ..., in proof
|
130 |
+
raise LookupError("You have to call prove() first to get a proof!")
|
131 |
+
LookupError: You have to call prove() first to get a proof!
|
132 |
+
>>> prover.prove()
|
133 |
+
False
|
134 |
+
>>> print(prover.proof())
|
135 |
+
[1] {-mortal(socrates)} A
|
136 |
+
[2] {-man(z6), mortal(z6)} A
|
137 |
+
[3] {-man(socrates)} (1, 2)
|
138 |
+
<BLANKLINE>
|
139 |
+
>>> prover.add_assumptions([read_expr('man(socrates)')])
|
140 |
+
>>> prover.prove()
|
141 |
+
True
|
142 |
+
|
143 |
+
-------
|
144 |
+
Prover9
|
145 |
+
-------
|
146 |
+
|
147 |
+
Prover9 Installation
|
148 |
+
~~~~~~~~~~~~~~~~~~~~
|
149 |
+
|
150 |
+
You can download Prover9 from https://www.cs.unm.edu/~mccune/prover9/.
|
151 |
+
|
152 |
+
Extract the source code into a suitable directory and follow the
|
153 |
+
instructions in the Prover9 ``README.make`` file to compile the executables.
|
154 |
+
Install these into an appropriate location; the
|
155 |
+
``prover9_search`` variable is currently configured to look in the
|
156 |
+
following locations:
|
157 |
+
|
158 |
+
>>> p = Prover9()
|
159 |
+
>>> p.binary_locations()
|
160 |
+
['/usr/local/bin/prover9',
|
161 |
+
'/usr/local/bin/prover9/bin',
|
162 |
+
'/usr/local/bin',
|
163 |
+
'/usr/bin',
|
164 |
+
'/usr/local/prover9',
|
165 |
+
'/usr/local/share/prover9']
|
166 |
+
|
167 |
+
Alternatively, the environment variable ``PROVER9HOME`` may be configured with
|
168 |
+
the binary's location.
|
169 |
+
|
170 |
+
The path to the correct directory can be set manually in the following
|
171 |
+
manner:
|
172 |
+
|
173 |
+
>>> config_prover9(path='/usr/local/bin') # doctest: +SKIP
|
174 |
+
[Found prover9: /usr/local/bin/prover9]
|
175 |
+
|
176 |
+
If the executables cannot be found, ``Prover9`` will issue a warning message:
|
177 |
+
|
178 |
+
>>> p.prove() # doctest: +SKIP
|
179 |
+
Traceback (most recent call last):
|
180 |
+
...
|
181 |
+
LookupError:
|
182 |
+
===========================================================================
|
183 |
+
NLTK was unable to find the prover9 executable! Use config_prover9() or
|
184 |
+
set the PROVER9HOME environment variable.
|
185 |
+
<BLANKLINE>
|
186 |
+
>> config_prover9('/path/to/prover9')
|
187 |
+
<BLANKLINE>
|
188 |
+
For more information, on prover9, see:
|
189 |
+
<https://www.cs.unm.edu/~mccune/prover9/>
|
190 |
+
===========================================================================
|
191 |
+
|
192 |
+
|
193 |
+
Using Prover9
|
194 |
+
~~~~~~~~~~~~~
|
195 |
+
|
196 |
+
The general case in theorem proving is to determine whether ``S |- g``
|
197 |
+
holds, where ``S`` is a possibly empty set of assumptions, and ``g``
|
198 |
+
is a proof goal.
|
199 |
+
|
200 |
+
As mentioned earlier, NLTK input to ``Prover9`` must be
|
201 |
+
``Expression``\ s of ``nltk.sem.logic``. A ``Prover9`` instance is
|
202 |
+
initialized with a proof goal and, possibly, some assumptions. The
|
203 |
+
``prove()`` method attempts to find a proof of the goal, given the
|
204 |
+
list of assumptions (in this case, none).
|
205 |
+
|
206 |
+
>>> goal = read_expr('(man(x) <-> --man(x))')
|
207 |
+
>>> prover = Prover9Command(goal)
|
208 |
+
>>> prover.prove()
|
209 |
+
True
|
210 |
+
|
211 |
+
Given a ``ProverCommand`` instance ``prover``, the method
|
212 |
+
``prover.proof()`` will return a String of the extensive proof information
|
213 |
+
provided by Prover9, shown in abbreviated form here::
|
214 |
+
|
215 |
+
============================== Prover9 ===============================
|
216 |
+
Prover9 (32) version ...
|
217 |
+
Process ... was started by ... on ...
|
218 |
+
...
|
219 |
+
The command was ".../prover9 -f ...".
|
220 |
+
============================== end of head ===========================
|
221 |
+
|
222 |
+
============================== INPUT =================================
|
223 |
+
|
224 |
+
% Reading from file /var/...
|
225 |
+
|
226 |
+
|
227 |
+
formulas(goals).
|
228 |
+
(all x (man(x) -> man(x))).
|
229 |
+
end_of_list.
|
230 |
+
|
231 |
+
...
|
232 |
+
============================== end of search =========================
|
233 |
+
|
234 |
+
THEOREM PROVED
|
235 |
+
|
236 |
+
Exiting with 1 proof.
|
237 |
+
|
238 |
+
Process 6317 exit (max_proofs) Mon Jan 21 15:23:28 2008
|
239 |
+
|
240 |
+
|
241 |
+
As mentioned earlier, we may want to list some assumptions for
|
242 |
+
the proof, as shown here.
|
243 |
+
|
244 |
+
>>> g = read_expr('mortal(socrates)')
|
245 |
+
>>> a1 = read_expr('all x.(man(x) -> mortal(x))')
|
246 |
+
>>> prover = Prover9Command(g, assumptions=[a1])
|
247 |
+
>>> prover.print_assumptions()
|
248 |
+
all x.(man(x) -> mortal(x))
|
249 |
+
|
250 |
+
However, the assumptions are not sufficient to derive the goal:
|
251 |
+
|
252 |
+
>>> print(prover.prove())
|
253 |
+
False
|
254 |
+
|
255 |
+
So let's add another assumption:
|
256 |
+
|
257 |
+
>>> a2 = read_expr('man(socrates)')
|
258 |
+
>>> prover.add_assumptions([a2])
|
259 |
+
>>> prover.print_assumptions()
|
260 |
+
all x.(man(x) -> mortal(x))
|
261 |
+
man(socrates)
|
262 |
+
>>> print(prover.prove())
|
263 |
+
True
|
264 |
+
|
265 |
+
We can also show the assumptions in ``Prover9`` format.
|
266 |
+
|
267 |
+
>>> prover.print_assumptions(output_format='Prover9')
|
268 |
+
all x (man(x) -> mortal(x))
|
269 |
+
man(socrates)
|
270 |
+
|
271 |
+
>>> prover.print_assumptions(output_format='Spass')
|
272 |
+
Traceback (most recent call last):
|
273 |
+
. . .
|
274 |
+
NameError: Unrecognized value for 'output_format': Spass
|
275 |
+
|
276 |
+
Assumptions can be retracted from the list of assumptions.
|
277 |
+
|
278 |
+
>>> prover.retract_assumptions([a1])
|
279 |
+
>>> prover.print_assumptions()
|
280 |
+
man(socrates)
|
281 |
+
>>> prover.retract_assumptions([a1])
|
282 |
+
|
283 |
+
Statements can be loaded from a file and parsed. We can then add these
|
284 |
+
statements as new assumptions.
|
285 |
+
|
286 |
+
>>> g = read_expr('all x.(boxer(x) -> -boxerdog(x))')
|
287 |
+
>>> prover = Prover9Command(g)
|
288 |
+
>>> prover.prove()
|
289 |
+
False
|
290 |
+
>>> import nltk.data
|
291 |
+
>>> new = nltk.data.load('grammars/sample_grammars/background0.fol')
|
292 |
+
>>> for a in new:
|
293 |
+
... print(a)
|
294 |
+
all x.(boxerdog(x) -> dog(x))
|
295 |
+
all x.(boxer(x) -> person(x))
|
296 |
+
all x.-(dog(x) & person(x))
|
297 |
+
exists x.boxer(x)
|
298 |
+
exists x.boxerdog(x)
|
299 |
+
>>> prover.add_assumptions(new)
|
300 |
+
>>> print(prover.prove())
|
301 |
+
True
|
302 |
+
>>> print(prover.proof())
|
303 |
+
============================== prooftrans ============================
|
304 |
+
Prover9 (...) version ...
|
305 |
+
Process ... was started by ... on ...
|
306 |
+
...
|
307 |
+
The command was ".../prover9".
|
308 |
+
============================== end of head ===========================
|
309 |
+
<BLANKLINE>
|
310 |
+
============================== end of input ==========================
|
311 |
+
<BLANKLINE>
|
312 |
+
============================== PROOF =================================
|
313 |
+
<BLANKLINE>
|
314 |
+
% -------- Comments from original proof --------
|
315 |
+
% Proof 1 at ... seconds.
|
316 |
+
% Length of proof is 13.
|
317 |
+
% Level of proof is 4.
|
318 |
+
% Maximum clause weight is 0.
|
319 |
+
% Given clauses 0.
|
320 |
+
<BLANKLINE>
|
321 |
+
1 (all x (boxerdog(x) -> dog(x))). [assumption].
|
322 |
+
2 (all x (boxer(x) -> person(x))). [assumption].
|
323 |
+
3 (all x -(dog(x) & person(x))). [assumption].
|
324 |
+
6 (all x (boxer(x) -> -boxerdog(x))). [goal].
|
325 |
+
8 -boxerdog(x) | dog(x). [clausify(1)].
|
326 |
+
9 boxerdog(c3). [deny(6)].
|
327 |
+
11 -boxer(x) | person(x). [clausify(2)].
|
328 |
+
12 boxer(c3). [deny(6)].
|
329 |
+
14 -dog(x) | -person(x). [clausify(3)].
|
330 |
+
15 dog(c3). [resolve(9,a,8,a)].
|
331 |
+
18 person(c3). [resolve(12,a,11,a)].
|
332 |
+
19 -person(c3). [resolve(15,a,14,a)].
|
333 |
+
20 $F. [resolve(19,a,18,a)].
|
334 |
+
<BLANKLINE>
|
335 |
+
============================== end of proof ==========================
|
336 |
+
|
337 |
+
----------------------
|
338 |
+
The equiv() method
|
339 |
+
----------------------
|
340 |
+
|
341 |
+
One application of the theorem prover functionality is to check if
|
342 |
+
two Expressions have the same meaning.
|
343 |
+
The ``equiv()`` method calls a theorem prover to determine whether two
|
344 |
+
Expressions are logically equivalent.
|
345 |
+
|
346 |
+
>>> a = read_expr(r'exists x.(man(x) & walks(x))')
|
347 |
+
>>> b = read_expr(r'exists x.(walks(x) & man(x))')
|
348 |
+
>>> print(a.equiv(b))
|
349 |
+
True
|
350 |
+
|
351 |
+
The same method can be used on Discourse Representation Structures (DRSs).
|
352 |
+
In this case, each DRS is converted to a first order logic form, and then
|
353 |
+
passed to the theorem prover.
|
354 |
+
|
355 |
+
>>> dp = DrtParser()
|
356 |
+
>>> a = dp.parse(r'([x],[man(x), walks(x)])')
|
357 |
+
>>> b = dp.parse(r'([x],[walks(x), man(x)])')
|
358 |
+
>>> print(a.equiv(b))
|
359 |
+
True
|
360 |
+
|
361 |
+
|
362 |
+
--------------------------------
|
363 |
+
NLTK Interface to Model Builders
|
364 |
+
--------------------------------
|
365 |
+
|
366 |
+
The top-level to model builders is parallel to that for
|
367 |
+
theorem-provers. The ``ModelBuilder`` interface is located
|
368 |
+
in ``nltk.inference.api``. It is currently only implemented by
|
369 |
+
``Mace``, which interfaces with the Mace4 model builder.
|
370 |
+
|
371 |
+
Typically we use a model builder to show that some set of formulas has
|
372 |
+
a model, and is therefore consistent. One way of doing this is by
|
373 |
+
treating our candidate set of sentences as assumptions, and leaving
|
374 |
+
the goal unspecified.
|
375 |
+
Thus, the following interaction shows how both ``{a, c1}`` and ``{a, c2}``
|
376 |
+
are consistent sets, since Mace succeeds in a building a
|
377 |
+
model for each of them, while ``{c1, c2}`` is inconsistent.
|
378 |
+
|
379 |
+
>>> a3 = read_expr('exists x.(man(x) and walks(x))')
|
380 |
+
>>> c1 = read_expr('mortal(socrates)')
|
381 |
+
>>> c2 = read_expr('-mortal(socrates)')
|
382 |
+
>>> mace = Mace()
|
383 |
+
>>> print(mace.build_model(None, [a3, c1]))
|
384 |
+
True
|
385 |
+
>>> print(mace.build_model(None, [a3, c2]))
|
386 |
+
True
|
387 |
+
|
388 |
+
We can also use the model builder as an adjunct to theorem prover.
|
389 |
+
Let's suppose we are trying to prove ``S |- g``, i.e. that ``g``
|
390 |
+
is logically entailed by assumptions ``S = {s1, s2, ..., sn}``.
|
391 |
+
We can this same input to Mace4, and the model builder will try to
|
392 |
+
find a counterexample, that is, to show that ``g`` does *not* follow
|
393 |
+
from ``S``. So, given this input, Mace4 will try to find a model for
|
394 |
+
the set ``S' = {s1, s2, ..., sn, (not g)}``. If ``g`` fails to follow
|
395 |
+
from ``S``, then Mace4 may well return with a counterexample faster
|
396 |
+
than Prover9 concludes that it cannot find the required proof.
|
397 |
+
Conversely, if ``g`` *is* provable from ``S``, Mace4 may take a long
|
398 |
+
time unsuccessfully trying to find a counter model, and will eventually give up.
|
399 |
+
|
400 |
+
In the following example, we see that the model builder does succeed
|
401 |
+
in building a model of the assumptions together with the negation of
|
402 |
+
the goal. That is, it succeeds in finding a model
|
403 |
+
where there is a woman that every man loves; Adam is a man; Eve is a
|
404 |
+
woman; but Adam does not love Eve.
|
405 |
+
|
406 |
+
>>> a4 = read_expr('exists y. (woman(y) & all x. (man(x) -> love(x,y)))')
|
407 |
+
>>> a5 = read_expr('man(adam)')
|
408 |
+
>>> a6 = read_expr('woman(eve)')
|
409 |
+
>>> g = read_expr('love(adam,eve)')
|
410 |
+
>>> print(mace.build_model(g, [a4, a5, a6]))
|
411 |
+
True
|
412 |
+
|
413 |
+
The Model Builder will fail to find a model if the assumptions do entail
|
414 |
+
the goal. Mace will continue to look for models of ever-increasing sizes
|
415 |
+
until the end_size number is reached. By default, end_size is 500,
|
416 |
+
but it can be set manually for quicker response time.
|
417 |
+
|
418 |
+
>>> a7 = read_expr('all x.(man(x) -> mortal(x))')
|
419 |
+
>>> a8 = read_expr('man(socrates)')
|
420 |
+
>>> g2 = read_expr('mortal(socrates)')
|
421 |
+
>>> print(Mace(end_size=50).build_model(g2, [a7, a8]))
|
422 |
+
False
|
423 |
+
|
424 |
+
There is also a ``ModelBuilderCommand`` class that, like ``ProverCommand``,
|
425 |
+
stores a ``ModelBuilder``, a goal, assumptions, a result, and a model. The
|
426 |
+
only implementation in NLTK is ``MaceCommand``.
|
427 |
+
|
428 |
+
|
429 |
+
-----
|
430 |
+
Mace4
|
431 |
+
-----
|
432 |
+
|
433 |
+
Mace4 Installation
|
434 |
+
~~~~~~~~~~~~~~~~~~
|
435 |
+
|
436 |
+
Mace4 is packaged with Prover9, and can be downloaded from the same
|
437 |
+
source, namely https://www.cs.unm.edu/~mccune/prover9/. It is installed
|
438 |
+
in the same manner as Prover9.
|
439 |
+
|
440 |
+
Using Mace4
|
441 |
+
~~~~~~~~~~~
|
442 |
+
|
443 |
+
Check whether Mace4 can find a model.
|
444 |
+
|
445 |
+
>>> a = read_expr('(see(mary,john) & -(mary = john))')
|
446 |
+
>>> mb = MaceCommand(assumptions=[a])
|
447 |
+
>>> mb.build_model()
|
448 |
+
True
|
449 |
+
|
450 |
+
Show the model in 'tabular' format.
|
451 |
+
|
452 |
+
>>> print(mb.model(format='tabular'))
|
453 |
+
% number = 1
|
454 |
+
% seconds = 0
|
455 |
+
<BLANKLINE>
|
456 |
+
% Interpretation of size 2
|
457 |
+
<BLANKLINE>
|
458 |
+
john : 0
|
459 |
+
<BLANKLINE>
|
460 |
+
mary : 1
|
461 |
+
<BLANKLINE>
|
462 |
+
see :
|
463 |
+
| 0 1
|
464 |
+
---+----
|
465 |
+
0 | 0 0
|
466 |
+
1 | 1 0
|
467 |
+
<BLANKLINE>
|
468 |
+
|
469 |
+
Show the model in 'tabular' format.
|
470 |
+
|
471 |
+
>>> print(mb.model(format='cooked'))
|
472 |
+
% number = 1
|
473 |
+
% seconds = 0
|
474 |
+
<BLANKLINE>
|
475 |
+
% Interpretation of size 2
|
476 |
+
<BLANKLINE>
|
477 |
+
john = 0.
|
478 |
+
<BLANKLINE>
|
479 |
+
mary = 1.
|
480 |
+
<BLANKLINE>
|
481 |
+
- see(0,0).
|
482 |
+
- see(0,1).
|
483 |
+
see(1,0).
|
484 |
+
- see(1,1).
|
485 |
+
<BLANKLINE>
|
486 |
+
|
487 |
+
The property ``valuation`` accesses the stored ``Valuation``.
|
488 |
+
|
489 |
+
>>> print(mb.valuation)
|
490 |
+
{'john': 'a', 'mary': 'b', 'see': {('b', 'a')}}
|
491 |
+
|
492 |
+
We can return to our earlier example and inspect the model:
|
493 |
+
|
494 |
+
>>> mb = MaceCommand(g, assumptions=[a4, a5, a6])
|
495 |
+
>>> m = mb.build_model()
|
496 |
+
>>> print(mb.model(format='cooked'))
|
497 |
+
% number = 1
|
498 |
+
% seconds = 0
|
499 |
+
<BLANKLINE>
|
500 |
+
% Interpretation of size 2
|
501 |
+
<BLANKLINE>
|
502 |
+
adam = 0.
|
503 |
+
<BLANKLINE>
|
504 |
+
eve = 0.
|
505 |
+
<BLANKLINE>
|
506 |
+
c1 = 1.
|
507 |
+
<BLANKLINE>
|
508 |
+
man(0).
|
509 |
+
- man(1).
|
510 |
+
<BLANKLINE>
|
511 |
+
woman(0).
|
512 |
+
woman(1).
|
513 |
+
<BLANKLINE>
|
514 |
+
- love(0,0).
|
515 |
+
love(0,1).
|
516 |
+
- love(1,0).
|
517 |
+
- love(1,1).
|
518 |
+
<BLANKLINE>
|
519 |
+
|
520 |
+
Here, we can see that ``adam`` and ``eve`` have been assigned the same
|
521 |
+
individual, namely ``0`` as value; ``0`` is both a man and a woman; a second
|
522 |
+
individual ``1`` is also a woman; and ``0`` loves ``1``. Thus, this is
|
523 |
+
an interpretation in which there is a woman that every man loves but
|
524 |
+
Adam doesn't love Eve.
|
525 |
+
|
526 |
+
Mace can also be used with propositional logic.
|
527 |
+
|
528 |
+
>>> p = read_expr('P')
|
529 |
+
>>> q = read_expr('Q')
|
530 |
+
>>> mb = MaceCommand(q, [p, p>-q])
|
531 |
+
>>> mb.build_model()
|
532 |
+
True
|
533 |
+
>>> mb.valuation['P']
|
534 |
+
True
|
535 |
+
>>> mb.valuation['Q']
|
536 |
+
False
|
llmeval-env/lib/python3.10/site-packages/nltk/test/japanese.doctest
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
============================
|
5 |
+
Japanese Language Processing
|
6 |
+
============================
|
7 |
+
|
8 |
+
>>> from nltk import *
|
9 |
+
|
10 |
+
-------------
|
11 |
+
Corpus Access
|
12 |
+
-------------
|
13 |
+
|
14 |
+
KNB Corpus
|
15 |
+
----------
|
16 |
+
|
17 |
+
>>> from nltk.corpus import knbc
|
18 |
+
|
19 |
+
Access the words: this should produce a list of strings:
|
20 |
+
|
21 |
+
>>> type(knbc.words()[0]) is not bytes
|
22 |
+
True
|
23 |
+
|
24 |
+
Access the sentences: this should produce a list of lists of strings:
|
25 |
+
|
26 |
+
>>> type(knbc.sents()[0][0]) is not bytes
|
27 |
+
True
|
28 |
+
|
29 |
+
Access the tagged words: this should produce a list of word, tag pairs:
|
30 |
+
|
31 |
+
>>> type(knbc.tagged_words()[0])
|
32 |
+
<... 'tuple'>
|
33 |
+
|
34 |
+
Access the tagged sentences: this should produce a list of lists of word, tag pairs:
|
35 |
+
|
36 |
+
>>> type(knbc.tagged_sents()[0][0])
|
37 |
+
<... 'tuple'>
|
38 |
+
|
39 |
+
|
40 |
+
JEITA Corpus
|
41 |
+
------------
|
42 |
+
|
43 |
+
>>> from nltk.corpus import jeita
|
44 |
+
|
45 |
+
Access the tagged words: this should produce a list of word, tag pairs, where a tag is a string:
|
46 |
+
|
47 |
+
>>> type(jeita.tagged_words()[0][1]) is not bytes
|
48 |
+
True
|
llmeval-env/lib/python3.10/site-packages/nltk/test/meteor.doctest
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
.. -*- coding: utf-8 -*-
|
5 |
+
|
6 |
+
=============
|
7 |
+
METEOR tests
|
8 |
+
=============
|
9 |
+
|
10 |
+
No Alignment test
|
11 |
+
------------------
|
12 |
+
|
13 |
+
>>> from nltk.translate import meteor
|
14 |
+
>>> from nltk import word_tokenize
|
15 |
+
|
16 |
+
If the candidate has no alignment to any of the references, the METEOR score is 0.
|
17 |
+
|
18 |
+
>>> round(meteor(
|
19 |
+
... [word_tokenize('The candidate has no alignment to any of the references')],
|
20 |
+
... word_tokenize('John loves Mary')
|
21 |
+
... ), 4)
|
22 |
+
0.0
|
23 |
+
|
24 |
+
Tests based on wikipedia examples
|
25 |
+
---------------------------------
|
26 |
+
|
27 |
+
Testing on `wikipedia examples <https://en.wikipedia.org/wiki/METEOR#Examples>`_
|
28 |
+
|
29 |
+
>>> same_res = round(meteor(
|
30 |
+
... [word_tokenize('The cat sat on the mat')],
|
31 |
+
... word_tokenize('The cat sat on the mat')
|
32 |
+
... ), 4)
|
33 |
+
>>> abs(same_res - 0.9977) < 1e-2
|
34 |
+
True
|
35 |
+
|
36 |
+
>>> meteor(
|
37 |
+
... [word_tokenize('The cat sat on the mat')],
|
38 |
+
... word_tokenize('on the mat sat the cat')
|
39 |
+
... )
|
40 |
+
0.5
|
41 |
+
|
42 |
+
>>> round(meteor(
|
43 |
+
... [word_tokenize('The cat sat on the mat')],
|
44 |
+
... word_tokenize('The cat was sat on the mat')
|
45 |
+
... ), 4)
|
46 |
+
0.9654
|
47 |
+
|
48 |
+
Test corresponding to issue #2751, where METEOR score > 1
|
49 |
+
|
50 |
+
>>> round(meteor(
|
51 |
+
... [word_tokenize('create or update a vm set')],
|
52 |
+
... word_tokenize('creates or updates a virtual machine scale set')
|
53 |
+
... ), 4)
|
54 |
+
0.7806
|
llmeval-env/lib/python3.10/site-packages/nltk/test/nonmonotonic.doctest
ADDED
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
======================
|
5 |
+
Nonmonotonic Reasoning
|
6 |
+
======================
|
7 |
+
|
8 |
+
>>> from nltk.test.setup_fixt import check_binary
|
9 |
+
>>> check_binary('mace4')
|
10 |
+
|
11 |
+
>>> from nltk import *
|
12 |
+
>>> from nltk.inference.nonmonotonic import *
|
13 |
+
>>> from nltk.sem import logic
|
14 |
+
>>> logic._counter._value = 0
|
15 |
+
>>> read_expr = logic.Expression.fromstring
|
16 |
+
|
17 |
+
------------------------
|
18 |
+
Closed Domain Assumption
|
19 |
+
------------------------
|
20 |
+
|
21 |
+
The only entities in the domain are those found in the assumptions or goal.
|
22 |
+
If the domain only contains "A" and "B", then the expression "exists x.P(x)" can
|
23 |
+
be replaced with "P(A) | P(B)" and an expression "all x.P(x)" can be replaced
|
24 |
+
with "P(A) & P(B)".
|
25 |
+
|
26 |
+
>>> p1 = read_expr(r'all x.(man(x) -> mortal(x))')
|
27 |
+
>>> p2 = read_expr(r'man(Socrates)')
|
28 |
+
>>> c = read_expr(r'mortal(Socrates)')
|
29 |
+
>>> prover = Prover9Command(c, [p1,p2])
|
30 |
+
>>> prover.prove()
|
31 |
+
True
|
32 |
+
>>> cdp = ClosedDomainProver(prover)
|
33 |
+
>>> for a in cdp.assumptions(): print(a) # doctest: +SKIP
|
34 |
+
(man(Socrates) -> mortal(Socrates))
|
35 |
+
man(Socrates)
|
36 |
+
>>> cdp.prove()
|
37 |
+
True
|
38 |
+
|
39 |
+
>>> p1 = read_expr(r'exists x.walk(x)')
|
40 |
+
>>> p2 = read_expr(r'man(Socrates)')
|
41 |
+
>>> c = read_expr(r'walk(Socrates)')
|
42 |
+
>>> prover = Prover9Command(c, [p1,p2])
|
43 |
+
>>> prover.prove()
|
44 |
+
False
|
45 |
+
>>> cdp = ClosedDomainProver(prover)
|
46 |
+
>>> for a in cdp.assumptions(): print(a) # doctest: +SKIP
|
47 |
+
walk(Socrates)
|
48 |
+
man(Socrates)
|
49 |
+
>>> cdp.prove()
|
50 |
+
True
|
51 |
+
|
52 |
+
>>> p1 = read_expr(r'exists x.walk(x)')
|
53 |
+
>>> p2 = read_expr(r'man(Socrates)')
|
54 |
+
>>> p3 = read_expr(r'-walk(Bill)')
|
55 |
+
>>> c = read_expr(r'walk(Socrates)')
|
56 |
+
>>> prover = Prover9Command(c, [p1,p2,p3])
|
57 |
+
>>> prover.prove()
|
58 |
+
False
|
59 |
+
>>> cdp = ClosedDomainProver(prover)
|
60 |
+
>>> for a in cdp.assumptions(): print(a) # doctest: +SKIP
|
61 |
+
(walk(Socrates) | walk(Bill))
|
62 |
+
man(Socrates)
|
63 |
+
-walk(Bill)
|
64 |
+
>>> cdp.prove()
|
65 |
+
True
|
66 |
+
|
67 |
+
>>> p1 = read_expr(r'walk(Socrates)')
|
68 |
+
>>> p2 = read_expr(r'walk(Bill)')
|
69 |
+
>>> c = read_expr(r'all x.walk(x)')
|
70 |
+
>>> prover = Prover9Command(c, [p1,p2])
|
71 |
+
>>> prover.prove()
|
72 |
+
False
|
73 |
+
>>> cdp = ClosedDomainProver(prover)
|
74 |
+
>>> for a in cdp.assumptions(): print(a) # doctest: +SKIP
|
75 |
+
walk(Socrates)
|
76 |
+
walk(Bill)
|
77 |
+
>>> print(cdp.goal()) # doctest: +SKIP
|
78 |
+
(walk(Socrates) & walk(Bill))
|
79 |
+
>>> cdp.prove()
|
80 |
+
True
|
81 |
+
|
82 |
+
>>> p1 = read_expr(r'girl(mary)')
|
83 |
+
>>> p2 = read_expr(r'dog(rover)')
|
84 |
+
>>> p3 = read_expr(r'all x.(girl(x) -> -dog(x))')
|
85 |
+
>>> p4 = read_expr(r'all x.(dog(x) -> -girl(x))')
|
86 |
+
>>> p5 = read_expr(r'chase(mary, rover)')
|
87 |
+
>>> c = read_expr(r'exists y.(dog(y) & all x.(girl(x) -> chase(x,y)))')
|
88 |
+
>>> prover = Prover9Command(c, [p1,p2,p3,p4,p5])
|
89 |
+
>>> print(prover.prove())
|
90 |
+
False
|
91 |
+
>>> cdp = ClosedDomainProver(prover)
|
92 |
+
>>> for a in cdp.assumptions(): print(a) # doctest: +SKIP
|
93 |
+
girl(mary)
|
94 |
+
dog(rover)
|
95 |
+
((girl(rover) -> -dog(rover)) & (girl(mary) -> -dog(mary)))
|
96 |
+
((dog(rover) -> -girl(rover)) & (dog(mary) -> -girl(mary)))
|
97 |
+
chase(mary,rover)
|
98 |
+
>>> print(cdp.goal()) # doctest: +SKIP
|
99 |
+
((dog(rover) & (girl(rover) -> chase(rover,rover)) & (girl(mary) -> chase(mary,rover))) | (dog(mary) & (girl(rover) -> chase(rover,mary)) & (girl(mary) -> chase(mary,mary))))
|
100 |
+
>>> print(cdp.prove())
|
101 |
+
True
|
102 |
+
|
103 |
+
-----------------------
|
104 |
+
Unique Names Assumption
|
105 |
+
-----------------------
|
106 |
+
|
107 |
+
No two entities in the domain represent the same entity unless it can be
|
108 |
+
explicitly proven that they do. Therefore, if the domain contains "A" and "B",
|
109 |
+
then add the assumption "-(A = B)" if it is not the case that
|
110 |
+
"<assumptions> \|- (A = B)".
|
111 |
+
|
112 |
+
>>> p1 = read_expr(r'man(Socrates)')
|
113 |
+
>>> p2 = read_expr(r'man(Bill)')
|
114 |
+
>>> c = read_expr(r'exists x.exists y.-(x = y)')
|
115 |
+
>>> prover = Prover9Command(c, [p1,p2])
|
116 |
+
>>> prover.prove()
|
117 |
+
False
|
118 |
+
>>> unp = UniqueNamesProver(prover)
|
119 |
+
>>> for a in unp.assumptions(): print(a) # doctest: +SKIP
|
120 |
+
man(Socrates)
|
121 |
+
man(Bill)
|
122 |
+
-(Socrates = Bill)
|
123 |
+
>>> unp.prove()
|
124 |
+
True
|
125 |
+
|
126 |
+
>>> p1 = read_expr(r'all x.(walk(x) -> (x = Socrates))')
|
127 |
+
>>> p2 = read_expr(r'Bill = William')
|
128 |
+
>>> p3 = read_expr(r'Bill = Billy')
|
129 |
+
>>> c = read_expr(r'-walk(William)')
|
130 |
+
>>> prover = Prover9Command(c, [p1,p2,p3])
|
131 |
+
>>> prover.prove()
|
132 |
+
False
|
133 |
+
>>> unp = UniqueNamesProver(prover)
|
134 |
+
>>> for a in unp.assumptions(): print(a) # doctest: +SKIP
|
135 |
+
all x.(walk(x) -> (x = Socrates))
|
136 |
+
(Bill = William)
|
137 |
+
(Bill = Billy)
|
138 |
+
-(William = Socrates)
|
139 |
+
-(Billy = Socrates)
|
140 |
+
-(Socrates = Bill)
|
141 |
+
>>> unp.prove()
|
142 |
+
True
|
143 |
+
|
144 |
+
-----------------------
|
145 |
+
Closed World Assumption
|
146 |
+
-----------------------
|
147 |
+
|
148 |
+
The only entities that have certain properties are those that is it stated
|
149 |
+
have the properties. We accomplish this assumption by "completing" predicates.
|
150 |
+
|
151 |
+
If the assumptions contain "P(A)", then "all x.(P(x) -> (x=A))" is the completion
|
152 |
+
of "P". If the assumptions contain "all x.(ostrich(x) -> bird(x))", then
|
153 |
+
"all x.(bird(x) -> ostrich(x))" is the completion of "bird". If the
|
154 |
+
assumptions don't contain anything that are "P", then "all x.-P(x)" is the
|
155 |
+
completion of "P".
|
156 |
+
|
157 |
+
>>> p1 = read_expr(r'walk(Socrates)')
|
158 |
+
>>> p2 = read_expr(r'-(Socrates = Bill)')
|
159 |
+
>>> c = read_expr(r'-walk(Bill)')
|
160 |
+
>>> prover = Prover9Command(c, [p1,p2])
|
161 |
+
>>> prover.prove()
|
162 |
+
False
|
163 |
+
>>> cwp = ClosedWorldProver(prover)
|
164 |
+
>>> for a in cwp.assumptions(): print(a) # doctest: +SKIP
|
165 |
+
walk(Socrates)
|
166 |
+
-(Socrates = Bill)
|
167 |
+
all z1.(walk(z1) -> (z1 = Socrates))
|
168 |
+
>>> cwp.prove()
|
169 |
+
True
|
170 |
+
|
171 |
+
>>> p1 = read_expr(r'see(Socrates, John)')
|
172 |
+
>>> p2 = read_expr(r'see(John, Mary)')
|
173 |
+
>>> p3 = read_expr(r'-(Socrates = John)')
|
174 |
+
>>> p4 = read_expr(r'-(John = Mary)')
|
175 |
+
>>> c = read_expr(r'-see(Socrates, Mary)')
|
176 |
+
>>> prover = Prover9Command(c, [p1,p2,p3,p4])
|
177 |
+
>>> prover.prove()
|
178 |
+
False
|
179 |
+
>>> cwp = ClosedWorldProver(prover)
|
180 |
+
>>> for a in cwp.assumptions(): print(a) # doctest: +SKIP
|
181 |
+
see(Socrates,John)
|
182 |
+
see(John,Mary)
|
183 |
+
-(Socrates = John)
|
184 |
+
-(John = Mary)
|
185 |
+
all z3 z4.(see(z3,z4) -> (((z3 = Socrates) & (z4 = John)) | ((z3 = John) & (z4 = Mary))))
|
186 |
+
>>> cwp.prove()
|
187 |
+
True
|
188 |
+
|
189 |
+
>>> p1 = read_expr(r'all x.(ostrich(x) -> bird(x))')
|
190 |
+
>>> p2 = read_expr(r'bird(Tweety)')
|
191 |
+
>>> p3 = read_expr(r'-ostrich(Sam)')
|
192 |
+
>>> p4 = read_expr(r'Sam != Tweety')
|
193 |
+
>>> c = read_expr(r'-bird(Sam)')
|
194 |
+
>>> prover = Prover9Command(c, [p1,p2,p3,p4])
|
195 |
+
>>> prover.prove()
|
196 |
+
False
|
197 |
+
>>> cwp = ClosedWorldProver(prover)
|
198 |
+
>>> for a in cwp.assumptions(): print(a) # doctest: +SKIP
|
199 |
+
all x.(ostrich(x) -> bird(x))
|
200 |
+
bird(Tweety)
|
201 |
+
-ostrich(Sam)
|
202 |
+
-(Sam = Tweety)
|
203 |
+
all z7.-ostrich(z7)
|
204 |
+
all z8.(bird(z8) -> ((z8 = Tweety) | ostrich(z8)))
|
205 |
+
>>> print(cwp.prove())
|
206 |
+
True
|
207 |
+
|
208 |
+
-----------------------
|
209 |
+
Multi-Decorator Example
|
210 |
+
-----------------------
|
211 |
+
|
212 |
+
Decorators can be nested to utilize multiple assumptions.
|
213 |
+
|
214 |
+
>>> p1 = read_expr(r'see(Socrates, John)')
|
215 |
+
>>> p2 = read_expr(r'see(John, Mary)')
|
216 |
+
>>> c = read_expr(r'-see(Socrates, Mary)')
|
217 |
+
>>> prover = Prover9Command(c, [p1,p2])
|
218 |
+
>>> print(prover.prove())
|
219 |
+
False
|
220 |
+
>>> cmd = ClosedDomainProver(UniqueNamesProver(ClosedWorldProver(prover)))
|
221 |
+
>>> print(cmd.prove())
|
222 |
+
True
|
223 |
+
|
224 |
+
-----------------
|
225 |
+
Default Reasoning
|
226 |
+
-----------------
|
227 |
+
>>> logic._counter._value = 0
|
228 |
+
>>> premises = []
|
229 |
+
|
230 |
+
define the taxonomy
|
231 |
+
|
232 |
+
>>> premises.append(read_expr(r'all x.(elephant(x) -> animal(x))'))
|
233 |
+
>>> premises.append(read_expr(r'all x.(bird(x) -> animal(x))'))
|
234 |
+
>>> premises.append(read_expr(r'all x.(dove(x) -> bird(x))'))
|
235 |
+
>>> premises.append(read_expr(r'all x.(ostrich(x) -> bird(x))'))
|
236 |
+
>>> premises.append(read_expr(r'all x.(flying_ostrich(x) -> ostrich(x))'))
|
237 |
+
|
238 |
+
default the properties using abnormalities
|
239 |
+
|
240 |
+
>>> premises.append(read_expr(r'all x.((animal(x) & -Ab1(x)) -> -fly(x))')) #normal animals don't fly
|
241 |
+
>>> premises.append(read_expr(r'all x.((bird(x) & -Ab2(x)) -> fly(x))')) #normal birds fly
|
242 |
+
>>> premises.append(read_expr(r'all x.((ostrich(x) & -Ab3(x)) -> -fly(x))')) #normal ostriches don't fly
|
243 |
+
|
244 |
+
specify abnormal entities
|
245 |
+
|
246 |
+
>>> premises.append(read_expr(r'all x.(bird(x) -> Ab1(x))')) #flight
|
247 |
+
>>> premises.append(read_expr(r'all x.(ostrich(x) -> Ab2(x))')) #non-flying bird
|
248 |
+
>>> premises.append(read_expr(r'all x.(flying_ostrich(x) -> Ab3(x))')) #flying ostrich
|
249 |
+
|
250 |
+
define entities
|
251 |
+
|
252 |
+
>>> premises.append(read_expr(r'elephant(el)'))
|
253 |
+
>>> premises.append(read_expr(r'dove(do)'))
|
254 |
+
>>> premises.append(read_expr(r'ostrich(os)'))
|
255 |
+
|
256 |
+
print the augmented assumptions list
|
257 |
+
|
258 |
+
>>> prover = Prover9Command(None, premises)
|
259 |
+
>>> command = UniqueNamesProver(ClosedWorldProver(prover))
|
260 |
+
>>> for a in command.assumptions(): print(a) # doctest: +SKIP
|
261 |
+
all x.(elephant(x) -> animal(x))
|
262 |
+
all x.(bird(x) -> animal(x))
|
263 |
+
all x.(dove(x) -> bird(x))
|
264 |
+
all x.(ostrich(x) -> bird(x))
|
265 |
+
all x.(flying_ostrich(x) -> ostrich(x))
|
266 |
+
all x.((animal(x) & -Ab1(x)) -> -fly(x))
|
267 |
+
all x.((bird(x) & -Ab2(x)) -> fly(x))
|
268 |
+
all x.((ostrich(x) & -Ab3(x)) -> -fly(x))
|
269 |
+
all x.(bird(x) -> Ab1(x))
|
270 |
+
all x.(ostrich(x) -> Ab2(x))
|
271 |
+
all x.(flying_ostrich(x) -> Ab3(x))
|
272 |
+
elephant(el)
|
273 |
+
dove(do)
|
274 |
+
ostrich(os)
|
275 |
+
all z1.(animal(z1) -> (elephant(z1) | bird(z1)))
|
276 |
+
all z2.(Ab1(z2) -> bird(z2))
|
277 |
+
all z3.(bird(z3) -> (dove(z3) | ostrich(z3)))
|
278 |
+
all z4.(dove(z4) -> (z4 = do))
|
279 |
+
all z5.(Ab2(z5) -> ostrich(z5))
|
280 |
+
all z6.(Ab3(z6) -> flying_ostrich(z6))
|
281 |
+
all z7.(ostrich(z7) -> ((z7 = os) | flying_ostrich(z7)))
|
282 |
+
all z8.-flying_ostrich(z8)
|
283 |
+
all z9.(elephant(z9) -> (z9 = el))
|
284 |
+
-(el = os)
|
285 |
+
-(el = do)
|
286 |
+
-(os = do)
|
287 |
+
|
288 |
+
>>> UniqueNamesProver(ClosedWorldProver(Prover9Command(read_expr('-fly(el)'), premises))).prove()
|
289 |
+
True
|
290 |
+
>>> UniqueNamesProver(ClosedWorldProver(Prover9Command(read_expr('fly(do)'), premises))).prove()
|
291 |
+
True
|
292 |
+
>>> UniqueNamesProver(ClosedWorldProver(Prover9Command(read_expr('-fly(os)'), premises))).prove()
|
293 |
+
True
|
llmeval-env/lib/python3.10/site-packages/nltk/test/paice.doctest
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
=====================================================
|
3 |
+
PAICE's evaluation statistics for stemming algorithms
|
4 |
+
=====================================================
|
5 |
+
|
6 |
+
Given a list of words with their real lemmas and stems according to stemming algorithm under evaluation,
|
7 |
+
counts Understemming Index (UI), Overstemming Index (OI), Stemming Weight (SW) and Error-rate relative to truncation (ERRT).
|
8 |
+
|
9 |
+
>>> from nltk.metrics import Paice
|
10 |
+
|
11 |
+
|
12 |
+
-------------------------------------
|
13 |
+
Understemming and Overstemming values
|
14 |
+
-------------------------------------
|
15 |
+
|
16 |
+
>>> lemmas = {'kneel': ['kneel', 'knelt'],
|
17 |
+
... 'range': ['range', 'ranged'],
|
18 |
+
... 'ring': ['ring', 'rang', 'rung']}
|
19 |
+
>>> stems = {'kneel': ['kneel'],
|
20 |
+
... 'knelt': ['knelt'],
|
21 |
+
... 'rang': ['rang', 'range', 'ranged'],
|
22 |
+
... 'ring': ['ring'],
|
23 |
+
... 'rung': ['rung']}
|
24 |
+
>>> p = Paice(lemmas, stems)
|
25 |
+
>>> p.gumt, p.gdmt, p.gwmt, p.gdnt
|
26 |
+
(4.0, 5.0, 2.0, 16.0)
|
27 |
+
|
28 |
+
>>> p.ui, p.oi, p.sw
|
29 |
+
(0.8..., 0.125..., 0.15625...)
|
30 |
+
|
31 |
+
>>> p.errt
|
32 |
+
1.0
|
33 |
+
|
34 |
+
>>> [('{0:.3f}'.format(a), '{0:.3f}'.format(b)) for a, b in p.coords]
|
35 |
+
[('0.000', '1.000'), ('0.000', '0.375'), ('0.600', '0.125'), ('0.800', '0.125')]
|
llmeval-env/lib/python3.10/site-packages/nltk/test/parse.doctest
ADDED
@@ -0,0 +1,933 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
=========
|
5 |
+
Parsing
|
6 |
+
=========
|
7 |
+
|
8 |
+
Unit tests for the Context Free Grammar class
|
9 |
+
---------------------------------------------
|
10 |
+
|
11 |
+
>>> import pickle
|
12 |
+
>>> import subprocess
|
13 |
+
>>> import sys
|
14 |
+
>>> from nltk import Nonterminal, nonterminals, Production, CFG
|
15 |
+
|
16 |
+
>>> nt1 = Nonterminal('NP')
|
17 |
+
>>> nt2 = Nonterminal('VP')
|
18 |
+
|
19 |
+
>>> nt1.symbol()
|
20 |
+
'NP'
|
21 |
+
|
22 |
+
>>> nt1 == Nonterminal('NP')
|
23 |
+
True
|
24 |
+
|
25 |
+
>>> nt1 == nt2
|
26 |
+
False
|
27 |
+
|
28 |
+
>>> S, NP, VP, PP = nonterminals('S, NP, VP, PP')
|
29 |
+
>>> N, V, P, DT = nonterminals('N, V, P, DT')
|
30 |
+
|
31 |
+
>>> prod1 = Production(S, [NP, VP])
|
32 |
+
>>> prod2 = Production(NP, [DT, NP])
|
33 |
+
|
34 |
+
>>> prod1.lhs()
|
35 |
+
S
|
36 |
+
|
37 |
+
>>> prod1.rhs()
|
38 |
+
(NP, VP)
|
39 |
+
|
40 |
+
>>> prod1 == Production(S, [NP, VP])
|
41 |
+
True
|
42 |
+
|
43 |
+
>>> prod1 == prod2
|
44 |
+
False
|
45 |
+
|
46 |
+
>>> grammar = CFG.fromstring("""
|
47 |
+
... S -> NP VP
|
48 |
+
... PP -> P NP
|
49 |
+
... NP -> 'the' N | N PP | 'the' N PP
|
50 |
+
... VP -> V NP | V PP | V NP PP
|
51 |
+
... N -> 'cat'
|
52 |
+
... N -> 'dog'
|
53 |
+
... N -> 'rug'
|
54 |
+
... V -> 'chased'
|
55 |
+
... V -> 'sat'
|
56 |
+
... P -> 'in'
|
57 |
+
... P -> 'on'
|
58 |
+
... """)
|
59 |
+
|
60 |
+
>>> cmd = """import pickle
|
61 |
+
... from nltk import Production
|
62 |
+
... p = Production('S', ['NP', 'VP'])
|
63 |
+
... print(pickle.dumps(p))
|
64 |
+
... """
|
65 |
+
|
66 |
+
>>> # Start a subprocess to simulate pickling in another process
|
67 |
+
>>> proc = subprocess.run([sys.executable, '-c', cmd], stdout=subprocess.PIPE)
|
68 |
+
>>> p1 = pickle.loads(eval(proc.stdout))
|
69 |
+
>>> p2 = Production('S', ['NP', 'VP'])
|
70 |
+
>>> print(hash(p1) == hash(p2))
|
71 |
+
True
|
72 |
+
|
73 |
+
Unit tests for the rd (Recursive Descent Parser) class
|
74 |
+
------------------------------------------------------
|
75 |
+
|
76 |
+
Create and run a recursive descent parser over both a syntactically ambiguous
|
77 |
+
and unambiguous sentence.
|
78 |
+
|
79 |
+
>>> from nltk.parse import RecursiveDescentParser
|
80 |
+
>>> rd = RecursiveDescentParser(grammar)
|
81 |
+
|
82 |
+
>>> sentence1 = 'the cat chased the dog'.split()
|
83 |
+
>>> sentence2 = 'the cat chased the dog on the rug'.split()
|
84 |
+
|
85 |
+
>>> for t in rd.parse(sentence1):
|
86 |
+
... print(t)
|
87 |
+
(S (NP the (N cat)) (VP (V chased) (NP the (N dog))))
|
88 |
+
|
89 |
+
>>> for t in rd.parse(sentence2):
|
90 |
+
... print(t)
|
91 |
+
(S
|
92 |
+
(NP the (N cat))
|
93 |
+
(VP (V chased) (NP the (N dog) (PP (P on) (NP the (N rug))))))
|
94 |
+
(S
|
95 |
+
(NP the (N cat))
|
96 |
+
(VP (V chased) (NP the (N dog)) (PP (P on) (NP the (N rug)))))
|
97 |
+
|
98 |
+
|
99 |
+
(dolist (expr doctest-font-lock-keywords)
|
100 |
+
(add-to-list 'font-lock-keywords expr))
|
101 |
+
|
102 |
+
font-lock-keywords
|
103 |
+
(add-to-list 'font-lock-keywords
|
104 |
+
(car doctest-font-lock-keywords))
|
105 |
+
|
106 |
+
|
107 |
+
Unit tests for the sr (Shift Reduce Parser) class
|
108 |
+
-------------------------------------------------
|
109 |
+
|
110 |
+
Create and run a shift reduce parser over both a syntactically ambiguous
|
111 |
+
and unambiguous sentence. Note that unlike the recursive descent parser, one
|
112 |
+
and only one parse is ever returned.
|
113 |
+
|
114 |
+
>>> from nltk.parse import ShiftReduceParser
|
115 |
+
>>> sr = ShiftReduceParser(grammar)
|
116 |
+
|
117 |
+
>>> sentence1 = 'the cat chased the dog'.split()
|
118 |
+
>>> sentence2 = 'the cat chased the dog on the rug'.split()
|
119 |
+
|
120 |
+
>>> for t in sr.parse(sentence1):
|
121 |
+
... print(t)
|
122 |
+
(S (NP the (N cat)) (VP (V chased) (NP the (N dog))))
|
123 |
+
|
124 |
+
|
125 |
+
The shift reduce parser uses heuristics to decide what to do when there are
|
126 |
+
multiple possible shift or reduce operations available - for the supplied
|
127 |
+
grammar clearly the wrong operation is selected.
|
128 |
+
|
129 |
+
>>> for t in sr.parse(sentence2):
|
130 |
+
... print(t)
|
131 |
+
|
132 |
+
|
133 |
+
Unit tests for the Chart Parser class
|
134 |
+
-------------------------------------
|
135 |
+
|
136 |
+
We use the demo() function for testing.
|
137 |
+
We must turn off showing of times.
|
138 |
+
|
139 |
+
>>> import nltk
|
140 |
+
|
141 |
+
First we test tracing with a short sentence
|
142 |
+
|
143 |
+
>>> nltk.parse.chart.demo(2, print_times=False, trace=1,
|
144 |
+
... sent='I saw a dog', numparses=1)
|
145 |
+
* Sentence:
|
146 |
+
I saw a dog
|
147 |
+
['I', 'saw', 'a', 'dog']
|
148 |
+
<BLANKLINE>
|
149 |
+
* Strategy: Bottom-up
|
150 |
+
<BLANKLINE>
|
151 |
+
|. I . saw . a . dog .|
|
152 |
+
|[---------] . . .| [0:1] 'I'
|
153 |
+
|. [---------] . .| [1:2] 'saw'
|
154 |
+
|. . [---------] .| [2:3] 'a'
|
155 |
+
|. . . [---------]| [3:4] 'dog'
|
156 |
+
|> . . . .| [0:0] NP -> * 'I'
|
157 |
+
|[---------] . . .| [0:1] NP -> 'I' *
|
158 |
+
|> . . . .| [0:0] S -> * NP VP
|
159 |
+
|> . . . .| [0:0] NP -> * NP PP
|
160 |
+
|[---------> . . .| [0:1] S -> NP * VP
|
161 |
+
|[---------> . . .| [0:1] NP -> NP * PP
|
162 |
+
|. > . . .| [1:1] Verb -> * 'saw'
|
163 |
+
|. [---------] . .| [1:2] Verb -> 'saw' *
|
164 |
+
|. > . . .| [1:1] VP -> * Verb NP
|
165 |
+
|. > . . .| [1:1] VP -> * Verb
|
166 |
+
|. [---------> . .| [1:2] VP -> Verb * NP
|
167 |
+
|. [---------] . .| [1:2] VP -> Verb *
|
168 |
+
|. > . . .| [1:1] VP -> * VP PP
|
169 |
+
|[-------------------] . .| [0:2] S -> NP VP *
|
170 |
+
|. [---------> . .| [1:2] VP -> VP * PP
|
171 |
+
|. . > . .| [2:2] Det -> * 'a'
|
172 |
+
|. . [---------] .| [2:3] Det -> 'a' *
|
173 |
+
|. . > . .| [2:2] NP -> * Det Noun
|
174 |
+
|. . [---------> .| [2:3] NP -> Det * Noun
|
175 |
+
|. . . > .| [3:3] Noun -> * 'dog'
|
176 |
+
|. . . [---------]| [3:4] Noun -> 'dog' *
|
177 |
+
|. . [-------------------]| [2:4] NP -> Det Noun *
|
178 |
+
|. . > . .| [2:2] S -> * NP VP
|
179 |
+
|. . > . .| [2:2] NP -> * NP PP
|
180 |
+
|. [-----------------------------]| [1:4] VP -> Verb NP *
|
181 |
+
|. . [------------------->| [2:4] S -> NP * VP
|
182 |
+
|. . [------------------->| [2:4] NP -> NP * PP
|
183 |
+
|[=======================================]| [0:4] S -> NP VP *
|
184 |
+
|. [----------------------------->| [1:4] VP -> VP * PP
|
185 |
+
Nr edges in chart: 33
|
186 |
+
(S (NP I) (VP (Verb saw) (NP (Det a) (Noun dog))))
|
187 |
+
<BLANKLINE>
|
188 |
+
|
189 |
+
Then we test the different parsing Strategies.
|
190 |
+
Note that the number of edges differ between the strategies.
|
191 |
+
|
192 |
+
Top-down
|
193 |
+
|
194 |
+
>>> nltk.parse.chart.demo(1, print_times=False, trace=0,
|
195 |
+
... sent='I saw John with a dog', numparses=2)
|
196 |
+
* Sentence:
|
197 |
+
I saw John with a dog
|
198 |
+
['I', 'saw', 'John', 'with', 'a', 'dog']
|
199 |
+
<BLANKLINE>
|
200 |
+
* Strategy: Top-down
|
201 |
+
<BLANKLINE>
|
202 |
+
Nr edges in chart: 48
|
203 |
+
(S
|
204 |
+
(NP I)
|
205 |
+
(VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
|
206 |
+
(S
|
207 |
+
(NP I)
|
208 |
+
(VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
|
209 |
+
<BLANKLINE>
|
210 |
+
|
211 |
+
Bottom-up
|
212 |
+
|
213 |
+
>>> nltk.parse.chart.demo(2, print_times=False, trace=0,
|
214 |
+
... sent='I saw John with a dog', numparses=2)
|
215 |
+
* Sentence:
|
216 |
+
I saw John with a dog
|
217 |
+
['I', 'saw', 'John', 'with', 'a', 'dog']
|
218 |
+
<BLANKLINE>
|
219 |
+
* Strategy: Bottom-up
|
220 |
+
<BLANKLINE>
|
221 |
+
Nr edges in chart: 53
|
222 |
+
(S
|
223 |
+
(NP I)
|
224 |
+
(VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
|
225 |
+
(S
|
226 |
+
(NP I)
|
227 |
+
(VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
|
228 |
+
<BLANKLINE>
|
229 |
+
|
230 |
+
Bottom-up Left-Corner
|
231 |
+
|
232 |
+
>>> nltk.parse.chart.demo(3, print_times=False, trace=0,
|
233 |
+
... sent='I saw John with a dog', numparses=2)
|
234 |
+
* Sentence:
|
235 |
+
I saw John with a dog
|
236 |
+
['I', 'saw', 'John', 'with', 'a', 'dog']
|
237 |
+
<BLANKLINE>
|
238 |
+
* Strategy: Bottom-up left-corner
|
239 |
+
<BLANKLINE>
|
240 |
+
Nr edges in chart: 36
|
241 |
+
(S
|
242 |
+
(NP I)
|
243 |
+
(VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
|
244 |
+
(S
|
245 |
+
(NP I)
|
246 |
+
(VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
|
247 |
+
<BLANKLINE>
|
248 |
+
|
249 |
+
Left-Corner with Bottom-Up Filter
|
250 |
+
|
251 |
+
>>> nltk.parse.chart.demo(4, print_times=False, trace=0,
|
252 |
+
... sent='I saw John with a dog', numparses=2)
|
253 |
+
* Sentence:
|
254 |
+
I saw John with a dog
|
255 |
+
['I', 'saw', 'John', 'with', 'a', 'dog']
|
256 |
+
<BLANKLINE>
|
257 |
+
* Strategy: Filtered left-corner
|
258 |
+
<BLANKLINE>
|
259 |
+
Nr edges in chart: 28
|
260 |
+
(S
|
261 |
+
(NP I)
|
262 |
+
(VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
|
263 |
+
(S
|
264 |
+
(NP I)
|
265 |
+
(VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
|
266 |
+
<BLANKLINE>
|
267 |
+
|
268 |
+
The stepping chart parser
|
269 |
+
|
270 |
+
>>> nltk.parse.chart.demo(5, print_times=False, trace=1,
|
271 |
+
... sent='I saw John with a dog', numparses=2)
|
272 |
+
* Sentence:
|
273 |
+
I saw John with a dog
|
274 |
+
['I', 'saw', 'John', 'with', 'a', 'dog']
|
275 |
+
<BLANKLINE>
|
276 |
+
* Strategy: Stepping (top-down vs bottom-up)
|
277 |
+
<BLANKLINE>
|
278 |
+
*** SWITCH TO TOP DOWN
|
279 |
+
|[------] . . . . .| [0:1] 'I'
|
280 |
+
|. [------] . . . .| [1:2] 'saw'
|
281 |
+
|. . [------] . . .| [2:3] 'John'
|
282 |
+
|. . . [------] . .| [3:4] 'with'
|
283 |
+
|. . . . [------] .| [4:5] 'a'
|
284 |
+
|. . . . . [------]| [5:6] 'dog'
|
285 |
+
|> . . . . . .| [0:0] S -> * NP VP
|
286 |
+
|> . . . . . .| [0:0] NP -> * NP PP
|
287 |
+
|> . . . . . .| [0:0] NP -> * Det Noun
|
288 |
+
|> . . . . . .| [0:0] NP -> * 'I'
|
289 |
+
|[------] . . . . .| [0:1] NP -> 'I' *
|
290 |
+
|[------> . . . . .| [0:1] S -> NP * VP
|
291 |
+
|[------> . . . . .| [0:1] NP -> NP * PP
|
292 |
+
|. > . . . . .| [1:1] VP -> * VP PP
|
293 |
+
|. > . . . . .| [1:1] VP -> * Verb NP
|
294 |
+
|. > . . . . .| [1:1] VP -> * Verb
|
295 |
+
|. > . . . . .| [1:1] Verb -> * 'saw'
|
296 |
+
|. [------] . . . .| [1:2] Verb -> 'saw' *
|
297 |
+
|. [------> . . . .| [1:2] VP -> Verb * NP
|
298 |
+
|. [------] . . . .| [1:2] VP -> Verb *
|
299 |
+
|[-------------] . . . .| [0:2] S -> NP VP *
|
300 |
+
|. [------> . . . .| [1:2] VP -> VP * PP
|
301 |
+
*** SWITCH TO BOTTOM UP
|
302 |
+
|. . > . . . .| [2:2] NP -> * 'John'
|
303 |
+
|. . . > . . .| [3:3] PP -> * 'with' NP
|
304 |
+
|. . . > . . .| [3:3] Prep -> * 'with'
|
305 |
+
|. . . . > . .| [4:4] Det -> * 'a'
|
306 |
+
|. . . . . > .| [5:5] Noun -> * 'dog'
|
307 |
+
|. . [------] . . .| [2:3] NP -> 'John' *
|
308 |
+
|. . . [------> . .| [3:4] PP -> 'with' * NP
|
309 |
+
|. . . [------] . .| [3:4] Prep -> 'with' *
|
310 |
+
|. . . . [------] .| [4:5] Det -> 'a' *
|
311 |
+
|. . . . . [------]| [5:6] Noun -> 'dog' *
|
312 |
+
|. [-------------] . . .| [1:3] VP -> Verb NP *
|
313 |
+
|[--------------------] . . .| [0:3] S -> NP VP *
|
314 |
+
|. [-------------> . . .| [1:3] VP -> VP * PP
|
315 |
+
|. . > . . . .| [2:2] S -> * NP VP
|
316 |
+
|. . > . . . .| [2:2] NP -> * NP PP
|
317 |
+
|. . . . > . .| [4:4] NP -> * Det Noun
|
318 |
+
|. . [------> . . .| [2:3] S -> NP * VP
|
319 |
+
|. . [------> . . .| [2:3] NP -> NP * PP
|
320 |
+
|. . . . [------> .| [4:5] NP -> Det * Noun
|
321 |
+
|. . . . [-------------]| [4:6] NP -> Det Noun *
|
322 |
+
|. . . [--------------------]| [3:6] PP -> 'with' NP *
|
323 |
+
|. [----------------------------------]| [1:6] VP -> VP PP *
|
324 |
+
*** SWITCH TO TOP DOWN
|
325 |
+
|. . > . . . .| [2:2] NP -> * Det Noun
|
326 |
+
|. . . . > . .| [4:4] NP -> * NP PP
|
327 |
+
|. . . > . . .| [3:3] VP -> * VP PP
|
328 |
+
|. . . > . . .| [3:3] VP -> * Verb NP
|
329 |
+
|. . . > . . .| [3:3] VP -> * Verb
|
330 |
+
|[=========================================]| [0:6] S -> NP VP *
|
331 |
+
|. [---------------------------------->| [1:6] VP -> VP * PP
|
332 |
+
|. . [---------------------------]| [2:6] NP -> NP PP *
|
333 |
+
|. . . . [------------->| [4:6] NP -> NP * PP
|
334 |
+
|. [----------------------------------]| [1:6] VP -> Verb NP *
|
335 |
+
|. . [--------------------------->| [2:6] S -> NP * VP
|
336 |
+
|. . [--------------------------->| [2:6] NP -> NP * PP
|
337 |
+
|[=========================================]| [0:6] S -> NP VP *
|
338 |
+
|. [---------------------------------->| [1:6] VP -> VP * PP
|
339 |
+
|. . . . . . >| [6:6] VP -> * VP PP
|
340 |
+
|. . . . . . >| [6:6] VP -> * Verb NP
|
341 |
+
|. . . . . . >| [6:6] VP -> * Verb
|
342 |
+
*** SWITCH TO BOTTOM UP
|
343 |
+
|. . . . > . .| [4:4] S -> * NP VP
|
344 |
+
|. . . . [------------->| [4:6] S -> NP * VP
|
345 |
+
*** SWITCH TO TOP DOWN
|
346 |
+
*** SWITCH TO BOTTOM UP
|
347 |
+
*** SWITCH TO TOP DOWN
|
348 |
+
*** SWITCH TO BOTTOM UP
|
349 |
+
*** SWITCH TO TOP DOWN
|
350 |
+
*** SWITCH TO BOTTOM UP
|
351 |
+
Nr edges in chart: 61
|
352 |
+
(S
|
353 |
+
(NP I)
|
354 |
+
(VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
|
355 |
+
(S
|
356 |
+
(NP I)
|
357 |
+
(VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
|
358 |
+
<BLANKLINE>
|
359 |
+
|
360 |
+
|
361 |
+
Unit tests for the Incremental Chart Parser class
|
362 |
+
-------------------------------------------------
|
363 |
+
|
364 |
+
The incremental chart parsers are defined in earleychart.py.
|
365 |
+
We use the demo() function for testing. We must turn off showing of times.
|
366 |
+
|
367 |
+
>>> import nltk
|
368 |
+
|
369 |
+
Earley Chart Parser
|
370 |
+
|
371 |
+
>>> nltk.parse.earleychart.demo(print_times=False, trace=1,
|
372 |
+
... sent='I saw John with a dog', numparses=2)
|
373 |
+
* Sentence:
|
374 |
+
I saw John with a dog
|
375 |
+
['I', 'saw', 'John', 'with', 'a', 'dog']
|
376 |
+
<BLANKLINE>
|
377 |
+
|. I . saw . John . with . a . dog .|
|
378 |
+
|[------] . . . . .| [0:1] 'I'
|
379 |
+
|. [------] . . . .| [1:2] 'saw'
|
380 |
+
|. . [------] . . .| [2:3] 'John'
|
381 |
+
|. . . [------] . .| [3:4] 'with'
|
382 |
+
|. . . . [------] .| [4:5] 'a'
|
383 |
+
|. . . . . [------]| [5:6] 'dog'
|
384 |
+
|> . . . . . .| [0:0] S -> * NP VP
|
385 |
+
|> . . . . . .| [0:0] NP -> * NP PP
|
386 |
+
|> . . . . . .| [0:0] NP -> * Det Noun
|
387 |
+
|> . . . . . .| [0:0] NP -> * 'I'
|
388 |
+
|[------] . . . . .| [0:1] NP -> 'I' *
|
389 |
+
|[------> . . . . .| [0:1] S -> NP * VP
|
390 |
+
|[------> . . . . .| [0:1] NP -> NP * PP
|
391 |
+
|. > . . . . .| [1:1] VP -> * VP PP
|
392 |
+
|. > . . . . .| [1:1] VP -> * Verb NP
|
393 |
+
|. > . . . . .| [1:1] VP -> * Verb
|
394 |
+
|. > . . . . .| [1:1] Verb -> * 'saw'
|
395 |
+
|. [------] . . . .| [1:2] Verb -> 'saw' *
|
396 |
+
|. [------> . . . .| [1:2] VP -> Verb * NP
|
397 |
+
|. [------] . . . .| [1:2] VP -> Verb *
|
398 |
+
|[-------------] . . . .| [0:2] S -> NP VP *
|
399 |
+
|. [------> . . . .| [1:2] VP -> VP * PP
|
400 |
+
|. . > . . . .| [2:2] NP -> * NP PP
|
401 |
+
|. . > . . . .| [2:2] NP -> * Det Noun
|
402 |
+
|. . > . . . .| [2:2] NP -> * 'John'
|
403 |
+
|. . [------] . . .| [2:3] NP -> 'John' *
|
404 |
+
|. [-------------] . . .| [1:3] VP -> Verb NP *
|
405 |
+
|. . [------> . . .| [2:3] NP -> NP * PP
|
406 |
+
|. . . > . . .| [3:3] PP -> * 'with' NP
|
407 |
+
|[--------------------] . . .| [0:3] S -> NP VP *
|
408 |
+
|. [-------------> . . .| [1:3] VP -> VP * PP
|
409 |
+
|. . . [------> . .| [3:4] PP -> 'with' * NP
|
410 |
+
|. . . . > . .| [4:4] NP -> * NP PP
|
411 |
+
|. . . . > . .| [4:4] NP -> * Det Noun
|
412 |
+
|. . . . > . .| [4:4] Det -> * 'a'
|
413 |
+
|. . . . [------] .| [4:5] Det -> 'a' *
|
414 |
+
|. . . . [------> .| [4:5] NP -> Det * Noun
|
415 |
+
|. . . . . > .| [5:5] Noun -> * 'dog'
|
416 |
+
|. . . . . [------]| [5:6] Noun -> 'dog' *
|
417 |
+
|. . . . [-------------]| [4:6] NP -> Det Noun *
|
418 |
+
|. . . [--------------------]| [3:6] PP -> 'with' NP *
|
419 |
+
|. . . . [------------->| [4:6] NP -> NP * PP
|
420 |
+
|. . [---------------------------]| [2:6] NP -> NP PP *
|
421 |
+
|. [----------------------------------]| [1:6] VP -> VP PP *
|
422 |
+
|[=========================================]| [0:6] S -> NP VP *
|
423 |
+
|. [---------------------------------->| [1:6] VP -> VP * PP
|
424 |
+
|. [----------------------------------]| [1:6] VP -> Verb NP *
|
425 |
+
|. . [--------------------------->| [2:6] NP -> NP * PP
|
426 |
+
|[=========================================]| [0:6] S -> NP VP *
|
427 |
+
|. [---------------------------------->| [1:6] VP -> VP * PP
|
428 |
+
(S
|
429 |
+
(NP I)
|
430 |
+
(VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
|
431 |
+
(S
|
432 |
+
(NP I)
|
433 |
+
(VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
|
434 |
+
|
435 |
+
|
436 |
+
Unit tests for LARGE context-free grammars
|
437 |
+
------------------------------------------
|
438 |
+
|
439 |
+
Reading the ATIS grammar.
|
440 |
+
|
441 |
+
>>> grammar = nltk.data.load('grammars/large_grammars/atis.cfg')
|
442 |
+
>>> grammar
|
443 |
+
<Grammar with 5517 productions>
|
444 |
+
|
445 |
+
Reading the test sentences.
|
446 |
+
|
447 |
+
>>> sentences = nltk.data.load('grammars/large_grammars/atis_sentences.txt')
|
448 |
+
>>> sentences = nltk.parse.util.extract_test_sentences(sentences)
|
449 |
+
>>> len(sentences)
|
450 |
+
98
|
451 |
+
>>> testsentence = sentences[22]
|
452 |
+
>>> testsentence[0]
|
453 |
+
['show', 'me', 'northwest', 'flights', 'to', 'detroit', '.']
|
454 |
+
>>> testsentence[1]
|
455 |
+
17
|
456 |
+
>>> sentence = testsentence[0]
|
457 |
+
|
458 |
+
Now we test all different parsing strategies.
|
459 |
+
Note that the number of edges differ between the strategies.
|
460 |
+
|
461 |
+
Bottom-up parsing.
|
462 |
+
|
463 |
+
>>> parser = nltk.parse.BottomUpChartParser(grammar)
|
464 |
+
>>> chart = parser.chart_parse(sentence)
|
465 |
+
>>> print((chart.num_edges()))
|
466 |
+
7661
|
467 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
468 |
+
17
|
469 |
+
|
470 |
+
Bottom-up Left-corner parsing.
|
471 |
+
|
472 |
+
>>> parser = nltk.parse.BottomUpLeftCornerChartParser(grammar)
|
473 |
+
>>> chart = parser.chart_parse(sentence)
|
474 |
+
>>> print((chart.num_edges()))
|
475 |
+
4986
|
476 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
477 |
+
17
|
478 |
+
|
479 |
+
Left-corner parsing with bottom-up filter.
|
480 |
+
|
481 |
+
>>> parser = nltk.parse.LeftCornerChartParser(grammar)
|
482 |
+
>>> chart = parser.chart_parse(sentence)
|
483 |
+
>>> print((chart.num_edges()))
|
484 |
+
1342
|
485 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
486 |
+
17
|
487 |
+
|
488 |
+
Top-down parsing.
|
489 |
+
|
490 |
+
>>> parser = nltk.parse.TopDownChartParser(grammar)
|
491 |
+
>>> chart = parser.chart_parse(sentence)
|
492 |
+
>>> print((chart.num_edges()))
|
493 |
+
28352
|
494 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
495 |
+
17
|
496 |
+
|
497 |
+
Incremental Bottom-up parsing.
|
498 |
+
|
499 |
+
>>> parser = nltk.parse.IncrementalBottomUpChartParser(grammar)
|
500 |
+
>>> chart = parser.chart_parse(sentence)
|
501 |
+
>>> print((chart.num_edges()))
|
502 |
+
7661
|
503 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
504 |
+
17
|
505 |
+
|
506 |
+
Incremental Bottom-up Left-corner parsing.
|
507 |
+
|
508 |
+
>>> parser = nltk.parse.IncrementalBottomUpLeftCornerChartParser(grammar)
|
509 |
+
>>> chart = parser.chart_parse(sentence)
|
510 |
+
>>> print((chart.num_edges()))
|
511 |
+
4986
|
512 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
513 |
+
17
|
514 |
+
|
515 |
+
Incremental Left-corner parsing with bottom-up filter.
|
516 |
+
|
517 |
+
>>> parser = nltk.parse.IncrementalLeftCornerChartParser(grammar)
|
518 |
+
>>> chart = parser.chart_parse(sentence)
|
519 |
+
>>> print((chart.num_edges()))
|
520 |
+
1342
|
521 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
522 |
+
17
|
523 |
+
|
524 |
+
Incremental Top-down parsing.
|
525 |
+
|
526 |
+
>>> parser = nltk.parse.IncrementalTopDownChartParser(grammar)
|
527 |
+
>>> chart = parser.chart_parse(sentence)
|
528 |
+
>>> print((chart.num_edges()))
|
529 |
+
28352
|
530 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
531 |
+
17
|
532 |
+
|
533 |
+
Earley parsing. This is similar to the incremental top-down algorithm.
|
534 |
+
|
535 |
+
>>> parser = nltk.parse.EarleyChartParser(grammar)
|
536 |
+
>>> chart = parser.chart_parse(sentence)
|
537 |
+
>>> print((chart.num_edges()))
|
538 |
+
28352
|
539 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
540 |
+
17
|
541 |
+
|
542 |
+
|
543 |
+
Unit tests for the Probabilistic CFG class
|
544 |
+
------------------------------------------
|
545 |
+
|
546 |
+
>>> from nltk.corpus import treebank
|
547 |
+
>>> from itertools import islice
|
548 |
+
>>> from nltk.grammar import PCFG, induce_pcfg
|
549 |
+
>>> toy_pcfg1 = PCFG.fromstring("""
|
550 |
+
... S -> NP VP [1.0]
|
551 |
+
... NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
|
552 |
+
... Det -> 'the' [0.8] | 'my' [0.2]
|
553 |
+
... N -> 'man' [0.5] | 'telescope' [0.5]
|
554 |
+
... VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
|
555 |
+
... V -> 'ate' [0.35] | 'saw' [0.65]
|
556 |
+
... PP -> P NP [1.0]
|
557 |
+
... P -> 'with' [0.61] | 'under' [0.39]
|
558 |
+
... """)
|
559 |
+
|
560 |
+
>>> toy_pcfg2 = PCFG.fromstring("""
|
561 |
+
... S -> NP VP [1.0]
|
562 |
+
... VP -> V NP [.59]
|
563 |
+
... VP -> V [.40]
|
564 |
+
... VP -> VP PP [.01]
|
565 |
+
... NP -> Det N [.41]
|
566 |
+
... NP -> Name [.28]
|
567 |
+
... NP -> NP PP [.31]
|
568 |
+
... PP -> P NP [1.0]
|
569 |
+
... V -> 'saw' [.21]
|
570 |
+
... V -> 'ate' [.51]
|
571 |
+
... V -> 'ran' [.28]
|
572 |
+
... N -> 'boy' [.11]
|
573 |
+
... N -> 'cookie' [.12]
|
574 |
+
... N -> 'table' [.13]
|
575 |
+
... N -> 'telescope' [.14]
|
576 |
+
... N -> 'hill' [.5]
|
577 |
+
... Name -> 'Jack' [.52]
|
578 |
+
... Name -> 'Bob' [.48]
|
579 |
+
... P -> 'with' [.61]
|
580 |
+
... P -> 'under' [.39]
|
581 |
+
... Det -> 'the' [.41]
|
582 |
+
... Det -> 'a' [.31]
|
583 |
+
... Det -> 'my' [.28]
|
584 |
+
... """)
|
585 |
+
|
586 |
+
Create a set of PCFG productions.
|
587 |
+
|
588 |
+
>>> grammar = PCFG.fromstring("""
|
589 |
+
... A -> B B [.3] | C B C [.7]
|
590 |
+
... B -> B D [.5] | C [.5]
|
591 |
+
... C -> 'a' [.1] | 'b' [0.9]
|
592 |
+
... D -> 'b' [1.0]
|
593 |
+
... """)
|
594 |
+
>>> prod = grammar.productions()[0]
|
595 |
+
>>> prod
|
596 |
+
A -> B B [0.3]
|
597 |
+
|
598 |
+
>>> prod.lhs()
|
599 |
+
A
|
600 |
+
|
601 |
+
>>> prod.rhs()
|
602 |
+
(B, B)
|
603 |
+
|
604 |
+
>>> print((prod.prob()))
|
605 |
+
0.3
|
606 |
+
|
607 |
+
>>> grammar.start()
|
608 |
+
A
|
609 |
+
|
610 |
+
>>> grammar.productions()
|
611 |
+
[A -> B B [0.3], A -> C B C [0.7], B -> B D [0.5], B -> C [0.5], C -> 'a' [0.1], C -> 'b' [0.9], D -> 'b' [1.0]]
|
612 |
+
|
613 |
+
Induce some productions using parsed Treebank data.
|
614 |
+
|
615 |
+
>>> productions = []
|
616 |
+
>>> for fileid in treebank.fileids()[:2]:
|
617 |
+
... for t in treebank.parsed_sents(fileid):
|
618 |
+
... productions += t.productions()
|
619 |
+
|
620 |
+
>>> grammar = induce_pcfg(S, productions)
|
621 |
+
>>> grammar
|
622 |
+
<Grammar with 71 productions>
|
623 |
+
|
624 |
+
>>> sorted(grammar.productions(lhs=Nonterminal('PP')))[:2]
|
625 |
+
[PP -> IN NP [1.0]]
|
626 |
+
>>> sorted(grammar.productions(lhs=Nonterminal('NNP')))[:2]
|
627 |
+
[NNP -> 'Agnew' [0.0714286], NNP -> 'Consolidated' [0.0714286]]
|
628 |
+
>>> sorted(grammar.productions(lhs=Nonterminal('JJ')))[:2]
|
629 |
+
[JJ -> 'British' [0.142857], JJ -> 'former' [0.142857]]
|
630 |
+
>>> sorted(grammar.productions(lhs=Nonterminal('NP')))[:2]
|
631 |
+
[NP -> CD NNS [0.133333], NP -> DT JJ JJ NN [0.0666667]]
|
632 |
+
|
633 |
+
Unit tests for the Probabilistic Chart Parse classes
|
634 |
+
----------------------------------------------------
|
635 |
+
|
636 |
+
>>> tokens = "Jack saw Bob with my cookie".split()
|
637 |
+
>>> grammar = toy_pcfg2
|
638 |
+
>>> print(grammar)
|
639 |
+
Grammar with 23 productions (start state = S)
|
640 |
+
S -> NP VP [1.0]
|
641 |
+
VP -> V NP [0.59]
|
642 |
+
VP -> V [0.4]
|
643 |
+
VP -> VP PP [0.01]
|
644 |
+
NP -> Det N [0.41]
|
645 |
+
NP -> Name [0.28]
|
646 |
+
NP -> NP PP [0.31]
|
647 |
+
PP -> P NP [1.0]
|
648 |
+
V -> 'saw' [0.21]
|
649 |
+
V -> 'ate' [0.51]
|
650 |
+
V -> 'ran' [0.28]
|
651 |
+
N -> 'boy' [0.11]
|
652 |
+
N -> 'cookie' [0.12]
|
653 |
+
N -> 'table' [0.13]
|
654 |
+
N -> 'telescope' [0.14]
|
655 |
+
N -> 'hill' [0.5]
|
656 |
+
Name -> 'Jack' [0.52]
|
657 |
+
Name -> 'Bob' [0.48]
|
658 |
+
P -> 'with' [0.61]
|
659 |
+
P -> 'under' [0.39]
|
660 |
+
Det -> 'the' [0.41]
|
661 |
+
Det -> 'a' [0.31]
|
662 |
+
Det -> 'my' [0.28]
|
663 |
+
|
664 |
+
Create several parsers using different queuing strategies and show the
|
665 |
+
resulting parses.
|
666 |
+
|
667 |
+
>>> from nltk.parse import pchart
|
668 |
+
|
669 |
+
>>> parser = pchart.InsideChartParser(grammar)
|
670 |
+
>>> for t in parser.parse(tokens):
|
671 |
+
... print(t)
|
672 |
+
(S
|
673 |
+
(NP (Name Jack))
|
674 |
+
(VP
|
675 |
+
(V saw)
|
676 |
+
(NP
|
677 |
+
(NP (Name Bob))
|
678 |
+
(PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06)
|
679 |
+
(S
|
680 |
+
(NP (Name Jack))
|
681 |
+
(VP
|
682 |
+
(VP (V saw) (NP (Name Bob)))
|
683 |
+
(PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07)
|
684 |
+
|
685 |
+
>>> parser = pchart.RandomChartParser(grammar)
|
686 |
+
>>> for t in parser.parse(tokens):
|
687 |
+
... print(t)
|
688 |
+
(S
|
689 |
+
(NP (Name Jack))
|
690 |
+
(VP
|
691 |
+
(V saw)
|
692 |
+
(NP
|
693 |
+
(NP (Name Bob))
|
694 |
+
(PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06)
|
695 |
+
(S
|
696 |
+
(NP (Name Jack))
|
697 |
+
(VP
|
698 |
+
(VP (V saw) (NP (Name Bob)))
|
699 |
+
(PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07)
|
700 |
+
|
701 |
+
>>> parser = pchart.UnsortedChartParser(grammar)
|
702 |
+
>>> for t in parser.parse(tokens):
|
703 |
+
... print(t)
|
704 |
+
(S
|
705 |
+
(NP (Name Jack))
|
706 |
+
(VP
|
707 |
+
(V saw)
|
708 |
+
(NP
|
709 |
+
(NP (Name Bob))
|
710 |
+
(PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06)
|
711 |
+
(S
|
712 |
+
(NP (Name Jack))
|
713 |
+
(VP
|
714 |
+
(VP (V saw) (NP (Name Bob)))
|
715 |
+
(PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07)
|
716 |
+
|
717 |
+
>>> parser = pchart.LongestChartParser(grammar)
|
718 |
+
>>> for t in parser.parse(tokens):
|
719 |
+
... print(t)
|
720 |
+
(S
|
721 |
+
(NP (Name Jack))
|
722 |
+
(VP
|
723 |
+
(V saw)
|
724 |
+
(NP
|
725 |
+
(NP (Name Bob))
|
726 |
+
(PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06)
|
727 |
+
(S
|
728 |
+
(NP (Name Jack))
|
729 |
+
(VP
|
730 |
+
(VP (V saw) (NP (Name Bob)))
|
731 |
+
(PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07)
|
732 |
+
|
733 |
+
>>> parser = pchart.InsideChartParser(grammar, beam_size = len(tokens)+1)
|
734 |
+
>>> for t in parser.parse(tokens):
|
735 |
+
... print(t)
|
736 |
+
|
737 |
+
|
738 |
+
Unit tests for the Viterbi Parse classes
|
739 |
+
----------------------------------------
|
740 |
+
|
741 |
+
>>> from nltk.parse import ViterbiParser
|
742 |
+
>>> tokens = "Jack saw Bob with my cookie".split()
|
743 |
+
>>> grammar = toy_pcfg2
|
744 |
+
|
745 |
+
Parse the tokenized sentence.
|
746 |
+
|
747 |
+
>>> parser = ViterbiParser(grammar)
|
748 |
+
>>> for t in parser.parse(tokens):
|
749 |
+
... print(t)
|
750 |
+
(S
|
751 |
+
(NP (Name Jack))
|
752 |
+
(VP
|
753 |
+
(V saw)
|
754 |
+
(NP
|
755 |
+
(NP (Name Bob))
|
756 |
+
(PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06)
|
757 |
+
|
758 |
+
|
759 |
+
Unit tests for the FeatStructNonterminal class
|
760 |
+
----------------------------------------------
|
761 |
+
|
762 |
+
>>> from nltk.grammar import FeatStructNonterminal
|
763 |
+
>>> FeatStructNonterminal(
|
764 |
+
... pos='n', agr=FeatStructNonterminal(number='pl', gender='f'))
|
765 |
+
[agr=[gender='f', number='pl'], pos='n']
|
766 |
+
|
767 |
+
>>> FeatStructNonterminal('VP[+fin]/NP[+pl]')
|
768 |
+
VP[+fin]/NP[+pl]
|
769 |
+
|
770 |
+
|
771 |
+
Tracing the Feature Chart Parser
|
772 |
+
--------------------------------
|
773 |
+
|
774 |
+
We use the featurechart.demo() function for tracing the Feature Chart Parser.
|
775 |
+
|
776 |
+
>>> nltk.parse.featurechart.demo(print_times=False,
|
777 |
+
... print_grammar=True,
|
778 |
+
... parser=nltk.parse.featurechart.FeatureChartParser,
|
779 |
+
... sent='I saw John with a dog')
|
780 |
+
<BLANKLINE>
|
781 |
+
Grammar with 18 productions (start state = S[])
|
782 |
+
S[] -> NP[] VP[]
|
783 |
+
PP[] -> Prep[] NP[]
|
784 |
+
NP[] -> NP[] PP[]
|
785 |
+
VP[] -> VP[] PP[]
|
786 |
+
VP[] -> Verb[] NP[]
|
787 |
+
VP[] -> Verb[]
|
788 |
+
NP[] -> Det[pl=?x] Noun[pl=?x]
|
789 |
+
NP[] -> 'John'
|
790 |
+
NP[] -> 'I'
|
791 |
+
Det[] -> 'the'
|
792 |
+
Det[] -> 'my'
|
793 |
+
Det[-pl] -> 'a'
|
794 |
+
Noun[-pl] -> 'dog'
|
795 |
+
Noun[-pl] -> 'cookie'
|
796 |
+
Verb[] -> 'ate'
|
797 |
+
Verb[] -> 'saw'
|
798 |
+
Prep[] -> 'with'
|
799 |
+
Prep[] -> 'under'
|
800 |
+
<BLANKLINE>
|
801 |
+
* FeatureChartParser
|
802 |
+
Sentence: I saw John with a dog
|
803 |
+
|.I.s.J.w.a.d.|
|
804 |
+
|[-] . . . . .| [0:1] 'I'
|
805 |
+
|. [-] . . . .| [1:2] 'saw'
|
806 |
+
|. . [-] . . .| [2:3] 'John'
|
807 |
+
|. . . [-] . .| [3:4] 'with'
|
808 |
+
|. . . . [-] .| [4:5] 'a'
|
809 |
+
|. . . . . [-]| [5:6] 'dog'
|
810 |
+
|[-] . . . . .| [0:1] NP[] -> 'I' *
|
811 |
+
|[-> . . . . .| [0:1] S[] -> NP[] * VP[] {}
|
812 |
+
|[-> . . . . .| [0:1] NP[] -> NP[] * PP[] {}
|
813 |
+
|. [-] . . . .| [1:2] Verb[] -> 'saw' *
|
814 |
+
|. [-> . . . .| [1:2] VP[] -> Verb[] * NP[] {}
|
815 |
+
|. [-] . . . .| [1:2] VP[] -> Verb[] *
|
816 |
+
|. [-> . . . .| [1:2] VP[] -> VP[] * PP[] {}
|
817 |
+
|[---] . . . .| [0:2] S[] -> NP[] VP[] *
|
818 |
+
|. . [-] . . .| [2:3] NP[] -> 'John' *
|
819 |
+
|. . [-> . . .| [2:3] S[] -> NP[] * VP[] {}
|
820 |
+
|. . [-> . . .| [2:3] NP[] -> NP[] * PP[] {}
|
821 |
+
|. [---] . . .| [1:3] VP[] -> Verb[] NP[] *
|
822 |
+
|. [---> . . .| [1:3] VP[] -> VP[] * PP[] {}
|
823 |
+
|[-----] . . .| [0:3] S[] -> NP[] VP[] *
|
824 |
+
|. . . [-] . .| [3:4] Prep[] -> 'with' *
|
825 |
+
|. . . [-> . .| [3:4] PP[] -> Prep[] * NP[] {}
|
826 |
+
|. . . . [-] .| [4:5] Det[-pl] -> 'a' *
|
827 |
+
|. . . . [-> .| [4:5] NP[] -> Det[pl=?x] * Noun[pl=?x] {?x: False}
|
828 |
+
|. . . . . [-]| [5:6] Noun[-pl] -> 'dog' *
|
829 |
+
|. . . . [---]| [4:6] NP[] -> Det[-pl] Noun[-pl] *
|
830 |
+
|. . . . [--->| [4:6] S[] -> NP[] * VP[] {}
|
831 |
+
|. . . . [--->| [4:6] NP[] -> NP[] * PP[] {}
|
832 |
+
|. . . [-----]| [3:6] PP[] -> Prep[] NP[] *
|
833 |
+
|. . [-------]| [2:6] NP[] -> NP[] PP[] *
|
834 |
+
|. [---------]| [1:6] VP[] -> VP[] PP[] *
|
835 |
+
|. [--------->| [1:6] VP[] -> VP[] * PP[] {}
|
836 |
+
|[===========]| [0:6] S[] -> NP[] VP[] *
|
837 |
+
|. . [------->| [2:6] S[] -> NP[] * VP[] {}
|
838 |
+
|. . [------->| [2:6] NP[] -> NP[] * PP[] {}
|
839 |
+
|. [---------]| [1:6] VP[] -> Verb[] NP[] *
|
840 |
+
|. [--------->| [1:6] VP[] -> VP[] * PP[] {}
|
841 |
+
|[===========]| [0:6] S[] -> NP[] VP[] *
|
842 |
+
(S[]
|
843 |
+
(NP[] I)
|
844 |
+
(VP[]
|
845 |
+
(VP[] (Verb[] saw) (NP[] John))
|
846 |
+
(PP[] (Prep[] with) (NP[] (Det[-pl] a) (Noun[-pl] dog)))))
|
847 |
+
(S[]
|
848 |
+
(NP[] I)
|
849 |
+
(VP[]
|
850 |
+
(Verb[] saw)
|
851 |
+
(NP[]
|
852 |
+
(NP[] John)
|
853 |
+
(PP[] (Prep[] with) (NP[] (Det[-pl] a) (Noun[-pl] dog))))))
|
854 |
+
|
855 |
+
|
856 |
+
Unit tests for the Feature Chart Parser classes
|
857 |
+
-----------------------------------------------
|
858 |
+
|
859 |
+
The list of parsers we want to test.
|
860 |
+
|
861 |
+
>>> parsers = [nltk.parse.featurechart.FeatureChartParser,
|
862 |
+
... nltk.parse.featurechart.FeatureTopDownChartParser,
|
863 |
+
... nltk.parse.featurechart.FeatureBottomUpChartParser,
|
864 |
+
... nltk.parse.featurechart.FeatureBottomUpLeftCornerChartParser,
|
865 |
+
... nltk.parse.earleychart.FeatureIncrementalChartParser,
|
866 |
+
... nltk.parse.earleychart.FeatureEarleyChartParser,
|
867 |
+
... nltk.parse.earleychart.FeatureIncrementalTopDownChartParser,
|
868 |
+
... nltk.parse.earleychart.FeatureIncrementalBottomUpChartParser,
|
869 |
+
... nltk.parse.earleychart.FeatureIncrementalBottomUpLeftCornerChartParser,
|
870 |
+
... ]
|
871 |
+
|
872 |
+
A helper function that tests each parser on the given grammar and sentence.
|
873 |
+
We check that the number of trees are correct, and that all parsers
|
874 |
+
return the same trees. Otherwise an error is printed.
|
875 |
+
|
876 |
+
>>> def unittest(grammar, sentence, nr_trees):
|
877 |
+
... sentence = sentence.split()
|
878 |
+
... trees = None
|
879 |
+
... for P in parsers:
|
880 |
+
... result = P(grammar).parse(sentence)
|
881 |
+
... result = set(tree.freeze() for tree in result)
|
882 |
+
... if len(result) != nr_trees:
|
883 |
+
... print("Wrong nr of trees:", len(result))
|
884 |
+
... elif trees is None:
|
885 |
+
... trees = result
|
886 |
+
... elif result != trees:
|
887 |
+
... print("Trees differ for parser:", P.__name__)
|
888 |
+
|
889 |
+
The demo grammar from before, with an ambiguous sentence.
|
890 |
+
|
891 |
+
>>> isawjohn = nltk.parse.featurechart.demo_grammar()
|
892 |
+
>>> unittest(isawjohn, "I saw John with a dog with my cookie", 5)
|
893 |
+
|
894 |
+
This grammar tests that variables in different grammar rules are renamed
|
895 |
+
before unification. (The problematic variable is in this case ?X).
|
896 |
+
|
897 |
+
>>> whatwasthat = nltk.grammar.FeatureGrammar.fromstring('''
|
898 |
+
... S[] -> NP[num=?N] VP[num=?N, slash=?X]
|
899 |
+
... NP[num=?X] -> "what"
|
900 |
+
... NP[num=?X] -> "that"
|
901 |
+
... VP[num=?P, slash=none] -> V[num=?P] NP[]
|
902 |
+
... V[num=sg] -> "was"
|
903 |
+
... ''')
|
904 |
+
>>> unittest(whatwasthat, "what was that", 1)
|
905 |
+
|
906 |
+
This grammar tests that the same rule can be used in different places
|
907 |
+
in another rule, and that the variables are properly renamed.
|
908 |
+
|
909 |
+
>>> thislovesthat = nltk.grammar.FeatureGrammar.fromstring('''
|
910 |
+
... S[] -> NP[case=nom] V[] NP[case=acc]
|
911 |
+
... NP[case=?X] -> Pron[case=?X]
|
912 |
+
... Pron[] -> "this"
|
913 |
+
... Pron[] -> "that"
|
914 |
+
... V[] -> "loves"
|
915 |
+
... ''')
|
916 |
+
>>> unittest(thislovesthat, "this loves that", 1)
|
917 |
+
|
918 |
+
|
919 |
+
Tests for loading feature grammar files
|
920 |
+
---------------------------------------
|
921 |
+
|
922 |
+
Alternative 1: first load the grammar, then create the parser.
|
923 |
+
|
924 |
+
>>> fcfg = nltk.data.load('grammars/book_grammars/feat0.fcfg')
|
925 |
+
>>> fcp1 = nltk.parse.FeatureChartParser(fcfg)
|
926 |
+
>>> print((type(fcp1)))
|
927 |
+
<class 'nltk.parse.featurechart.FeatureChartParser'>
|
928 |
+
|
929 |
+
Alternative 2: directly load the parser.
|
930 |
+
|
931 |
+
>>> fcp2 = nltk.parse.load_parser('grammars/book_grammars/feat0.fcfg')
|
932 |
+
>>> print((type(fcp2)))
|
933 |
+
<class 'nltk.parse.featurechart.FeatureChartParser'>
|
llmeval-env/lib/python3.10/site-packages/nltk/test/portuguese_en.doctest
ADDED
@@ -0,0 +1,568 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
==================================
|
5 |
+
Examples for Portuguese Processing
|
6 |
+
==================================
|
7 |
+
|
8 |
+
This HOWTO contains a variety of examples relating to the Portuguese language.
|
9 |
+
It is intended to be read in conjunction with the NLTK book
|
10 |
+
(``https://www.nltk.org/book/``). For instructions on running the Python
|
11 |
+
interpreter, please see the section *Getting Started with Python*, in Chapter 1.
|
12 |
+
|
13 |
+
--------------------------------------------
|
14 |
+
Python Programming, with Portuguese Examples
|
15 |
+
--------------------------------------------
|
16 |
+
|
17 |
+
Chapter 1 of the NLTK book contains many elementary programming examples, all
|
18 |
+
with English texts. In this section, we'll see some corresponding examples
|
19 |
+
using Portuguese. Please refer to the chapter for full discussion. *Vamos!*
|
20 |
+
|
21 |
+
>>> from nltk.test.portuguese_en_fixt import setup_module
|
22 |
+
>>> setup_module()
|
23 |
+
|
24 |
+
>>> from nltk.examples.pt import *
|
25 |
+
*** Introductory Examples for the NLTK Book ***
|
26 |
+
Loading ptext1, ... and psent1, ...
|
27 |
+
Type the name of the text or sentence to view it.
|
28 |
+
Type: 'texts()' or 'sents()' to list the materials.
|
29 |
+
ptext1: Memórias Póstumas de Brás Cubas (1881)
|
30 |
+
ptext2: Dom Casmurro (1899)
|
31 |
+
ptext3: Gênesis
|
32 |
+
ptext4: Folha de Sao Paulo (1994)
|
33 |
+
|
34 |
+
|
35 |
+
Any time we want to find out about these texts, we just have
|
36 |
+
to enter their names at the Python prompt:
|
37 |
+
|
38 |
+
>>> ptext2
|
39 |
+
<Text: Dom Casmurro (1899)>
|
40 |
+
|
41 |
+
Searching Text
|
42 |
+
--------------
|
43 |
+
|
44 |
+
A concordance permits us to see words in context.
|
45 |
+
|
46 |
+
>>> ptext1.concordance('olhos')
|
47 |
+
Building index...
|
48 |
+
Displaying 25 of 138 matches:
|
49 |
+
De pé , à cabeceira da cama , com os olhos estúpidos , a boca entreaberta , a t
|
50 |
+
orelhas . Pela minha parte fechei os olhos e deixei - me ir à ventura . Já agor
|
51 |
+
xões de cérebro enfermo . Como ia de olhos fechados , não via o caminho ; lembr
|
52 |
+
gelos eternos . Com efeito , abri os olhos e vi que o meu animal galopava numa
|
53 |
+
me apareceu então , fitando - me uns olhos rutilantes como o sol . Tudo nessa f
|
54 |
+
mim mesmo . Então , encarei - a com olhos súplices , e pedi mais alguns anos .
|
55 |
+
...
|
56 |
+
|
57 |
+
For a given word, we can find words with a similar text distribution:
|
58 |
+
|
59 |
+
>>> ptext1.similar('chegar')
|
60 |
+
Building word-context index...
|
61 |
+
acabada acudir aludir avistar bramanismo casamento cheguei com contar
|
62 |
+
contrário corpo dali deixei desferirem dizer fazer filhos já leitor lhe
|
63 |
+
>>> ptext3.similar('chegar')
|
64 |
+
Building word-context index...
|
65 |
+
achar alumiar arrombar destruir governar guardar ir lavrar passar que
|
66 |
+
toda tomar ver vir
|
67 |
+
|
68 |
+
We can search for the statistically significant collocations in a text:
|
69 |
+
|
70 |
+
>>> ptext1.collocations()
|
71 |
+
Building collocations list
|
72 |
+
Quincas Borba; Lobo Neves; alguma coisa; Brás Cubas; meu pai; dia
|
73 |
+
seguinte; não sei; Meu pai; alguns instantes; outra vez; outra coisa;
|
74 |
+
por exemplo; mim mesmo; coisa nenhuma; mesma coisa; não era; dias
|
75 |
+
depois; Passeio Público; olhar para; das coisas
|
76 |
+
|
77 |
+
We can search for words in context, with the help of *regular expressions*, e.g.:
|
78 |
+
|
79 |
+
>>> ptext1.findall("<olhos> (<.*>)")
|
80 |
+
estúpidos; e; fechados; rutilantes; súplices; a; do; babavam;
|
81 |
+
na; moles; se; da; umas; espraiavam; chamejantes; espetados;
|
82 |
+
...
|
83 |
+
|
84 |
+
We can automatically generate random text based on a given text, e.g.:
|
85 |
+
|
86 |
+
>>> ptext3.generate() # doctest: +SKIP
|
87 |
+
No princípio , criou Deus os abençoou , dizendo : Onde { estão } e até
|
88 |
+
à ave dos céus , { que } será . Disse mais Abrão : Dá - me a mulher
|
89 |
+
que tomaste ; porque daquele poço Eseque , { tinha .} E disse : Não
|
90 |
+
poderemos descer ; mas , do campo ainda não estava na casa do teu
|
91 |
+
pescoço . E viveu Serugue , depois Simeão e Levi { são } estes ? E o
|
92 |
+
varão , porque habitava na terra de Node , da mão de Esaú : Jeús ,
|
93 |
+
Jalão e Corá
|
94 |
+
|
95 |
+
Texts as List of Words
|
96 |
+
----------------------
|
97 |
+
|
98 |
+
A few sentences have been defined for you.
|
99 |
+
|
100 |
+
>>> psent1
|
101 |
+
['o', 'amor', 'da', 'gl\xf3ria', 'era', 'a', 'coisa', 'mais',
|
102 |
+
'verdadeiramente', 'humana', 'que', 'h\xe1', 'no', 'homem', ',',
|
103 |
+
'e', ',', 'conseq\xfcentemente', ',', 'a', 'sua', 'mais',
|
104 |
+
'genu\xedna', 'fei\xe7\xe3o', '.']
|
105 |
+
>>>
|
106 |
+
|
107 |
+
Notice that the sentence has been *tokenized*. Each token is
|
108 |
+
represented as a string, represented using quotes, e.g. ``'coisa'``.
|
109 |
+
Some strings contain special characters, e.g. ``\xf3``,
|
110 |
+
the internal representation for ó.
|
111 |
+
The tokens are combined in the form of a *list*. How long is this list?
|
112 |
+
|
113 |
+
>>> len(psent1)
|
114 |
+
25
|
115 |
+
>>>
|
116 |
+
|
117 |
+
What is the vocabulary of this sentence?
|
118 |
+
|
119 |
+
>>> sorted(set(psent1))
|
120 |
+
[',', '.', 'a', 'amor', 'coisa', 'conseqüentemente', 'da', 'e', 'era',
|
121 |
+
'feição', 'genuína', 'glória', 'homem', 'humana', 'há', 'mais', 'no',
|
122 |
+
'o', 'que', 'sua', 'verdadeiramente']
|
123 |
+
>>>
|
124 |
+
|
125 |
+
Let's iterate over each item in ``psent2``, and print information for each:
|
126 |
+
|
127 |
+
>>> for w in psent2:
|
128 |
+
... print(w, len(w), w[-1])
|
129 |
+
...
|
130 |
+
Não 3 o
|
131 |
+
consultes 9 s
|
132 |
+
dicionários 11 s
|
133 |
+
. 1 .
|
134 |
+
|
135 |
+
Observe how we print each word together with some of its computed properties.
|
136 |
+
Also notice that we accessed the last character of a string ``w`` using ``w[-1]``.
|
137 |
+
|
138 |
+
We just saw a ``for`` loop above. Another useful control structure is a
|
139 |
+
*list comprehension*.
|
140 |
+
|
141 |
+
>>> [w.upper() for w in psent2]
|
142 |
+
['N\xc3O', 'CONSULTES', 'DICION\xc1RIOS', '.']
|
143 |
+
>>> [w for w in psent1 if w.endswith('a')]
|
144 |
+
['da', 'gl\xf3ria', 'era', 'a', 'coisa', 'humana', 'a', 'sua', 'genu\xedna']
|
145 |
+
>>> [w for w in ptext4 if len(w) > 15]
|
146 |
+
['norte-irlandeses', 'pan-nacionalismo', 'predominatemente', 'primeiro-ministro',
|
147 |
+
'primeiro-ministro', 'irlandesa-americana', 'responsabilidades', 'significativamente']
|
148 |
+
|
149 |
+
We can examine the relative frequency of words in a text, using ``FreqDist``:
|
150 |
+
|
151 |
+
>>> fd1 = FreqDist(ptext1)
|
152 |
+
>>> fd1
|
153 |
+
<FreqDist with 10848 samples and 77098 outcomes>
|
154 |
+
>>> fd1['olhos']
|
155 |
+
137
|
156 |
+
>>> fd1.max()
|
157 |
+
','
|
158 |
+
>>> fd1.samples()[:100]
|
159 |
+
[',', '.', 'a', 'que', 'de', 'e', '-', 'o', ';', 'me', 'um', 'n\xe3o',
|
160 |
+
'\x97', 'se', 'do', 'da', 'uma', 'com', 'os', '\xe9', 'era', 'as', 'eu',
|
161 |
+
'lhe', 'ao', 'em', 'para', 'mas', '...', '!', '\xe0', 'na', 'mais', '?',
|
162 |
+
'no', 'como', 'por', 'N\xe3o', 'dos', 'o', 'ele', ':', 'Virg\xedlia',
|
163 |
+
'me', 'disse', 'minha', 'das', 'O', '/', 'A', 'CAP\xcdTULO', 'muito',
|
164 |
+
'depois', 'coisa', 'foi', 'sem', 'olhos', 'ela', 'nos', 'tinha', 'nem',
|
165 |
+
'E', 'outro', 'vida', 'nada', 'tempo', 'menos', 'outra', 'casa', 'homem',
|
166 |
+
'porque', 'quando', 'mim', 'mesmo', 'ser', 'pouco', 'estava', 'dia',
|
167 |
+
't\xe3o', 'tudo', 'Mas', 'at\xe9', 'D', 'ainda', 's\xf3', 'alguma',
|
168 |
+
'la', 'vez', 'anos', 'h\xe1', 'Era', 'pai', 'esse', 'lo', 'dizer', 'assim',
|
169 |
+
'ent\xe3o', 'dizia', 'aos', 'Borba']
|
170 |
+
|
171 |
+
---------------
|
172 |
+
Reading Corpora
|
173 |
+
---------------
|
174 |
+
|
175 |
+
Accessing the Machado Text Corpus
|
176 |
+
---------------------------------
|
177 |
+
|
178 |
+
NLTK includes the complete works of Machado de Assis.
|
179 |
+
|
180 |
+
>>> from nltk.corpus import machado
|
181 |
+
>>> machado.fileids()
|
182 |
+
['contos/macn001.txt', 'contos/macn002.txt', 'contos/macn003.txt', ...]
|
183 |
+
|
184 |
+
Each file corresponds to one of the works of Machado de Assis. To see a complete
|
185 |
+
list of works, you can look at the corpus README file: ``print(machado.readme())``.
|
186 |
+
Let's access the text of the *Posthumous Memories of Brás Cubas*.
|
187 |
+
|
188 |
+
We can access the text as a list of characters, and access 200 characters starting
|
189 |
+
from position 10,000.
|
190 |
+
|
191 |
+
>>> raw_text = machado.raw('romance/marm05.txt')
|
192 |
+
>>> raw_text[10000:10200]
|
193 |
+
u', primou no\nEstado, e foi um dos amigos particulares do vice-rei Conde
|
194 |
+
da Cunha.\n\nComo este apelido de Cubas lhe\ncheirasse excessivamente a
|
195 |
+
tanoaria, alegava meu pai, bisneto de Dami\xe3o, que o\ndito ape'
|
196 |
+
|
197 |
+
However, this is not a very useful way to work with a text. We generally think
|
198 |
+
of a text as a sequence of words and punctuation, not characters:
|
199 |
+
|
200 |
+
>>> text1 = machado.words('romance/marm05.txt')
|
201 |
+
>>> text1
|
202 |
+
['Romance', ',', 'Mem\xf3rias', 'P\xf3stumas', 'de', ...]
|
203 |
+
>>> len(text1)
|
204 |
+
77098
|
205 |
+
>>> len(set(text1))
|
206 |
+
10848
|
207 |
+
|
208 |
+
Here's a program that finds the most common ngrams that contain a
|
209 |
+
particular target word.
|
210 |
+
|
211 |
+
>>> from nltk import ngrams, FreqDist
|
212 |
+
>>> target_word = 'olhos'
|
213 |
+
>>> fd = FreqDist(ng
|
214 |
+
... for ng in ngrams(text1, 5)
|
215 |
+
... if target_word in ng)
|
216 |
+
>>> for hit in fd.samples():
|
217 |
+
... print(' '.join(hit))
|
218 |
+
...
|
219 |
+
, com os olhos no
|
220 |
+
com os olhos no ar
|
221 |
+
com os olhos no chão
|
222 |
+
e todos com os olhos
|
223 |
+
me estar com os olhos
|
224 |
+
os olhos estúpidos , a
|
225 |
+
os olhos na costura ,
|
226 |
+
os olhos no ar ,
|
227 |
+
, com os olhos espetados
|
228 |
+
, com os olhos estúpidos
|
229 |
+
, com os olhos fitos
|
230 |
+
, com os olhos naquele
|
231 |
+
, com os olhos para
|
232 |
+
|
233 |
+
|
234 |
+
Accessing the MacMorpho Tagged Corpus
|
235 |
+
-------------------------------------
|
236 |
+
|
237 |
+
NLTK includes the MAC-MORPHO Brazilian Portuguese POS-tagged news text,
|
238 |
+
with over a million words of
|
239 |
+
journalistic texts extracted from ten sections of
|
240 |
+
the daily newspaper *Folha de Sao Paulo*, 1994.
|
241 |
+
|
242 |
+
We can access this corpus as a sequence of words or tagged words as follows:
|
243 |
+
|
244 |
+
>>> import nltk.corpus
|
245 |
+
>>> nltk.corpus.mac_morpho.words()
|
246 |
+
['Jersei', 'atinge', 'm\xe9dia', 'de', 'Cr$', '1,4', ...]
|
247 |
+
>>> nltk.corpus.mac_morpho.sents()
|
248 |
+
[['Jersei', 'atinge', 'm\xe9dia', 'de', 'Cr$', '1,4', 'milh\xe3o',
|
249 |
+
'em', 'a', 'venda', 'de', 'a', 'Pinhal', 'em', 'S\xe3o', 'Paulo'],
|
250 |
+
['Programe', 'sua', 'viagem', 'a', 'a', 'Exposi\xe7\xe3o', 'Nacional',
|
251 |
+
'do', 'Zeb', ',', 'que', 'come\xe7a', 'dia', '25'], ...]
|
252 |
+
>>> nltk.corpus.mac_morpho.tagged_words()
|
253 |
+
[('Jersei', 'N'), ('atinge', 'V'), ('m\xe9dia', 'N'), ...]
|
254 |
+
|
255 |
+
We can also access it in sentence chunks.
|
256 |
+
|
257 |
+
>>> nltk.corpus.mac_morpho.tagged_sents()
|
258 |
+
[[('Jersei', 'N'), ('atinge', 'V'), ('m\xe9dia', 'N'), ('de', 'PREP'),
|
259 |
+
('Cr$', 'CUR'), ('1,4', 'NUM'), ('milh\xe3o', 'N'), ('em', 'PREP|+'),
|
260 |
+
('a', 'ART'), ('venda', 'N'), ('de', 'PREP|+'), ('a', 'ART'),
|
261 |
+
('Pinhal', 'NPROP'), ('em', 'PREP'), ('S\xe3o', 'NPROP'),
|
262 |
+
('Paulo', 'NPROP')],
|
263 |
+
[('Programe', 'V'), ('sua', 'PROADJ'), ('viagem', 'N'), ('a', 'PREP|+'),
|
264 |
+
('a', 'ART'), ('Exposi\xe7\xe3o', 'NPROP'), ('Nacional', 'NPROP'),
|
265 |
+
('do', 'NPROP'), ('Zeb', 'NPROP'), (',', ','), ('que', 'PRO-KS-REL'),
|
266 |
+
('come\xe7a', 'V'), ('dia', 'N'), ('25', 'N|AP')], ...]
|
267 |
+
|
268 |
+
This data can be used to train taggers (examples below for the Floresta treebank).
|
269 |
+
|
270 |
+
Accessing the Floresta Portuguese Treebank
|
271 |
+
------------------------------------------
|
272 |
+
|
273 |
+
The NLTK data distribution includes the
|
274 |
+
"Floresta Sinta(c)tica Corpus" version 7.4, available from
|
275 |
+
``https://www.linguateca.pt/Floresta/``.
|
276 |
+
|
277 |
+
We can access this corpus as a sequence of words or tagged words as follows:
|
278 |
+
|
279 |
+
>>> from nltk.corpus import floresta
|
280 |
+
>>> floresta.words()
|
281 |
+
['Um', 'revivalismo', 'refrescante', 'O', '7_e_Meio', ...]
|
282 |
+
>>> floresta.tagged_words()
|
283 |
+
[('Um', '>N+art'), ('revivalismo', 'H+n'), ...]
|
284 |
+
|
285 |
+
The tags consist of some syntactic information, followed by a plus sign,
|
286 |
+
followed by a conventional part-of-speech tag. Let's strip off the material before
|
287 |
+
the plus sign:
|
288 |
+
|
289 |
+
>>> def simplify_tag(t):
|
290 |
+
... if "+" in t:
|
291 |
+
... return t[t.index("+")+1:]
|
292 |
+
... else:
|
293 |
+
... return t
|
294 |
+
>>> twords = floresta.tagged_words()
|
295 |
+
>>> twords = [(w.lower(), simplify_tag(t)) for (w,t) in twords]
|
296 |
+
>>> twords[:10]
|
297 |
+
[('um', 'art'), ('revivalismo', 'n'), ('refrescante', 'adj'), ('o', 'art'), ('7_e_meio', 'prop'),
|
298 |
+
('\xe9', 'v-fin'), ('um', 'art'), ('ex-libris', 'n'), ('de', 'prp'), ('a', 'art')]
|
299 |
+
|
300 |
+
Pretty printing the tagged words:
|
301 |
+
|
302 |
+
>>> print(' '.join(word + '/' + tag for (word, tag) in twords[:10]))
|
303 |
+
um/art revivalismo/n refrescante/adj o/art 7_e_meio/prop é/v-fin um/art ex-libris/n de/prp a/art
|
304 |
+
|
305 |
+
Count the word tokens and types, and determine the most common word:
|
306 |
+
|
307 |
+
>>> words = floresta.words()
|
308 |
+
>>> len(words)
|
309 |
+
211852
|
310 |
+
>>> fd = nltk.FreqDist(words)
|
311 |
+
>>> len(fd)
|
312 |
+
29421
|
313 |
+
>>> fd.max()
|
314 |
+
'de'
|
315 |
+
|
316 |
+
List the 20 most frequent tags, in order of decreasing frequency:
|
317 |
+
|
318 |
+
>>> tags = [simplify_tag(tag) for (word,tag) in floresta.tagged_words()]
|
319 |
+
>>> fd = nltk.FreqDist(tags)
|
320 |
+
>>> fd.keys()[:20]
|
321 |
+
['n', 'prp', 'art', 'v-fin', ',', 'prop', 'adj', 'adv', '.',
|
322 |
+
'conj-c', 'v-inf', 'pron-det', 'v-pcp', 'num', 'pron-indp',
|
323 |
+
'pron-pers', '\xab', '\xbb', 'conj-s', '}']
|
324 |
+
|
325 |
+
We can also access the corpus grouped by sentence:
|
326 |
+
|
327 |
+
>>> floresta.sents()
|
328 |
+
[['Um', 'revivalismo', 'refrescante'],
|
329 |
+
['O', '7_e_Meio', '\xe9', 'um', 'ex-libris', 'de', 'a', 'noite',
|
330 |
+
'algarvia', '.'], ...]
|
331 |
+
>>> floresta.tagged_sents()
|
332 |
+
[[('Um', '>N+art'), ('revivalismo', 'H+n'), ('refrescante', 'N<+adj')],
|
333 |
+
[('O', '>N+art'), ('7_e_Meio', 'H+prop'), ('\xe9', 'P+v-fin'),
|
334 |
+
('um', '>N+art'), ('ex-libris', 'H+n'), ('de', 'H+prp'),
|
335 |
+
('a', '>N+art'), ('noite', 'H+n'), ('algarvia', 'N<+adj'), ('.', '.')],
|
336 |
+
...]
|
337 |
+
>>> floresta.parsed_sents()
|
338 |
+
[Tree('UTT+np', [Tree('>N+art', ['Um']), Tree('H+n', ['revivalismo']),
|
339 |
+
Tree('N<+adj', ['refrescante'])]),
|
340 |
+
Tree('STA+fcl',
|
341 |
+
[Tree('SUBJ+np', [Tree('>N+art', ['O']),
|
342 |
+
Tree('H+prop', ['7_e_Meio'])]),
|
343 |
+
Tree('P+v-fin', ['\xe9']),
|
344 |
+
Tree('SC+np',
|
345 |
+
[Tree('>N+art', ['um']),
|
346 |
+
Tree('H+n', ['ex-libris']),
|
347 |
+
Tree('N<+pp', [Tree('H+prp', ['de']),
|
348 |
+
Tree('P<+np', [Tree('>N+art', ['a']),
|
349 |
+
Tree('H+n', ['noite']),
|
350 |
+
Tree('N<+adj', ['algarvia'])])])]),
|
351 |
+
Tree('.', ['.'])]), ...]
|
352 |
+
|
353 |
+
To view a parse tree, use the ``draw()`` method, e.g.:
|
354 |
+
|
355 |
+
>>> psents = floresta.parsed_sents()
|
356 |
+
>>> psents[5].draw() # doctest: +SKIP
|
357 |
+
|
358 |
+
Character Encodings
|
359 |
+
-------------------
|
360 |
+
|
361 |
+
Python understands the common character encoding used for Portuguese, ISO 8859-1 (ISO Latin 1).
|
362 |
+
|
363 |
+
>>> import os, nltk.test
|
364 |
+
>>> testdir = os.path.split(nltk.test.__file__)[0]
|
365 |
+
>>> text = open(os.path.join(testdir, 'floresta.txt'), 'rb').read().decode('ISO-8859-1')
|
366 |
+
>>> text[:60]
|
367 |
+
'O 7 e Meio \xe9 um ex-libris da noite algarvia.\n\xc9 uma das mais '
|
368 |
+
>>> print(text[:60])
|
369 |
+
O 7 e Meio é um ex-libris da noite algarvia.
|
370 |
+
É uma das mais
|
371 |
+
|
372 |
+
For more information about character encodings and Python, please see section 3.3 of the book.
|
373 |
+
|
374 |
+
----------------
|
375 |
+
Processing Tasks
|
376 |
+
----------------
|
377 |
+
|
378 |
+
|
379 |
+
Simple Concordancing
|
380 |
+
--------------------
|
381 |
+
|
382 |
+
Here's a function that takes a word and a specified amount of context (measured
|
383 |
+
in characters), and generates a concordance for that word.
|
384 |
+
|
385 |
+
>>> def concordance(word, context=30):
|
386 |
+
... for sent in floresta.sents():
|
387 |
+
... if word in sent:
|
388 |
+
... pos = sent.index(word)
|
389 |
+
... left = ' '.join(sent[:pos])
|
390 |
+
... right = ' '.join(sent[pos+1:])
|
391 |
+
... print('%*s %s %-*s' %
|
392 |
+
... (context, left[-context:], word, context, right[:context]))
|
393 |
+
|
394 |
+
>>> concordance("dar") # doctest: +SKIP
|
395 |
+
anduru , foi o suficiente para dar a volta a o resultado .
|
396 |
+
1. O P?BLICO veio dar a a imprensa di?ria portuguesa
|
397 |
+
A fartura de pensamento pode dar maus resultados e n?s n?o quer
|
398 |
+
Come?a a dar resultados a pol?tica de a Uni
|
399 |
+
ial come?ar a incorporar- lo e dar forma a um ' site ' que tem se
|
400 |
+
r com Constantino para ele lhe dar tamb?m os pap?is assinados .
|
401 |
+
va a brincar , pois n?o lhe ia dar procura??o nenhuma enquanto n?
|
402 |
+
?rica como o ant?doto capaz de dar sentido a o seu enorme poder .
|
403 |
+
. . .
|
404 |
+
>>> concordance("vender") # doctest: +SKIP
|
405 |
+
er recebido uma encomenda para vender 4000 blindados a o Iraque .
|
406 |
+
m?rico_Amorim caso conseguisse vender o lote de ac??es de o empres?r
|
407 |
+
mpre ter jovens simp?ticos a ? vender ? chega ! }
|
408 |
+
Disse que o governo vai vender ? desde autom?vel at? particip
|
409 |
+
ndiciou ontem duas pessoas por vender carro com ?gio .
|
410 |
+
A inten??o de Fleury ? vender as a??es para equilibrar as fi
|
411 |
+
|
412 |
+
Part-of-Speech Tagging
|
413 |
+
----------------------
|
414 |
+
|
415 |
+
Let's begin by getting the tagged sentence data, and simplifying the tags
|
416 |
+
as described earlier.
|
417 |
+
|
418 |
+
>>> from nltk.corpus import floresta
|
419 |
+
>>> tsents = floresta.tagged_sents()
|
420 |
+
>>> tsents = [[(w.lower(),simplify_tag(t)) for (w,t) in sent] for sent in tsents if sent]
|
421 |
+
>>> train = tsents[100:]
|
422 |
+
>>> test = tsents[:100]
|
423 |
+
|
424 |
+
We already know that ``n`` is the most common tag, so we can set up a
|
425 |
+
default tagger that tags every word as a noun, and see how well it does:
|
426 |
+
|
427 |
+
>>> tagger0 = nltk.DefaultTagger('n')
|
428 |
+
>>> nltk.tag.accuracy(tagger0, test)
|
429 |
+
0.17697228144989338
|
430 |
+
|
431 |
+
Evidently, about one in every six words is a noun. Let's improve on this by
|
432 |
+
training a unigram tagger:
|
433 |
+
|
434 |
+
>>> tagger1 = nltk.UnigramTagger(train, backoff=tagger0)
|
435 |
+
>>> nltk.tag.accuracy(tagger1, test)
|
436 |
+
0.87029140014214645
|
437 |
+
|
438 |
+
Next a bigram tagger:
|
439 |
+
|
440 |
+
>>> tagger2 = nltk.BigramTagger(train, backoff=tagger1)
|
441 |
+
>>> nltk.tag.accuracy(tagger2, test)
|
442 |
+
0.89019189765458417
|
443 |
+
|
444 |
+
|
445 |
+
Sentence Segmentation
|
446 |
+
---------------------
|
447 |
+
|
448 |
+
Punkt is a language-neutral sentence segmentation tool. We use it to split the raw text of a novel into sentences:
|
449 |
+
|
450 |
+
>>> sent_tokenizer=nltk.data.load('tokenizers/punkt/portuguese.pickle')
|
451 |
+
>>> raw_text = machado.raw('romance/marm05.txt')
|
452 |
+
>>> sentences = sent_tokenizer.tokenize(raw_text)
|
453 |
+
>>> for sent in sentences[1000:1005]:
|
454 |
+
... print("<<", sent, ">>")
|
455 |
+
...
|
456 |
+
<< Em verdade, parecia ainda mais mulher do que era;
|
457 |
+
seria criança nos seus folgares de moça; mas assim quieta, impassível, tinha a
|
458 |
+
compostura da mulher casada. >>
|
459 |
+
<< Talvez essa circunstância lhe diminuía um pouco da
|
460 |
+
graça virginal. >>
|
461 |
+
<< Depressa nos familiarizamos; a mãe fazia-lhe grandes elogios, eu
|
462 |
+
escutava-os de boa sombra, e ela sorria com os olhos fúlgidos, como se lá dentro
|
463 |
+
do cérebro lhe estivesse a voar uma borboletinha de asas de ouro e olhos de
|
464 |
+
diamante... >>
|
465 |
+
<< Digo lá dentro, porque cá fora o
|
466 |
+
que esvoaçou foi uma borboleta preta, que subitamente penetrou na varanda, e
|
467 |
+
começou a bater as asas em derredor de D. Eusébia. >>
|
468 |
+
<< D. Eusébia deu um grito,
|
469 |
+
levantou-se, praguejou umas palavras soltas: - T'esconjuro!... >>
|
470 |
+
|
471 |
+
The sentence tokenizer can be trained and evaluated on other text.
|
472 |
+
The source text (from the Floresta Portuguese Treebank) contains one sentence per line.
|
473 |
+
We read the text, split it into its lines, and then join these lines together using
|
474 |
+
spaces. Now the information about sentence breaks has been discarded. We split this
|
475 |
+
material into training and testing data:
|
476 |
+
|
477 |
+
>>> import os, nltk.test
|
478 |
+
>>> testdir = os.path.split(nltk.test.__file__)[0]
|
479 |
+
>>> text = open(os.path.join(testdir, 'floresta.txt'), 'rb').read().decode('ISO-8859-1')
|
480 |
+
>>> lines = text.split('\n')
|
481 |
+
>>> train = ' '.join(lines[10:])
|
482 |
+
>>> test = ' '.join(lines[:10])
|
483 |
+
|
484 |
+
Now we train the sentence segmenter (or sentence tokenizer) and use it on our test sentences:
|
485 |
+
|
486 |
+
>>> stok = nltk.PunktSentenceTokenizer(train)
|
487 |
+
>>> print(stok.tokenize(test))
|
488 |
+
['O 7 e Meio \xe9 um ex-libris da noite algarvia.',
|
489 |
+
'\xc9 uma das mais antigas discotecas do Algarve, situada em Albufeira,
|
490 |
+
que continua a manter os tra\xe7os decorativos e as clientelas de sempre.',
|
491 |
+
'\xc9 um pouco a vers\xe3o de uma esp\xe9cie de \xaboutro lado\xbb da noite,
|
492 |
+
a meio caminho entre os devaneios de uma fauna perif\xe9rica, seja de Lisboa,
|
493 |
+
Londres, Dublin ou Faro e Portim\xe3o, e a postura circunspecta dos fi\xe9is da casa,
|
494 |
+
que dela esperam a m\xfasica \xabgeracionista\xbb dos 60 ou dos 70.',
|
495 |
+
'N\xe3o deixa de ser, nos tempos que correm, um certo \xabvery typical\xbb algarvio,
|
496 |
+
cabe\xe7a de cartaz para os que querem fugir a algumas movimenta\xe7\xf5es nocturnas
|
497 |
+
j\xe1 a caminho da ritualiza\xe7\xe3o de massas, do g\xe9nero \xabvamos todos ao
|
498 |
+
Calypso e encontramo-nos na Locomia\xbb.',
|
499 |
+
'E assim, aos 2,5 milh\xf5es que o Minist\xe9rio do Planeamento e Administra\xe7\xe3o
|
500 |
+
do Territ\xf3rio j\xe1 gasta no pagamento do pessoal afecto a estes organismos,
|
501 |
+
v\xeam juntar-se os montantes das obras propriamente ditas, que os munic\xedpios,
|
502 |
+
j\xe1 com projectos na m\xe3o, v\xeam reivindicar junto do Executivo, como salienta
|
503 |
+
aquele membro do Governo.',
|
504 |
+
'E o dinheiro \xabn\xe3o falta s\xf3 \xe0s c\xe2maras\xbb, lembra o secret\xe1rio de Estado,
|
505 |
+
que considera que a solu\xe7\xe3o para as autarquias \xe9 \xabespecializarem-se em
|
506 |
+
fundos comunit\xe1rios\xbb.',
|
507 |
+
'Mas como, se muitas n\xe3o disp\xf5em, nos seus quadros, dos t\xe9cnicos necess\xe1rios?',
|
508 |
+
'\xabEncomendem-nos a projectistas de fora\xbb porque, se as obras vierem a ser financiadas,
|
509 |
+
eles at\xe9 saem de gra\xe7a, j\xe1 que, nesse caso, \xabos fundos comunit\xe1rios pagam
|
510 |
+
os projectos, o mesmo n\xe3o acontecendo quando eles s\xe3o feitos pelos GAT\xbb,
|
511 |
+
dado serem organismos do Estado.',
|
512 |
+
'Essa poder\xe1 vir a ser uma hip\xf3tese, at\xe9 porque, no terreno, a capacidade dos GAT
|
513 |
+
est\xe1 cada vez mais enfraquecida.',
|
514 |
+
'Alguns at\xe9 j\xe1 desapareceram, como o de Castro Verde, e outros t\xeam vindo a perder quadros.']
|
515 |
+
|
516 |
+
NLTK's data collection includes a trained model for Portuguese sentence
|
517 |
+
segmentation, which can be loaded as follows. It is faster to load a trained model than
|
518 |
+
to retrain it.
|
519 |
+
|
520 |
+
>>> stok = nltk.data.load('tokenizers/punkt/portuguese.pickle')
|
521 |
+
|
522 |
+
Stemming
|
523 |
+
--------
|
524 |
+
|
525 |
+
NLTK includes the RSLP Portuguese stemmer. Here we use it to stem some Portuguese text:
|
526 |
+
|
527 |
+
>>> stemmer = nltk.stem.RSLPStemmer()
|
528 |
+
>>> stemmer.stem("copiar")
|
529 |
+
'copi'
|
530 |
+
>>> stemmer.stem("paisagem")
|
531 |
+
'pais'
|
532 |
+
|
533 |
+
|
534 |
+
Stopwords
|
535 |
+
---------
|
536 |
+
|
537 |
+
NLTK includes Portuguese stopwords:
|
538 |
+
|
539 |
+
>>> stopwords = nltk.corpus.stopwords.words('portuguese')
|
540 |
+
>>> stopwords[:10]
|
541 |
+
['a', 'ao', 'aos', 'aquela', 'aquelas', 'aquele', 'aqueles', 'aquilo', 'as', 'at\xe9']
|
542 |
+
|
543 |
+
Now we can use these to filter text. Let's find the most frequent words (other than stopwords)
|
544 |
+
and print them in descending order of frequency:
|
545 |
+
|
546 |
+
>>> fd = nltk.FreqDist(w.lower() for w in floresta.words() if w not in stopwords)
|
547 |
+
>>> for word in list(fd.keys())[:20]:
|
548 |
+
... print(word, fd[word])
|
549 |
+
, 13444
|
550 |
+
. 7725
|
551 |
+
« 2369
|
552 |
+
» 2310
|
553 |
+
é 1305
|
554 |
+
o 1086
|
555 |
+
} 1047
|
556 |
+
{ 1044
|
557 |
+
a 897
|
558 |
+
; 633
|
559 |
+
em 516
|
560 |
+
ser 466
|
561 |
+
sobre 349
|
562 |
+
os 313
|
563 |
+
anos 301
|
564 |
+
ontem 292
|
565 |
+
ainda 279
|
566 |
+
segundo 256
|
567 |
+
ter 249
|
568 |
+
dois 231
|
llmeval-env/lib/python3.10/site-packages/nltk/test/semantics.doctest
ADDED
@@ -0,0 +1,667 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
=========
|
5 |
+
Semantics
|
6 |
+
=========
|
7 |
+
|
8 |
+
>>> # Setup tests by setting the counter to 0
|
9 |
+
>>> from nltk.sem import logic
|
10 |
+
>>> logic._counter._value = 0
|
11 |
+
|
12 |
+
>>> import nltk
|
13 |
+
>>> from nltk.sem import Valuation, Model
|
14 |
+
>>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),
|
15 |
+
... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])),
|
16 |
+
... ('dog', set(['d1'])),
|
17 |
+
... ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
|
18 |
+
>>> val = Valuation(v)
|
19 |
+
>>> dom = val.domain
|
20 |
+
>>> m = Model(dom, val)
|
21 |
+
|
22 |
+
Evaluation
|
23 |
+
----------
|
24 |
+
|
25 |
+
The top-level method of a ``Model`` instance is ``evaluate()``, which
|
26 |
+
assigns a semantic value to expressions of the ``logic`` module, under
|
27 |
+
an assignment ``g``:
|
28 |
+
|
29 |
+
>>> dom = val.domain
|
30 |
+
>>> g = nltk.sem.Assignment(dom)
|
31 |
+
>>> m.evaluate('all x.(boy(x) -> - girl(x))', g)
|
32 |
+
True
|
33 |
+
|
34 |
+
|
35 |
+
``evaluate()`` calls a recursive function ``satisfy()``, which in turn
|
36 |
+
calls a function ``i()`` to interpret non-logical constants and
|
37 |
+
individual variables. ``i()`` delegates the interpretation of these to
|
38 |
+
the model's ``Valuation`` and the variable assignment ``g``
|
39 |
+
respectively. Any atomic expression which cannot be assigned a value
|
40 |
+
by ``i`` raises an ``Undefined`` exception; this is caught by
|
41 |
+
``evaluate``, which returns the string ``'Undefined'``.
|
42 |
+
|
43 |
+
>>> m.evaluate('walk(adam)', g, trace=2)
|
44 |
+
<BLANKLINE>
|
45 |
+
'walk(adam)' is undefined under M, g
|
46 |
+
'Undefined'
|
47 |
+
|
48 |
+
Batch Processing
|
49 |
+
----------------
|
50 |
+
|
51 |
+
The utility functions ``interpret_sents()`` and ``evaluate_sents()`` are intended to
|
52 |
+
help with processing multiple sentences. Here's an example of the first of these:
|
53 |
+
|
54 |
+
>>> sents = ['Mary walks']
|
55 |
+
>>> results = nltk.sem.util.interpret_sents(sents, 'grammars/sample_grammars/sem2.fcfg')
|
56 |
+
>>> for result in results:
|
57 |
+
... for (synrep, semrep) in result:
|
58 |
+
... print(synrep)
|
59 |
+
(S[SEM=<walk(mary)>]
|
60 |
+
(NP[-LOC, NUM='sg', SEM=<\P.P(mary)>]
|
61 |
+
(PropN[-LOC, NUM='sg', SEM=<\P.P(mary)>] Mary))
|
62 |
+
(VP[NUM='sg', SEM=<\x.walk(x)>]
|
63 |
+
(IV[NUM='sg', SEM=<\x.walk(x)>, TNS='pres'] walks)))
|
64 |
+
|
65 |
+
In order to provide backwards compatibility with 'legacy' grammars where the semantics value
|
66 |
+
is specified with a lowercase
|
67 |
+
``sem`` feature, the relevant feature name can be passed to the function using the
|
68 |
+
``semkey`` parameter, as shown here:
|
69 |
+
|
70 |
+
>>> sents = ['raining']
|
71 |
+
>>> g = nltk.grammar.FeatureGrammar.fromstring("""
|
72 |
+
... % start S
|
73 |
+
... S[sem=<raining>] -> 'raining'
|
74 |
+
... """)
|
75 |
+
>>> results = nltk.sem.util.interpret_sents(sents, g, semkey='sem')
|
76 |
+
>>> for result in results:
|
77 |
+
... for (synrep, semrep) in result:
|
78 |
+
... print(semrep)
|
79 |
+
raining
|
80 |
+
|
81 |
+
The function ``evaluate_sents()`` works in a similar manner, but also needs to be
|
82 |
+
passed a ``Model`` against which the semantic representations are evaluated.
|
83 |
+
|
84 |
+
Unit Tests
|
85 |
+
==========
|
86 |
+
|
87 |
+
|
88 |
+
Unit tests for relations and valuations
|
89 |
+
---------------------------------------
|
90 |
+
|
91 |
+
>>> from nltk.sem import *
|
92 |
+
|
93 |
+
Relations are sets of tuples, all of the same length.
|
94 |
+
|
95 |
+
>>> s1 = set([('d1', 'd2'), ('d1', 'd1'), ('d2', 'd1')])
|
96 |
+
>>> is_rel(s1)
|
97 |
+
True
|
98 |
+
>>> s2 = set([('d1', 'd2'), ('d1', 'd2'), ('d1',)])
|
99 |
+
>>> is_rel(s2)
|
100 |
+
Traceback (most recent call last):
|
101 |
+
. . .
|
102 |
+
ValueError: Set set([('d1', 'd2'), ('d1',)]) contains sequences of different lengths
|
103 |
+
>>> s3 = set(['d1', 'd2'])
|
104 |
+
>>> is_rel(s3)
|
105 |
+
Traceback (most recent call last):
|
106 |
+
. . .
|
107 |
+
ValueError: Set set(['d2', 'd1']) contains sequences of different lengths
|
108 |
+
>>> s4 = set2rel(s3)
|
109 |
+
>>> is_rel(s4)
|
110 |
+
True
|
111 |
+
>>> is_rel(set())
|
112 |
+
True
|
113 |
+
>>> null_binary_rel = set([(None, None)])
|
114 |
+
>>> is_rel(null_binary_rel)
|
115 |
+
True
|
116 |
+
|
117 |
+
Sets of entities are converted into sets of singleton tuples
|
118 |
+
(containing strings).
|
119 |
+
|
120 |
+
>>> sorted(set2rel(s3))
|
121 |
+
[('d1',), ('d2',)]
|
122 |
+
>>> sorted(set2rel(set([1,3,5,])))
|
123 |
+
['1', '3', '5']
|
124 |
+
>>> set2rel(set()) == set()
|
125 |
+
True
|
126 |
+
>>> set2rel(set2rel(s3)) == set2rel(s3)
|
127 |
+
True
|
128 |
+
|
129 |
+
Predication is evaluated by set membership.
|
130 |
+
|
131 |
+
>>> ('d1', 'd2') in s1
|
132 |
+
True
|
133 |
+
>>> ('d2', 'd2') in s1
|
134 |
+
False
|
135 |
+
>>> ('d1',) in s1
|
136 |
+
False
|
137 |
+
>>> 'd2' in s1
|
138 |
+
False
|
139 |
+
>>> ('d1',) in s4
|
140 |
+
True
|
141 |
+
>>> ('d1',) in set()
|
142 |
+
False
|
143 |
+
>>> 'd1' in null_binary_rel
|
144 |
+
False
|
145 |
+
|
146 |
+
|
147 |
+
>>> val = Valuation([('Fido', 'd1'), ('dog', set(['d1', 'd2'])), ('walk', set())])
|
148 |
+
>>> sorted(val['dog'])
|
149 |
+
[('d1',), ('d2',)]
|
150 |
+
>>> val.domain == set(['d1', 'd2'])
|
151 |
+
True
|
152 |
+
>>> print(val.symbols)
|
153 |
+
['Fido', 'dog', 'walk']
|
154 |
+
|
155 |
+
|
156 |
+
Parse a valuation from a string.
|
157 |
+
|
158 |
+
>>> v = """
|
159 |
+
... john => b1
|
160 |
+
... mary => g1
|
161 |
+
... suzie => g2
|
162 |
+
... fido => d1
|
163 |
+
... tess => d2
|
164 |
+
... noosa => n
|
165 |
+
... girl => {g1, g2}
|
166 |
+
... boy => {b1, b2}
|
167 |
+
... dog => {d1, d2}
|
168 |
+
... bark => {d1, d2}
|
169 |
+
... walk => {b1, g2, d1}
|
170 |
+
... chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)}
|
171 |
+
... see => {(b1, g1), (b2, d2), (g1, b1),(d2, b1), (g2, n)}
|
172 |
+
... in => {(b1, n), (b2, n), (d2, n)}
|
173 |
+
... with => {(b1, g1), (g1, b1), (d1, b1), (b1, d1)}
|
174 |
+
... """
|
175 |
+
>>> val = Valuation.fromstring(v)
|
176 |
+
|
177 |
+
>>> print(val) # doctest: +SKIP
|
178 |
+
{'bark': set([('d1',), ('d2',)]),
|
179 |
+
'boy': set([('b1',), ('b2',)]),
|
180 |
+
'chase': set([('b1', 'g1'), ('g2', 'd2'), ('g1', 'd1'), ('b2', 'g1')]),
|
181 |
+
'dog': set([('d1',), ('d2',)]),
|
182 |
+
'fido': 'd1',
|
183 |
+
'girl': set([('g2',), ('g1',)]),
|
184 |
+
'in': set([('d2', 'n'), ('b1', 'n'), ('b2', 'n')]),
|
185 |
+
'john': 'b1',
|
186 |
+
'mary': 'g1',
|
187 |
+
'noosa': 'n',
|
188 |
+
'see': set([('b1', 'g1'), ('b2', 'd2'), ('d2', 'b1'), ('g2', 'n'), ('g1', 'b1')]),
|
189 |
+
'suzie': 'g2',
|
190 |
+
'tess': 'd2',
|
191 |
+
'walk': set([('d1',), ('b1',), ('g2',)]),
|
192 |
+
'with': set([('b1', 'g1'), ('d1', 'b1'), ('b1', 'd1'), ('g1', 'b1')])}
|
193 |
+
|
194 |
+
|
195 |
+
Unit tests for function argument application in a Model
|
196 |
+
-------------------------------------------------------
|
197 |
+
|
198 |
+
>>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),\
|
199 |
+
... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])), ('dog', set(['d1'])),
|
200 |
+
... ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')])),
|
201 |
+
... ('kiss', null_binary_rel)]
|
202 |
+
>>> val = Valuation(v)
|
203 |
+
>>> dom = val.domain
|
204 |
+
>>> m = Model(dom, val)
|
205 |
+
>>> g = Assignment(dom)
|
206 |
+
>>> sorted(val['boy'])
|
207 |
+
[('b1',), ('b2',)]
|
208 |
+
>>> ('b1',) in val['boy']
|
209 |
+
True
|
210 |
+
>>> ('g1',) in val['boy']
|
211 |
+
False
|
212 |
+
>>> ('foo',) in val['boy']
|
213 |
+
False
|
214 |
+
>>> ('b1', 'g1') in val['love']
|
215 |
+
True
|
216 |
+
>>> ('b1', 'b1') in val['kiss']
|
217 |
+
False
|
218 |
+
>>> sorted(val.domain)
|
219 |
+
['b1', 'b2', 'd1', 'g1', 'g2']
|
220 |
+
|
221 |
+
|
222 |
+
Model Tests
|
223 |
+
===========
|
224 |
+
|
225 |
+
Extension of Lambda expressions
|
226 |
+
|
227 |
+
>>> v0 = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),\
|
228 |
+
... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])),
|
229 |
+
... ('dog', set(['d1'])),
|
230 |
+
... ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
|
231 |
+
|
232 |
+
>>> val0 = Valuation(v0)
|
233 |
+
>>> dom0 = val0.domain
|
234 |
+
>>> m0 = Model(dom0, val0)
|
235 |
+
>>> g0 = Assignment(dom0)
|
236 |
+
|
237 |
+
>>> print(m0.evaluate(r'\x. \y. love(x, y)', g0) == {'g2': {'g2': False, 'b2': False, 'b1': True, 'g1': False, 'd1': False}, 'b2': {'g2': True, 'b2': False, 'b1': False, 'g1': False, 'd1': False}, 'b1': {'g2': False, 'b2': False, 'b1': False, 'g1': True, 'd1': False}, 'g1': {'g2': False, 'b2': False, 'b1': True, 'g1': False, 'd1': False}, 'd1': {'g2': False, 'b2': False, 'b1': False, 'g1': False, 'd1': False}})
|
238 |
+
True
|
239 |
+
>>> print(m0.evaluate(r'\x. dog(x) (adam)', g0))
|
240 |
+
False
|
241 |
+
>>> print(m0.evaluate(r'\x. (dog(x) | boy(x)) (adam)', g0))
|
242 |
+
True
|
243 |
+
>>> print(m0.evaluate(r'\x. \y. love(x, y)(fido)', g0) == {'g2': False, 'b2': False, 'b1': False, 'g1': False, 'd1': False})
|
244 |
+
True
|
245 |
+
>>> print(m0.evaluate(r'\x. \y. love(x, y)(adam)', g0) == {'g2': False, 'b2': False, 'b1': False, 'g1': True, 'd1': False})
|
246 |
+
True
|
247 |
+
>>> print(m0.evaluate(r'\x. \y. love(x, y)(betty)', g0) == {'g2': False, 'b2': False, 'b1': True, 'g1': False, 'd1': False})
|
248 |
+
True
|
249 |
+
>>> print(m0.evaluate(r'\x. \y. love(x, y)(betty)(adam)', g0))
|
250 |
+
True
|
251 |
+
>>> print(m0.evaluate(r'\x. \y. love(x, y)(betty, adam)', g0))
|
252 |
+
True
|
253 |
+
>>> print(m0.evaluate(r'\y. \x. love(x, y)(fido)(adam)', g0))
|
254 |
+
False
|
255 |
+
>>> print(m0.evaluate(r'\y. \x. love(x, y)(betty, adam)', g0))
|
256 |
+
True
|
257 |
+
>>> print(m0.evaluate(r'\x. exists y. love(x, y)', g0) == {'g2': True, 'b2': True, 'b1': True, 'g1': True, 'd1': False})
|
258 |
+
True
|
259 |
+
>>> print(m0.evaluate(r'\z. adam', g0) == {'g2': 'b1', 'b2': 'b1', 'b1': 'b1', 'g1': 'b1', 'd1': 'b1'})
|
260 |
+
True
|
261 |
+
>>> print(m0.evaluate(r'\z. love(x, y)', g0) == {'g2': False, 'b2': False, 'b1': False, 'g1': False, 'd1': False})
|
262 |
+
True
|
263 |
+
|
264 |
+
|
265 |
+
Propositional Model Test
|
266 |
+
------------------------
|
267 |
+
|
268 |
+
>>> tests = [
|
269 |
+
... ('P & Q', True),
|
270 |
+
... ('P & R', False),
|
271 |
+
... ('- P', False),
|
272 |
+
... ('- R', True),
|
273 |
+
... ('- - P', True),
|
274 |
+
... ('- (P & R)', True),
|
275 |
+
... ('P | R', True),
|
276 |
+
... ('R | P', True),
|
277 |
+
... ('R | R', False),
|
278 |
+
... ('- P | R', False),
|
279 |
+
... ('P | - P', True),
|
280 |
+
... ('P -> Q', True),
|
281 |
+
... ('P -> R', False),
|
282 |
+
... ('R -> P', True),
|
283 |
+
... ('P <-> P', True),
|
284 |
+
... ('R <-> R', True),
|
285 |
+
... ('P <-> R', False),
|
286 |
+
... ]
|
287 |
+
>>> val1 = Valuation([('P', True), ('Q', True), ('R', False)])
|
288 |
+
>>> dom = set([])
|
289 |
+
>>> m = Model(dom, val1)
|
290 |
+
>>> g = Assignment(dom)
|
291 |
+
>>> for (sent, testvalue) in tests:
|
292 |
+
... semvalue = m.evaluate(sent, g)
|
293 |
+
... if semvalue == testvalue:
|
294 |
+
... print('*', end=' ')
|
295 |
+
* * * * * * * * * * * * * * * * *
|
296 |
+
|
297 |
+
|
298 |
+
Test of i Function
|
299 |
+
------------------
|
300 |
+
|
301 |
+
>>> from nltk.sem import Expression
|
302 |
+
>>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),
|
303 |
+
... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])), ('dog', set(['d1'])),
|
304 |
+
... ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
|
305 |
+
>>> val = Valuation(v)
|
306 |
+
>>> dom = val.domain
|
307 |
+
>>> m = Model(dom, val)
|
308 |
+
>>> g = Assignment(dom, [('x', 'b1'), ('y', 'g2')])
|
309 |
+
>>> exprs = ['adam', 'girl', 'love', 'walks', 'x', 'y', 'z']
|
310 |
+
>>> parsed_exprs = [Expression.fromstring(e) for e in exprs]
|
311 |
+
>>> sorted_set = lambda x: sorted(x) if isinstance(x, set) else x
|
312 |
+
>>> for parsed in parsed_exprs:
|
313 |
+
... try:
|
314 |
+
... print("'%s' gets value %s" % (parsed, sorted_set(m.i(parsed, g))))
|
315 |
+
... except Undefined:
|
316 |
+
... print("'%s' is Undefined" % parsed)
|
317 |
+
'adam' gets value b1
|
318 |
+
'girl' gets value [('g1',), ('g2',)]
|
319 |
+
'love' gets value [('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]
|
320 |
+
'walks' is Undefined
|
321 |
+
'x' gets value b1
|
322 |
+
'y' gets value g2
|
323 |
+
'z' is Undefined
|
324 |
+
|
325 |
+
Test for formulas in Model
|
326 |
+
--------------------------
|
327 |
+
|
328 |
+
>>> tests = [
|
329 |
+
... ('love(adam, betty)', True),
|
330 |
+
... ('love(adam, sue)', 'Undefined'),
|
331 |
+
... ('dog(fido)', True),
|
332 |
+
... ('- dog(fido)', False),
|
333 |
+
... ('- - dog(fido)', True),
|
334 |
+
... ('- dog(sue)', 'Undefined'),
|
335 |
+
... ('dog(fido) & boy(adam)', True),
|
336 |
+
... ('- (dog(fido) & boy(adam))', False),
|
337 |
+
... ('- dog(fido) & boy(adam)', False),
|
338 |
+
... ('dog(fido) | boy(adam)', True),
|
339 |
+
... ('- (dog(fido) | boy(adam))', False),
|
340 |
+
... ('- dog(fido) | boy(adam)', True),
|
341 |
+
... ('- dog(fido) | - boy(adam)', False),
|
342 |
+
... ('dog(fido) -> boy(adam)', True),
|
343 |
+
... ('- (dog(fido) -> boy(adam))', False),
|
344 |
+
... ('- dog(fido) -> boy(adam)', True),
|
345 |
+
... ('exists x . love(adam, x)', True),
|
346 |
+
... ('all x . love(adam, x)', False),
|
347 |
+
... ('fido = fido', True),
|
348 |
+
... ('exists x . all y. love(x, y)', False),
|
349 |
+
... ('exists x . (x = fido)', True),
|
350 |
+
... ('all x . (dog(x) | - dog(x))', True),
|
351 |
+
... ('adam = mia', 'Undefined'),
|
352 |
+
... ('\\x. (boy(x) | girl(x))', {'g2': True, 'b2': True, 'b1': True, 'g1': True, 'd1': False}),
|
353 |
+
... ('\\x. exists y. (boy(x) & love(x, y))', {'g2': False, 'b2': True, 'b1': True, 'g1': False, 'd1': False}),
|
354 |
+
... ('exists z1. boy(z1)', True),
|
355 |
+
... ('exists x. (boy(x) & - (x = adam))', True),
|
356 |
+
... ('exists x. (boy(x) & all y. love(y, x))', False),
|
357 |
+
... ('all x. (boy(x) | girl(x))', False),
|
358 |
+
... ('all x. (girl(x) -> exists y. boy(y) & love(x, y))', False),
|
359 |
+
... ('exists x. (boy(x) & all y. (girl(y) -> love(y, x)))', True),
|
360 |
+
... ('exists x. (boy(x) & all y. (girl(y) -> love(x, y)))', False),
|
361 |
+
... ('all x. (dog(x) -> - girl(x))', True),
|
362 |
+
... ('exists x. exists y. (love(x, y) & love(x, y))', True),
|
363 |
+
... ]
|
364 |
+
>>> for (sent, testvalue) in tests:
|
365 |
+
... semvalue = m.evaluate(sent, g)
|
366 |
+
... if semvalue == testvalue:
|
367 |
+
... print('*', end=' ')
|
368 |
+
... else:
|
369 |
+
... print(sent, semvalue)
|
370 |
+
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
|
371 |
+
|
372 |
+
|
373 |
+
|
374 |
+
Satisfier Tests
|
375 |
+
---------------
|
376 |
+
|
377 |
+
>>> formulas = [
|
378 |
+
... 'boy(x)',
|
379 |
+
... '(x = x)',
|
380 |
+
... '(boy(x) | girl(x))',
|
381 |
+
... '(boy(x) & girl(x))',
|
382 |
+
... 'love(adam, x)',
|
383 |
+
... 'love(x, adam)',
|
384 |
+
... '- (x = adam)',
|
385 |
+
... 'exists z22. love(x, z22)',
|
386 |
+
... 'exists y. love(y, x)',
|
387 |
+
... 'all y. (girl(y) -> love(x, y))',
|
388 |
+
... 'all y. (girl(y) -> love(y, x))',
|
389 |
+
... 'all y. (girl(y) -> (boy(x) & love(y, x)))',
|
390 |
+
... 'boy(x) & all y. (girl(y) -> love(x, y))',
|
391 |
+
... 'boy(x) & all y. (girl(y) -> love(y, x))',
|
392 |
+
... 'boy(x) & exists y. (girl(y) & love(y, x))',
|
393 |
+
... 'girl(x) -> dog(x)',
|
394 |
+
... 'all y. (dog(y) -> (x = y))',
|
395 |
+
... '- exists y. love(y, x)',
|
396 |
+
... 'exists y. (love(adam, y) & love(y, x))'
|
397 |
+
... ]
|
398 |
+
>>> g.purge()
|
399 |
+
>>> g.add('x', 'b1')
|
400 |
+
{'x': 'b1'}
|
401 |
+
>>> for f in formulas:
|
402 |
+
... try:
|
403 |
+
... print("'%s' gets value: %s" % (f, m.evaluate(f, g)))
|
404 |
+
... except Undefined:
|
405 |
+
... print("'%s' is Undefined" % f)
|
406 |
+
'boy(x)' gets value: True
|
407 |
+
'(x = x)' gets value: True
|
408 |
+
'(boy(x) | girl(x))' gets value: True
|
409 |
+
'(boy(x) & girl(x))' gets value: False
|
410 |
+
'love(adam, x)' gets value: False
|
411 |
+
'love(x, adam)' gets value: False
|
412 |
+
'- (x = adam)' gets value: False
|
413 |
+
'exists z22. love(x, z22)' gets value: True
|
414 |
+
'exists y. love(y, x)' gets value: True
|
415 |
+
'all y. (girl(y) -> love(x, y))' gets value: False
|
416 |
+
'all y. (girl(y) -> love(y, x))' gets value: True
|
417 |
+
'all y. (girl(y) -> (boy(x) & love(y, x)))' gets value: True
|
418 |
+
'boy(x) & all y. (girl(y) -> love(x, y))' gets value: False
|
419 |
+
'boy(x) & all y. (girl(y) -> love(y, x))' gets value: True
|
420 |
+
'boy(x) & exists y. (girl(y) & love(y, x))' gets value: True
|
421 |
+
'girl(x) -> dog(x)' gets value: True
|
422 |
+
'all y. (dog(y) -> (x = y))' gets value: False
|
423 |
+
'- exists y. love(y, x)' gets value: False
|
424 |
+
'exists y. (love(adam, y) & love(y, x))' gets value: True
|
425 |
+
|
426 |
+
>>> from nltk.sem import Expression
|
427 |
+
>>> for fmla in formulas:
|
428 |
+
... p = Expression.fromstring(fmla)
|
429 |
+
... g.purge()
|
430 |
+
... print("Satisfiers of '%s':\n\t%s" % (p, sorted(m.satisfiers(p, 'x', g))))
|
431 |
+
Satisfiers of 'boy(x)':
|
432 |
+
['b1', 'b2']
|
433 |
+
Satisfiers of '(x = x)':
|
434 |
+
['b1', 'b2', 'd1', 'g1', 'g2']
|
435 |
+
Satisfiers of '(boy(x) | girl(x))':
|
436 |
+
['b1', 'b2', 'g1', 'g2']
|
437 |
+
Satisfiers of '(boy(x) & girl(x))':
|
438 |
+
[]
|
439 |
+
Satisfiers of 'love(adam,x)':
|
440 |
+
['g1']
|
441 |
+
Satisfiers of 'love(x,adam)':
|
442 |
+
['g1', 'g2']
|
443 |
+
Satisfiers of '-(x = adam)':
|
444 |
+
['b2', 'd1', 'g1', 'g2']
|
445 |
+
Satisfiers of 'exists z22.love(x,z22)':
|
446 |
+
['b1', 'b2', 'g1', 'g2']
|
447 |
+
Satisfiers of 'exists y.love(y,x)':
|
448 |
+
['b1', 'g1', 'g2']
|
449 |
+
Satisfiers of 'all y.(girl(y) -> love(x,y))':
|
450 |
+
[]
|
451 |
+
Satisfiers of 'all y.(girl(y) -> love(y,x))':
|
452 |
+
['b1']
|
453 |
+
Satisfiers of 'all y.(girl(y) -> (boy(x) & love(y,x)))':
|
454 |
+
['b1']
|
455 |
+
Satisfiers of '(boy(x) & all y.(girl(y) -> love(x,y)))':
|
456 |
+
[]
|
457 |
+
Satisfiers of '(boy(x) & all y.(girl(y) -> love(y,x)))':
|
458 |
+
['b1']
|
459 |
+
Satisfiers of '(boy(x) & exists y.(girl(y) & love(y,x)))':
|
460 |
+
['b1']
|
461 |
+
Satisfiers of '(girl(x) -> dog(x))':
|
462 |
+
['b1', 'b2', 'd1']
|
463 |
+
Satisfiers of 'all y.(dog(y) -> (x = y))':
|
464 |
+
['d1']
|
465 |
+
Satisfiers of '-exists y.love(y,x)':
|
466 |
+
['b2', 'd1']
|
467 |
+
Satisfiers of 'exists y.(love(adam,y) & love(y,x))':
|
468 |
+
['b1']
|
469 |
+
|
470 |
+
|
471 |
+
Tests based on the Blackburn & Bos testsuite
|
472 |
+
--------------------------------------------
|
473 |
+
|
474 |
+
>>> v1 = [('jules', 'd1'), ('vincent', 'd2'), ('pumpkin', 'd3'),
|
475 |
+
... ('honey_bunny', 'd4'), ('yolanda', 'd5'),
|
476 |
+
... ('customer', set(['d1', 'd2'])),
|
477 |
+
... ('robber', set(['d3', 'd4'])),
|
478 |
+
... ('love', set([('d3', 'd4')]))]
|
479 |
+
>>> val1 = Valuation(v1)
|
480 |
+
>>> dom1 = val1.domain
|
481 |
+
>>> m1 = Model(dom1, val1)
|
482 |
+
>>> g1 = Assignment(dom1)
|
483 |
+
|
484 |
+
>>> v2 = [('jules', 'd1'), ('vincent', 'd2'), ('pumpkin', 'd3'),
|
485 |
+
... ('honey_bunny', 'd4'), ('yolanda', 'd4'),
|
486 |
+
... ('customer', set(['d1', 'd2', 'd5', 'd6'])),
|
487 |
+
... ('robber', set(['d3', 'd4'])),
|
488 |
+
... ('love', set([(None, None)]))]
|
489 |
+
>>> val2 = Valuation(v2)
|
490 |
+
>>> dom2 = set(['d1', 'd2', 'd3', 'd4', 'd5', 'd6'])
|
491 |
+
>>> m2 = Model(dom2, val2)
|
492 |
+
>>> g2 = Assignment(dom2)
|
493 |
+
>>> g21 = Assignment(dom2)
|
494 |
+
>>> g21.add('y', 'd3')
|
495 |
+
{'y': 'd3'}
|
496 |
+
|
497 |
+
>>> v3 = [('mia', 'd1'), ('jody', 'd2'), ('jules', 'd3'),
|
498 |
+
... ('vincent', 'd4'),
|
499 |
+
... ('woman', set(['d1', 'd2'])), ('man', set(['d3', 'd4'])),
|
500 |
+
... ('joke', set(['d5', 'd6'])), ('episode', set(['d7', 'd8'])),
|
501 |
+
... ('in', set([('d5', 'd7'), ('d5', 'd8')])),
|
502 |
+
... ('tell', set([('d1', 'd5'), ('d2', 'd6')]))]
|
503 |
+
>>> val3 = Valuation(v3)
|
504 |
+
>>> dom3 = set(['d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8'])
|
505 |
+
>>> m3 = Model(dom3, val3)
|
506 |
+
>>> g3 = Assignment(dom3)
|
507 |
+
|
508 |
+
>>> tests = [
|
509 |
+
... ('exists x. robber(x)', m1, g1, True),
|
510 |
+
... ('exists x. exists y. love(y, x)', m1, g1, True),
|
511 |
+
... ('exists x0. exists x1. love(x1, x0)', m2, g2, False),
|
512 |
+
... ('all x. all y. love(y, x)', m2, g2, False),
|
513 |
+
... ('- (all x. all y. love(y, x))', m2, g2, True),
|
514 |
+
... ('all x. all y. - love(y, x)', m2, g2, True),
|
515 |
+
... ('yolanda = honey_bunny', m2, g2, True),
|
516 |
+
... ('mia = honey_bunny', m2, g2, 'Undefined'),
|
517 |
+
... ('- (yolanda = honey_bunny)', m2, g2, False),
|
518 |
+
... ('- (mia = honey_bunny)', m2, g2, 'Undefined'),
|
519 |
+
... ('all x. (robber(x) | customer(x))', m2, g2, True),
|
520 |
+
... ('- (all x. (robber(x) | customer(x)))', m2, g2, False),
|
521 |
+
... ('(robber(x) | customer(x))', m2, g2, 'Undefined'),
|
522 |
+
... ('(robber(y) | customer(y))', m2, g21, True),
|
523 |
+
... ('exists x. (man(x) & exists x. woman(x))', m3, g3, True),
|
524 |
+
... ('exists x. (man(x) & exists x. woman(x))', m3, g3, True),
|
525 |
+
... ('- exists x. woman(x)', m3, g3, False),
|
526 |
+
... ('exists x. (tasty(x) & burger(x))', m3, g3, 'Undefined'),
|
527 |
+
... ('- exists x. (tasty(x) & burger(x))', m3, g3, 'Undefined'),
|
528 |
+
... ('exists x. (man(x) & - exists y. woman(y))', m3, g3, False),
|
529 |
+
... ('exists x. (man(x) & - exists x. woman(x))', m3, g3, False),
|
530 |
+
... ('exists x. (woman(x) & - exists x. customer(x))', m2, g2, 'Undefined'),
|
531 |
+
... ]
|
532 |
+
|
533 |
+
>>> for item in tests:
|
534 |
+
... sentence, model, g, testvalue = item
|
535 |
+
... semvalue = model.evaluate(sentence, g)
|
536 |
+
... if semvalue == testvalue:
|
537 |
+
... print('*', end=' ')
|
538 |
+
... g.purge()
|
539 |
+
* * * * * * * * * * * * * * * * * * * * * *
|
540 |
+
|
541 |
+
|
542 |
+
Tests for mapping from syntax to semantics
|
543 |
+
------------------------------------------
|
544 |
+
|
545 |
+
Load a valuation from a file.
|
546 |
+
|
547 |
+
>>> import nltk.data
|
548 |
+
>>> from nltk.sem.util import parse_sents
|
549 |
+
>>> val = nltk.data.load('grammars/sample_grammars/valuation1.val')
|
550 |
+
>>> dom = val.domain
|
551 |
+
>>> m = Model(dom, val)
|
552 |
+
>>> g = Assignment(dom)
|
553 |
+
>>> gramfile = 'grammars/sample_grammars/sem2.fcfg'
|
554 |
+
>>> inputs = ['John sees a girl', 'every dog barks']
|
555 |
+
>>> parses = parse_sents(inputs, gramfile)
|
556 |
+
>>> for sent, trees in zip(inputs, parses):
|
557 |
+
... print()
|
558 |
+
... print("Sentence: %s" % sent)
|
559 |
+
... for tree in trees:
|
560 |
+
... print("Parse:\n %s" %tree)
|
561 |
+
... print("Semantics: %s" % root_semrep(tree))
|
562 |
+
<BLANKLINE>
|
563 |
+
Sentence: John sees a girl
|
564 |
+
Parse:
|
565 |
+
(S[SEM=<exists x.(girl(x) & see(john,x))>]
|
566 |
+
(NP[-LOC, NUM='sg', SEM=<\P.P(john)>]
|
567 |
+
(PropN[-LOC, NUM='sg', SEM=<\P.P(john)>] John))
|
568 |
+
(VP[NUM='sg', SEM=<\y.exists x.(girl(x) & see(y,x))>]
|
569 |
+
(TV[NUM='sg', SEM=<\X y.X(\x.see(y,x))>, TNS='pres'] sees)
|
570 |
+
(NP[NUM='sg', SEM=<\Q.exists x.(girl(x) & Q(x))>]
|
571 |
+
(Det[NUM='sg', SEM=<\P Q.exists x.(P(x) & Q(x))>] a)
|
572 |
+
(Nom[NUM='sg', SEM=<\x.girl(x)>]
|
573 |
+
(N[NUM='sg', SEM=<\x.girl(x)>] girl)))))
|
574 |
+
Semantics: exists x.(girl(x) & see(john,x))
|
575 |
+
<BLANKLINE>
|
576 |
+
Sentence: every dog barks
|
577 |
+
Parse:
|
578 |
+
(S[SEM=<all x.(dog(x) -> bark(x))>]
|
579 |
+
(NP[NUM='sg', SEM=<\Q.all x.(dog(x) -> Q(x))>]
|
580 |
+
(Det[NUM='sg', SEM=<\P Q.all x.(P(x) -> Q(x))>] every)
|
581 |
+
(Nom[NUM='sg', SEM=<\x.dog(x)>]
|
582 |
+
(N[NUM='sg', SEM=<\x.dog(x)>] dog)))
|
583 |
+
(VP[NUM='sg', SEM=<\x.bark(x)>]
|
584 |
+
(IV[NUM='sg', SEM=<\x.bark(x)>, TNS='pres'] barks)))
|
585 |
+
Semantics: all x.(dog(x) -> bark(x))
|
586 |
+
|
587 |
+
>>> sent = "every dog barks"
|
588 |
+
>>> result = nltk.sem.util.interpret_sents([sent], gramfile)[0]
|
589 |
+
>>> for (syntree, semrep) in result:
|
590 |
+
... print(syntree)
|
591 |
+
... print()
|
592 |
+
... print(semrep)
|
593 |
+
(S[SEM=<all x.(dog(x) -> bark(x))>]
|
594 |
+
(NP[NUM='sg', SEM=<\Q.all x.(dog(x) -> Q(x))>]
|
595 |
+
(Det[NUM='sg', SEM=<\P Q.all x.(P(x) -> Q(x))>] every)
|
596 |
+
(Nom[NUM='sg', SEM=<\x.dog(x)>]
|
597 |
+
(N[NUM='sg', SEM=<\x.dog(x)>] dog)))
|
598 |
+
(VP[NUM='sg', SEM=<\x.bark(x)>]
|
599 |
+
(IV[NUM='sg', SEM=<\x.bark(x)>, TNS='pres'] barks)))
|
600 |
+
<BLANKLINE>
|
601 |
+
all x.(dog(x) -> bark(x))
|
602 |
+
|
603 |
+
>>> result = nltk.sem.util.evaluate_sents([sent], gramfile, m, g)[0]
|
604 |
+
>>> for (syntree, semrel, value) in result:
|
605 |
+
... print(syntree)
|
606 |
+
... print()
|
607 |
+
... print(semrel)
|
608 |
+
... print()
|
609 |
+
... print(value)
|
610 |
+
(S[SEM=<all x.(dog(x) -> bark(x))>]
|
611 |
+
(NP[NUM='sg', SEM=<\Q.all x.(dog(x) -> Q(x))>]
|
612 |
+
(Det[NUM='sg', SEM=<\P Q.all x.(P(x) -> Q(x))>] every)
|
613 |
+
(Nom[NUM='sg', SEM=<\x.dog(x)>]
|
614 |
+
(N[NUM='sg', SEM=<\x.dog(x)>] dog)))
|
615 |
+
(VP[NUM='sg', SEM=<\x.bark(x)>]
|
616 |
+
(IV[NUM='sg', SEM=<\x.bark(x)>, TNS='pres'] barks)))
|
617 |
+
<BLANKLINE>
|
618 |
+
all x.(dog(x) -> bark(x))
|
619 |
+
<BLANKLINE>
|
620 |
+
True
|
621 |
+
|
622 |
+
>>> sents = ['Mary walks', 'John sees a dog']
|
623 |
+
>>> results = nltk.sem.util.interpret_sents(sents, 'grammars/sample_grammars/sem2.fcfg')
|
624 |
+
>>> for result in results:
|
625 |
+
... for (synrep, semrep) in result:
|
626 |
+
... print(synrep)
|
627 |
+
(S[SEM=<walk(mary)>]
|
628 |
+
(NP[-LOC, NUM='sg', SEM=<\P.P(mary)>]
|
629 |
+
(PropN[-LOC, NUM='sg', SEM=<\P.P(mary)>] Mary))
|
630 |
+
(VP[NUM='sg', SEM=<\x.walk(x)>]
|
631 |
+
(IV[NUM='sg', SEM=<\x.walk(x)>, TNS='pres'] walks)))
|
632 |
+
(S[SEM=<exists x.(dog(x) & see(john,x))>]
|
633 |
+
(NP[-LOC, NUM='sg', SEM=<\P.P(john)>]
|
634 |
+
(PropN[-LOC, NUM='sg', SEM=<\P.P(john)>] John))
|
635 |
+
(VP[NUM='sg', SEM=<\y.exists x.(dog(x) & see(y,x))>]
|
636 |
+
(TV[NUM='sg', SEM=<\X y.X(\x.see(y,x))>, TNS='pres'] sees)
|
637 |
+
(NP[NUM='sg', SEM=<\Q.exists x.(dog(x) & Q(x))>]
|
638 |
+
(Det[NUM='sg', SEM=<\P Q.exists x.(P(x) & Q(x))>] a)
|
639 |
+
(Nom[NUM='sg', SEM=<\x.dog(x)>]
|
640 |
+
(N[NUM='sg', SEM=<\x.dog(x)>] dog)))))
|
641 |
+
|
642 |
+
Cooper Storage
|
643 |
+
--------------
|
644 |
+
|
645 |
+
>>> from nltk.sem import cooper_storage as cs
|
646 |
+
>>> sentence = 'every girl chases a dog'
|
647 |
+
>>> trees = cs.parse_with_bindops(sentence, grammar='grammars/book_grammars/storage.fcfg')
|
648 |
+
>>> semrep = trees[0].label()['SEM']
|
649 |
+
>>> cs_semrep = cs.CooperStore(semrep)
|
650 |
+
>>> print(cs_semrep.core)
|
651 |
+
chase(z2,z4)
|
652 |
+
>>> for bo in cs_semrep.store:
|
653 |
+
... print(bo)
|
654 |
+
bo(\P.all x.(girl(x) -> P(x)),z2)
|
655 |
+
bo(\P.exists x.(dog(x) & P(x)),z4)
|
656 |
+
>>> cs_semrep.s_retrieve(trace=True)
|
657 |
+
Permutation 1
|
658 |
+
(\P.all x.(girl(x) -> P(x)))(\z2.chase(z2,z4))
|
659 |
+
(\P.exists x.(dog(x) & P(x)))(\z4.all x.(girl(x) -> chase(x,z4)))
|
660 |
+
Permutation 2
|
661 |
+
(\P.exists x.(dog(x) & P(x)))(\z4.chase(z2,z4))
|
662 |
+
(\P.all x.(girl(x) -> P(x)))(\z2.exists x.(dog(x) & chase(z2,x)))
|
663 |
+
|
664 |
+
>>> for reading in cs_semrep.readings:
|
665 |
+
... print(reading)
|
666 |
+
exists x.(dog(x) & all z3.(girl(z3) -> chase(z3,x)))
|
667 |
+
all x.(girl(x) -> exists z4.(dog(z4) & chase(x,z4)))
|
llmeval-env/lib/python3.10/site-packages/nltk/test/treetransforms.doctest
ADDED
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
-------------------------------------------
|
5 |
+
Unit tests for the TreeTransformation class
|
6 |
+
-------------------------------------------
|
7 |
+
|
8 |
+
>>> from copy import deepcopy
|
9 |
+
>>> from nltk.tree import Tree, collapse_unary, chomsky_normal_form, un_chomsky_normal_form
|
10 |
+
|
11 |
+
>>> tree_string = "(TOP (S (S (VP (VBN Turned) (ADVP (RB loose)) (PP (IN in) (NP (NP (NNP Shane) (NNP Longman) (POS 's)) (NN trading) (NN room))))) (, ,) (NP (DT the) (NN yuppie) (NNS dealers)) (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right)))) (. .)))"
|
12 |
+
|
13 |
+
>>> tree = Tree.fromstring(tree_string)
|
14 |
+
>>> print(tree)
|
15 |
+
(TOP
|
16 |
+
(S
|
17 |
+
(S
|
18 |
+
(VP
|
19 |
+
(VBN Turned)
|
20 |
+
(ADVP (RB loose))
|
21 |
+
(PP
|
22 |
+
(IN in)
|
23 |
+
(NP
|
24 |
+
(NP (NNP Shane) (NNP Longman) (POS 's))
|
25 |
+
(NN trading)
|
26 |
+
(NN room)))))
|
27 |
+
(, ,)
|
28 |
+
(NP (DT the) (NN yuppie) (NNS dealers))
|
29 |
+
(VP (AUX do) (NP (NP (RB little)) (ADJP (RB right))))
|
30 |
+
(. .)))
|
31 |
+
|
32 |
+
Make a copy of the original tree and collapse the subtrees with only one child
|
33 |
+
|
34 |
+
>>> collapsedTree = deepcopy(tree)
|
35 |
+
>>> collapse_unary(collapsedTree)
|
36 |
+
>>> print(collapsedTree)
|
37 |
+
(TOP
|
38 |
+
(S
|
39 |
+
(S+VP
|
40 |
+
(VBN Turned)
|
41 |
+
(ADVP (RB loose))
|
42 |
+
(PP
|
43 |
+
(IN in)
|
44 |
+
(NP
|
45 |
+
(NP (NNP Shane) (NNP Longman) (POS 's))
|
46 |
+
(NN trading)
|
47 |
+
(NN room))))
|
48 |
+
(, ,)
|
49 |
+
(NP (DT the) (NN yuppie) (NNS dealers))
|
50 |
+
(VP (AUX do) (NP (NP (RB little)) (ADJP (RB right))))
|
51 |
+
(. .)))
|
52 |
+
|
53 |
+
>>> collapsedTree2 = deepcopy(tree)
|
54 |
+
>>> collapse_unary(collapsedTree2, collapsePOS=True, collapseRoot=True)
|
55 |
+
>>> print(collapsedTree2)
|
56 |
+
(TOP+S
|
57 |
+
(S+VP
|
58 |
+
(VBN Turned)
|
59 |
+
(ADVP+RB loose)
|
60 |
+
(PP
|
61 |
+
(IN in)
|
62 |
+
(NP
|
63 |
+
(NP (NNP Shane) (NNP Longman) (POS 's))
|
64 |
+
(NN trading)
|
65 |
+
(NN room))))
|
66 |
+
(, ,)
|
67 |
+
(NP (DT the) (NN yuppie) (NNS dealers))
|
68 |
+
(VP (AUX do) (NP (NP+RB little) (ADJP+RB right)))
|
69 |
+
(. .))
|
70 |
+
|
71 |
+
Convert the tree to Chomsky Normal Form i.e. each subtree has either two
|
72 |
+
subtree children or a single leaf value. This conversion can be performed
|
73 |
+
using either left- or right-factoring.
|
74 |
+
|
75 |
+
>>> cnfTree = deepcopy(collapsedTree)
|
76 |
+
>>> chomsky_normal_form(cnfTree, factor='left')
|
77 |
+
>>> print(cnfTree)
|
78 |
+
(TOP
|
79 |
+
(S
|
80 |
+
(S|<S+VP-,-NP-VP>
|
81 |
+
(S|<S+VP-,-NP>
|
82 |
+
(S|<S+VP-,>
|
83 |
+
(S+VP
|
84 |
+
(S+VP|<VBN-ADVP> (VBN Turned) (ADVP (RB loose)))
|
85 |
+
(PP
|
86 |
+
(IN in)
|
87 |
+
(NP
|
88 |
+
(NP|<NP-NN>
|
89 |
+
(NP
|
90 |
+
(NP|<NNP-NNP> (NNP Shane) (NNP Longman))
|
91 |
+
(POS 's))
|
92 |
+
(NN trading))
|
93 |
+
(NN room))))
|
94 |
+
(, ,))
|
95 |
+
(NP (NP|<DT-NN> (DT the) (NN yuppie)) (NNS dealers)))
|
96 |
+
(VP (AUX do) (NP (NP (RB little)) (ADJP (RB right)))))
|
97 |
+
(. .)))
|
98 |
+
|
99 |
+
>>> cnfTree = deepcopy(collapsedTree)
|
100 |
+
>>> chomsky_normal_form(cnfTree, factor='right')
|
101 |
+
>>> print(cnfTree)
|
102 |
+
(TOP
|
103 |
+
(S
|
104 |
+
(S+VP
|
105 |
+
(VBN Turned)
|
106 |
+
(S+VP|<ADVP-PP>
|
107 |
+
(ADVP (RB loose))
|
108 |
+
(PP
|
109 |
+
(IN in)
|
110 |
+
(NP
|
111 |
+
(NP (NNP Shane) (NP|<NNP-POS> (NNP Longman) (POS 's)))
|
112 |
+
(NP|<NN-NN> (NN trading) (NN room))))))
|
113 |
+
(S|<,-NP-VP-.>
|
114 |
+
(, ,)
|
115 |
+
(S|<NP-VP-.>
|
116 |
+
(NP (DT the) (NP|<NN-NNS> (NN yuppie) (NNS dealers)))
|
117 |
+
(S|<VP-.>
|
118 |
+
(VP (AUX do) (NP (NP (RB little)) (ADJP (RB right))))
|
119 |
+
(. .))))))
|
120 |
+
|
121 |
+
Employ some Markov smoothing to make the artificial node labels a bit more
|
122 |
+
readable. See the treetransforms.py documentation for more details.
|
123 |
+
|
124 |
+
>>> markovTree = deepcopy(collapsedTree)
|
125 |
+
>>> chomsky_normal_form(markovTree, horzMarkov=2, vertMarkov=1)
|
126 |
+
>>> print(markovTree)
|
127 |
+
(TOP
|
128 |
+
(S^<TOP>
|
129 |
+
(S+VP^<S>
|
130 |
+
(VBN Turned)
|
131 |
+
(S+VP|<ADVP-PP>^<S>
|
132 |
+
(ADVP^<S+VP> (RB loose))
|
133 |
+
(PP^<S+VP>
|
134 |
+
(IN in)
|
135 |
+
(NP^<PP>
|
136 |
+
(NP^<NP>
|
137 |
+
(NNP Shane)
|
138 |
+
(NP|<NNP-POS>^<NP> (NNP Longman) (POS 's)))
|
139 |
+
(NP|<NN-NN>^<PP> (NN trading) (NN room))))))
|
140 |
+
(S|<,-NP>^<TOP>
|
141 |
+
(, ,)
|
142 |
+
(S|<NP-VP>^<TOP>
|
143 |
+
(NP^<S> (DT the) (NP|<NN-NNS>^<S> (NN yuppie) (NNS dealers)))
|
144 |
+
(S|<VP-.>^<TOP>
|
145 |
+
(VP^<S>
|
146 |
+
(AUX do)
|
147 |
+
(NP^<VP> (NP^<NP> (RB little)) (ADJP^<NP> (RB right))))
|
148 |
+
(. .))))))
|
149 |
+
|
150 |
+
Convert the transformed tree back to its original form
|
151 |
+
|
152 |
+
>>> un_chomsky_normal_form(markovTree)
|
153 |
+
>>> tree == markovTree
|
154 |
+
True
|
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_bllip.py
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
|
3 |
+
from nltk.data import find
|
4 |
+
from nltk.parse.bllip import BllipParser
|
5 |
+
from nltk.tree import Tree
|
6 |
+
|
7 |
+
|
8 |
+
@pytest.fixture(scope="module")
|
9 |
+
def parser():
|
10 |
+
model_dir = find("models/bllip_wsj_no_aux").path
|
11 |
+
return BllipParser.from_unified_model_dir(model_dir)
|
12 |
+
|
13 |
+
|
14 |
+
def setup_module():
|
15 |
+
pytest.importorskip("bllipparser")
|
16 |
+
|
17 |
+
|
18 |
+
class TestBllipParser:
|
19 |
+
def test_parser_loads_a_valid_tree(self, parser):
|
20 |
+
parsed = parser.parse("I saw the man with the telescope")
|
21 |
+
tree = next(parsed)
|
22 |
+
|
23 |
+
assert isinstance(tree, Tree)
|
24 |
+
assert (
|
25 |
+
tree.pformat()
|
26 |
+
== """
|
27 |
+
(S1
|
28 |
+
(S
|
29 |
+
(NP (PRP I))
|
30 |
+
(VP
|
31 |
+
(VBD saw)
|
32 |
+
(NP (DT the) (NN man))
|
33 |
+
(PP (IN with) (NP (DT the) (NN telescope))))))
|
34 |
+
""".strip()
|
35 |
+
)
|
36 |
+
|
37 |
+
def test_tagged_parse_finds_matching_element(self, parser):
|
38 |
+
parsed = parser.parse("I saw the man with the telescope")
|
39 |
+
tagged_tree = next(parser.tagged_parse([("telescope", "NN")]))
|
40 |
+
|
41 |
+
assert isinstance(tagged_tree, Tree)
|
42 |
+
assert tagged_tree.pformat() == "(S1 (NP (NN telescope)))"
|
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_collocations.py
ADDED
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from nltk.collocations import BigramCollocationFinder
|
2 |
+
from nltk.metrics import BigramAssocMeasures
|
3 |
+
|
4 |
+
## Test bigram counters with discontinuous bigrams and repeated words
|
5 |
+
|
6 |
+
_EPSILON = 1e-8
|
7 |
+
SENT = "this this is is a a test test".split()
|
8 |
+
|
9 |
+
|
10 |
+
def close_enough(x, y):
|
11 |
+
"""Verify that two sequences of n-gram association values are within
|
12 |
+
_EPSILON of each other.
|
13 |
+
"""
|
14 |
+
|
15 |
+
return all(abs(x1[1] - y1[1]) <= _EPSILON for x1, y1 in zip(x, y))
|
16 |
+
|
17 |
+
|
18 |
+
def test_bigram2():
|
19 |
+
b = BigramCollocationFinder.from_words(SENT)
|
20 |
+
|
21 |
+
assert sorted(b.ngram_fd.items()) == [
|
22 |
+
(("a", "a"), 1),
|
23 |
+
(("a", "test"), 1),
|
24 |
+
(("is", "a"), 1),
|
25 |
+
(("is", "is"), 1),
|
26 |
+
(("test", "test"), 1),
|
27 |
+
(("this", "is"), 1),
|
28 |
+
(("this", "this"), 1),
|
29 |
+
]
|
30 |
+
assert sorted(b.word_fd.items()) == [("a", 2), ("is", 2), ("test", 2), ("this", 2)]
|
31 |
+
|
32 |
+
assert len(SENT) == sum(b.word_fd.values()) == sum(b.ngram_fd.values()) + 1
|
33 |
+
assert close_enough(
|
34 |
+
sorted(b.score_ngrams(BigramAssocMeasures.pmi)),
|
35 |
+
[
|
36 |
+
(("a", "a"), 1.0),
|
37 |
+
(("a", "test"), 1.0),
|
38 |
+
(("is", "a"), 1.0),
|
39 |
+
(("is", "is"), 1.0),
|
40 |
+
(("test", "test"), 1.0),
|
41 |
+
(("this", "is"), 1.0),
|
42 |
+
(("this", "this"), 1.0),
|
43 |
+
],
|
44 |
+
)
|
45 |
+
|
46 |
+
|
47 |
+
def test_bigram3():
|
48 |
+
b = BigramCollocationFinder.from_words(SENT, window_size=3)
|
49 |
+
assert sorted(b.ngram_fd.items()) == sorted(
|
50 |
+
[
|
51 |
+
(("a", "test"), 3),
|
52 |
+
(("is", "a"), 3),
|
53 |
+
(("this", "is"), 3),
|
54 |
+
(("a", "a"), 1),
|
55 |
+
(("is", "is"), 1),
|
56 |
+
(("test", "test"), 1),
|
57 |
+
(("this", "this"), 1),
|
58 |
+
]
|
59 |
+
)
|
60 |
+
|
61 |
+
assert sorted(b.word_fd.items()) == sorted(
|
62 |
+
[("a", 2), ("is", 2), ("test", 2), ("this", 2)]
|
63 |
+
)
|
64 |
+
|
65 |
+
assert (
|
66 |
+
len(SENT) == sum(b.word_fd.values()) == (sum(b.ngram_fd.values()) + 2 + 1) / 2.0
|
67 |
+
)
|
68 |
+
assert close_enough(
|
69 |
+
sorted(b.score_ngrams(BigramAssocMeasures.pmi)),
|
70 |
+
sorted(
|
71 |
+
[
|
72 |
+
(("a", "test"), 1.584962500721156),
|
73 |
+
(("is", "a"), 1.584962500721156),
|
74 |
+
(("this", "is"), 1.584962500721156),
|
75 |
+
(("a", "a"), 0.0),
|
76 |
+
(("is", "is"), 0.0),
|
77 |
+
(("test", "test"), 0.0),
|
78 |
+
(("this", "this"), 0.0),
|
79 |
+
]
|
80 |
+
),
|
81 |
+
)
|
82 |
+
|
83 |
+
|
84 |
+
def test_bigram5():
|
85 |
+
b = BigramCollocationFinder.from_words(SENT, window_size=5)
|
86 |
+
assert sorted(b.ngram_fd.items()) == sorted(
|
87 |
+
[
|
88 |
+
(("a", "test"), 4),
|
89 |
+
(("is", "a"), 4),
|
90 |
+
(("this", "is"), 4),
|
91 |
+
(("is", "test"), 3),
|
92 |
+
(("this", "a"), 3),
|
93 |
+
(("a", "a"), 1),
|
94 |
+
(("is", "is"), 1),
|
95 |
+
(("test", "test"), 1),
|
96 |
+
(("this", "this"), 1),
|
97 |
+
]
|
98 |
+
)
|
99 |
+
assert sorted(b.word_fd.items()) == sorted(
|
100 |
+
[("a", 2), ("is", 2), ("test", 2), ("this", 2)]
|
101 |
+
)
|
102 |
+
n_word_fd = sum(b.word_fd.values())
|
103 |
+
n_ngram_fd = (sum(b.ngram_fd.values()) + 4 + 3 + 2 + 1) / 4.0
|
104 |
+
assert len(SENT) == n_word_fd == n_ngram_fd
|
105 |
+
assert close_enough(
|
106 |
+
sorted(b.score_ngrams(BigramAssocMeasures.pmi)),
|
107 |
+
sorted(
|
108 |
+
[
|
109 |
+
(("a", "test"), 1.0),
|
110 |
+
(("is", "a"), 1.0),
|
111 |
+
(("this", "is"), 1.0),
|
112 |
+
(("is", "test"), 0.5849625007211562),
|
113 |
+
(("this", "a"), 0.5849625007211562),
|
114 |
+
(("a", "a"), -1.0),
|
115 |
+
(("is", "is"), -1.0),
|
116 |
+
(("test", "test"), -1.0),
|
117 |
+
(("this", "this"), -1.0),
|
118 |
+
]
|
119 |
+
),
|
120 |
+
)
|
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_corpus_views.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Corpus View Regression Tests
|
3 |
+
"""
|
4 |
+
import unittest
|
5 |
+
|
6 |
+
import nltk.data
|
7 |
+
from nltk.corpus.reader.util import (
|
8 |
+
StreamBackedCorpusView,
|
9 |
+
read_line_block,
|
10 |
+
read_whitespace_block,
|
11 |
+
)
|
12 |
+
|
13 |
+
|
14 |
+
class TestCorpusViews(unittest.TestCase):
|
15 |
+
|
16 |
+
linetok = nltk.LineTokenizer(blanklines="keep")
|
17 |
+
names = [
|
18 |
+
"corpora/inaugural/README", # A very short file (160 chars)
|
19 |
+
"corpora/inaugural/1793-Washington.txt", # A relatively short file (791 chars)
|
20 |
+
"corpora/inaugural/1909-Taft.txt", # A longer file (32k chars)
|
21 |
+
]
|
22 |
+
|
23 |
+
def data(self):
|
24 |
+
for name in self.names:
|
25 |
+
f = nltk.data.find(name)
|
26 |
+
with f.open() as fp:
|
27 |
+
file_data = fp.read().decode("utf8")
|
28 |
+
yield f, file_data
|
29 |
+
|
30 |
+
def test_correct_values(self):
|
31 |
+
# Check that corpus views produce the correct sequence of values.
|
32 |
+
|
33 |
+
for f, file_data in self.data():
|
34 |
+
v = StreamBackedCorpusView(f, read_whitespace_block)
|
35 |
+
self.assertEqual(list(v), file_data.split())
|
36 |
+
|
37 |
+
v = StreamBackedCorpusView(f, read_line_block)
|
38 |
+
self.assertEqual(list(v), self.linetok.tokenize(file_data))
|
39 |
+
|
40 |
+
def test_correct_length(self):
|
41 |
+
# Check that the corpus views report the correct lengths:
|
42 |
+
|
43 |
+
for f, file_data in self.data():
|
44 |
+
v = StreamBackedCorpusView(f, read_whitespace_block)
|
45 |
+
self.assertEqual(len(v), len(file_data.split()))
|
46 |
+
|
47 |
+
v = StreamBackedCorpusView(f, read_line_block)
|
48 |
+
self.assertEqual(len(v), len(self.linetok.tokenize(file_data)))
|
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_freqdist.py
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import nltk
|
2 |
+
|
3 |
+
|
4 |
+
def test_iterating_returns_an_iterator_ordered_by_frequency():
|
5 |
+
samples = ["one", "two", "two"]
|
6 |
+
distribution = nltk.FreqDist(samples)
|
7 |
+
assert list(distribution) == ["two", "one"]
|
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_json2csv_corpus.py
ADDED
@@ -0,0 +1,210 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit: Twitter client
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Lorenzo Rubio <lrnzcig@gmail.com>
|
5 |
+
# URL: <https://www.nltk.org/>
|
6 |
+
# For license information, see LICENSE.TXT
|
7 |
+
|
8 |
+
"""
|
9 |
+
Regression tests for `json2csv()` and `json2csv_entities()` in Twitter
|
10 |
+
package.
|
11 |
+
"""
|
12 |
+
from pathlib import Path
|
13 |
+
|
14 |
+
import pytest
|
15 |
+
|
16 |
+
from nltk.corpus import twitter_samples
|
17 |
+
from nltk.twitter.common import json2csv, json2csv_entities
|
18 |
+
|
19 |
+
|
20 |
+
def files_are_identical(pathA, pathB):
|
21 |
+
"""
|
22 |
+
Compare two files, ignoring carriage returns,
|
23 |
+
leading whitespace, and trailing whitespace
|
24 |
+
"""
|
25 |
+
f1 = [l.strip() for l in pathA.read_bytes().splitlines()]
|
26 |
+
f2 = [l.strip() for l in pathB.read_bytes().splitlines()]
|
27 |
+
return f1 == f2
|
28 |
+
|
29 |
+
|
30 |
+
subdir = Path(__file__).parent / "files"
|
31 |
+
|
32 |
+
|
33 |
+
@pytest.fixture
|
34 |
+
def infile():
|
35 |
+
with open(twitter_samples.abspath("tweets.20150430-223406.json")) as infile:
|
36 |
+
return [next(infile) for x in range(100)]
|
37 |
+
|
38 |
+
|
39 |
+
def test_textoutput(tmp_path, infile):
|
40 |
+
ref_fn = subdir / "tweets.20150430-223406.text.csv.ref"
|
41 |
+
outfn = tmp_path / "tweets.20150430-223406.text.csv"
|
42 |
+
json2csv(infile, outfn, ["text"], gzip_compress=False)
|
43 |
+
assert files_are_identical(outfn, ref_fn)
|
44 |
+
|
45 |
+
|
46 |
+
def test_tweet_metadata(tmp_path, infile):
|
47 |
+
ref_fn = subdir / "tweets.20150430-223406.tweet.csv.ref"
|
48 |
+
fields = [
|
49 |
+
"created_at",
|
50 |
+
"favorite_count",
|
51 |
+
"id",
|
52 |
+
"in_reply_to_status_id",
|
53 |
+
"in_reply_to_user_id",
|
54 |
+
"retweet_count",
|
55 |
+
"retweeted",
|
56 |
+
"text",
|
57 |
+
"truncated",
|
58 |
+
"user.id",
|
59 |
+
]
|
60 |
+
|
61 |
+
outfn = tmp_path / "tweets.20150430-223406.tweet.csv"
|
62 |
+
json2csv(infile, outfn, fields, gzip_compress=False)
|
63 |
+
assert files_are_identical(outfn, ref_fn)
|
64 |
+
|
65 |
+
|
66 |
+
def test_user_metadata(tmp_path, infile):
|
67 |
+
ref_fn = subdir / "tweets.20150430-223406.user.csv.ref"
|
68 |
+
fields = ["id", "text", "user.id", "user.followers_count", "user.friends_count"]
|
69 |
+
|
70 |
+
outfn = tmp_path / "tweets.20150430-223406.user.csv"
|
71 |
+
json2csv(infile, outfn, fields, gzip_compress=False)
|
72 |
+
assert files_are_identical(outfn, ref_fn)
|
73 |
+
|
74 |
+
|
75 |
+
def test_tweet_hashtag(tmp_path, infile):
|
76 |
+
ref_fn = subdir / "tweets.20150430-223406.hashtag.csv.ref"
|
77 |
+
outfn = tmp_path / "tweets.20150430-223406.hashtag.csv"
|
78 |
+
json2csv_entities(
|
79 |
+
infile,
|
80 |
+
outfn,
|
81 |
+
["id", "text"],
|
82 |
+
"hashtags",
|
83 |
+
["text"],
|
84 |
+
gzip_compress=False,
|
85 |
+
)
|
86 |
+
assert files_are_identical(outfn, ref_fn)
|
87 |
+
|
88 |
+
|
89 |
+
def test_tweet_usermention(tmp_path, infile):
|
90 |
+
ref_fn = subdir / "tweets.20150430-223406.usermention.csv.ref"
|
91 |
+
outfn = tmp_path / "tweets.20150430-223406.usermention.csv"
|
92 |
+
json2csv_entities(
|
93 |
+
infile,
|
94 |
+
outfn,
|
95 |
+
["id", "text"],
|
96 |
+
"user_mentions",
|
97 |
+
["id", "screen_name"],
|
98 |
+
gzip_compress=False,
|
99 |
+
)
|
100 |
+
assert files_are_identical(outfn, ref_fn)
|
101 |
+
|
102 |
+
|
103 |
+
def test_tweet_media(tmp_path, infile):
|
104 |
+
ref_fn = subdir / "tweets.20150430-223406.media.csv.ref"
|
105 |
+
outfn = tmp_path / "tweets.20150430-223406.media.csv"
|
106 |
+
json2csv_entities(
|
107 |
+
infile,
|
108 |
+
outfn,
|
109 |
+
["id"],
|
110 |
+
"media",
|
111 |
+
["media_url", "url"],
|
112 |
+
gzip_compress=False,
|
113 |
+
)
|
114 |
+
|
115 |
+
assert files_are_identical(outfn, ref_fn)
|
116 |
+
|
117 |
+
|
118 |
+
def test_tweet_url(tmp_path, infile):
|
119 |
+
ref_fn = subdir / "tweets.20150430-223406.url.csv.ref"
|
120 |
+
outfn = tmp_path / "tweets.20150430-223406.url.csv"
|
121 |
+
json2csv_entities(
|
122 |
+
infile,
|
123 |
+
outfn,
|
124 |
+
["id"],
|
125 |
+
"urls",
|
126 |
+
["url", "expanded_url"],
|
127 |
+
gzip_compress=False,
|
128 |
+
)
|
129 |
+
|
130 |
+
assert files_are_identical(outfn, ref_fn)
|
131 |
+
|
132 |
+
|
133 |
+
def test_userurl(tmp_path, infile):
|
134 |
+
ref_fn = subdir / "tweets.20150430-223406.userurl.csv.ref"
|
135 |
+
outfn = tmp_path / "tweets.20150430-223406.userurl.csv"
|
136 |
+
json2csv_entities(
|
137 |
+
infile,
|
138 |
+
outfn,
|
139 |
+
["id", "screen_name"],
|
140 |
+
"user.urls",
|
141 |
+
["url", "expanded_url"],
|
142 |
+
gzip_compress=False,
|
143 |
+
)
|
144 |
+
|
145 |
+
assert files_are_identical(outfn, ref_fn)
|
146 |
+
|
147 |
+
|
148 |
+
def test_tweet_place(tmp_path, infile):
|
149 |
+
ref_fn = subdir / "tweets.20150430-223406.place.csv.ref"
|
150 |
+
outfn = tmp_path / "tweets.20150430-223406.place.csv"
|
151 |
+
json2csv_entities(
|
152 |
+
infile,
|
153 |
+
outfn,
|
154 |
+
["id", "text"],
|
155 |
+
"place",
|
156 |
+
["name", "country"],
|
157 |
+
gzip_compress=False,
|
158 |
+
)
|
159 |
+
|
160 |
+
assert files_are_identical(outfn, ref_fn)
|
161 |
+
|
162 |
+
|
163 |
+
def test_tweet_place_boundingbox(tmp_path, infile):
|
164 |
+
ref_fn = subdir / "tweets.20150430-223406.placeboundingbox.csv.ref"
|
165 |
+
outfn = tmp_path / "tweets.20150430-223406.placeboundingbox.csv"
|
166 |
+
json2csv_entities(
|
167 |
+
infile,
|
168 |
+
outfn,
|
169 |
+
["id", "name"],
|
170 |
+
"place.bounding_box",
|
171 |
+
["coordinates"],
|
172 |
+
gzip_compress=False,
|
173 |
+
)
|
174 |
+
|
175 |
+
assert files_are_identical(outfn, ref_fn)
|
176 |
+
|
177 |
+
|
178 |
+
def test_retweet_original_tweet(tmp_path, infile):
|
179 |
+
ref_fn = subdir / "tweets.20150430-223406.retweet.csv.ref"
|
180 |
+
outfn = tmp_path / "tweets.20150430-223406.retweet.csv"
|
181 |
+
json2csv_entities(
|
182 |
+
infile,
|
183 |
+
outfn,
|
184 |
+
["id"],
|
185 |
+
"retweeted_status",
|
186 |
+
[
|
187 |
+
"created_at",
|
188 |
+
"favorite_count",
|
189 |
+
"id",
|
190 |
+
"in_reply_to_status_id",
|
191 |
+
"in_reply_to_user_id",
|
192 |
+
"retweet_count",
|
193 |
+
"text",
|
194 |
+
"truncated",
|
195 |
+
"user.id",
|
196 |
+
],
|
197 |
+
gzip_compress=False,
|
198 |
+
)
|
199 |
+
|
200 |
+
assert files_are_identical(outfn, ref_fn)
|
201 |
+
|
202 |
+
|
203 |
+
def test_file_is_wrong(tmp_path, infile):
|
204 |
+
"""
|
205 |
+
Sanity check that file comparison is not giving false positives.
|
206 |
+
"""
|
207 |
+
ref_fn = subdir / "tweets.20150430-223406.retweet.csv.ref"
|
208 |
+
outfn = tmp_path / "tweets.20150430-223406.text.csv"
|
209 |
+
json2csv(infile, outfn, ["text"], gzip_compress=False)
|
210 |
+
assert not files_are_identical(outfn, ref_fn)
|
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_json_serialization.py
ADDED
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import unittest
|
2 |
+
|
3 |
+
from nltk.corpus import brown
|
4 |
+
from nltk.jsontags import JSONTaggedDecoder, JSONTaggedEncoder
|
5 |
+
from nltk.tag import (
|
6 |
+
AffixTagger,
|
7 |
+
BigramTagger,
|
8 |
+
BrillTagger,
|
9 |
+
BrillTaggerTrainer,
|
10 |
+
DefaultTagger,
|
11 |
+
NgramTagger,
|
12 |
+
PerceptronTagger,
|
13 |
+
RegexpTagger,
|
14 |
+
TrigramTagger,
|
15 |
+
UnigramTagger,
|
16 |
+
)
|
17 |
+
from nltk.tag.brill import nltkdemo18
|
18 |
+
|
19 |
+
|
20 |
+
class TestJSONSerialization(unittest.TestCase):
|
21 |
+
def setUp(self):
|
22 |
+
self.corpus = brown.tagged_sents()[:35]
|
23 |
+
self.decoder = JSONTaggedDecoder()
|
24 |
+
self.encoder = JSONTaggedEncoder()
|
25 |
+
self.default_tagger = DefaultTagger("NN")
|
26 |
+
|
27 |
+
def test_default_tagger(self):
|
28 |
+
encoded = self.encoder.encode(self.default_tagger)
|
29 |
+
decoded = self.decoder.decode(encoded)
|
30 |
+
|
31 |
+
self.assertEqual(repr(self.default_tagger), repr(decoded))
|
32 |
+
self.assertEqual(self.default_tagger._tag, decoded._tag)
|
33 |
+
|
34 |
+
def test_regexp_tagger(self):
|
35 |
+
tagger = RegexpTagger([(r".*", "NN")], backoff=self.default_tagger)
|
36 |
+
|
37 |
+
encoded = self.encoder.encode(tagger)
|
38 |
+
decoded = self.decoder.decode(encoded)
|
39 |
+
|
40 |
+
self.assertEqual(repr(tagger), repr(decoded))
|
41 |
+
self.assertEqual(repr(tagger.backoff), repr(decoded.backoff))
|
42 |
+
self.assertEqual(tagger._regexps, decoded._regexps)
|
43 |
+
|
44 |
+
def test_affix_tagger(self):
|
45 |
+
tagger = AffixTagger(self.corpus, backoff=self.default_tagger)
|
46 |
+
|
47 |
+
encoded = self.encoder.encode(tagger)
|
48 |
+
decoded = self.decoder.decode(encoded)
|
49 |
+
|
50 |
+
self.assertEqual(repr(tagger), repr(decoded))
|
51 |
+
self.assertEqual(repr(tagger.backoff), repr(decoded.backoff))
|
52 |
+
self.assertEqual(tagger._affix_length, decoded._affix_length)
|
53 |
+
self.assertEqual(tagger._min_word_length, decoded._min_word_length)
|
54 |
+
self.assertEqual(tagger._context_to_tag, decoded._context_to_tag)
|
55 |
+
|
56 |
+
def test_ngram_taggers(self):
|
57 |
+
unitagger = UnigramTagger(self.corpus, backoff=self.default_tagger)
|
58 |
+
bitagger = BigramTagger(self.corpus, backoff=unitagger)
|
59 |
+
tritagger = TrigramTagger(self.corpus, backoff=bitagger)
|
60 |
+
ntagger = NgramTagger(4, self.corpus, backoff=tritagger)
|
61 |
+
|
62 |
+
encoded = self.encoder.encode(ntagger)
|
63 |
+
decoded = self.decoder.decode(encoded)
|
64 |
+
|
65 |
+
self.assertEqual(repr(ntagger), repr(decoded))
|
66 |
+
self.assertEqual(repr(tritagger), repr(decoded.backoff))
|
67 |
+
self.assertEqual(repr(bitagger), repr(decoded.backoff.backoff))
|
68 |
+
self.assertEqual(repr(unitagger), repr(decoded.backoff.backoff.backoff))
|
69 |
+
self.assertEqual(
|
70 |
+
repr(self.default_tagger), repr(decoded.backoff.backoff.backoff.backoff)
|
71 |
+
)
|
72 |
+
|
73 |
+
def test_perceptron_tagger(self):
|
74 |
+
tagger = PerceptronTagger(load=False)
|
75 |
+
tagger.train(self.corpus)
|
76 |
+
|
77 |
+
encoded = self.encoder.encode(tagger)
|
78 |
+
decoded = self.decoder.decode(encoded)
|
79 |
+
|
80 |
+
self.assertEqual(tagger.model.weights, decoded.model.weights)
|
81 |
+
self.assertEqual(tagger.tagdict, decoded.tagdict)
|
82 |
+
self.assertEqual(tagger.classes, decoded.classes)
|
83 |
+
|
84 |
+
def test_brill_tagger(self):
|
85 |
+
trainer = BrillTaggerTrainer(
|
86 |
+
self.default_tagger, nltkdemo18(), deterministic=True
|
87 |
+
)
|
88 |
+
tagger = trainer.train(self.corpus, max_rules=30)
|
89 |
+
|
90 |
+
encoded = self.encoder.encode(tagger)
|
91 |
+
decoded = self.decoder.decode(encoded)
|
92 |
+
|
93 |
+
self.assertEqual(repr(tagger._initial_tagger), repr(decoded._initial_tagger))
|
94 |
+
self.assertEqual(tagger._rules, decoded._rules)
|
95 |
+
self.assertEqual(tagger._training_stats, decoded._training_stats)
|
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_rte_classify.py
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
|
3 |
+
from nltk import config_megam
|
4 |
+
from nltk.classify.rte_classify import RTEFeatureExtractor, rte_classifier, rte_features
|
5 |
+
from nltk.corpus import rte as rte_corpus
|
6 |
+
|
7 |
+
expected_from_rte_feature_extration = """
|
8 |
+
alwayson => True
|
9 |
+
ne_hyp_extra => 0
|
10 |
+
ne_overlap => 1
|
11 |
+
neg_hyp => 0
|
12 |
+
neg_txt => 0
|
13 |
+
word_hyp_extra => 3
|
14 |
+
word_overlap => 3
|
15 |
+
|
16 |
+
alwayson => True
|
17 |
+
ne_hyp_extra => 0
|
18 |
+
ne_overlap => 1
|
19 |
+
neg_hyp => 0
|
20 |
+
neg_txt => 0
|
21 |
+
word_hyp_extra => 2
|
22 |
+
word_overlap => 1
|
23 |
+
|
24 |
+
alwayson => True
|
25 |
+
ne_hyp_extra => 1
|
26 |
+
ne_overlap => 1
|
27 |
+
neg_hyp => 0
|
28 |
+
neg_txt => 0
|
29 |
+
word_hyp_extra => 1
|
30 |
+
word_overlap => 2
|
31 |
+
|
32 |
+
alwayson => True
|
33 |
+
ne_hyp_extra => 1
|
34 |
+
ne_overlap => 0
|
35 |
+
neg_hyp => 0
|
36 |
+
neg_txt => 0
|
37 |
+
word_hyp_extra => 6
|
38 |
+
word_overlap => 2
|
39 |
+
|
40 |
+
alwayson => True
|
41 |
+
ne_hyp_extra => 1
|
42 |
+
ne_overlap => 0
|
43 |
+
neg_hyp => 0
|
44 |
+
neg_txt => 0
|
45 |
+
word_hyp_extra => 4
|
46 |
+
word_overlap => 0
|
47 |
+
|
48 |
+
alwayson => True
|
49 |
+
ne_hyp_extra => 1
|
50 |
+
ne_overlap => 0
|
51 |
+
neg_hyp => 0
|
52 |
+
neg_txt => 0
|
53 |
+
word_hyp_extra => 3
|
54 |
+
word_overlap => 1
|
55 |
+
"""
|
56 |
+
|
57 |
+
|
58 |
+
class TestRTEClassifier:
|
59 |
+
# Test the feature extraction method.
|
60 |
+
def test_rte_feature_extraction(self):
|
61 |
+
pairs = rte_corpus.pairs(["rte1_dev.xml"])[:6]
|
62 |
+
test_output = [
|
63 |
+
f"{key:<15} => {rte_features(pair)[key]}"
|
64 |
+
for pair in pairs
|
65 |
+
for key in sorted(rte_features(pair))
|
66 |
+
]
|
67 |
+
expected_output = expected_from_rte_feature_extration.strip().split("\n")
|
68 |
+
# Remove null strings.
|
69 |
+
expected_output = list(filter(None, expected_output))
|
70 |
+
assert test_output == expected_output
|
71 |
+
|
72 |
+
# Test the RTEFeatureExtractor object.
|
73 |
+
def test_feature_extractor_object(self):
|
74 |
+
rtepair = rte_corpus.pairs(["rte3_dev.xml"])[33]
|
75 |
+
extractor = RTEFeatureExtractor(rtepair)
|
76 |
+
|
77 |
+
assert extractor.hyp_words == {"member", "China", "SCO."}
|
78 |
+
assert extractor.overlap("word") == set()
|
79 |
+
assert extractor.overlap("ne") == {"China"}
|
80 |
+
assert extractor.hyp_extra("word") == {"member"}
|
81 |
+
|
82 |
+
# Test the RTE classifier training.
|
83 |
+
def test_rte_classification_without_megam(self):
|
84 |
+
# Use a sample size for unit testing, since we
|
85 |
+
# don't need to fully train these classifiers
|
86 |
+
clf = rte_classifier("IIS", sample_N=100)
|
87 |
+
clf = rte_classifier("GIS", sample_N=100)
|
88 |
+
|
89 |
+
def test_rte_classification_with_megam(self):
|
90 |
+
try:
|
91 |
+
config_megam()
|
92 |
+
except (LookupError, AttributeError) as e:
|
93 |
+
pytest.skip("Skipping tests with dependencies on MEGAM")
|
94 |
+
clf = rte_classifier("megam", sample_N=100)
|
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_seekable_unicode_stream_reader.py
ADDED
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from io import BytesIO
|
3 |
+
|
4 |
+
import pytest
|
5 |
+
|
6 |
+
from nltk.corpus.reader import SeekableUnicodeStreamReader
|
7 |
+
|
8 |
+
|
9 |
+
def check_reader(unicode_string, encoding):
|
10 |
+
bytestr = unicode_string.encode(encoding)
|
11 |
+
stream = BytesIO(bytestr)
|
12 |
+
reader = SeekableUnicodeStreamReader(stream, encoding)
|
13 |
+
|
14 |
+
# Should open at the start of the file
|
15 |
+
assert reader.tell() == 0
|
16 |
+
|
17 |
+
# Compare original string to contents from `.readlines()`
|
18 |
+
assert unicode_string == "".join(reader.readlines())
|
19 |
+
|
20 |
+
# Should be at the end of the file now
|
21 |
+
stream.seek(0, os.SEEK_END)
|
22 |
+
assert reader.tell() == stream.tell()
|
23 |
+
|
24 |
+
reader.seek(0) # go back to start
|
25 |
+
|
26 |
+
# Compare original string to contents from `.read()`
|
27 |
+
contents = ""
|
28 |
+
char = None
|
29 |
+
while char != "":
|
30 |
+
char = reader.read(1)
|
31 |
+
contents += char
|
32 |
+
assert unicode_string == contents
|
33 |
+
|
34 |
+
|
35 |
+
# Call `check_reader` with a variety of input strings and encodings.
|
36 |
+
ENCODINGS = ["ascii", "latin1", "greek", "hebrew", "utf-16", "utf-8"]
|
37 |
+
|
38 |
+
STRINGS = [
|
39 |
+
"""
|
40 |
+
This is a test file.
|
41 |
+
It is fairly short.
|
42 |
+
""",
|
43 |
+
"This file can be encoded with latin1. \x83",
|
44 |
+
"""\
|
45 |
+
This is a test file.
|
46 |
+
Here's a blank line:
|
47 |
+
|
48 |
+
And here's some unicode: \xee \u0123 \uffe3
|
49 |
+
""",
|
50 |
+
"""\
|
51 |
+
This is a test file.
|
52 |
+
Unicode characters: \xf3 \u2222 \u3333\u4444 \u5555
|
53 |
+
""",
|
54 |
+
"""\
|
55 |
+
This is a larger file. It has some lines that are longer \
|
56 |
+
than 72 characters. It's got lots of repetition. Here's \
|
57 |
+
some unicode chars: \xee \u0123 \uffe3 \ueeee \u2345
|
58 |
+
|
59 |
+
How fun! Let's repeat it twenty times.
|
60 |
+
"""
|
61 |
+
* 20,
|
62 |
+
]
|
63 |
+
|
64 |
+
|
65 |
+
@pytest.mark.parametrize("string", STRINGS)
|
66 |
+
def test_reader(string):
|
67 |
+
for encoding in ENCODINGS:
|
68 |
+
# skip strings that can't be encoded with the current encoding
|
69 |
+
try:
|
70 |
+
string.encode(encoding)
|
71 |
+
except UnicodeEncodeError:
|
72 |
+
continue
|
73 |
+
check_reader(string, encoding)
|
74 |
+
|
75 |
+
|
76 |
+
def test_reader_stream_closes_when_deleted():
|
77 |
+
reader = SeekableUnicodeStreamReader(BytesIO(b""), "ascii")
|
78 |
+
assert not reader.stream.closed
|
79 |
+
reader.__del__()
|
80 |
+
assert reader.stream.closed
|
81 |
+
|
82 |
+
|
83 |
+
def teardown_module(module=None):
|
84 |
+
import gc
|
85 |
+
|
86 |
+
gc.collect()
|
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_tokenize.py
ADDED
@@ -0,0 +1,867 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Unit tests for nltk.tokenize.
|
3 |
+
See also nltk/test/tokenize.doctest
|
4 |
+
"""
|
5 |
+
from typing import List, Tuple
|
6 |
+
|
7 |
+
import pytest
|
8 |
+
|
9 |
+
from nltk.tokenize import (
|
10 |
+
LegalitySyllableTokenizer,
|
11 |
+
StanfordSegmenter,
|
12 |
+
SyllableTokenizer,
|
13 |
+
TreebankWordTokenizer,
|
14 |
+
TweetTokenizer,
|
15 |
+
punkt,
|
16 |
+
sent_tokenize,
|
17 |
+
word_tokenize,
|
18 |
+
)
|
19 |
+
|
20 |
+
|
21 |
+
def load_stanford_segmenter():
|
22 |
+
try:
|
23 |
+
seg = StanfordSegmenter()
|
24 |
+
seg.default_config("ar")
|
25 |
+
seg.default_config("zh")
|
26 |
+
return True
|
27 |
+
except LookupError:
|
28 |
+
return False
|
29 |
+
|
30 |
+
|
31 |
+
check_stanford_segmenter = pytest.mark.skipif(
|
32 |
+
not load_stanford_segmenter(),
|
33 |
+
reason="NLTK was unable to find stanford-segmenter.jar.",
|
34 |
+
)
|
35 |
+
|
36 |
+
|
37 |
+
class TestTokenize:
|
38 |
+
def test_tweet_tokenizer(self):
|
39 |
+
"""
|
40 |
+
Test TweetTokenizer using words with special and accented characters.
|
41 |
+
"""
|
42 |
+
|
43 |
+
tokenizer = TweetTokenizer(strip_handles=True, reduce_len=True)
|
44 |
+
s9 = "@myke: Let's test these words: resumé España München français"
|
45 |
+
tokens = tokenizer.tokenize(s9)
|
46 |
+
expected = [
|
47 |
+
":",
|
48 |
+
"Let's",
|
49 |
+
"test",
|
50 |
+
"these",
|
51 |
+
"words",
|
52 |
+
":",
|
53 |
+
"resumé",
|
54 |
+
"España",
|
55 |
+
"München",
|
56 |
+
"français",
|
57 |
+
]
|
58 |
+
assert tokens == expected
|
59 |
+
|
60 |
+
    @pytest.mark.parametrize(
        "test_input, expecteds",
        [
            (
                "My text 0106404243030 is great text",
                (
                    ["My", "text", "01064042430", "30", "is", "great", "text"],
                    ["My", "text", "0106404243030", "is", "great", "text"],
                ),
            ),
            (
                "My ticket id is 1234543124123",
                (
                    ["My", "ticket", "id", "is", "12345431241", "23"],
                    ["My", "ticket", "id", "is", "1234543124123"],
                ),
            ),
            (
                "@remy: This is waaaaayyyy too much for you!!!!!! 01064042430",
                (
                    [
                        ":",
                        "This",
                        "is",
                        "waaayyy",
                        "too",
                        "much",
                        "for",
                        "you",
                        "!",
                        "!",
                        "!",
                        "01064042430",
                    ],
                    [
                        ":",
                        "This",
                        "is",
                        "waaayyy",
                        "too",
                        "much",
                        "for",
                        "you",
                        "!",
                        "!",
                        "!",
                        "01064042430",
                    ],
                ),
            ),
            # Further tests from https://github.com/nltk/nltk/pull/2798#issuecomment-922533085,
            # showing the TweetTokenizer performance for `match_phone_numbers=True` and
            # `match_phone_numbers=False`.
            (
                # Some phone numbers are always tokenized, even with `match_phone_numbers=`False`
                "My number is 06-46124080, except it's not.",
                (
                    [
                        "My",
                        "number",
                        "is",
                        "06-46124080",
                        ",",
                        "except",
                        "it's",
                        "not",
                        ".",
                    ],
                    [
                        "My",
                        "number",
                        "is",
                        "06-46124080",
                        ",",
                        "except",
                        "it's",
                        "not",
                        ".",
                    ],
                ),
            ),
            (
                # Phone number here is only tokenized correctly if `match_phone_numbers=True`
                "My number is 601-984-4813, except it's not.",
                (
                    [
                        "My",
                        "number",
                        "is",
                        "601-984-4813",
                        ",",
                        "except",
                        "it's",
                        "not",
                        ".",
                    ],
                    [
                        "My",
                        "number",
                        "is",
                        "601-984-",
                        "4813",
                        ",",
                        "except",
                        "it's",
                        "not",
                        ".",
                    ],
                ),
            ),
            (
                # Phone number here is only tokenized correctly if `match_phone_numbers=True`
                "My number is (393) 928 -3010, except it's not.",
                (
                    [
                        "My",
                        "number",
                        "is",
                        "(393) 928 -3010",
                        ",",
                        "except",
                        "it's",
                        "not",
                        ".",
                    ],
                    [
                        "My",
                        "number",
                        "is",
                        "(",
                        "393",
                        ")",
                        "928",
                        "-",
                        "3010",
                        ",",
                        "except",
                        "it's",
                        "not",
                        ".",
                    ],
                ),
            ),
            (
                # A long number is tokenized correctly only if `match_phone_numbers=False`
                "The product identification number is 48103284512.",
                (
                    [
                        "The",
                        "product",
                        "identification",
                        "number",
                        "is",
                        "4810328451",
                        "2",
                        ".",
                    ],
                    [
                        "The",
                        "product",
                        "identification",
                        "number",
                        "is",
                        "48103284512",
                        ".",
                    ],
                ),
            ),
            (
                # `match_phone_numbers=True` can have some unforeseen
                "My favourite substraction is 240 - 1353.",
                (
                    ["My", "favourite", "substraction", "is", "240 - 1353", "."],
                    ["My", "favourite", "substraction", "is", "240", "-", "1353", "."],
                ),
            ),
        ],
    )
    def test_tweet_tokenizer_expanded(
        self, test_input: str, expecteds: Tuple[List[str], List[str]]
    ):
        """
        Test `match_phone_numbers` in TweetTokenizer.

        Note that TweetTokenizer is also passed the following for these tests:
        * strip_handles=True
        * reduce_len=True

        :param test_input: The input string to tokenize using TweetTokenizer.
        :type test_input: str
        :param expecteds: A 2-tuple of tokenized sentences. The first of the two
            tokenized is the expected output of tokenization with `match_phone_numbers=True`.
            The second of the two tokenized lists is the expected output of tokenization
            with `match_phone_numbers=False`.
        :type expecteds: Tuple[List[str], List[str]]
        """
        # Each parametrized case is exercised twice: first expecting the
        # match_phone_numbers=True output, then the False output.
        for match_phone_numbers, expected in zip([True, False], expecteds):
            tokenizer = TweetTokenizer(
                strip_handles=True,
                reduce_len=True,
                match_phone_numbers=match_phone_numbers,
            )
            predicted = tokenizer.tokenize(test_input)
            assert predicted == expected
|
264 |
+
|
265 |
+
def test_sonority_sequencing_syllable_tokenizer(self):
|
266 |
+
"""
|
267 |
+
Test SyllableTokenizer tokenizer.
|
268 |
+
"""
|
269 |
+
tokenizer = SyllableTokenizer()
|
270 |
+
tokens = tokenizer.tokenize("justification")
|
271 |
+
assert tokens == ["jus", "ti", "fi", "ca", "tion"]
|
272 |
+
|
273 |
+
def test_syllable_tokenizer_numbers(self):
|
274 |
+
"""
|
275 |
+
Test SyllableTokenizer tokenizer.
|
276 |
+
"""
|
277 |
+
tokenizer = SyllableTokenizer()
|
278 |
+
text = "9" * 10000
|
279 |
+
tokens = tokenizer.tokenize(text)
|
280 |
+
assert tokens == [text]
|
281 |
+
|
282 |
+
def test_legality_principle_syllable_tokenizer(self):
|
283 |
+
"""
|
284 |
+
Test LegalitySyllableTokenizer tokenizer.
|
285 |
+
"""
|
286 |
+
from nltk.corpus import words
|
287 |
+
|
288 |
+
test_word = "wonderful"
|
289 |
+
tokenizer = LegalitySyllableTokenizer(words.words())
|
290 |
+
tokens = tokenizer.tokenize(test_word)
|
291 |
+
assert tokens == ["won", "der", "ful"]
|
292 |
+
|
293 |
+
    @check_stanford_segmenter
    def test_stanford_segmenter_arabic(self):
        """
        Test the Stanford Word Segmenter for Arabic (default config)
        """
        seg = StanfordSegmenter()
        seg.default_config("ar")
        sent = "يبحث علم الحاسوب استخدام الحوسبة بجميع اشكالها لحل المشكلات"
        # segment() returns one whitespace-joined string; the expected
        # output shows clitics split off from their host words.
        segmented_sent = seg.segment(sent.split())
        assert segmented_sent.split() == [
            "يبحث",
            "علم",
            "الحاسوب",
            "استخدام",
            "الحوسبة",
            "ب",
            "جميع",
            "اشكال",
            "ها",
            "ل",
            "حل",
            "المشكلات",
        ]
|
316 |
+
|
317 |
+
    @check_stanford_segmenter
    def test_stanford_segmenter_chinese(self):
        """
        Test the Stanford Word Segmenter for Chinese (default config)
        """
        seg = StanfordSegmenter()
        seg.default_config("zh")
        sent = "这是斯坦福中文分词器测试"
        # segment() returns one whitespace-joined string of words.
        segmented_sent = seg.segment(sent.split())
        assert segmented_sent.split() == ["这", "是", "斯坦福", "中文", "分词器", "测试"]
|
327 |
+
|
328 |
+
def test_phone_tokenizer(self):
|
329 |
+
"""
|
330 |
+
Test a string that resembles a phone number but contains a newline
|
331 |
+
"""
|
332 |
+
|
333 |
+
# Should be recognized as a phone number, albeit one with multiple spaces
|
334 |
+
tokenizer = TweetTokenizer()
|
335 |
+
test1 = "(393) 928 -3010"
|
336 |
+
expected = ["(393) 928 -3010"]
|
337 |
+
result = tokenizer.tokenize(test1)
|
338 |
+
assert result == expected
|
339 |
+
|
340 |
+
# Due to newline, first three elements aren't part of a phone number;
|
341 |
+
# fourth is
|
342 |
+
test2 = "(393)\n928 -3010"
|
343 |
+
expected = ["(", "393", ")", "928 -3010"]
|
344 |
+
result = tokenizer.tokenize(test2)
|
345 |
+
assert result == expected
|
346 |
+
|
347 |
+
    def test_emoji_tokenizer(self):
        """
        Test a string that contains Emoji ZWJ Sequences and skin tone modifier
        """
        # NOTE(review): the emoji literals below may contain U+200D (zero-width
        # joiner) characters that some renderers strip — verify against the
        # repository file before relying on the exact byte content.
        tokenizer = TweetTokenizer()

        # A Emoji ZWJ Sequences, they together build as a single emoji, should not be split.
        test1 = "👨👩👧👧"
        expected = ["👨👩👧👧"]
        result = tokenizer.tokenize(test1)
        assert result == expected

        # A Emoji with skin tone modifier, the two characters build a single emoji, should not be split.
        test2 = "👨🏿"
        expected = ["👨🏿"]
        result = tokenizer.tokenize(test2)
        assert result == expected

        # A string containing both skin tone modifier and ZWJ Sequences
        test3 = "🤔 🙈 me así, se😌 ds 💕👭👙 hello 👩🏾🎓 emoji hello 👨👩👦👦 how are 😊 you today🙅🏽🙅🏽"
        expected = [
            "🤔",
            "🙈",
            "me",
            "así",
            ",",
            "se",
            "😌",
            "ds",
            "💕",
            "👭",
            "👙",
            "hello",
            "👩🏾\u200d🎓",
            "emoji",
            "hello",
            "👨\u200d👩\u200d👦\u200d👦",
            "how",
            "are",
            "😊",
            "you",
            "today",
            "🙅🏽",
            "🙅🏽",
        ]
        result = tokenizer.tokenize(test3)
        assert result == expected

        # emoji flag sequences, including enclosed letter pairs
        # Expected behavior from #3034
        test4 = "🇦🇵🇵🇱🇪"
        expected = ["🇦🇵", "🇵🇱", "🇪"]
        result = tokenizer.tokenize(test4)
        assert result == expected

        test5 = "Hi 🇨🇦, 😍!!"
        expected = ["Hi", "🇨🇦", ",", "😍", "!", "!"]
        result = tokenizer.tokenize(test5)
        assert result == expected

        test6 = "<3 🇨🇦 🤝 🇵🇱 <3"
        expected = ["<3", "🇨🇦", "🤝", "🇵🇱", "<3"]
        result = tokenizer.tokenize(test6)
        assert result == expected
|
411 |
+
|
412 |
+
def test_pad_asterisk(self):
|
413 |
+
"""
|
414 |
+
Test padding of asterisk for word tokenization.
|
415 |
+
"""
|
416 |
+
text = "This is a, *weird sentence with *asterisks in it."
|
417 |
+
expected = [
|
418 |
+
"This",
|
419 |
+
"is",
|
420 |
+
"a",
|
421 |
+
",",
|
422 |
+
"*",
|
423 |
+
"weird",
|
424 |
+
"sentence",
|
425 |
+
"with",
|
426 |
+
"*",
|
427 |
+
"asterisks",
|
428 |
+
"in",
|
429 |
+
"it",
|
430 |
+
".",
|
431 |
+
]
|
432 |
+
assert word_tokenize(text) == expected
|
433 |
+
|
434 |
+
def test_pad_dotdot(self):
|
435 |
+
"""
|
436 |
+
Test padding of dotdot* for word tokenization.
|
437 |
+
"""
|
438 |
+
text = "Why did dotdot.. not get tokenized but dotdotdot... did? How about manydots....."
|
439 |
+
expected = [
|
440 |
+
"Why",
|
441 |
+
"did",
|
442 |
+
"dotdot",
|
443 |
+
"..",
|
444 |
+
"not",
|
445 |
+
"get",
|
446 |
+
"tokenized",
|
447 |
+
"but",
|
448 |
+
"dotdotdot",
|
449 |
+
"...",
|
450 |
+
"did",
|
451 |
+
"?",
|
452 |
+
"How",
|
453 |
+
"about",
|
454 |
+
"manydots",
|
455 |
+
".....",
|
456 |
+
]
|
457 |
+
assert word_tokenize(text) == expected
|
458 |
+
|
459 |
+
    def test_remove_handle(self):
        """
        Test remove_handle() from casual.py with specially crafted edge cases
        """
        tokenizer = TweetTokenizer(strip_handles=True)

        # Simple example. Handles with just numbers should be allowed
        test1 = "@twitter hello @twi_tter_. hi @12345 @123news"
        expected = ["hello", ".", "hi"]
        result = tokenizer.tokenize(test1)
        assert result == expected

        # Handles are allowed to follow any of the following characters
        test2 = "@n`@n~@n(@n)@n-@n=@n+@n\\@n|@n[@n]@n{@n}@n;@n:@n'@n\"@n/@n?@n.@n,@n<@n>@n @n\n@n ñ@n.ü@n.ç@n."
        expected = [
            "`",
            "~",
            "(",
            ")",
            "-",
            "=",
            "+",
            "\\",
            "|",
            "[",
            "]",
            "{",
            "}",
            ";",
            ":",
            "'",
            '"',
            "/",
            "?",
            ".",
            ",",
            "<",
            ">",
            "ñ",
            ".",
            "ü",
            ".",
            "ç",
            ".",
        ]
        result = tokenizer.tokenize(test2)
        assert result == expected

        # Handles are NOT allowed to follow any of the following characters
        test3 = "a@n j@n z@n A@n L@n Z@n 1@n 4@n 7@n 9@n 0@n _@n !@n @@n #@n $@n %@n &@n *@n"
        expected = [
            "a",
            "@n",
            "j",
            "@n",
            "z",
            "@n",
            "A",
            "@n",
            "L",
            "@n",
            "Z",
            "@n",
            "1",
            "@n",
            "4",
            "@n",
            "7",
            "@n",
            "9",
            "@n",
            "0",
            "@n",
            "_",
            "@n",
            "!",
            "@n",
            "@",
            "@n",
            "#",
            "@n",
            "$",
            "@n",
            "%",
            "@n",
            "&",
            "@n",
            "*",
            "@n",
        ]
        result = tokenizer.tokenize(test3)
        assert result == expected

        # Handles are allowed to precede the following characters
        test4 = "@n!a @n#a @n$a @n%a @n&a @n*a"
        expected = ["!", "a", "#", "a", "$", "a", "%", "a", "&", "a", "*", "a"]
        result = tokenizer.tokenize(test4)
        assert result == expected

        # Tests interactions with special symbols and multiple @
        test5 = "@n!@n @n#@n @n$@n @n%@n @n&@n @n*@n @n@n @@n @n@@n @n_@n @n7@n @nj@n"
        expected = [
            "!",
            "@n",
            "#",
            "@n",
            "$",
            "@n",
            "%",
            "@n",
            "&",
            "@n",
            "*",
            "@n",
            "@n",
            "@n",
            "@",
            "@n",
            "@n",
            "@",
            "@n",
            "@n_",
            "@n",
            "@n7",
            "@n",
            "@nj",
            "@n",
        ]
        result = tokenizer.tokenize(test5)
        assert result == expected

        # Tests that handles can have a max length of 15
        test6 = "@abcdefghijklmnopqrstuvwxyz @abcdefghijklmno1234 @abcdefghijklmno_ @abcdefghijklmnoendofhandle"
        expected = ["pqrstuvwxyz", "1234", "_", "endofhandle"]
        result = tokenizer.tokenize(test6)
        assert result == expected

        # Edge case where an @ comes directly after a long handle
        test7 = "@abcdefghijklmnop@abcde @abcdefghijklmno@abcde @abcdefghijklmno_@abcde @abcdefghijklmno5@abcde"
        expected = [
            "p",
            "@abcde",
            "@abcdefghijklmno",
            "@abcde",
            "_",
            "@abcde",
            "5",
            "@abcde",
        ]
        result = tokenizer.tokenize(test7)
        assert result == expected
|
611 |
+
|
612 |
+
    def test_treebank_span_tokenizer(self):
        """
        Test TreebankWordTokenizer.span_tokenize function
        """
        tokenizer = TreebankWordTokenizer()

        # Test case in the docstring
        # NOTE(review): the spans (37, 38) -> (40, 46) require two spaces
        # after "(York)." — verify the literal against the repository file;
        # a rendered view may have collapsed the double space.
        test1 = "Good muffins cost $3.88\nin New (York).  Please (buy) me\ntwo of them.\n(Thanks)."
        expected = [
            (0, 4),
            (5, 12),
            (13, 17),
            (18, 19),
            (19, 23),
            (24, 26),
            (27, 30),
            (31, 32),
            (32, 36),
            (36, 37),
            (37, 38),
            (40, 46),
            (47, 48),
            (48, 51),
            (51, 52),
            (53, 55),
            (56, 59),
            (60, 62),
            (63, 68),
            (69, 70),
            (70, 76),
            (76, 77),
            (77, 78),
        ]
        result = list(tokenizer.span_tokenize(test1))
        assert result == expected

        # Test case with double quotation
        test2 = 'The DUP is similar to the "religious right" in the United States and takes a hardline stance on social issues'
        expected = [
            (0, 3),
            (4, 7),
            (8, 10),
            (11, 18),
            (19, 21),
            (22, 25),
            (26, 27),
            (27, 36),
            (37, 42),
            (42, 43),
            (44, 46),
            (47, 50),
            (51, 57),
            (58, 64),
            (65, 68),
            (69, 74),
            (75, 76),
            (77, 85),
            (86, 92),
            (93, 95),
            (96, 102),
            (103, 109),
        ]
        result = list(tokenizer.span_tokenize(test2))
        assert result == expected

        # Test case with double qoutation as well as converted quotations
        test3 = "The DUP is similar to the \"religious right\" in the United States and takes a ``hardline'' stance on social issues"
        expected = [
            (0, 3),
            (4, 7),
            (8, 10),
            (11, 18),
            (19, 21),
            (22, 25),
            (26, 27),
            (27, 36),
            (37, 42),
            (42, 43),
            (44, 46),
            (47, 50),
            (51, 57),
            (58, 64),
            (65, 68),
            (69, 74),
            (75, 76),
            (77, 79),
            (79, 87),
            (87, 89),
            (90, 96),
            (97, 99),
            (100, 106),
            (107, 113),
        ]
        result = list(tokenizer.span_tokenize(test3))
        assert result == expected
|
708 |
+
|
709 |
+
def test_word_tokenize(self):
|
710 |
+
"""
|
711 |
+
Test word_tokenize function
|
712 |
+
"""
|
713 |
+
|
714 |
+
sentence = "The 'v', I've been fooled but I'll seek revenge."
|
715 |
+
expected = [
|
716 |
+
"The",
|
717 |
+
"'",
|
718 |
+
"v",
|
719 |
+
"'",
|
720 |
+
",",
|
721 |
+
"I",
|
722 |
+
"'ve",
|
723 |
+
"been",
|
724 |
+
"fooled",
|
725 |
+
"but",
|
726 |
+
"I",
|
727 |
+
"'ll",
|
728 |
+
"seek",
|
729 |
+
"revenge",
|
730 |
+
".",
|
731 |
+
]
|
732 |
+
assert word_tokenize(sentence) == expected
|
733 |
+
|
734 |
+
sentence = "'v' 're'"
|
735 |
+
expected = ["'", "v", "'", "'re", "'"]
|
736 |
+
assert word_tokenize(sentence) == expected
|
737 |
+
|
738 |
+
def test_punkt_pair_iter(self):
|
739 |
+
|
740 |
+
test_cases = [
|
741 |
+
("12", [("1", "2"), ("2", None)]),
|
742 |
+
("123", [("1", "2"), ("2", "3"), ("3", None)]),
|
743 |
+
("1234", [("1", "2"), ("2", "3"), ("3", "4"), ("4", None)]),
|
744 |
+
]
|
745 |
+
|
746 |
+
for (test_input, expected_output) in test_cases:
|
747 |
+
actual_output = [x for x in punkt._pair_iter(test_input)]
|
748 |
+
|
749 |
+
assert actual_output == expected_output
|
750 |
+
|
751 |
+
def test_punkt_pair_iter_handles_stop_iteration_exception(self):
|
752 |
+
# test input to trigger StopIteration from next()
|
753 |
+
it = iter([])
|
754 |
+
# call method under test and produce a generator
|
755 |
+
gen = punkt._pair_iter(it)
|
756 |
+
# unpack generator, ensure that no error is raised
|
757 |
+
list(gen)
|
758 |
+
|
759 |
+
def test_punkt_tokenize_words_handles_stop_iteration_exception(self):
|
760 |
+
obj = punkt.PunktBaseClass()
|
761 |
+
|
762 |
+
class TestPunktTokenizeWordsMock:
|
763 |
+
def word_tokenize(self, s):
|
764 |
+
return iter([])
|
765 |
+
|
766 |
+
obj._lang_vars = TestPunktTokenizeWordsMock()
|
767 |
+
# unpack generator, ensure that no error is raised
|
768 |
+
list(obj._tokenize_words("test"))
|
769 |
+
|
770 |
+
    def test_punkt_tokenize_custom_lang_vars(self):
        """With '।' (U+0964) registered as a sentence end, Bengali text splits."""

        # Create LangVars including a full stop end character as used in Bengali
        class BengaliLanguageVars(punkt.PunktLanguageVars):
            sent_end_chars = (".", "?", "!", "\u0964")

        obj = punkt.PunktSentenceTokenizer(lang_vars=BengaliLanguageVars())

        # We now expect these sentences to be split up into the individual sentences
        # NOTE(review): Bengali literals copied from a rendered view; verify
        # combining characters against the repository file.
        sentences = "উপরাষ্ট্রপতি শ্রী এম ভেঙ্কাইয়া নাইডু সোমবার আই আই টি দিল্লির হীরক জয়ন্তী উদযাপনের উদ্বোধন করেছেন। অনলাইনের মাধ্যমে এই অনুষ্ঠানে কেন্দ্রীয় মানব সম্পদ উন্নয়নমন্ত্রী শ্রী রমেশ পোখরিয়াল ‘নিশাঙ্ক’ উপস্থিত ছিলেন। এই উপলক্ষ্যে উপরাষ্ট্রপতি হীরকজয়ন্তীর লোগো এবং ২০৩০-এর জন্য প্রতিষ্ঠানের লক্ষ্য ও পরিকল্পনার নথি প্রকাশ করেছেন।"
        expected = [
            "উপরাষ্ট্রপতি শ্রী এম ভেঙ্কাইয়া নাইডু সোমবার আই আই টি দিল্লির হীরক জয়ন্তী উদযাপনের উদ্বোধন করেছেন।",
            "অনলাইনের মাধ্যমে এই অনুষ্ঠানে কেন্দ্রীয় মানব সম্পদ উন্নয়নমন্ত্রী শ্রী রমেশ পোখরিয়াল ‘নিশাঙ্ক’ উপস্থিত ছিলেন।",
            "এই উপলক্ষ্যে উপরাষ্ট্রপতি হীরকজয়ন্তীর লোগো এবং ২০৩০-এর জন্য প্রতিষ্ঠানের লক্ষ্য ও পরিকল্পনার নথি প্রকাশ করেছেন।",
        ]

        assert obj.tokenize(sentences) == expected
|
787 |
+
|
788 |
+
    def test_punkt_tokenize_no_custom_lang_vars(self):
        """Without custom lang vars, Bengali text stays as one sentence."""

        obj = punkt.PunktSentenceTokenizer()

        # We expect these sentences to not be split properly, as the Bengali full stop '।' is not included in the default language vars
        # NOTE(review): Bengali literals copied from a rendered view; verify
        # combining characters against the repository file.
        sentences = "উপরাষ্ট্রপতি শ্রী এম ভেঙ্কাইয়া নাইডু সোমবার আই আই টি দিল্লির হীরক জয়ন্তী উদযাপনের উদ্বোধন করেছেন। অনলাইনের মাধ্যমে এই অনুষ্ঠানে কেন্দ্রীয় মানব সম্পদ উন্নয়নমন্ত্রী শ্রী রমেশ পোখরিয়াল ‘নিশাঙ্ক’ উপস্থিত ছিলেন। এই উপলক্ষ্যে উপরাষ্ট্রপতি হীরকজয়ন্তীর লোগো এবং ২০৩০-এর জন্য প্রতিষ্ঠানের লক্ষ্য ও পরিকল্পনার নথি প্রকাশ করেছেন।"
        expected = [
            "উপরাষ্ট্রপতি শ্রী এম ভেঙ্কাইয়া নাইডু সোমবার আই আই টি দিল্লির হীরক জয়ন্তী উদযাপনের উদ্বোধন করেছেন। অনলাইনের মাধ্যমে এই অনুষ্ঠানে কেন্দ্রীয় মানব সম্পদ উন্নয়নমন্ত্রী শ্রী রমেশ পোখরিয়াল ‘নিশাঙ্ক’ উপস্থিত ছিলেন। এই উপলক্ষ্যে উপরাষ্ট্রপতি হীরকজয়ন্তীর লোগো এবং ২০৩০-এর জন্য প্রতিষ্ঠানের লক্ষ্য ও পরিকল্পনার নথি প্রকাশ করেছেন।"
        ]

        assert obj.tokenize(sentences) == expected
|
799 |
+
|
800 |
+
@pytest.mark.parametrize(
|
801 |
+
"input_text,n_sents,n_splits,lang_vars",
|
802 |
+
[
|
803 |
+
# Test debug_decisions on a text with two sentences, split by a dot.
|
804 |
+
("Subject: Some subject. Attachments: Some attachments", 2, 1),
|
805 |
+
# The sentence should be split into two sections,
|
806 |
+
# with one split and hence one decision.
|
807 |
+
# Test debug_decisions on a text with two sentences, split by an exclamation mark.
|
808 |
+
("Subject: Some subject! Attachments: Some attachments", 2, 1),
|
809 |
+
# The sentence should be split into two sections,
|
810 |
+
# with one split and hence one decision.
|
811 |
+
# Test debug_decisions on a text with one sentences,
|
812 |
+
# which is not split.
|
813 |
+
("This is just a normal sentence, just like any other.", 1, 0)
|
814 |
+
# Hence just 1
|
815 |
+
],
|
816 |
+
)
|
817 |
+
def punkt_debug_decisions(self, input_text, n_sents, n_splits, lang_vars=None):
|
818 |
+
tokenizer = punkt.PunktSentenceTokenizer()
|
819 |
+
if lang_vars != None:
|
820 |
+
tokenizer._lang_vars = lang_vars
|
821 |
+
|
822 |
+
assert len(tokenizer.tokenize(input_text)) == n_sents
|
823 |
+
assert len(list(tokenizer.debug_decisions(input_text))) == n_splits
|
824 |
+
|
825 |
+
def test_punkt_debug_decisions_custom_end(self):
|
826 |
+
# Test debug_decisions on a text with two sentences,
|
827 |
+
# split by a custom end character, based on Issue #2519
|
828 |
+
class ExtLangVars(punkt.PunktLanguageVars):
|
829 |
+
sent_end_chars = (".", "?", "!", "^")
|
830 |
+
|
831 |
+
self.punkt_debug_decisions(
|
832 |
+
"Subject: Some subject^ Attachments: Some attachments",
|
833 |
+
n_sents=2,
|
834 |
+
n_splits=1,
|
835 |
+
lang_vars=ExtLangVars(),
|
836 |
+
)
|
837 |
+
# The sentence should be split into two sections,
|
838 |
+
# with one split and hence one decision.
|
839 |
+
|
840 |
+
    @pytest.mark.parametrize(
        "sentences, expected",
        [
            (
                "this is a test. . new sentence.",
                ["this is a test.", ".", "new sentence."],
            ),
            ("This. . . That", ["This.", ".", ".", "That"]),
            ("This..... That", ["This..... That"]),
            ("This... That", ["This... That"]),
            ("This.. . That", ["This.. .", "That"]),
            ("This. .. That", ["This.", ".. That"]),
            ("This. ,. That", ["This.", ",.", "That"]),
            ("This!!! That", ["This!!!", "That"]),
            ("This! That", ["This!", "That"]),
            (
                "1. This is R .\n2. This is A .\n3. That's all",
                ["1.", "This is R .", "2.", "This is A .", "3.", "That's all"],
            ),
            (
                "1. This is R .\t2. This is A .\t3. That's all",
                ["1.", "This is R .", "2.", "This is A .", "3.", "That's all"],
            ),
            ("Hello.\tThere", ["Hello.", "There"]),
        ],
    )
    def test_sent_tokenize(self, sentences: str, expected: List[str]):
        """sent_tokenize edge cases around dots, ellipses, and whitespace."""
        assert sent_tokenize(sentences) == expected
|
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_wordnet.py
ADDED
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Unit tests for nltk.corpus.wordnet
|
3 |
+
See also nltk/test/wordnet.doctest
|
4 |
+
"""
|
5 |
+
import unittest
|
6 |
+
|
7 |
+
from nltk.corpus import wordnet as wn
|
8 |
+
from nltk.corpus import wordnet_ic as wnic
|
9 |
+
|
10 |
+
# Force the lazy corpus reader to load up front so the aliases below
# resolve to the real reader methods.
wn.ensure_loaded()
# Short aliases used throughout the test cases below.
S = wn.synset
L = wn.lemma
|
13 |
+
|
14 |
+
|
15 |
+
class WordnNetDemo(unittest.TestCase):
|
16 |
+
def test_retrieve_synset(self):
|
17 |
+
move_synset = S("go.v.21")
|
18 |
+
self.assertEqual(move_synset.name(), "move.v.15")
|
19 |
+
self.assertEqual(move_synset.lemma_names(), ["move", "go"])
|
20 |
+
self.assertEqual(
|
21 |
+
move_synset.definition(), "have a turn; make one's move in a game"
|
22 |
+
)
|
23 |
+
self.assertEqual(move_synset.examples(), ["Can I go now?"])
|
24 |
+
|
25 |
+
def test_retrieve_synsets(self):
|
26 |
+
self.assertEqual(sorted(wn.synsets("zap", pos="n")), [S("zap.n.01")])
|
27 |
+
self.assertEqual(
|
28 |
+
sorted(wn.synsets("zap", pos="v")),
|
29 |
+
[S("microwave.v.01"), S("nuke.v.01"), S("zap.v.01"), S("zap.v.02")],
|
30 |
+
)
|
31 |
+
|
32 |
+
def test_hyperhyponyms(self):
|
33 |
+
# Not every synset as hypernyms()
|
34 |
+
self.assertEqual(S("travel.v.01").hypernyms(), [])
|
35 |
+
self.assertEqual(S("travel.v.02").hypernyms(), [S("travel.v.03")])
|
36 |
+
self.assertEqual(S("travel.v.03").hypernyms(), [])
|
37 |
+
|
38 |
+
# Test hyper-/hyponyms.
|
39 |
+
self.assertEqual(S("breakfast.n.1").hypernyms(), [S("meal.n.01")])
|
40 |
+
first_five_meal_hypo = [
|
41 |
+
S("banquet.n.02"),
|
42 |
+
S("bite.n.04"),
|
43 |
+
S("breakfast.n.01"),
|
44 |
+
S("brunch.n.01"),
|
45 |
+
S("buffet.n.02"),
|
46 |
+
]
|
47 |
+
self.assertEqual(sorted(S("meal.n.1").hyponyms()[:5]), first_five_meal_hypo)
|
48 |
+
self.assertEqual(S("Austen.n.1").instance_hypernyms(), [S("writer.n.01")])
|
49 |
+
first_five_composer_hypo = [
|
50 |
+
S("ambrose.n.01"),
|
51 |
+
S("bach.n.01"),
|
52 |
+
S("barber.n.01"),
|
53 |
+
S("bartok.n.01"),
|
54 |
+
S("beethoven.n.01"),
|
55 |
+
]
|
56 |
+
self.assertEqual(
|
57 |
+
S("composer.n.1").instance_hyponyms()[:5], first_five_composer_hypo
|
58 |
+
)
|
59 |
+
|
60 |
+
# Test root hyper-/hyponyms
|
61 |
+
self.assertEqual(S("person.n.01").root_hypernyms(), [S("entity.n.01")])
|
62 |
+
self.assertEqual(S("sail.v.01").root_hypernyms(), [S("travel.v.01")])
|
63 |
+
self.assertEqual(
|
64 |
+
S("fall.v.12").root_hypernyms(), [S("act.v.01"), S("fall.v.17")]
|
65 |
+
)
|
66 |
+
|
67 |
+
def test_derivationally_related_forms(self):
|
68 |
+
# Test `derivationally_related_forms()`
|
69 |
+
self.assertEqual(
|
70 |
+
L("zap.v.03.nuke").derivationally_related_forms(),
|
71 |
+
[L("atomic_warhead.n.01.nuke")],
|
72 |
+
)
|
73 |
+
self.assertEqual(
|
74 |
+
L("zap.v.03.atomize").derivationally_related_forms(),
|
75 |
+
[L("atomization.n.02.atomization")],
|
76 |
+
)
|
77 |
+
self.assertEqual(
|
78 |
+
L("zap.v.03.atomise").derivationally_related_forms(),
|
79 |
+
[L("atomization.n.02.atomisation")],
|
80 |
+
)
|
81 |
+
self.assertEqual(L("zap.v.03.zap").derivationally_related_forms(), [])
|
82 |
+
|
83 |
+
def test_meronyms_holonyms(self):
|
84 |
+
# Test meronyms, holonyms.
|
85 |
+
self.assertEqual(
|
86 |
+
S("dog.n.01").member_holonyms(), [S("canis.n.01"), S("pack.n.06")]
|
87 |
+
)
|
88 |
+
self.assertEqual(S("dog.n.01").part_meronyms(), [S("flag.n.07")])
|
89 |
+
|
90 |
+
self.assertEqual(S("faculty.n.2").member_meronyms(), [S("professor.n.01")])
|
91 |
+
self.assertEqual(S("copilot.n.1").member_holonyms(), [S("crew.n.01")])
|
92 |
+
|
93 |
+
self.assertEqual(
|
94 |
+
S("table.n.2").part_meronyms(),
|
95 |
+
[S("leg.n.03"), S("tabletop.n.01"), S("tableware.n.01")],
|
96 |
+
)
|
97 |
+
self.assertEqual(S("course.n.7").part_holonyms(), [S("meal.n.01")])
|
98 |
+
|
99 |
+
self.assertEqual(
|
100 |
+
S("water.n.1").substance_meronyms(), [S("hydrogen.n.01"), S("oxygen.n.01")]
|
101 |
+
)
|
102 |
+
self.assertEqual(
|
103 |
+
S("gin.n.1").substance_holonyms(),
|
104 |
+
[
|
105 |
+
S("gin_and_it.n.01"),
|
106 |
+
S("gin_and_tonic.n.01"),
|
107 |
+
S("martini.n.01"),
|
108 |
+
S("pink_lady.n.01"),
|
109 |
+
],
|
110 |
+
)
|
111 |
+
|
112 |
+
def test_antonyms(self):
|
113 |
+
# Test antonyms.
|
114 |
+
self.assertEqual(
|
115 |
+
L("leader.n.1.leader").antonyms(), [L("follower.n.01.follower")]
|
116 |
+
)
|
117 |
+
self.assertEqual(
|
118 |
+
L("increase.v.1.increase").antonyms(), [L("decrease.v.01.decrease")]
|
119 |
+
)
|
120 |
+
|
121 |
+
def test_misc_relations(self):
|
122 |
+
# Test misc relations.
|
123 |
+
self.assertEqual(S("snore.v.1").entailments(), [S("sleep.v.01")])
|
124 |
+
self.assertEqual(
|
125 |
+
S("heavy.a.1").similar_tos(),
|
126 |
+
[
|
127 |
+
S("dense.s.03"),
|
128 |
+
S("doughy.s.01"),
|
129 |
+
S("heavier-than-air.s.01"),
|
130 |
+
S("hefty.s.02"),
|
131 |
+
S("massive.s.04"),
|
132 |
+
S("non-buoyant.s.01"),
|
133 |
+
S("ponderous.s.02"),
|
134 |
+
],
|
135 |
+
)
|
136 |
+
self.assertEqual(S("light.a.1").attributes(), [S("weight.n.01")])
|
137 |
+
self.assertEqual(S("heavy.a.1").attributes(), [S("weight.n.01")])
|
138 |
+
|
139 |
+
# Test pertainyms.
|
140 |
+
self.assertEqual(
|
141 |
+
L("English.a.1.English").pertainyms(), [L("england.n.01.England")]
|
142 |
+
)
|
143 |
+
|
144 |
+
def test_lch(self):
|
145 |
+
# Test LCH.
|
146 |
+
self.assertEqual(
|
147 |
+
S("person.n.01").lowest_common_hypernyms(S("dog.n.01")),
|
148 |
+
[S("organism.n.01")],
|
149 |
+
)
|
150 |
+
self.assertEqual(
|
151 |
+
S("woman.n.01").lowest_common_hypernyms(S("girlfriend.n.02")),
|
152 |
+
[S("woman.n.01")],
|
153 |
+
)
|
154 |
+
|
155 |
+
def test_domains(self):
|
156 |
+
# Test domains.
|
157 |
+
self.assertEqual(S("code.n.03").topic_domains(), [S("computer_science.n.01")])
|
158 |
+
self.assertEqual(S("pukka.a.01").region_domains(), [S("india.n.01")])
|
159 |
+
self.assertEqual(S("freaky.a.01").usage_domains(), [S("slang.n.02")])
|
160 |
+
|
161 |
+
def test_in_topic_domains(self):
|
162 |
+
# Test in domains.
|
163 |
+
self.assertEqual(
|
164 |
+
S("computer_science.n.01").in_topic_domains()[0], S("access.n.05")
|
165 |
+
)
|
166 |
+
self.assertEqual(S("germany.n.01").in_region_domains()[23], S("trillion.n.02"))
|
167 |
+
self.assertEqual(S("slang.n.02").in_usage_domains()[1], S("airhead.n.01"))
|
168 |
+
|
169 |
+
def test_wordnet_similarities(self):
|
170 |
+
# Path based similarities.
|
171 |
+
self.assertAlmostEqual(S("cat.n.01").path_similarity(S("cat.n.01")), 1.0)
|
172 |
+
self.assertAlmostEqual(S("dog.n.01").path_similarity(S("cat.n.01")), 0.2)
|
173 |
+
self.assertAlmostEqual(
|
174 |
+
S("car.n.01").path_similarity(S("automobile.v.01")),
|
175 |
+
S("automobile.v.01").path_similarity(S("car.n.01")),
|
176 |
+
)
|
177 |
+
self.assertAlmostEqual(
|
178 |
+
S("big.a.01").path_similarity(S("dog.n.01")),
|
179 |
+
S("dog.n.01").path_similarity(S("big.a.01")),
|
180 |
+
)
|
181 |
+
self.assertAlmostEqual(
|
182 |
+
S("big.a.01").path_similarity(S("long.a.01")),
|
183 |
+
S("long.a.01").path_similarity(S("big.a.01")),
|
184 |
+
)
|
185 |
+
self.assertAlmostEqual(
|
186 |
+
S("dog.n.01").lch_similarity(S("cat.n.01")), 2.028, places=3
|
187 |
+
)
|
188 |
+
self.assertAlmostEqual(
|
189 |
+
S("dog.n.01").wup_similarity(S("cat.n.01")), 0.8571, places=3
|
190 |
+
)
|
191 |
+
self.assertAlmostEqual(
|
192 |
+
S("car.n.01").wup_similarity(S("automobile.v.01")),
|
193 |
+
S("automobile.v.01").wup_similarity(S("car.n.01")),
|
194 |
+
)
|
195 |
+
self.assertAlmostEqual(
|
196 |
+
S("big.a.01").wup_similarity(S("dog.n.01")),
|
197 |
+
S("dog.n.01").wup_similarity(S("big.a.01")),
|
198 |
+
)
|
199 |
+
self.assertAlmostEqual(
|
200 |
+
S("big.a.01").wup_similarity(S("long.a.01")),
|
201 |
+
S("long.a.01").wup_similarity(S("big.a.01")),
|
202 |
+
)
|
203 |
+
self.assertAlmostEqual(
|
204 |
+
S("big.a.01").lch_similarity(S("long.a.01")),
|
205 |
+
S("long.a.01").lch_similarity(S("big.a.01")),
|
206 |
+
)
|
207 |
+
# Information Content similarities.
|
208 |
+
brown_ic = wnic.ic("ic-brown.dat")
|
209 |
+
self.assertAlmostEqual(
|
210 |
+
S("dog.n.01").jcn_similarity(S("cat.n.01"), brown_ic), 0.4497, places=3
|
211 |
+
)
|
212 |
+
semcor_ic = wnic.ic("ic-semcor.dat")
|
213 |
+
self.assertAlmostEqual(
|
214 |
+
S("dog.n.01").lin_similarity(S("cat.n.01"), semcor_ic), 0.8863, places=3
|
215 |
+
)
|
216 |
+
|
217 |
+
def test_omw_lemma_no_trailing_underscore(self):
|
218 |
+
expected = sorted(
|
219 |
+
[
|
220 |
+
"popolna_sprememba_v_mišljenju",
|
221 |
+
"popoln_obrat",
|
222 |
+
"preobrat",
|
223 |
+
"preobrat_v_mišljenju",
|
224 |
+
]
|
225 |
+
)
|
226 |
+
self.assertEqual(sorted(S("about-face.n.02").lemma_names(lang="slv")), expected)
|
227 |
+
|
228 |
+
def test_iterable_type_for_all_lemma_names(self):
|
229 |
+
# Duck-test for iterables.
|
230 |
+
# See https://stackoverflow.com/a/36230057/610569
|
231 |
+
cat_lemmas = wn.all_lemma_names(lang="cat")
|
232 |
+
eng_lemmas = wn.all_lemma_names(lang="eng")
|
233 |
+
|
234 |
+
self.assertTrue(hasattr(eng_lemmas, "__iter__"))
|
235 |
+
self.assertTrue(hasattr(eng_lemmas, "__next__") or hasattr(eng_lemmas, "next"))
|
236 |
+
self.assertTrue(eng_lemmas.__iter__() is eng_lemmas)
|
237 |
+
|
238 |
+
self.assertTrue(hasattr(cat_lemmas, "__iter__"))
|
239 |
+
self.assertTrue(hasattr(cat_lemmas, "__next__") or hasattr(eng_lemmas, "next"))
|
240 |
+
self.assertTrue(cat_lemmas.__iter__() is cat_lemmas)
|
llmeval-env/lib/python3.10/site-packages/nltk/twitter/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (793 Bytes). View file
|
|