File size: 12,934 Bytes
618fdc2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ec0918d
 
618fdc2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ec0918d
618fdc2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
# Copyright Jiaqi Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from typing import Callable

import yaml

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

"""
The language identifier for German language.

.. py:data:: GERMAN
   :value: German
   :type: string
"""
GERMAN = "German"

"""
The language identifier for Latin language.

.. py:data:: LATIN
   :value: Latin
   :type: string
"""
LATIN = "Latin"

"""
The language identifier for Ancient Greek.

.. py:data:: ANCIENT_GREEK
   :value: Ancient Greek
   :type: string
"""
ANCIENT_GREEK = "Ancient Greek"

EXCLUDED_INFLECTION_ENTRIES = [
    "",
    "singular",
    "plural",
    "masculine",
    "feminine",
    "neuter",
    "nominative",
    "genitive",
    "dative",
    "accusative",
    "N/A"
]

ENGLISH_PROPOSITIONS = {
    "about", "above", "across", "after", "against", "along", "among", "around", "at", "before", "behind", "below",
    "beneath", "beside", "between", "beyond", "by", "down", "during", "except", "for", "from", "in", "inside", "into",
    "like", "near", "of", "off", "on", "onto", "out", "outside", "over", "past", "since", "through", "throughout", "to",
    "toward", "under", "underneath", "until", "up", "upon", "with", "within", "without"
}
EXCLUDED_DEFINITION_TOKENS = {"the"} | ENGLISH_PROPOSITIONS


def get_vocabulary(yaml_path: str) -> list:
    """
    Load a wilhelm-vocabulary YAML file and return the list under its top-level "vocabulary" key.

    :param yaml_path:  The path to the YAML file on disk

    :return: the deserialized list of vocabulary entries (each a dict)

    :raises KeyError: if the YAML document has no top-level "vocabulary" key
    """
    # Explicit UTF-8: vocabulary files contain non-ASCII (umlauts, Greek); the platform default
    # encoding would mis-decode them on some systems.
    with open(yaml_path, "r", encoding="utf-8") as yaml_file:
        return yaml.safe_load(yaml_file)["vocabulary"]


def get_definitions(word) -> list[(str, str)]:
    """
    Extract definitions from a word as a list of bi-tuples: (predicate, definition).

    A leading parenthesized marker such as ``(adj.)`` or ``(adv.)`` is treated as the predicate;
    entries without one get a ``None`` predicate.

    For example (in YAML)::

    definition:
      - term: nämlich
        definition:
          - (adj.) same
          - (adv.) namely
          - because

    yields `[("adj.", "same"), ("adv.", "namely"), (None, "because")]`. A scalar definition such
    as ``definition: of course`` yields a single tuple `[(None, "of course")]`.

    Every definition value is converted to string before parsing.

    :param word:  A dictionary that contains a "definition" key whose value is either a
                  single value or a list of single values

    :return: a list of two-element tuples; the first element is the predicate (may be `None`),
             the second is the definition text

    :raises ValueError: if the word has no "definition" field
    """
    logging.info("Extracting definitions from {}".format(word))

    if "definition" not in word:
        raise ValueError("{} does not contain 'definition' field. Maybe there is a typo".format(word))

    raw = word["definition"]
    entries = raw if isinstance(raw, list) else [raw]

    parsed = []
    for entry in entries:
        text = str(entry).strip()

        leading_predicate = re.match(r"\((.*?)\)", text)
        if leading_predicate is None:
            parsed.append((None, text))
        else:
            # Strip every parenthesized group from the definition body, keeping the first as predicate.
            parsed.append((leading_predicate.group(1), re.sub(r'\(.*?\)', '', text).strip()))

    return parsed


def get_attributes(
        word: object,
        language: str,
        node_label_attribute_key: str,
        inflection_supplier: Callable[[object], dict]=lambda word: {}
) -> dict[str, str]:
    """
    Returns a flat map as the Term node properties stored in Neo4J.

    The map always contains the node caption (under ``node_label_attribute_key``) and the
    "language" entry, merged with the word's inflection table and its audio entry, if any.

    :param word:  A dict object representing a vocabulary
    :param language:  The language of the vocabulary. Can only be one of the constants defined in this file:
    :py:data:`GERMAN` / :py:data:`LATIN` / :py:data:`ANCIENT_GREEK`
    :param node_label_attribute_key:  The attribute key in the returned map whose value contains the node caption
    :param inflection_supplier:  A functional object that, given a YAML dictionary, returns the inflection table of that
    word. The key of the table can be arbitrary but the value must be a sole inflected word

    :return: a flat map containing all the YAML encoded information about the vocabulary
    """
    attributes = {node_label_attribute_key: word["term"], "language": language}
    attributes = attributes | inflection_supplier(word)
    return attributes | get_audio(word)


def get_audio(word: object) -> dict:
    """
    Returns the pronunciation of a word as a map from the key "audio" to a string URL pointing to
    the audio file.

    If the word dict carries no "audio" attribute, an empty map is returned instead.

    :param word:  A dict object representing a vocabulary

    :return: a single-entry map or empty map
    """
    if "audio" in word:
        return {"audio": word["audio"]}
    return {}


def get_inferred_links(
        vocabulary: list[dict],
        label_key: str,
        inflection_supplier: Callable[[object], dict[str, str]]=lambda word: {}
) -> list[dict]:
    """
    Return a list of inferred links between related vocabularies.

    This function is the point of extending link inference capabilities. At this point, the link inference includes

    - :py:meth:`token sharing <wilhelm_data_loader.vocabulary_parser.get_inferred_tokenization_links>`
    - :py:meth:`token sharing <wilhelm_data_loader.vocabulary_parser.get_levenshtein_links>`

    :param vocabulary:  A wilhelm-vocabulary repo YAML file deserialized
    :param label_key:  The name of the node attribute that will be used as the label in displaying the node
    :param inflection_supplier:  A functional object that, given a YAML dictionary, returns the inflection table of that
    word. The key of the table can be arbitrary but the value must be a sole inflected word

    :return: a list of link object, each of which has a "source_label", a "target_label", and an "attributes" key
    """
    tokenization_links = get_inferred_tokenization_links(vocabulary, label_key, inflection_supplier)
    similarity_links = get_structurally_similar_links(vocabulary, label_key)
    return tokenization_links + similarity_links


def get_definition_tokens(word: dict) -> set[str]:
    """
    Tokenize all definitions of a word into a set of lowercase tokens, excluding common English
    function words (the article "the" and prepositions) that carry no linking signal.

    :param word:  A dict object representing a vocabulary; must contain a "definition" key

    :return: a set of lowercased, whitespace-trimmed definition tokens

    :raises ValueError: if the word has no "definition" field (propagated from get_definitions)
    """
    tokens = set()

    for _, definition in get_definitions(word):
        # split() with no argument splits on runs of whitespace and never yields empty strings,
        # unlike split(" ") which emits "" tokens on repeated spaces.
        for token in definition.split():
            cleansed = token.lower().strip()
            if cleansed not in EXCLUDED_DEFINITION_TOKENS:
                tokens.add(cleansed)

    return tokens


def get_term_tokens(word: dict) -> set[str]:
    """
    Tokenize the "term" of a word into a set of lowercase tokens, dropping the German definite
    articles (der/die/das).

    :param word:  A dict object representing a vocabulary; must contain a "term" key

    :return: a set of lowercased, whitespace-trimmed term tokens
    """
    articles = {"der", "die", "das"}
    return {
        cleansed
        for token in word["term"].split(" ")
        if (cleansed := token.lower().strip()) not in articles
    }


def get_inflection_tokens(
        word: dict,
        inflection_supplier: Callable[[object], dict[str, str]]=lambda word: {}
) -> set[str]:
    """
    Tokenize a word's inflection table into a set of lowercase inflected forms.

    Structural table cells (headers, empty cells, "N/A") are skipped; comma-separated cells such
    as "Jahre, Jahr" are split into individual tokens.

    :param word:  A dict object representing a vocabulary
    :param inflection_supplier:  A functional object that, given a YAML dictionary, returns the
    inflection table of that word

    :return: a set of lowercased, whitespace-trimmed inflected forms
    """
    tokens = set()

    for cell in inflection_supplier(word).values():
        if cell in EXCLUDED_INFLECTION_ENTRIES:
            continue
        tokens.update(form.lower().strip() for form in cell.split(","))

    return tokens


def get_tokens_of(word: dict, inflection_supplier: Callable[[object], dict[str, str]]=lambda word: {}) -> set[str]:
    """
    Return the union of a word's inflection tokens, term tokens, and definition tokens.

    :param word:  A dict object representing a vocabulary
    :param inflection_supplier:  A functional object that, given a YAML dictionary, returns the
    inflection table of that word

    :return: a set of all lowercase tokens associated with the word
    """
    return set().union(
        get_inflection_tokens(word, inflection_supplier),
        get_term_tokens(word),
        get_definition_tokens(word),
    )


def get_inferred_tokenization_links(
        vocabulary: list[dict],
        label_key: str,
        inflection_supplier: Callable[[object], dict[str, str]]=lambda word: {}
) -> list[dict]:
    """
    Return a list of inferred links between related vocabulary terms which are related to one another.

    This mapping will be used to create more links in graph database.

    This was inspired by the spotting the relationships among::

        vocabulary:
          - term: das Jahr
            definition: the year
            declension:
              - ["",         singular,        plural        ]
              - [nominative, Jahr,            "Jahre, Jahr" ]
              - [genitive,   "Jahres, Jahrs", "Jahre, Jahr" ]
              - [dative,     Jahr,            "Jahren, Jahr"]
              - [accusative, Jahr,            "Jahre, Jahr" ]
          - term: seit zwei Jahren
            definition: for two years
          - term: in den letzten Jahren
            definition: in recent years

    1. Both 2nd and 3rd are related to the 1st and the two links can be inferred by observing that "Jahren" in 2nd and
       3rd match the declension table of the 1st
    2. In addition, the 2nd and 3rd are related because they both have "Jahren".

    Given the 2 observations above, this function tokenizes the "term" and the declension table of each word. If two
    words share at least 1 token, they are defined to be "related"

    :param vocabulary:  A wilhelm-vocabulary repo YAML file deserialized
    :param label_key:  The name of the node attribute that will be used as the label in displaying the node
    :param inflection_supplier:  A functional object that, given a YAML dictionary, returns the inflection table of that
    word. The key of the table can be arbitrary but the value must be a sole inflected word

    :return: a list of link object, each of which has a "source_label", a "target_label", and an "attributes" key
    """
    # Pre-compute the full token set (inflections + term + definitions) of every word once,
    # keyed by its term, so the pairwise scan below does not re-tokenize on every comparison.
    all_vocabulary_tokenizations_by_term = dict(
        [word["term"], get_tokens_of(word, inflection_supplier)] for word in vocabulary)
    inferred_links = []
    for this_word in vocabulary:
        this_term = this_word["term"]

        for that_term, that_term_tokens in all_vocabulary_tokenizations_by_term.items():
            jump_to_next_term = False

            # A term is never linked to itself.
            if this_term == that_term:
                continue

            # Only the source's *term* tokens are compared against the target's full token set,
            # so links are directional: both (a, b) and (b, a) may be emitted over the two passes.
            for this_token in get_term_tokens(this_word):
                for that_token in that_term_tokens:
                    if this_token.lower().strip() == that_token:
                        inferred_links.append({
                            "source_label": this_term,
                            "target_label": that_term,
                            "attributes": {label_key: "term related"},
                        })
                        # Emit at most one link per (this, that) pair: flag + double break
                        # escapes both token loops and moves on to the next target term.
                        jump_to_next_term = True
                        break

                if jump_to_next_term:
                    break

    return inferred_links


def get_structurally_similar_links(vocabulary: list[dict], label_key: str) -> list[dict]:
    """
    Return a list of inferred links between structurally-related vocabulary terms that are determined by the function
    :py:meth:`token sharing <wilhelm_data_loader.vocabulary_parser.is_structurally_similar>`.

    This was inspired by the spotting the relationships among::

        vocabulary:
          - term: anschließen
            definition: to connect
          - term: anschließend
            definition:
              - (adj.) following
              - (adv.) afterwards
          - term: nachher
            definition: (adv.) afterwards

    :param vocabulary:  A wilhelm-vocabulary repo YAML file deserialized
    :param label_key:  The name of the node attribute that will be used as the label in displaying the node

    :return: a list of link object, each of which has a "source_label", a "target_label", and an "attributes" key
    """
    # Full pairwise scan; is_structurally_similar rejects self-comparisons itself.
    return [
        {
            "source_label": source["term"],
            "target_label": target["term"],
            "attributes": {label_key: "structurally similar"},
        }
        for source in vocabulary
        for target in vocabulary
        if is_structurally_similar(source["term"], target["term"])
    ]


def is_structurally_similar(this_word: str, that_word: str) -> bool:
    """
    Returns whether or not two string words are structurally similar.

    Two words are structurally similar iff the two share the same word stem. If two word strings are equal, this
    function returns `False`.

    :param this_word:  The first word to compare structurally
    :param that_word:  The second word to compare structurally

    :return: `True` if two words are structurally similar, or `False` otherwise
    """
    # Compare by value, not identity: the previous `is` check let two equal-but-distinct string
    # objects fall through and report a word as structurally similar to itself.
    if this_word == that_word:
        return False

    return get_stem(this_word) == get_stem(that_word)


def get_stem(word: str) -> str:
    """
    Return the stem of a word as computed by the NLTK German Snowball stemmer.

    NOTE(review): the German stemmer is applied regardless of the word's language, so Latin and
    Ancient Greek terms are stemmed with German rules — confirm this is intended.

    :param word:  The word to stem

    :return: the stemmed form of the word
    """
    from nltk.stem.snowball import GermanStemmer

    # Link inference calls this once per vocabulary pair (O(n^2)); cache a single stemmer
    # instance on the function object instead of constructing one per call.
    stemmer = getattr(get_stem, "_stemmer", None)
    if stemmer is None:
        stemmer = get_stem._stemmer = GermanStemmer()
    return stemmer.stem(word)