Lazy-Val committed on
Commit a38b597 · verified · 1 Parent(s): 30d0611

Update spaCy pipeline

it_trf_nrp-any-py3-none-any.whl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e1c331f6965c59e36deb2bb546991791a0055e2abd1c39a00bda81d5590bdacd
-size 825600865
+oid sha256:630a2f2e11e2171e974dd9765e6d2bc3a5a42582cf5ebb8247dd803f8925997e
+size 825604584
use_custom_tokenizer.py CHANGED
@@ -1,12 +1,168 @@
-from spacy.util import registry
+import re
 from spacy.tokenizer import Tokenizer
-import pathlib
+from spacy.util import compile_infix_regex, compile_prefix_regex, compile_suffix_regex, registry
+from spacy.symbols import ORTH
 
 @registry.tokenizers("customize_tokenizer")
 def make_customize_tokenizer():
     def customize_tokenizer(nlp):
-        tokenizer = Tokenizer(nlp.vocab)
-        script_dir = pathlib.Path(__file__).parent.resolve()
-        return tokenizer.from_disk(script_dir / "tokenizer")
+        return custom_tokenizer(nlp)
 
     return customize_tokenizer
+
+# File included for bundling
+# spacy/custom_tokenizer/custom_tokenizer.py
+EXTENDED_LETTER_RANGE = "A-Za-zäöüÄÖÜàòèéìù"
+DATE = r"[0-3][1-9]\.[0-1][1-9]\.[1-2][0-9]{3}"
+TOP_LEVEL_DOMAINS = "ch|at|de|com|edu|org|gov|net|fr|uk|be|es|pl|it|eu|nl|ba|cz|dk|al|ad|bg|by|fi|gr|ie|li|lu|no|pt|ro|rs|ru|se|si|sk"
+
+DOT_AFTER_WORD = [
+    rf"(?<!www\.)(?<=([a-zA-ZäöüÄÖÜ]){{{i}}})\.(?!({TOP_LEVEL_DOMAINS}))"
+    for i in range(3, 30)
+]
+
+DOT_AFTER_DATE = rf"(?<=({DATE}))\."
+
+infix_res = [
+    r"[\(\[\]\)]",
+    r"(?<=\.--)\.",  # DOT after .--
+    rf"\.(?=[{EXTENDED_LETTER_RANGE}]{{3,20}})",  # DOT before word
+    r"'\.\.",  # e.g., 'Tscheicha'.. -> "Tscheicha" "'..", then split ".." as suffix
+    *DOT_AFTER_WORD,  # when there is no space after the dot
+    r"[A-Z](?=\. )",  # DOT after capital letter
+    DOT_AFTER_DATE,
+]
+
+LETTER_DOUBLE_ENDING_DOT_VAR_LENGTH = [  # DOT after letter, e.g., A.G. or u.s.w.
+    rf"(?<=([{EXTENDED_LETTER_RANGE}]\.){{{i}}})\." for i in range(1, 30)
+]
+
+suffix_res = [
+    r"(?<=\d)[\.]",  # DOT after number
+    r"(?<=[\.])[\]\)]",  # Closing brackets with DOT before
+    rf"[\)\]](?=[\(\[\.{EXTENDED_LETTER_RANGE}0-9]+)",  # Closing brackets with word/brackets after
+    r"(?<=')\.\.",  # split "..'" -> ".." "'"
+    r"\.\.\.",
+    *LETTER_DOUBLE_ENDING_DOT_VAR_LENGTH,
+    r"(?<=[A-Z])\.",
+]
+
+DOT_DOT_PLUS = r"\.\.+"
+DOT_DOT_PLUS_FIXED = r"\.\.\.+"
+NUMBER_DASH_NUMBER = r"(?<=[0-9])-(?=[0-9])"
+NUMBER_SIGN_NUMBER = r"(?<=[0-9])[+\-\*^](?=[0-9-])"
+NUMBER_SIGN_NUMBER_FIXED = r"(?<=[0-9])[+\*^](?=[0-9])"
+
+
57
+# Given an nlp object, return a custom tokenizer that splits on special cases, with unwanted tokenization removed
+def custom_tokenizer(nlp):
+    nlp.tokenizer = Tokenizer(nlp.vocab)
+
+    prefix_regex = compile_prefix_regex(nlp.Defaults.prefixes)
+    nlp.tokenizer.prefix_search = prefix_regex.search
+
+    # We use the default infixes and remove some cases that lead to unwanted tokenization.
+    # The removed cases are [number]-[number] and [number][sign][number].
+    # We don't want to remove all signs, so we re-add the NUMBER_SIGN_NUMBER_FIXED case, which only excludes
+    # the minus sign, since we don't want to split, for example, CH-501.3.014.015-5.
+    infixes = nlp.Defaults.infixes
+    if NUMBER_DASH_NUMBER in infixes:
+        infixes.remove(NUMBER_DASH_NUMBER)
+    if NUMBER_SIGN_NUMBER in infixes:
+        infixes.remove(NUMBER_SIGN_NUMBER)
+        infixes.append(NUMBER_SIGN_NUMBER_FIXED)
+    infixes += infix_res
+    infix_regex = compile_infix_regex(infixes)
+    nlp.tokenizer.infix_finditer = infix_regex.finditer
+
+    # We remove the "..+" case and replace it with "...+" to be able to split on ".."
+    suffixes = nlp.Defaults.suffixes
+    if DOT_DOT_PLUS in suffixes:
+        suffixes.remove(DOT_DOT_PLUS)
+        suffixes.append(DOT_DOT_PLUS_FIXED)
+    suffixes += suffix_res
+    suffix_regex = compile_suffix_regex(suffixes)
+    nlp.tokenizer.suffix_search = suffix_regex.search
+
+    # Add all special cases (e.g., GmbH. -> GmbH .)
+    for special_case, tokens in special_cases.items():
+        nlp.tokenizer.add_special_case(special_case, tokens)
+
+    nlp.tokenizer.token_match = re.compile(r"^\[$").search
+
+    return nlp.tokenizer
94
+
+# File included for bundling
+# spacy/custom_tokenizer/custom_tokenizer_special_cases.py
+
+# Special cases following either pattern:
+# word.  -> word.   e.g., etc., which we don't want to split (an exception to the general rule)
+# word.. -> word. . e.g., Liq.., which we want to split after the first dot
+special_cases = {
+    "cf.": [{ORTH: "cf."}],
+    "etc.": [{ORTH: "etc."}],
+    "usw.": [{ORTH: "usw."}],
+    "u.s.w.": [{ORTH: "u.s.w."}],
+    "u.ä.": [{ORTH: "u.ä."}],
+    "Liq..": [{ORTH: "Liq."}, {ORTH: "."}],
+    "Cie..": [{ORTH: "Cie."}, {ORTH: "."}],
+    "Co..": [{ORTH: "Co."}, {ORTH: "."}],
+    "S.à.r.l.": [{ORTH: "S.à.r.l."}],
+    "r.l.": [{ORTH: "r.l."}],
+    "R.l.": [{ORTH: "R.l."}],
+    "g.l.": [{ORTH: "g.l."}],
+    "S.c.r.l.": [{ORTH: "S.c.r.l."}],
+    "u.a.": [{ORTH: "u.a."}],
+    "u.a.m.": [{ORTH: "u.a.m."}],
+    "s.à.r.l.": [{ORTH: "s.à.r.l."}],
+    "S.a.r.l.": [{ORTH: "S.a.r.l."}],
+    "s.a.r.l.": [{ORTH: "s.a.r.l."}],
+    "s.àr.l.": [{ORTH: "s.àr.l."}],
+    "u.d.g.": [{ORTH: "u.d.g."}],
+    "S.a.g.l.": [{ORTH: "S.a.g.l."}],
+    "S.r.l.": [{ORTH: "S.r.l."}],
+    "S.r.": [{ORTH: "S.r."}],
+    "Ltd..": [{ORTH: "Ltd."}, {ORTH: "."}],
+    "LTD..": [{ORTH: "LTD."}, {ORTH: "."}],
+    "ltd..": [{ORTH: "ltd."}, {ORTH: "."}],
+    "Corp..": [{ORTH: "Corp."}, {ORTH: "."}],
+    "Inc..": [{ORTH: "Inc."}, {ORTH: "."}],
+    "dgl..": [{ORTH: "dgl."}, {ORTH: "."}],
+    "ect..": [{ORTH: "ect."}, {ORTH: "."}],  # typo of etc.
+    "co..": [{ORTH: "co."}, {ORTH: "."}],
+    "CO..": [{ORTH: "CO."}, {ORTH: "."}],
+    "Ing..": [{ORTH: "Ing."}, {ORTH: "."}],
+    "HRegV..": [{ORTH: "HRegV."}, {ORTH: "."}],
+    "ehf..": [{ORTH: "ehf."}, {ORTH: "."}],
+    "Gen..": [{ORTH: "Gen."}, {ORTH: "."}],
+    "Var..": [{ORTH: "Var."}, {ORTH: "."}],
+    "b.v..": [{ORTH: "b.v."}, {ORTH: "."}],
+    "Dr..": [{ORTH: "Dr."}, {ORTH: "."}],
+    "Br..": [{ORTH: "Br."}, {ORTH: "."}],
+    "iu..": [{ORTH: "iu."}, {ORTH: "."}],
+    "Ch..": [{ORTH: "Ch."}, {ORTH: "."}],
+    "Inh..": [{ORTH: "Inh."}, {ORTH: "."}],
+    "sf..": [{ORTH: "sf."}, {ORTH: "."}],
+    "sen..": [{ORTH: "sen."}, {ORTH: "."}],
+    "Std..": [{ORTH: "Std."}, {ORTH: "."}],
+    "d.o.o..": [{ORTH: "d.o.o."}, {ORTH: "."}],
+    "M.Sc..": [{ORTH: "M.Sc."}, {ORTH: "."}],
+    "s.a..": [{ORTH: "s.a."}, {ORTH: "."}],
+    "ag..": [{ORTH: "ag."}, {ORTH: "."}],
+    "Fa..": [{ORTH: "Fa."}, {ORTH: "."}],
+    "Ti..": [{ORTH: "Ti."}, {ORTH: "."}],
+    "div..": [{ORTH: "div."}, {ORTH: "."}],
+    "ä..": [{ORTH: "ä."}, {ORTH: "."}],
+    "v.k.s.s..": [{ORTH: "v.k.s.s."}, {ORTH: "."}],
+    "ecc..": [{ORTH: "ecc."}, {ORTH: "."}],
+    "fed..": [{ORTH: "fed."}, {ORTH: "."}],
+    "Psy-K..": [{ORTH: "Psy-K."}, {ORTH: "."}],
+    "dipl.fed..": [{ORTH: "dipl.fed."}, {ORTH: "."}],
+    "Jr..": [{ORTH: "Jr."}, {ORTH: "."}],
+    "succ..": [{ORTH: "succ."}, {ORTH: "."}],
+    "méd..": [{ORTH: "méd."}, {ORTH: "."}],
+    "ass..": [{ORTH: "ass."}, {ORTH: "."}],
+    "env..": [{ORTH: "env."}, {ORTH: "."}],
+    "Int..": [{ORTH: "Int."}, {ORTH: "."}],
+    "Chr..": [{ORTH: "Chr."}, {ORTH: "."}],
+}
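
For context (not part of the commit): registering a factory with @registry.tokenizers("customize_tokenizer") is spaCy's standard mechanism for swapping in a custom tokenizer, so the pipeline's config.cfg presumably points at it from an [nlp.tokenizer] block with @tokenizers = "customize_tokenizer". The sketch below is illustrative only; the load name it_trf_nrp is an assumption inferred from the wheel filename, the example sentence is made up, and the expected splits are read off the special_cases table above rather than verified output.

import spacy
import use_custom_tokenizer  # noqa: F401  # registers "customize_tokenizer"; may be redundant if the installed package bundles this module

nlp = spacy.load("it_trf_nrp")  # assumed package/load name, inferred from the wheel filename

# Per special_cases above, "Liq.." should surface as "Liq." followed by ".",
# while "usw." should remain a single token.
doc = nlp("Muster AG in Liq.. wurde am 01.01.2020 gegründet, usw.")
print([token.text for token in doc])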