jack-oh committed on
Commit
352ea4c
1 Parent(s): 74295c1

Update from jack

Files changed (5)
  1. config.json +19 -0
  2. pytorch_model.bin +3 -0
  3. tokenization_morp.py +393 -0
  4. tokenizer_config.json +1 -0
  5. vocab.txt +0 -0
config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "attention_probs_dropout_prob": 0.1,
+   "directionality": "bidi",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "max_position_embeddings": 512,
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "type_vocab_size": 2,
+   "vocab_size": 30349
+ }
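This is a standard BERT-Base shape (12 layers, 12 attention heads, hidden size 768) with a 30,349-entry vocabulary. A minimal sketch of reading the committed file with the transformers library; the library version and the local path are assumptions, and recent releases keep the legacy directionality/pooler_* keys only as extra attributes:

from transformers import BertConfig

# Load the committed config.json from a local clone of this repository.
config = BertConfig.from_json_file("config.json")
print(config.hidden_size)        # 768
print(config.num_hidden_layers)  # 12
print(config.vocab_size)         # 30349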
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca0963b24d245ee2c4dbe3d413ec5306221340f8fa3b941f55ae07dbbe7de039
+ size 437453900
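The weights themselves are stored in Git LFS; this pointer records only the object hash and size (roughly 437 MB). A small sketch, assuming the real binary has already been fetched into the working directory (for example via git lfs pull), for checking a local copy against the pointer:

import hashlib
import os

PATH = "pytorch_model.bin"  # assumed local path after `git lfs pull`
EXPECTED_OID = "ca0963b24d245ee2c4dbe3d413ec5306221340f8fa3b941f55ae07dbbe7de039"
EXPECTED_SIZE = 437453900

sha256 = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha256.update(chunk)

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size does not match the LFS pointer"
assert sha256.hexdigest() == EXPECTED_OID, "sha256 does not match the LFS pointer"
print("pytorch_model.bin matches the LFS pointer above")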
tokenization_morp.py ADDED
@@ -0,0 +1,393 @@
+ # coding=utf-8
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+ #
+ # Tokenization class for morpheme-analysis-based BERT
+ # Modified by: joonho.lim
+ # Date: 2019-05-23
+ #
+ """Tokenization classes."""
+
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+
+ import collections
+ import unicodedata
+ import os
+ import logging
+
+ from transformers.file_utils import cached_path
+
+ logger = logging.getLogger(__name__)
+
+ PRETRAINED_VOCAB_ARCHIVE_MAP = {
+     'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
+     'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
+     'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
+     'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
+     'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
+     'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
+     'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
+ }
+ PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
+     'bert-base-uncased': 512,
+     'bert-large-uncased': 512,
+     'bert-base-cased': 512,
+     'bert-large-cased': 512,
+     'bert-base-multilingual-uncased': 512,
+     'bert-base-multilingual-cased': 512,
+     'bert-base-chinese': 512,
+ }
+ VOCAB_NAME = 'vocab.txt'
+
+
+ def load_vocab(vocab_file):
+     """Loads a vocabulary file into a dictionary."""
+     vocab = collections.OrderedDict()
+     index = 0
+     with open(vocab_file, "r", encoding="utf-8") as reader:
+         while True:
+             token = reader.readline()
+             if not token:
+                 break
+
+             ### joonho.lim @ 2019-03-15
+             if token.find('n_iters=') == 0 or token.find('max_length=') == 0:
+                 continue
+             token = token.split('\t')[0]
+
+             token = token.strip()
+             vocab[token] = index
+             index += 1
+     return vocab
+
+
+ def whitespace_tokenize(text):
+     """Runs basic whitespace cleaning and splitting on a piece of text."""
+     text = text.strip()
+     if not text:
+         return []
+     tokens = text.split()
+     return tokens
+
+
+ class BertTokenizer(object):
+     """Runs end-to-end tokenization: punctuation splitting + wordpiece"""
+
+     def __init__(self, vocab_file, do_lower_case=True, max_len=None,
+                  never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
+         if not os.path.isfile(vocab_file):
+             raise ValueError(
+                 "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
+                 "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
+         self.vocab = load_vocab(vocab_file)
+         self.ids_to_tokens = collections.OrderedDict(
+             [(ids, tok) for tok, ids in self.vocab.items()])
+         self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
+                                               never_split=never_split)
+         self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
+         self.max_len = max_len if max_len is not None else int(1e12)
+
+     def tokenize(self, text):
+         split_tokens = []
+         for token in self.basic_tokenizer.tokenize(text):
+             ### joonho.lim @ 2019-03-15
+             token += '_'
+             for sub_token in self.wordpiece_tokenizer.tokenize(token):
+                 split_tokens.append(sub_token)
+         return split_tokens
+
+     def convert_tokens_to_ids(self, tokens):
+         """Converts a sequence of tokens into ids using the vocab."""
+         ids = []
+         for token in tokens:
+             ids.append(self.vocab[token])
+         if len(ids) > self.max_len:
+             raise ValueError(
+                 "Token indices sequence length is longer than the specified maximum "
+                 " sequence length for this BERT model ({} > {}). Running this"
+                 " sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
+             )
+         return ids
+
+     def convert_ids_to_tokens(self, ids):
+         """Converts a sequence of ids into wordpiece tokens using the vocab."""
+         tokens = []
+         for i in ids:
+             tokens.append(self.ids_to_tokens[i])
+         return tokens
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs):
+         """
+         Instantiate a PreTrainedBertModel from a pre-trained model file.
+         Download and cache the pre-trained model file if needed.
+         """
+         if pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP:
+             vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name]
+         else:
+             vocab_file = pretrained_model_name
+         if os.path.isdir(vocab_file):
+             vocab_file = os.path.join(vocab_file, VOCAB_NAME)
+         # redirect to the cache, if necessary
+         try:
+             resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
+         except FileNotFoundError:
+             logger.error(
+                 "Model name '{}' was not found in model name list ({}). "
+                 "We assumed '{}' was a path or url but couldn't find any file "
+                 "associated to this path or url.".format(
+                     pretrained_model_name,
+                     ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
+                     vocab_file))
+             return None
+         if resolved_vocab_file == vocab_file:
+             logger.info("loading vocabulary file {}".format(vocab_file))
+         else:
+             logger.info("loading vocabulary file {} from cache at {}".format(
+                 vocab_file, resolved_vocab_file))
+         if pretrained_model_name in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
+             # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
+             # than the number of positional embeddings
+             max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name]
+             kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
+         # Instantiate tokenizer.
+         tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
+         return tokenizer
+
+
+ class BasicTokenizer(object):
+     """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
+
+     def __init__(self,
+                  do_lower_case=True,
+                  never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
+         """Constructs a BasicTokenizer.
+
+         Args:
+             do_lower_case: Whether to lower case the input.
+         """
+         self.do_lower_case = do_lower_case
+         self.never_split = never_split
+
+     def tokenize(self, text):
+         """Tokenizes a piece of text."""
+         text = self._clean_text(text)
+
+         ### joonho.lim @ 2019-03-15
+         # # # This was added on November 1st, 2018 for the multilingual and Chinese
+         # # # models. This is also applied to the English models now, but it doesn't
+         # # # matter since the English models were not trained on any Chinese data
+         # # # and generally don't have any Chinese data in them (there are Chinese
+         # # # characters in the vocabulary because Wikipedia does have some Chinese
+         # # # words in the English Wikipedia.).
+         # # text = self._tokenize_chinese_chars(text)
+
+         orig_tokens = whitespace_tokenize(text)
+         split_tokens = []
+         for token in orig_tokens:
+             if self.do_lower_case and token not in self.never_split:
+                 token = token.lower()
+                 token = self._run_strip_accents(token)
+             split_tokens.extend(self._run_split_on_punc(token))
+
+         output_tokens = whitespace_tokenize(" ".join(split_tokens))
+         return output_tokens
+
+     def _run_strip_accents(self, text):
+         """Strips accents from a piece of text."""
+         text = unicodedata.normalize("NFD", text)
+         output = []
+         for char in text:
+             cat = unicodedata.category(char)
+             if cat == "Mn":
+                 continue
+             output.append(char)
+         return "".join(output)
+
+     def _run_split_on_punc(self, text):
+         """Splits punctuation on a piece of text."""
+         if text in self.never_split:
+             return [text]
+         chars = list(text)
+         i = 0
+         start_new_word = True
+         output = []
+         while i < len(chars):
+             char = chars[i]
+             if _is_punctuation(char):
+                 output.append([char])
+                 start_new_word = True
+             else:
+                 if start_new_word:
+                     output.append([])
+                 start_new_word = False
+                 output[-1].append(char)
+             i += 1
+
+         return ["".join(x) for x in output]
+
+     def _tokenize_chinese_chars(self, text):
+         """Adds whitespace around any CJK character."""
+         output = []
+         for char in text:
+             cp = ord(char)
+             if self._is_chinese_char(cp):
+                 output.append(" ")
+                 output.append(char)
+                 output.append(" ")
+             else:
+                 output.append(char)
+         return "".join(output)
+
+     def _is_chinese_char(self, cp):
+         """Checks whether CP is the codepoint of a CJK character."""
+         # This defines a "chinese character" as anything in the CJK Unicode block:
+         #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+         #
+         # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+         # despite its name. The modern Korean Hangul alphabet is a different block,
+         # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+         # space-separated words, so they are not treated specially and handled
+         # like all of the other languages.
+         if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
+                 (cp >= 0x3400 and cp <= 0x4DBF) or  #
+                 (cp >= 0x20000 and cp <= 0x2A6DF) or  #
+                 (cp >= 0x2A700 and cp <= 0x2B73F) or  #
+                 (cp >= 0x2B740 and cp <= 0x2B81F) or  #
+                 (cp >= 0x2B820 and cp <= 0x2CEAF) or
+                 (cp >= 0xF900 and cp <= 0xFAFF) or  #
+                 (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
+             return True
+
+         return False
+
+     def _clean_text(self, text):
+         """Performs invalid character removal and whitespace cleanup on text."""
+         output = []
+         for char in text:
+             cp = ord(char)
+             if cp == 0 or cp == 0xfffd or _is_control(char):
+                 continue
+             if _is_whitespace(char):
+                 output.append(" ")
+             else:
+                 output.append(char)
+         return "".join(output)
+
+
+ class WordpieceTokenizer(object):
+     """Runs WordPiece tokenization."""
+
+     def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
+         self.vocab = vocab
+         self.unk_token = unk_token
+         self.max_input_chars_per_word = max_input_chars_per_word
+
+     def tokenize(self, text):
+         """Tokenizes a piece of text into its word pieces.
+
+         This uses a greedy longest-match-first algorithm to perform tokenization
+         using the given vocabulary.
+
+         For example:
+             input = "unaffable"
+             output = ["un", "##aff", "##able"]
+
+         Args:
+             text: A single token or whitespace separated tokens. This should have
+                 already been passed through `BasicTokenizer`.
+
+         Returns:
+             A list of wordpiece tokens.
+         """
+
+         output_tokens = []
+         for token in whitespace_tokenize(text):
+             chars = list(token)
+             if len(chars) > self.max_input_chars_per_word:
+                 output_tokens.append(self.unk_token)
+                 continue
+
+             is_bad = False
+             start = 0
+             sub_tokens = []
+             while start < len(chars):
+                 end = len(chars)
+                 cur_substr = None
+                 while start < end:
+                     substr = "".join(chars[start:end])
+                     ### joonho.lim @ 2019-03-15
+                     # if start > 0:
+                     #     substr = "##" + substr
+                     if substr in self.vocab:
+                         cur_substr = substr
+                         break
+                     end -= 1
+                 if cur_substr is None:
+                     is_bad = True
+                     break
+                 sub_tokens.append(cur_substr)
+                 start = end
+
+             if is_bad:
+                 output_tokens.append(self.unk_token)
+             else:
+                 output_tokens.extend(sub_tokens)
+         return output_tokens
+
+
+ def _is_whitespace(char):
+     """Checks whether `chars` is a whitespace character."""
+     # \t, \n, and \r are technically control characters but we treat them
+     # as whitespace since they are generally considered as such.
+     if char == " " or char == "\t" or char == "\n" or char == "\r":
+         return True
+     cat = unicodedata.category(char)
+     if cat == "Zs":
+         return True
+     return False
+
+
+ def _is_control(char):
+     """Checks whether `chars` is a control character."""
+     # These are technically control characters but we count them as whitespace
+     # characters.
+     if char == "\t" or char == "\n" or char == "\r":
+         return False
+     cat = unicodedata.category(char)
+     if cat.startswith("C"):
+         return True
+     return False
+
+
+ def _is_punctuation(char):
+     ### joonho.lim @ 2019-03-15
+     return char == ' '
+
+     # """Checks whether `chars` is a punctuation character."""
+     # cp = ord(char)
+     # # We treat all non-letter/number ASCII as punctuation.
+     # # Characters such as "^", "$", and "`" are not in the Unicode
+     # # Punctuation class but we treat them as punctuation anyways, for
+     # # consistency.
+     # if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
+     #         (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
+     #     return True
+     # cat = unicodedata.category(char)
+     # if cat.startswith("P"):
+     #     return True
+     # return False
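The joonho.lim modifications change the stock BERT tokenizer in three ways: vocabulary lines may carry tab-separated metadata (and n_iters=/max_length= header lines are skipped), every basic token gets a trailing '_' appended before WordPiece matching, and punctuation splitting plus the '##' continuation prefix are disabled, so the input is expected to be whitespace-separated morphemes. A hedged usage sketch; the sample sentence, its morpheme/POS format, and the file paths are placeholders, and the module needs a transformers release old enough to still expose transformers.file_utils.cached_path:

from tokenization_morp import BertTokenizer

# Build the tokenizer directly from the committed vocabulary file.
tokenizer = BertTokenizer("vocab.txt", do_lower_case=False)

# Placeholder input: whitespace-separated morpheme/POS pairs produced by an
# external morphological analyzer (not part of this commit).
text = "어제/MAG 회의/NNG 가/JKS 있/VV 었/EP 다/EF ./SF"

tokens = tokenizer.tokenize(text)             # vocabulary pieces, or '[UNK]' for unmatched morphemes
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokens)
print(ids)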
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "max_model_length": 512, "special_tokens_map_file": null, "full_tokenizer_file": null}
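These keys are consumed by the transformers tokenizer-loading machinery; when constructing the bundled class by hand they can be applied explicitly. A sketch under the assumption that do_lower_case and max_model_length map onto the class's do_lower_case and max_len arguments:

import json

from tokenization_morp import BertTokenizer

with open("tokenizer_config.json", "r", encoding="utf-8") as f:
    tok_cfg = json.load(f)

tokenizer = BertTokenizer(
    "vocab.txt",
    do_lower_case=tok_cfg.get("do_lower_case", False),
    max_len=tok_cfg.get("max_model_length", 512),  # assumed mapping to max_len
)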
vocab.txt ADDED
The diff for this file is too large to render. See raw diff