omrikeren committed
Commit e5cc769
1 Parent(s): a72f384

Added files
README.md ADDED
@@ -0,0 +1,32 @@
+ ---
+ language: tr
+ tags:
+ - roberta
+ - language model
+ datasets:
+ - oscar
+ ---
+ # TavBERT base model
+ A Turkish BERT-style masked language model operating over characters, pre-trained by masking spans of characters, similarly to SpanBERT (Joshi et al., 2020).
+ 
+ ### How to use
+ ```python
+ import numpy as np
+ import torch
+ from transformers import AutoModelForMaskedLM, AutoTokenizer
+ 
+ model = AutoModelForMaskedLM.from_pretrained("tau/tavbert-tr")
+ tokenizer = AutoTokenizer.from_pretrained("tau/tavbert-tr")
+ 
+ def mask_sentence(sent, span_len=5):
+     start_pos = np.random.randint(0, len(sent) - span_len)  # random start of the masked span
+     masked_sent = sent[:start_pos] + '[MASK]' * span_len + sent[start_pos + span_len:]
+     print("Masked sentence:", masked_sent)
+     output = model(**tokenizer.encode_plus(masked_sent,
+                                            return_tensors='pt'))['logits'][0][1:-1]  # drop [CLS]/[SEP] logits
+     preds = [int(x) for x in torch.argmax(torch.softmax(output, dim=1), dim=1)[start_pos:start_pos + span_len]]  # most likely character per masked position
+     pred_sent = sent[:start_pos] + ''.join(tokenizer.convert_ids_to_tokens(preds)) + sent[start_pos + span_len:]
+     print("Model's prediction:", pred_sent)
+ ```
+ ## Training data
+ OSCAR (Ortiz, 2019), Turkish section (27 GB of text, 77 million sentences).
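For reference, a minimal invocation of the snippet above (the sentence is a hypothetical example; any Turkish string longer than `span_len` works):

```python
# Assumes model, tokenizer, and mask_sentence from the README snippet above.
# The sentence is a hypothetical example input.
mask_sentence("Ankara, Türkiye'nin başkentidir.", span_len=5)
# Prints the masked input and the model's character-by-character reconstruction.
```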
config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "architectures": [
+     "RobertaForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 2050,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "transformers_version": "4.6.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 250
+ }
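This is a standard RoBERTa-base configuration except for the character-sized vocabulary (`vocab_size: 250`) and positions counted in characters (`max_position_embeddings: 2050`). A minimal sketch for inspecting it, assuming the `tau/tavbert-tr` repo id from the README:

```python
from transformers import AutoConfig

# Load the configuration shown above from the Hub.
config = AutoConfig.from_pretrained("tau/tavbert-tr")
print(config.model_type)               # roberta
print(config.vocab_size)               # 250: one id per character, plus special/unused tokens
print(config.max_position_embeddings)  # 2050: sequence length is measured in characters
```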
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef2c5f9cc42662893a6b826ceb725f99cd0b89a71c6770d578b708f8bc91cd7e
+ size 349766939
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
@@ -0,0 +1 @@
+ {"version":"1.0","truncation":null,"padding":null,"added_tokens":[],"normalizer":null,"pre_tokenizer":{"type":"Split","pattern":{"String":""},"behavior":"Isolated","invert":false},"post_processor":{"type":"TemplateProcessing","single":[{"SpecialToken":{"id":"[CLS]","type_id":0}},{"Sequence":{"id":"A","type_id":0}},{"SpecialToken":{"id":"[SEP]","type_id":0}}],"pair":[{"SpecialToken":{"id":"[CLS]","type_id":0}},{"Sequence":{"id":"A","type_id":0}},{"SpecialToken":{"id":"[SEP]","type_id":0}},{"Sequence":{"id":"B","type_id":1}},{"SpecialToken":{"id":"[SEP]","type_id":1}}],"special_tokens":{"[CLS]":{"id":"[CLS]","ids":[0],"tokens":["[CLS]"]},"[SEP]":{"id":"[SEP]","ids":[2],"tokens":["[SEP]"]}}},"decoder":null,"model":{"type":"WordLevel","vocab":{"[CLS]": 0, "[PAD]": 1, "[SEP]": 2, "[UNK]": 3, "unused00": 4, "unused01": 5, "unused02": 6, "unused03": 7, "unused04": 8, "unused05": 9, "unused06": 10, "unused07": 11, "unused08": 12, "unused09": 13, "unused10": 14, "unused11": 15, "unused12": 16, "unused13": 17, "unused14": 18, "unused15": 19, "unused16": 20, "unused17": 21, "unused18": 22, "unused19": 23, "unused20": 24, "unused21": 25, "unused22": 26, "unused23": 27, "unused24": 28, "unused25": 29, "unused26": 30, "unused27": 31, "unused28": 32, "unused29": 33, "unused30": 34, "unused31": 35, "unused32": 36, "unused33": 37, "unused34": 38, "unused35": 39, "unused36": 40, "unused37": 41, "unused38": 42, "unused39": 43, "unused40": 44, "unused41": 45, "unused42": 46, "unused43": 47, "unused44": 48, "unused45": 49, "unused46": 50, "unused47": 51, "unused48": 52, "unused49": 53, "unused50": 54, "unused51": 55, "unused52": 56, "unused53": 57, "unused54": 58, "unused55": 59, "unused56": 60, "unused57": 61, "unused58": 62, "unused59": 63, "unused60": 64, "unused61": 65, "unused62": 66, "unused63": 67, "unused64": 68, "unused65": 69, "unused66": 70, "unused67": 71, "unused68": 72, "unused69": 73, "unused70": 74, "unused71": 75, "unused72": 76, "unused73": 77, "unused74": 78, "unused75": 79, "unused76": 80, "unused77": 81, "unused78": 82, "unused79": 83, "unused80": 84, "unused81": 85, "unused82": 86, "unused83": 87, "unused84": 88, "unused85": 89, "unused86": 90, "unused87": 91, "unused88": 92, "unused89": 93, "unused90": 94, "unused91": 95, "unused92": 96, "unused93": 97, "unused94": 98, "unused95": 99, "unused96": 100, "unused97": 101, "unused98": 102, "unused99": 103, " ": 104, "!": 105, "\"": 106, "#": 107, "$": 108, "%": 109, "&": 110, "'": 111, "(": 112, ")": 113, "*": 114, "+": 115, ",": 116, "-": 117, ".": 118, "/": 119, "0": 120, "1": 121, "2": 122, "3": 123, "4": 124, "5": 125, "6": 126, "7": 127, "8": 128, "9": 129, ":": 130, ";": 131, "<": 132, "=": 133, ">": 134, "?": 135, "@": 136, "A": 137, "B": 138, "C": 139, "D": 140, "E": 141, "F": 142, "G": 143, "H": 144, "I": 145, "J": 146, "K": 147, "L": 148, "M": 149, "N": 150, "O": 151, "P": 152, "Q": 153, "R": 154, "S": 155, "T": 156, "U": 157, "V": 158, "W": 159, "X": 160, "Y": 161, "Z": 162, "[": 163, "\\": 164, "]": 165, "_": 166, "`": 167, "a": 168, "b": 169, "c": 170, "d": 171, "e": 172, "f": 173, "g": 174, "h": 175, "i": 176, "j": 177, "k": 178, "l": 179, "m": 180, "n": 181, "o": 182, "p": 183, "q": 184, "r": 185, "s": 186, "t": 187, "u": 188, "v": 189, "w": 190, "x": 191, "y": 192, "z": 193, "|": 194, "’": 195, "“": 196, "”": 197, "­": 198, "±": 199, "´": 200, "·": 201, "»": 202, "Â": 203, "Ã": 204, "Ä": 205, "Ç": 206, "Ö": 207, "Ü": 208, "â": 209, "ç": 210, "é": 211, "î": 212, "ð": 213, "ö": 214, "û": 215, "ü": 216, "ý": 217, "þ": 218, "Ğ": 219, "ğ": 220, "İ": 221, "ı": 222, "Ş": 223, "ş": 224, "̈": 225, "̧": 226, "а": 227, "в": 228, "е": 229, "и": 230, "к": 231, "л": 232, "н": 233, "о": 234, "р": 235, "с": 236, "т": 237, "у": 238, "ا": 239, "–": 240, "‘": 241, "’": 242, "“": 243, "”": 244, "•": 245, "…": 246, "あ": 247, "�": 248, "[MASK]": 249},"unk_token":"[UNK]"}}
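The `Split` pre-tokenizer with an empty pattern and `Isolated` behavior breaks input into single characters, which the `WordLevel` model maps directly to ids, so tokenization is purely character-level. A minimal sketch, with outputs inferred from the vocabulary above rather than verified against the model:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("tau/tavbert-tr")
# Every character becomes its own token.
print(tokenizer.tokenize("merhaba"))  # expected: ['m', 'e', 'r', 'h', 'a', 'b', 'a']
# The post-processor wraps sequences as [CLS] ... [SEP] (ids 0 and 2).
print(tokenizer.encode("ab"))         # expected: [0, 168, 169, 2]
```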
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"special_tokens_map_file": "./special_tokens_map.json", "name_or_path": ".", "tokenizer_class": "PreTrainedTokenizerFast"}
vocab.txt ADDED
@@ -0,0 +1,250 @@
+ [CLS]
+ [PAD]
+ [SEP]
+ [UNK]
+ unused00
+ unused01
+ unused02
+ unused03
+ unused04
+ unused05
+ unused06
+ unused07
+ unused08
+ unused09
+ unused10
+ unused11
+ unused12
+ unused13
+ unused14
+ unused15
+ unused16
+ unused17
+ unused18
+ unused19
+ unused20
+ unused21
+ unused22
+ unused23
+ unused24
+ unused25
+ unused26
+ unused27
+ unused28
+ unused29
+ unused30
+ unused31
+ unused32
+ unused33
+ unused34
+ unused35
+ unused36
+ unused37
+ unused38
+ unused39
+ unused40
+ unused41
+ unused42
+ unused43
+ unused44
+ unused45
+ unused46
+ unused47
+ unused48
+ unused49
+ unused50
+ unused51
+ unused52
+ unused53
+ unused54
+ unused55
+ unused56
+ unused57
+ unused58
+ unused59
+ unused60
+ unused61
+ unused62
+ unused63
+ unused64
+ unused65
+ unused66
+ unused67
+ unused68
+ unused69
+ unused70
+ unused71
+ unused72
+ unused73
+ unused74
+ unused75
+ unused76
+ unused77
+ unused78
+ unused79
+ unused80
+ unused81
+ unused82
+ unused83
+ unused84
+ unused85
+ unused86
+ unused87
+ unused88
+ unused89
+ unused90
+ unused91
+ unused92
+ unused93
+ unused94
+ unused95
+ unused96
+ unused97
+ unused98
+ unused99
+  
+ !
+ "
+ #
+ $
+ %
+ &
+ '
+ (
+ )
+ *
+ +
+ ,
+ -
+ .
+ /
+ 0
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+ :
+ ;
+ <
+ =
+ >
+ ?
+ @
+ A
+ B
+ C
+ D
+ E
+ F
+ G
+ H
+ I
+ J
+ K
+ L
+ M
+ N
+ O
+ P
+ Q
+ R
+ S
+ T
+ U
+ V
+ W
+ X
+ Y
+ Z
+ [
+ \
+ ]
+ _
+ `
+ a
+ b
+ c
+ d
+ e
+ f
+ g
+ h
+ i
+ j
+ k
+ l
+ m
+ n
+ o
+ p
+ q
+ r
+ s
+ t
+ u
+ v
+ w
+ x
+ y
+ z
+ |
+ ’
+ “
+ ”
+ ­
+ ±
+ ´
+ ·
+ »
+ Â
+ Ã
+ Ä
+ Ç
+ Ö
+ Ü
+ â
+ ç
+ é
+ î
+ ð
+ ö
+ û
+ ü
+ ý
+ þ
+ Ğ
+ ğ
+ İ
+ ı
+ Ş
+ ş
+ ̈
+ ̧
+ а
+ в
+ е
+ и
+ к
+ л
+ н
+ о
+ р
+ с
+ т
+ у
+ ا
+ –
+ ‘
+ ’
+ “
+ ”
+ •
+ …
+ あ
+ �
+ [MASK]