Patrick von Platen committed
Commit bf4dfc2
1 Parent(s): 687eada
Files changed (4)
  1. config.json +25 -0
  2. create_config.py +4 -0
  3. tokenizer.json +0 -0
  4. train_tokenizer.py +24 -0
config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "architectures": [
+     "RobertaForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "transformers_version": "4.7.0.dev0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 25165
+ }
create_config.py ADDED
@@ -0,0 +1,4 @@
+ from transformers import RobertaConfig
+
+ config = RobertaConfig.from_pretrained("roberta-base", vocab_size=25165)
+ config.save_pretrained("./")
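
As a quick aside (a sketch, not part of this commit): the config.json written by create_config.py can be loaded back to build a randomly initialized RobertaForMaskedLM ready for pretraining. The local path "./" is an assumption about where the saved files live.

    # Sketch, not part of the commit: rebuild a fresh masked-LM model from the saved config.
    from transformers import RobertaConfig, RobertaForMaskedLM

    config = RobertaConfig.from_pretrained("./")  # reads ./config.json saved above
    model = RobertaForMaskedLM(config)            # randomly initialized weights, no pretrained checkpoint
    print(model.config.vocab_size)                # 25165, matching the tokenizer's vocabulary size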
tokenizer.json ADDED
The diff for this file is too large to render.
train_tokenizer.py ADDED
@@ -0,0 +1,24 @@
+ from datasets import load_dataset
+ from tokenizers import trainers, Tokenizer, normalizers, ByteLevelBPETokenizer
+
+ # load dataset
+ dataset = load_dataset("mc4", "sw", split="train")
+
+ # Instantiate tokenizer
+ tokenizer = ByteLevelBPETokenizer()
+
+ def batch_iterator(batch_size=1000):
+     for i in range(0, len(dataset), batch_size):
+         yield dataset[i: i + batch_size]["text"]
+
+ # Customized training
+ tokenizer.train_from_iterator(batch_iterator(), vocab_size=25165, min_frequency=2, special_tokens=[
+     "<s>",
+     "<pad>",
+     "</s>",
+     "<unk>",
+     "<mask>",
+ ])
+
+ # Save files to disk
+ tokenizer.save("tokenizer.json")
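
For context (again a sketch, not part of the commit): the tokenizer.json produced by this script can be loaded directly with the tokenizers library. The Swahili example sentence below is made up for illustration.

    # Sketch, not part of the commit: load the trained tokenizer and encode a sample sentence.
    from tokenizers import Tokenizer

    tokenizer = Tokenizer.from_file("tokenizer.json")
    encoding = tokenizer.encode("Habari ya asubuhi")  # made-up Swahili sample
    print(encoding.tokens)
    print(encoding.ids)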