stas committed on
Commit 6bbc16b
1 Parent(s): 9afb47c

new tiny model

config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "architectures": [
+     "XLMRobertaForCausalLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "d_ff": 256,
+   "d_kv": 8,
+   "d_model": 64,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 256,
+   "initializer_range": 0.02,
+   "intermediate_size": 256,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 64,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 2,
+   "num_decoder_layers": 2,
+   "num_heads": 2,
+   "num_hidden_layers": 2,
+   "num_layers": 2,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "relative_attention_num_buckets": 32,
+   "torch_dtype": "float16",
+   "transformers_version": "4.9.0.dev0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 5002
+ }
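For orientation, once the repo is pushed (see make-tiny-xlm-roberta.py below), this config can be loaded back and its shrunken dimensions verified. A minimal sketch, assuming only that transformers is installed:

    from transformers import XLMRobertaConfig

    # load the tiny config from the hub repo this commit targets
    config = XLMRobertaConfig.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
    print(config.hidden_size, config.num_hidden_layers, config.vocab_size)
    # expected, per the config.json above: 256 2 5002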
make-tiny-xlm-roberta.py ADDED
@@ -0,0 +1,140 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # This script creates a tiny random model
+ #
+ # It will then be used as "hf-internal-testing/tiny-xlm-roberta"
+
+ # ***To build from scratch***
+ #
+ # 1. clone sentencepiece into a parent dir
+ # git clone https://github.com/google/sentencepiece
+ #
+ # 2. create a new repo at https://huggingface.co/new
+ # make sure to choose 'hf-internal-testing' as the Owner
+ #
+ # 3. clone
+ # git clone https://huggingface.co/hf-internal-testing/tiny-xlm-roberta
+ # cd tiny-xlm-roberta
+ #
+ # 4. start with some pre-existing script from one of the https://huggingface.co/hf-internal-testing/ tiny model repos, e.g.
+ # wget https://huggingface.co/hf-internal-testing/tiny-bert/raw/main/make-tiny-xlm-roberta.py
+ # chmod a+x ./make-tiny-xlm-roberta.py
+ #
+ # 5. edit and re-run this script while fixing it up
+ # ./make-tiny-xlm-roberta.py .
+ #
+ # 6. add/commit/push
+ # git add *
+ # git commit -m "new tiny model"
+ # git push
+
+ # ***To update***
+ #
+ # 1. clone the existing repo
+ # git clone https://huggingface.co/hf-internal-testing/tiny-xlm-roberta
+ # cd tiny-xlm-roberta
+ #
+ # 2. edit and re-run this script after doing whatever changes are needed
+ # ./make-tiny-xlm-roberta.py .
+ #
+ # 3. commit/push
+ # git commit -m "new tiny model"
+ # git push
+
+ from pathlib import Path
+ import json
+ import tempfile
+ import sys
+ import os
+
+ from transformers import XLMRobertaTokenizer, XLMRobertaTokenizerFast, XLMRobertaConfig, XLMRobertaForCausalLM
+
+ # workaround for the fast tokenizer protobuf issue, and it's much faster too!
+ os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
+
+ mname_orig = "xlm-roberta-base"
+ mname_tiny = "tiny-xlm-roberta"
+ tmp_dir = f"/tmp/{mname_tiny}"
+
+ ### Tokenizer
+
+ # Shrink the orig vocab to keep things small
+ vocab_keep_items = 5000
+ vocab_orig_path = f"{tmp_dir}/sentencepiece.bpe.model"
+ vocab_short_path = f"{tmp_dir}/spiece-short.model"
+ if 1: # set to 0 to skip this after running once, to speed things up during tune-up
+     # HACK: need the sentencepiece source to get sentencepiece_model_pb2, as it doesn't get installed
+     sys.path.append("../sentencepiece/python/src/sentencepiece")
+     import sentencepiece_model_pb2 as model
+     tokenizer_orig = XLMRobertaTokenizer.from_pretrained(mname_orig)
+     tokenizer_orig.save_pretrained(tmp_dir)
+     with open(vocab_orig_path, 'rb') as f: data = f.read()
+     # adapted from https://blog.ceshine.net/post/trim-down-sentencepiece-vocabulary/
+     m = model.ModelProto()
+     m.ParseFromString(data)
+     print(f"Shrinking vocab from original {len(m.pieces)} dict items")
+     # drop pieces from the end of the list, keeping only the first vocab_keep_items entries
+     for i in range(len(m.pieces) - vocab_keep_items): _ = m.pieces.pop()
+     print(f"new dict {len(m.pieces)}")
+     with open(vocab_short_path, 'wb') as f: f.write(m.SerializeToString())
+     m = None
+
+ tokenizer_fast_tiny = XLMRobertaTokenizerFast(vocab_file=vocab_short_path)
+ tokenizer_tiny = XLMRobertaTokenizer(vocab_file=vocab_short_path)
+
+ ### Config
+
+ config_tiny = XLMRobertaConfig.from_pretrained(mname_orig)
+ # remember to update this to the actual config, as each model is different, and then shrink the numbers
+ config_tiny.update(dict(
+     vocab_size=vocab_keep_items+12,
+     d_ff=256,
+     d_kv=8,
+     d_model=64,
+     hidden_size=256,
+     intermediate_size=256,
+     max_position_embeddings=64,
+     num_attention_heads=2,
+     num_decoder_layers=2,
+     num_heads=2,
+     num_hidden_layers=2,
+     num_layers=2,
+     relative_attention_num_buckets=32,
+ ))
+ print("New config", config_tiny)
+
+ ### Model
+
+ model_tiny = XLMRobertaForCausalLM(config_tiny)
+ print(f"{mname_tiny}: num of params {model_tiny.num_parameters()}")
+ model_tiny.resize_token_embeddings(len(tokenizer_tiny))
+
+ inputs = tokenizer_tiny("hello", return_tensors="pt")
+ outputs = model_tiny(**inputs)
+ print("Test with normal tokenizer:", len(outputs.logits[0]))
+
+ inputs = tokenizer_fast_tiny("hello", return_tensors="pt")
+ outputs = model_tiny(**inputs)
+ print("Test with fast tokenizer:", len(outputs.logits[0]))
+
+ # Save
+ model_tiny.half() # makes it smaller
+ model_tiny.save_pretrained(".")
+ tokenizer_tiny.save_pretrained(".")
+ tokenizer_fast_tiny.save_pretrained(".")
+
+ print(f"Generated {mname_tiny}")
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f27b39bb93be6fc20581fd2cce5f6db55ee7770c428ebac0df7f01d9e7aac311
+ size 4334436
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef67a60b933d0da430d4b839301301ada0179b0c71102b0eef4567386faa1588
+ size 309222
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sp_model_kwargs": {}, "tokenizer_class": "XLMRobertaTokenizer"}