stas committed on
Commit
b026625
1 Parent(s): b0f8a99
README.md ADDED
@@ -0,0 +1 @@
+ This is a tiny-electra random model to be used for basic testing.
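
As the README says, this checkpoint is a randomly initialized, drastically shrunken ELECTRA meant for fast CI tests, not a trained model. A minimal sketch of the intended test-time usage (assuming the repo is published under the hf-internal-testing/tiny-electra name given in the generation script below; the checkpoint is stored in fp16 per the config's torch_dtype, hence the upcast for a CPU forward pass):

from transformers import ElectraForMaskedLM, ElectraTokenizerFast

# loads the ~4 MB checkpoint committed here; outputs are random, only shapes matter
model = ElectraForMaskedLM.from_pretrained("hf-internal-testing/tiny-electra").float()
tokenizer = ElectraTokenizerFast.from_pretrained("hf-internal-testing/tiny-electra")
inputs = tokenizer("Hello [MASK]!", return_tensors="pt")
print(model(**inputs).logits.shape)  # (1, sequence_length, vocab_size=30522)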
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "architectures": [
+     "ElectraForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "embedding_size": 64,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 64,
+   "initializer_range": 0.02,
+   "intermediate_size": 64,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 64,
+   "model_type": "electra",
+   "num_attention_heads": 2,
+   "num_hidden_layers": 2,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "summary_activation": "gelu",
+   "summary_last_dropout": 0.1,
+   "summary_type": "first",
+   "summary_use_proj": true,
+   "torch_dtype": "float16",
+   "transformers_version": "4.9.0.dev0",
+   "type_vocab_size": 2,
+   "vocab_size": 30522
+ }
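
A quick cross-check of the numbers above: every width (embedding_size, hidden_size, intermediate_size, max_position_embeddings) is squeezed to 64 and the depth to 2 layers with 2 heads, while vocab_size stays at BERT's 30522, so the 30522 x 64 token embedding dominates and the model lands at roughly 2M parameters. That is consistent with the ~4.1 MB fp16 pytorch_model.bin recorded further down (2 bytes per parameter). A sketch of verifying this locally, assuming the file above is saved as ./config.json:

from transformers import ElectraConfig, ElectraForMaskedLM

config = ElectraConfig.from_pretrained(".")  # reads ./config.json
model = ElectraForMaskedLM(config)           # randomly initialized, exactly as the script below does
print(model.num_parameters())                # ~2M, dominated by the 30522 x 64 embedding matrix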
make-tiny-electra.py ADDED
@@ -0,0 +1,119 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # This script creates a tiny random model
+ #
+ # It will then be used as "hf-internal-testing/tiny-electra"
+
+ # ***To build from scratch***
+ #
+ # 1. clone sentencepiece into a parent dir
+ # git clone https://github.com/google/sentencepiece
+ #
+ # 2. create a new repo at https://huggingface.co/new
+ # make sure to choose 'hf-internal-testing' as the Owner
+ #
+ # 3. clone
+ # git clone https://huggingface.co/hf-internal-testing/tiny-electra
+ # cd tiny-electra
+ #
+ # 4. start with some pre-existing script from one of the https://huggingface.co/hf-internal-testing/ tiny model repos, e.g.
+ # wget https://huggingface.co/hf-internal-testing/tiny-xlm-roberta/raw/main/make-tiny-xlm-roberta.py
+ # mv ./make-tiny-xlm-roberta.py ./make-tiny-electra.py
+ # chmod a+x ./make-tiny-electra.py
+ #
+ # 5. automatically rename things from the old names to new ones
+ # perl -pi -e 's|XLMRoberta|Electra|g' make-tiny-electra.py
+ # perl -pi -e 's|xlm-roberta|electra|g' make-tiny-electra.py
+ #
+ # 6. edit and re-run this script while fixing it up
+ # ./make-tiny-electra.py
+ #
+ # 7. add/commit/push
+ # git add *
+ # git commit -m "new tiny model"
+ # git push
+
+ # ***To update***
+ #
+ # 1. clone the existing repo
+ # git clone https://huggingface.co/hf-internal-testing/tiny-electra
+ # cd tiny-electra
+ #
+ # 2. edit and re-run this script after doing whatever changes are needed
+ # ./make-tiny-electra.py
+ #
+ # 3. commit/push
+ # git commit -am "new tiny model"
+ # git push
+
+ import os
+
+ from transformers import ElectraTokenizer, ElectraTokenizerFast, ElectraConfig, ElectraForMaskedLM
+
+ mname_orig = "google/electra-small-generator"
+ mname_tiny = "tiny-electra"
+
+ ### Tokenizer
+
+ # both the slow and the fast tokenizer are kept, so either flavor can be tested
+ tokenizer_fast_tiny = ElectraTokenizerFast.from_pretrained(mname_orig)
+ tokenizer_tiny = ElectraTokenizer.from_pretrained(mname_orig)
+
+ ### Config
+
+ config_tiny = ElectraConfig.from_pretrained(mname_orig)
+ print(config_tiny)
+ # remember to update this to the actual config as each model is different and then shrink the numbers
+ config_tiny.update(dict(
+     embedding_size=64,
+     hidden_size=64,
+     intermediate_size=64,
+     max_position_embeddings=64,
+     num_attention_heads=2,
+     num_hidden_layers=2,
+ ))
+ print("New config", config_tiny)
+
+ ### Model
+
+ model_tiny = ElectraForMaskedLM(config_tiny)
+ print(f"{mname_tiny}: num of params {model_tiny.num_parameters()}")
+ model_tiny.resize_token_embeddings(len(tokenizer_tiny))
+
+ # sanity check: run a masked-LM forward pass with each tokenizer
+ inputs = tokenizer_tiny("The capital of France is [MASK].", return_tensors="pt")
+ labels = tokenizer_tiny("The capital of France is Paris.", return_tensors="pt")["input_ids"]
+ outputs = model_tiny(**inputs, labels=labels)
+ print("Test with normal tokenizer:", len(outputs.logits[0]))
+
+ inputs = tokenizer_fast_tiny("The capital of France is [MASK].", return_tensors="pt")
+ labels = tokenizer_fast_tiny("The capital of France is Paris.", return_tensors="pt")["input_ids"]
+ outputs = model_tiny(**inputs, labels=labels)
+ print("Test with fast tokenizer:", len(outputs.logits[0]))
+
+ # Save
+ model_tiny.half() # makes it smaller
+ model_tiny.save_pretrained(".")
+ tokenizer_tiny.save_pretrained(".")
+ tokenizer_fast_tiny.save_pretrained(".")
+
+ readme = "README.md"
+ if not os.path.exists(readme):
+     with open(readme, "w") as f:
+         f.write(f"This is a {mname_tiny} random model to be used for basic testing.\n")
+
+ print(f"Generated {mname_tiny}")
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d055d70e8a7ca5b19773a812c87e5206436b1381bfcb3d0ee6ca2b653cc929a2
+ size 4102372
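
The three lines above are a git-lfs pointer, not the weights themselves: the actual ~4 MB blob is fetched by git-lfs on checkout. A standard-library sketch for checking that a downloaded pytorch_model.bin matches the recorded oid and size:

import hashlib
import os

path = "pytorch_model.bin"
assert os.path.getsize(path) == 4102372, "size mismatch"
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == "d055d70e8a7ca5b19773a812c87e5206436b1381bfcb3d0ee6ca2b653cc929a2", "oid mismatch"
print("pytorch_model.bin matches its LFS pointer")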
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "google/electra-small-generator", "tokenizer_class": "ElectraTokenizer"}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff
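
Taken together, tokenizer_config.json, special_tokens_map.json, vocab.txt, and tokenizer.json are the stock google/electra-small-generator tokenizer saved unchanged (the script shrinks only the model, not the vocabulary). A sketch of loading them from a local clone and checking the settings recorded above:

from transformers import ElectraTokenizer

tokenizer = ElectraTokenizer.from_pretrained(".")  # reads tokenizer_config.json, special_tokens_map.json, vocab.txt
print(tokenizer.model_max_length)         # 512, from tokenizer_config.json
print(tokenizer.mask_token)               # "[MASK]", from special_tokens_map.json
print(tokenizer.tokenize("Hello World"))  # ["hello", "world"], since do_lower_case is true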