new tiny model
- README.md +1 -0
- config.json +32 -0
- make-tiny-albert.py +140 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- spiece.model +3 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
README.md
ADDED
@@ -0,0 +1 @@
This is a tiny-albert random model to be used for basic testing.
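For context, a minimal usage sketch (not part of the commit, assuming the repo id hf-internal-testing/tiny-albert that the script below targets):

from transformers import AlbertForMaskedLM, AlbertTokenizerFast

# weights are random, so outputs only exercise code paths, not real predictions
tokenizer = AlbertTokenizerFast.from_pretrained("hf-internal-testing/tiny-albert")
model = AlbertForMaskedLM.from_pretrained("hf-internal-testing/tiny-albert")
inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
outputs = model(**inputs)
print(outputs.logits.shape)  # roughly (1, seq_len, 5000)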
config.json
ADDED
@@ -0,0 +1,32 @@
{
  "architectures": [
    "AlbertForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 2,
  "classifier_dropout_prob": 0.1,
  "down_scale_factor": 1,
  "embedding_size": 64,
  "eos_token_id": 3,
  "gap_size": 0,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 256,
  "initializer_range": 0.02,
  "inner_group_num": 1,
  "intermediate_size": 128,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 64,
  "model_type": "albert",
  "net_structure_type": 0,
  "num_attention_heads": 2,
  "num_hidden_groups": 1,
  "num_hidden_layers": 2,
  "num_memory_blocks": 0,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "torch_dtype": "float16",
  "transformers_version": "4.9.0.dev0",
  "type_vocab_size": 2,
  "vocab_size": 5000
}
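The dimensions above are deliberately tiny: 2 hidden layers, 2 attention heads, hidden size 256, and only 64 positions. A quick sanity-check sketch (assumes a local checkout of this repo as the working directory):

from transformers import AlbertConfig

config = AlbertConfig.from_pretrained(".")  # reads ./config.json
assert config.num_hidden_layers == 2
assert config.hidden_size == 256
assert config.vocab_size == 5000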
make-tiny-albert.py
ADDED
@@ -0,0 +1,140 @@
#!/usr/bin/env python
# coding: utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script creates a tiny random model
#
# It will then be used as "hf-internal-testing/tiny-albert"

# ***To build from scratch***
#
# 1. clone sentencepiece into a parent dir
#    git clone https://github.com/google/sentencepiece
#
# 2. create a new repo at https://huggingface.co/new
#    make sure to choose 'hf-internal-testing' as the Owner
#
# 3. clone
#    git clone https://huggingface.co/hf-internal-testing/tiny-albert
#    cd tiny-albert
#
# 4. start with a pre-existing script from one of the https://huggingface.co/hf-internal-testing/ tiny model repos,
#    e.g. (with <other-model> standing in for whichever repo you copy from):
#    wget https://huggingface.co/hf-internal-testing/tiny-<other-model>/raw/main/make-tiny-<other-model>.py
#    chmod a+x ./make-tiny-<other-model>.py
#    mv ./make-tiny-<other-model>.py ./make-tiny-albert.py
#
# 5. automatically rename things from the old names to the new ones
#    perl -pi -e 's|<OtherModel>|Albert|g' make-*
#    perl -pi -e 's|<other-model>|albert|g' make-*
#
# 6. edit and re-run this script while fixing it up
#    ./make-tiny-albert.py
#
# 7. add/commit/push
#    git add *
#    git commit -m "new tiny model"
#    git push

# ***To update***
#
# 1. clone the existing repo
#    git clone https://huggingface.co/hf-internal-testing/tiny-albert
#    cd tiny-albert
#
# 2. edit and re-run this script after making whatever changes are needed
#    ./make-tiny-albert.py
#
# 3. commit/push
#    git commit -m "new tiny model"
#    git push
import sys
import os

# workaround for fast tokenizer protobuf issue, and it's much faster too!
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"

from transformers import AlbertTokenizerFast, AlbertConfig, AlbertForMaskedLM

mname_orig = "albert-base-v1"
mname_tiny = "tiny-albert"

### Tokenizer

# Shrink the orig vocab to keep things small
vocab_keep_items = 5000
tmp_dir = f"/tmp/{mname_tiny}"
vocab_orig_path = f"{tmp_dir}/spiece.model"
vocab_short_path = f"{tmp_dir}/spiece-short.model"
if True:  # set to False to skip this after running once, to speed things up during tune-up
    # HACK: need the sentencepiece source to get sentencepiece_model_pb2, as it doesn't get installed
    sys.path.append("../sentencepiece/python/src/sentencepiece")
    import sentencepiece_model_pb2 as model

    tokenizer_orig = AlbertTokenizerFast.from_pretrained(mname_orig)
    tokenizer_orig.save_pretrained(tmp_dir)
    with open(vocab_orig_path, "rb") as f:
        data = f.read()
    # adapted from https://blog.ceshine.net/post/trim-down-sentencepiece-vocabulary/
    m = model.ModelProto()
    m.ParseFromString(data)
    print(f"Shrinking vocab from original {len(m.pieces)} dict items")
    # the special tokens sit at the front of the pieces list, so popping from
    # the end keeps them while dropping the trailing pieces
    for i in range(len(m.pieces) - vocab_keep_items):
        _ = m.pieces.pop()
    print(f"new dict {len(m.pieces)}")
    with open(vocab_short_path, "wb") as f:
        f.write(m.SerializeToString())
    m = None

tokenizer_fast_tiny = AlbertTokenizerFast(vocab_file=vocab_short_path)


### Config

config_tiny = AlbertConfig.from_pretrained(mname_orig)
print(config_tiny)
# remember to update this to the actual config as each model is different and then shrink the numbers
config_tiny.update(dict(
    # small headroom over the trimmed vocab; resize_token_embeddings below
    # snaps it to the tokenizer's real length anyway
    vocab_size=vocab_keep_items+12,
    embedding_size=64,
    hidden_size=256,
    intermediate_size=128,
    max_position_embeddings=64,
    num_attention_heads=2,
    num_hidden_groups=1,
    num_hidden_layers=2,
))
print("New config", config_tiny)

### Model

model_tiny = AlbertForMaskedLM(config_tiny)
print(f"{mname_tiny}: num of params {model_tiny.num_parameters()}")
model_tiny.resize_token_embeddings(len(tokenizer_fast_tiny))

# Test
inputs = tokenizer_fast_tiny("The capital of France is [MASK].", return_tensors="pt")
outputs = model_tiny(**inputs)
print("Test with normal tokenizer:", len(outputs.logits[0]))

# Save
model_tiny.half()  # makes it smaller
model_tiny.save_pretrained(".")
tokenizer_fast_tiny.save_pretrained(".")

# print(model_tiny)

readme = "README.md"
if not os.path.exists(readme):
    with open(readme, "w") as f:
        f.write(f"This is a {mname_tiny} random model to be used for basic testing.\n")

print(f"Generated {mname_tiny}")
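One subtlety in the script: the config is first set to vocab_size = vocab_keep_items + 12, but resize_token_embeddings(len(tokenizer_fast_tiny)) then shrinks the embedding matrix to the tokenizer's actual length and updates the config to match, which is why config.json above records vocab_size 5000. A sketch for verifying this after running the script (assumes the repo root as the working directory):

from transformers import AlbertForMaskedLM, AlbertTokenizerFast

tok = AlbertTokenizerFast.from_pretrained(".")
model = AlbertForMaskedLM.from_pretrained(".")
print(len(tok), model.config.vocab_size)  # expected: 5000 5000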
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a9a411609d5818e28a2070e85ffcb65a482bfbe0d434024622d54a318fa53fa8
size 1396878
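These three lines are a Git LFS pointer (spec version, sha256 oid, byte size), not the weights themselves; with git-lfs installed, running git lfs pull after cloning fetches the actual ~1.4 MB binary.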
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
spiece.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eedf777f037d8bc9ca0115cca50c7d18569292613c3a23a861fc5a6eff89b961
size 321482
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"do_lower_case": true, "remove_space": true, "keep_accents": false, "bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sp_model_kwargs": {}, "tokenizer_class": "AlbertTokenizer"}