#!/usr/bin/env python
# coding: utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script creates a tiny random model
#
# It will then be used as "hf-internal-testing/tiny-electra"
#
# ***To build from scratch***
#
# 1. clone sentencepiece into a parent dir (only needed for sentencepiece-based
#    tokenizers; ELECTRA uses a WordPiece tokenizer, so this script doesn't use it)
# git clone https://github.com/google/sentencepiece
#
# 2. create a new repo at https://huggingface.co/new
# make sure to choose 'hf-internal-testing' as the Owner
#
# 3. clone
# git clone https://huggingface.co/hf-internal-testing/tiny-electra
# cd tiny-electra
#
# 4. start with some pre-existing script from one of the https://huggingface.co/hf-internal-testing/ tiny model repos, e.g.
# wget https://huggingface.co/hf-internal-testing/tiny-xlm-roberta/raw/main/make-tiny-xlm-roberta.py
# mv ./make-tiny-xlm-roberta.py ./make-tiny-electra.py
# chmod a+x ./make-tiny-electra.py
#
# 5. automatically rename things from the old names to new ones
# perl -pi -e 's|XLMRoberta|Electra|g' make-tiny-electra.py
# perl -pi -e 's|xlm-roberta|electra|g' make-tiny-electra.py
#
# 6. edit and re-run this script while fixing it up
# ./make-tiny-electra.py
#
# 7. add/commit/push
# git add *
# git commit -m "new tiny model"
# git push
#
# ***To update***
#
# 1. clone the existing repo
# git clone https://huggingface.co/hf-internal-testing/tiny-electra
# cd tiny-electra
#
# 2. edit and re-run this script after doing whatever changes are needed
# ./make-tiny-electra.py
#
# 3. commit/push
# git commit -m "new tiny model"
# git push
import os
import subprocess

from transformers import ElectraConfig, ElectraForMaskedLM, ElectraTokenizerFast

mname_orig = "google/electra-small-generator"
mname_tiny = "tiny-electra"
### Tokenizer
# Shrink the orig vocab to keep things small (just enough to tokenize any word, so letters+symbols)
# ElectraTokenizerFast is fully defined by a tokenizer.json, which contains the vocab and the ids, so we just need to truncate it wisely
tokenizer_fast = ElectraTokenizerFast.from_pretrained(mname_orig)
vocab_keep_items = 5120
tmp_dir = f"/tmp/{mname_tiny}"
tokenizer_fast.save_pretrained(tmp_dir)
# resize tokenizer.json (vocab.txt will be automatically resized on save_pretrained)
# perl -pi -e 's|(5119).*|$1}}}|' tokenizer.json # 0-indexed, so vocab_keep_items-1!
closing_pat = "}}}"
cmd = (f"perl -pi -e s|({vocab_keep_items-1}).*|$1{closing_pat}| {tmp_dir}/tokenizer.json").split()
result = subprocess.run(cmd, capture_output=True, text=True)
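# a small guard added here (not in the original script): fail loudly if the
# perl invocation itself errored, e.g. if perl is not installed
if result.returncode != 0:
    raise RuntimeError(f"failed to truncate tokenizer.json: {result.stderr}")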
# reload with modified tokenizer
tokenizer_fast_tiny = ElectraTokenizerFast.from_pretrained(tmp_dir)
# it seems that ElectraTokenizer is not needed and ElectraTokenizerFast does the job
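# sanity check (an illustrative addition, not in the original script): the
# reloaded tokenizer should report the truncated vocab size
print(f"Truncated vocab size: {len(tokenizer_fast_tiny)} (target {vocab_keep_items})")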
### Config
config_tiny = ElectraConfig.from_pretrained(mname_orig)
print(config_tiny)
# remember to update this to the actual config as each model is different and then shrink the numbers
config_tiny.update(dict(
    embedding_size=64,
    hidden_size=64,
    intermediate_size=64,
    max_position_embeddings=512,
    num_attention_heads=2,
    num_hidden_layers=2,
    vocab_size=vocab_keep_items,
))
print("New config", config_tiny)
### Model
model_tiny = ElectraForMaskedLM(config_tiny)
print(f"{mname_tiny}: num of params {model_tiny.num_parameters()}")
# align the embedding matrix with the actual tokenizer length, which may
# differ slightly from vocab_keep_items after truncation
model_tiny.resize_token_embeddings(len(tokenizer_fast_tiny))
# Test
inputs = tokenizer_fast_tiny("The capital of France is [MASK].", return_tensors="pt")
outputs = model_tiny(**inputs)
print("Test with normal tokenizer:", len(outputs.logits[0]))
# Save
model_tiny.half() # makes it smaller
model_tiny.save_pretrained(".")
tokenizer_fast_tiny.save_pretrained(".")
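# optional round-trip check (a hedged addition, not in the original script):
# reload the saved model to confirm the artifacts on disk are loadable
# before committing them
_ = ElectraForMaskedLM.from_pretrained(".")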
#print(model_tiny)
readme = "README.md"
if not os.path.exists(readme):
    with open(readme, "w") as f:
        f.write(f"This is a {mname_tiny} random model to be used for basic testing.\n")
print(f"Generated {mname_tiny}")