#!/usr/bin/env python
# coding: utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script creates a tiny random model
#
# It will then be used as "hf-internal-testing/tiny-xlm-roberta"
# ***To build from scratch***
#
# 1. clone sentencepiece into a parent dir
# git clone https://github.com/google/sentencepiece
#
# 2. create a new repo at https://huggingface.co/new
# make sure to choose 'hf-internal-testing' as the Owner
#
# 3. clone
# git clone https://huggingface.co/hf-internal-testing/tiny-xlm-roberta
# cd tiny-xlm-roberta
#
# 4. start with some pre-existing script from one of the https://huggingface.co/hf-internal-testing/ tiny model repos, e.g.
# wget https://huggingface.co/hf-internal-testing/tiny-albert/raw/main/make-tiny-albert.py
# chmod a+x ./make-tiny-albert.py
# mv ./make-tiny-albert.py ./make-tiny-xlm-roberta.py
#
# 5. automatically rename things from the old names to new ones
# perl -pi -e 's|Albert|XLMRoberta|g' make-tiny-xlm-roberta.py
# perl -pi -e 's|albert|xlm-roberta|g' make-tiny-xlm-roberta.py
#
# 6. edit this script and re-run it, fixing things up as needed
# ./make-tiny-xlm-roberta.py
#
# 7. add/commit/push
# git add *
# git commit -m "new tiny model"
# git push
# ***To update***
#
# 1. clone the existing repo
# git clone https://huggingface.co/hf-internal-testing/tiny-xlm-roberta
# cd tiny-xlm-roberta
#
# 2. edit this script and re-run it after making whatever changes are needed
# ./make-tiny-xlm-roberta.py
#
# 3. commit/push
# git commit -am "new tiny model"
# git push
import sys
import os
# workaround for fast tokenizer protobuf issue, and it's much faster too!
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
from transformers import XLMRobertaTokenizerFast, XLMRobertaConfig, XLMRobertaForCausalLM
mname_orig = "xlm-roberta-base"
mname_tiny = "tiny-xlm-roberta"
### Tokenizer
# Shrink the orig vocab to keep things small
vocab_keep_items = 5000
tmp_dir = f"/tmp/{mname_tiny}"
vocab_orig_path = f"{tmp_dir}/sentencepiece.bpe.model"
vocab_short_path = f"{tmp_dir}/spiece-short.model"
if 1: # set to 0 to skip this after running once to speed things up during tune up
    # HACK: need the sentencepiece source to get sentencepiece_model_pb2, as it doesn't get installed
    sys.path.append("../sentencepiece/python/src/sentencepiece")
    import sentencepiece_model_pb2 as model
    tokenizer_orig = XLMRobertaTokenizerFast.from_pretrained(mname_orig)
    tokenizer_orig.save_pretrained(tmp_dir)
    with open(vocab_orig_path, 'rb') as f: data = f.read()
    # adapted from https://blog.ceshine.net/post/trim-down-sentencepiece-vocabulary/
    m = model.ModelProto()
    m.ParseFromString(data)
    print(f"Shrinking vocab from original {len(m.pieces)} dict items")
    # pieces are ordered roughly by frequency, so popping from the end drops the rarest ones
    for i in range(len(m.pieces) - vocab_keep_items): _ = m.pieces.pop()
    print(f"new dict {len(m.pieces)}")
    with open(vocab_short_path, 'wb') as f: f.write(m.SerializeToString())
    m = None
tokenizer_fast_tiny = XLMRobertaTokenizerFast(vocab_file=vocab_short_path)
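# a quick optional sanity check: the fast tokenizer is built from the shrunk
# spm model, so its vocab should be close to vocab_keep_items plus a handful of
# special tokens; the exact offset is an assumption, hence a print, not an assert
print(f"tiny tokenizer: vocab_size={tokenizer_fast_tiny.vocab_size}, len={len(tokenizer_fast_tiny)}")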
### Config
config_tiny = XLMRobertaConfig.from_pretrained(mname_orig)
print(config_tiny)
# remember to update this to the actual config as each model is different and then shrink the numbers
config_tiny.update(dict(
    vocab_size=vocab_keep_items+12,
    d_ff=256,
    d_kv=8,
    d_model=64,
    hidden_size=256,
    intermediate_size=256,
    max_position_embeddings=64,
    num_attention_heads=2,
    num_decoder_layers=2,
    num_heads=2,
    num_hidden_layers=2,
    num_layers=2,
    relative_attention_num_buckets=32,
))
print("New config", config_tiny)
### Model
model_tiny = XLMRobertaForCausalLM(config_tiny)
print(f"{mname_tiny}: num of params {model_tiny.num_parameters()}")
model_tiny.resize_token_embeddings(len(tokenizer_fast_tiny))
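# optional sanity check: after the resize, the input embedding rows should match
# the tiny tokenizer exactly
assert model_tiny.get_input_embeddings().weight.shape[0] == len(tokenizer_fast_tiny)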
# Test
inputs = tokenizer_fast_tiny("hello", return_tensors="pt")
outputs = model_tiny(**inputs)
print("Test with fast tokenizer:", len(outputs.logits[0]))
# Save
model_tiny.half() # makes it smaller
model_tiny.save_pretrained(".")
tokenizer_fast_tiny.save_pretrained(".")
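# optional: eyeball what was written out before committing
print("Saved files:", sorted(f for f in os.listdir(".") if not f.startswith(".")))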
readme = "README.md"
if not os.path.exists(readme):
    with open(readme, "w") as f:
        f.write(f"This is a {mname_tiny} random model to be used for basic testing.\n")
print(f"Generated {mname_tiny}")