#!/usr/bin/env python
# coding: utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version is derived from https://huggingface.co/hf-internal-testing/tiny-random-m2m_100
# but with max_position_embeddings=512 so that we don't need to recreate the pos embeddings during forward
#
# It will then be used as "stas/tiny-m2m_100"

# Build
from transformers import M2M100Tokenizer, M2M100Config, M2M100ForConditionalGeneration

mname = "hf-internal-testing/tiny-random-m2m_100"

tokenizer = M2M100Tokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = M2M100Config.from_pretrained(mname)
# replicate the existing tiny model, but with a longer max_position_embeddings
config.update(dict(
    max_position_embeddings=512,
))

tiny_model = M2M100ForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
model_inputs = tokenizer("Making tiny model", return_tensors="pt")
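# M2M100 requires the first generated token to be the target language id,
# which is what forced_bos_token_id supplies here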
gen_tokens = tiny_model.generate(**model_inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))
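# NB: the weights are randomly initialized, so the decoded text is expected to
# be gibberish - this only verifies that generate() runs end to end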

# Save
mname_tiny = "tiny-m2m_100"
tiny_model.half()  # convert to fp16 to halve the checkpoint size
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
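
# Optional sanity check (an addition, not part of the original script):
# reload the saved checkpoint to confirm save_pretrained() round-trips
reloaded = M2M100ForConditionalGeneration.from_pretrained(mname_tiny)
assert reloaded.num_parameters() == tiny_model.num_parameters()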

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-m2m_100
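# (with newer hub tooling, `huggingface-cli upload <repo_id> tiny-m2m_100`
#  from huggingface_hub should do the same - an assumption, not tested here)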