import os
import sys
from torch._C import Value
from transformers import BertModel, BertTokenizer, BertForMaskedLM

def save_pretrained_model(cls, model_card, save_root, suffix):
    """Download a pretrained model via ``cls.from_pretrained`` and cache it on disk.

    The model is saved under ``<save_root>/<model_card>[-<suffix>]/model``.
    If that directory already contains any files, it is treated as a valid
    cache and the download is skipped.

    Args:
        cls: Class exposing ``from_pretrained(model_card)`` whose instances
            expose ``save_pretrained(path)`` (e.g. a transformers model class).
        model_card: Hub identifier of the pretrained model.
        save_root: Root directory under which all artifacts are cached.
        suffix: Optional folder-name suffix to disambiguate variants of the
            same model card; ``None`` for no suffix.
    """
    folder_name = model_card if suffix is None else f"{model_card}-{suffix}"
    save_path = os.path.join(save_root, folder_name, "model")
    # exist_ok=True avoids the race between a separate isdir check and makedirs.
    os.makedirs(save_path, exist_ok=True)
    if os.listdir(save_path):
        print(save_path + " has cached files. Return.")
        return
    model = cls.from_pretrained(model_card)
    model.save_pretrained(save_path)


def save_pretrained_tokenizer(model_card, save_root, suffix, tokenizer_cls=None):
    """Download a pretrained tokenizer and cache it on disk.

    The tokenizer is saved under ``<save_root>/<model_card>[-<suffix>]/tokenizer``.
    If that directory already contains any files, it is treated as a valid
    cache and the download is skipped.

    Args:
        model_card: Hub identifier of the pretrained tokenizer.
        save_root: Root directory under which all artifacts are cached.
        suffix: Optional folder-name suffix to disambiguate variants of the
            same model card; ``None`` for no suffix.
        tokenizer_cls: Class exposing ``from_pretrained``/``save_pretrained``;
            defaults to ``BertTokenizer`` (the original hard-coded choice).
    """
    if tokenizer_cls is None:
        tokenizer_cls = BertTokenizer
    folder_name = model_card if suffix is None else f"{model_card}-{suffix}"
    save_path = os.path.join(save_root, folder_name, "tokenizer")
    # exist_ok=True avoids the race between a separate isdir check and makedirs.
    os.makedirs(save_path, exist_ok=True)
    if os.listdir(save_path):
        print(save_path + " has cached files. Return.")
        return
    tokenizer = tokenizer_cls.from_pretrained(model_card)
    tokenizer.save_pretrained(save_path)

# (model class, model-card identifier, folder-name suffix) triples to cache.
model_cards = [
    [BertModel, "bert-base-uncased", None],
    [BertModel, "bert-base-cased", None],
    [BertForMaskedLM, "bert-base-cased", "masked-lm"],
]


def main():
    """Cache every model/tokenizer pair listed in ``model_cards``.

    Expects exactly one command-line argument: the root directory to
    save artifacts under.

    Raises:
        ValueError: If the save-path argument is missing or extra
            arguments are given.
    """
    if len(sys.argv) != 2:
        raise ValueError("Please provide path to save.")
    save_root = sys.argv[1]
    for cls, card, suffix in model_cards:
        save_pretrained_model(cls, card, save_root, suffix)
        save_pretrained_tokenizer(card, save_root, suffix)


# Guard so importing this module (e.g. for reuse of the helpers above)
# does not validate argv or trigger downloads as a side effect.
if __name__ == "__main__":
    main()