"""
This script was used to convert our legacy presets into the directory format
used by Kaggle.

This script is for reference only.
"""

import os
import re
import shutil

# Redirect the Keras cache to the working directory so preset downloads
# land here (the "models" dir removed later). Must be set BEFORE the
# keras_hub imports below — hence the deliberate E402 suppressions.
os.environ["KERAS_HOME"] = os.getcwd()

from keras_hub import models  # noqa: E402
from keras_hub.src.utils.preset_utils import save_to_preset  # noqa: E402

# GCS bucket that the converted presets are uploaded to.
BUCKET = "keras-hub-kaggle"


def to_snake_case(name):
    """Convert a CamelCase identifier (e.g. a class name) to snake_case.

    Non-word characters are stripped first, then underscores are inserted
    at case boundaries, e.g. ``"DebertaV3Tokenizer"`` -> ``"deberta_v3_tokenizer"``.
    """
    stripped = re.sub(r"\W+", "", name)
    # Split an uppercase run from a following capitalized word ("XLMRoberta"
    # -> "XLM_Roberta"), then split lower-to-upper transitions.
    spaced = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", stripped)
    return re.sub("([a-z])([A-Z])", r"\1_\2", spaced).lower()


def _publish_preset(preset):
    """Upload a locally saved preset directory to GCS and clean up.

    Deletes any existing copy in the bucket, uploads the directory,
    makes every uploaded file world-readable, then removes the local
    copies to bound disk usage.

    Args:
        preset: Preset name; also the local directory to upload.
    """
    # Delete first to clean up any existing version.
    # NOTE: preset names come from our own registry, not user input, so
    # building the shell command with an f-string is acceptable here.
    os.system(f"gsutil rm -rf gs://{BUCKET}/{preset}")
    os.system(f"gsutil cp -r {preset} gs://{BUCKET}/{preset}")
    # Grant public read access to each uploaded object.
    for root, _, files in os.walk(preset):
        for file in files:
            path = os.path.join(BUCKET, root, file)
            os.system(
                f"gcloud storage objects update gs://{path} "
                "--add-acl-grant=entity=AllUsers,role=READER"
            )
    # Clean up local disk usage. "models" is the Keras download cache,
    # redirected into the working directory via KERAS_HOME.
    shutil.rmtree("models")
    shutil.rmtree(preset)


if __name__ == "__main__":
    # (backbone, tokenizer) class pairs whose legacy presets we convert.
    backbone_models = [
        (models.AlbertBackbone, models.AlbertTokenizer),
        (models.BartBackbone, models.BartTokenizer),
        (models.BertBackbone, models.BertTokenizer),
        (models.DebertaV3Backbone, models.DebertaV3Tokenizer),
        (models.DistilBertBackbone, models.DistilBertTokenizer),
        (models.FNetBackbone, models.FNetTokenizer),
        (models.GPT2Backbone, models.GPT2Tokenizer),
        (models.OPTBackbone, models.OPTTokenizer),
        (models.RobertaBackbone, models.RobertaTokenizer),
        (models.T5Backbone, models.T5Tokenizer),
        (models.WhisperBackbone, models.WhisperTokenizer),
        (models.XLMRobertaBackbone, models.XLMRobertaTokenizer),
    ]
    for backbone_cls, tokenizer_cls in backbone_models:
        for preset in backbone_cls.presets:
            # Load from the legacy preset, then re-save locally in the
            # new directory format used by Kaggle.
            backbone = backbone_cls.from_preset(
                preset, name=to_snake_case(backbone_cls.__name__)
            )
            tokenizer = tokenizer_cls.from_preset(
                preset, name=to_snake_case(tokenizer_cls.__name__)
            )
            save_to_preset(
                backbone,
                preset,
                config_filename="config.json",
            )
            save_to_preset(
                tokenizer,
                preset,
                config_filename="tokenizer.json",
            )
            _publish_preset(preset)

    # Handle our single task model.
    preset = "bert_tiny_en_uncased_sst2"
    task = models.BertTextClassifier.from_preset(
        preset, name=to_snake_case(models.BertTextClassifier.__name__)
    )
    tokenizer = models.BertTokenizer.from_preset(
        preset, name=to_snake_case(models.BertTokenizer.__name__)
    )
    save_to_preset(
        task,
        preset,
        config_filename="config.json",
    )
    save_to_preset(
        tokenizer,
        preset,
        config_filename="tokenizer.json",
    )
    _publish_preset(preset)
