import os import subprocess from huggingface_hub import HfApi, upload_folder import gradio as gr from huggingface_hub import whoami, list_models def error_str(error, title="Error"): return f"""#### {title} {error}""" if error else "" def url_to_model_id(model_id_str): return model_id_str.split("/")[-2] + "/" + model_id_str.split("/")[-1] if model_id_str.startswith("https://huggingface.co/") else model_id_str def has_diffusion_model(model_id, token): api = HfApi(token=token) return any([f.endswith("diffusion_pytorch_model.bin") for f in api.list_repo_files(repo_id=model_id)]) def get_my_model_names(token): try: author = whoami(token=token) model_infos = list_models(author=author["name"], use_auth_token=token) model_names = [] for model_info in model_infos: model_id = model_info.modelId if has_diffusion_model(model_id, token): model_names.append(model_id) # if not model_names: # return [], Exception("No diffusion models found in your account.") return model_names, None except Exception as e: return [], e def on_token_change(token): if token: model_names, error = get_my_model_names(token) return gr.update(visible=not error), gr.update(choices=model_names, label="Select a model:"), error_str(error) else: return gr.update(visible=False), gr.update(choices=[], label="Select a model:"), None def on_load_model(user_model_id, other_model_id, token): if not user_model_id and not other_model_id: return None, None, None, None, gr.update(value=error_str("Please enter a model ID.")) try: model_id = url_to_model_id(other_model_id) if other_model_id else user_model_id original_model_id = model_id if not has_diffusion_model(model_id, token): return None, None, None, None, gr.update(value=error_str("There are no diffusion weights in the model you selected.")) user = whoami(token=token) model_id = user["name"] + "/" + model_id.split("/")[-1] title = " ".join([w.capitalize() for w in model_id.split("/")[-1].replace("-", " ").replace("_", " ").split(" ")]) description = f"""Demo for {title} Stable Diffusion model.
Add the following tokens to your prompts for the model to work properly: $prefix.""" return gr.update(visible=True), gr.update(value=model_id), gr.update(value=title), gr.update(value=description), None except Exception as e: return None, None, None, None, gr.update(value=error_str(e)) def create_and_push(space_type, other_model_name, radio_model_names, model_id, title, description, prefix, update, token): try: # 1. Create the new space api = HfApi(token=token) repo_url = api.create_repo( repo_id=model_id, exist_ok=update, repo_type="space", space_sdk="gradio", ) # 2. Replace the name, title, and description in the template with open("template/app_simple.py" if space_type == "Simple" else "template/app_advanced.py", "r") as f: app = f.read() app = app.replace("$model_id", url_to_model_id(other_model_name) if other_model_name else radio_model_names) app = app.replace("$title", title) app = app.replace("$description", description) app = app.replace("$prefix", prefix) app = app.replace("$space_id", whoami(token=token)["name"] + "/" + model_id.split("/")[-1]) # 3. save the new app.py file with open("app.py", "w") as f: f.write(app) # 4. Upload the new app.py to the space api.upload_file( path_or_fileobj="app.py", path_in_repo="app.py", repo_id=model_id, token=token, repo_type="space", ) # 5. Upload template/requirements.txt to the space if space_type == "Advanced": api.upload_file( path_or_fileobj="template/requirements.txt", path_in_repo="requirements.txt", repo_id=model_id, token=token, repo_type="space", ) # 5. Delete the app.py file os.remove("app.py") return f"""Successfully created space at: {repo_url}""" except Exception as e: return error_str(e) DESCRIPTION = """### Create a gradio space for your Diffusers🧨 model With this space, you can easily create a gradio demo for your Diffusers model and share it with the community. """ with gr.Blocks() as demo: gr.Markdown(DESCRIPTION) with gr.Row(): with gr.Column(scale=11): with gr.Column(): gr.Markdown("#### 1. Choose a model") input_token = gr.Textbox( max_lines=1, label="Enter your Hugging Face token", placeholder="WRITE permission is required!", ) gr.Markdown("You can get a token [here](https://huggingface.co/settings/tokens)") with gr.Group(visible=False) as group_model: radio_model_names = gr.Radio(label="Your models:") other_model_name = gr.Textbox(label="Other model:", placeholder="URL or model id, e.g. username/model_name") btn_load = gr.Button(value="Load model") with gr.Column(scale=10): with gr.Column(visible=False) as group_create: gr.Markdown("#### 2. Enter details and create the space") name = gr.Textbox(label="Name", placeholder="e.g. diffusers-demo") title = gr.Textbox(label="Title", placeholder="e.g. Diffusers Demo") description = gr.Textbox(label="Description", placeholder="e.g. Demo for my awesome Diffusers model", lines=5) prefix = gr.Textbox(label="Prefix tokens", placeholder="Tokens that are required to be present in the prompt, e.g. `rick and morty style`") gr.Markdown("""#### Choose space type - **Simple** - Runs on GPU using Hugging Face inference API, but you cannot control image generation parameters. - **Advanced** - Runs on CPU by default, with the option to upgrade to GPU. You can control image generation parameters: guidance, number of steps, image size, etc. Also supports **image-to-image** generation.""") space_type =gr.Radio(label="Space type", choices=["Simple", "Advanced"], value="Simple") update = gr.Checkbox(label="Update the space if it already exists?") brn_create = gr.Button("Create the space") error_output = gr.Markdown(label="Output") input_token.change( fn=on_token_change, inputs=input_token, outputs=[group_model, radio_model_names, error_output], queue=False, scroll_to_output=True) btn_load.click( fn=on_load_model, inputs=[radio_model_names, other_model_name, input_token], outputs=[group_create, name, title, description, error_output], queue=False, scroll_to_output=True) brn_create.click( fn=create_and_push, inputs=[space_type, other_model_name, radio_model_names, name, title, description, prefix, update, input_token], outputs=[error_output], scroll_to_output=True ) # gr.Markdown("""""") gr.HTML("""

Space by: Twitter Follow


Buy Me A Coffee

visitors

""") demo.queue() demo.launch(debug=True)