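"""Gradio app for the Diffusers Space creator.

Lets a user pick one of their Diffusers models on the Hugging Face Hub and
creates a demo Space for it (simple or advanced template), optionally opening
a pull request that adds a Space badge to the model card.
"""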
import os
import subprocess
from huggingface_hub import HfApi, upload_folder, whoami, list_models, hf_hub_download, upload_file
import gradio as gr
import requests
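
# Render an error as a small Markdown block with a title; empty string if there is no error.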
def error_str(error, title="Error"):
    return f"""#### {title}
{error}""" if error else ""
def url_to_model_id(model_id_str):
    return model_id_str.split("/")[-2] + "/" + model_id_str.split("/")[-1] if model_id_str.startswith("https://huggingface.co/") else model_id_str
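
# Check whether the repo contains diffusion weights (a diffusion_pytorch_model.bin file).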
def has_diffusion_model(model_id, token):
    api = HfApi(token=token)
    return any(f.endswith("diffusion_pytorch_model.bin") for f in api.list_repo_files(repo_id=model_id))
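
# List the ids of the authenticated user's models that contain diffusion weights.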
def get_my_model_names(token):
    try:
        author = whoami(token=token)
        model_infos = list_models(author=author["name"], use_auth_token=token)

        model_names = []
        for model_info in model_infos:
            model_id = model_info.modelId
            if has_diffusion_model(model_id, token):
                model_names.append(model_id)

        # if not model_names:
        #     return [], Exception("No diffusion models found in your account.")

        return model_names, None

    except Exception as e:
        return [], e
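
# When the token changes, reveal the model picker and populate it with the user's diffusion models.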
def on_token_change(token):
    if token:
        model_names, error = get_my_model_names(token)
        return gr.update(visible=not error), gr.update(choices=model_names, label="Select a model:"), error_str(error)
    else:
        return gr.update(visible=False), gr.update(choices=[], label="Select a model:"), None
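
# Resolve the selected or pasted model id, validate it, and prefill the Space name, title, and description.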
def on_load_model(user_model_id, other_model_id, token):
    if not user_model_id and not other_model_id:
        return None, None, None, None, gr.update(value=error_str("Please enter a model ID.")), None

    try:
        model_id = url_to_model_id(other_model_id) if other_model_id else user_model_id
        original_model_id = model_id

        if not has_diffusion_model(model_id, token):
            return None, None, None, None, gr.update(value=error_str("There are no diffusion weights in the model you selected.")), None

        user = whoami(token=token)
        model_id = user["name"] + "/" + model_id.split("/")[-1]
        title = " ".join([w.capitalize() for w in model_id.split("/")[-1].replace("-", " ").replace("_", " ").split(" ")])
        description = f"""Demo for <a href="https://huggingface.co/{original_model_id}">{title}</a> Stable Diffusion model."""

        return gr.update(visible=True), gr.update(value=model_id), gr.update(value=title), gr.update(value=description), None, original_model_id

    except Exception as e:
        return None, None, None, None, gr.update(value=error_str(e)), None
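
# Open a PR on the model repo that adds an "Open In Spaces" badge to its README.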
def add_space_badge_to_model_card(model_id, token):
    readme_file = 'README.md'
    model_card = hf_hub_download(repo_id=model_id, filename=readme_file, token=token)

    with open(model_card, "r") as f:
        content = f.read()

    content = content.split("---\n")
    content[2] = "[![Open In Spaces](https://camo.githubusercontent.com/00380c35e60d6b04be65d3d94a58332be5cc93779f630bcdfc18ab9a3a7d3388/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f25463025394625413425393725323048756767696e67253230466163652d5370616365732d626c7565)](https://huggingface.co/spaces/" + model_id + ")\n" + content[2]
    content = "---\n".join(content)

    with open(readme_file, "w") as f:
        f.write(content)

    upload_file(
        path_or_fileobj=readme_file,
        path_in_repo=readme_file,
        repo_id=model_id,
        token=token,
        create_pr=True,
        commit_message="Add Space badge to model card",
    )

    os.remove(readme_file)
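
# Create the demo Space, configure its secret and hardware, fill in the app template,
# upload the generated files, and optionally open the badge PR on the model card.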
def create_and_push(space_type, hardware, private_space, add_badge, other_model_name, radio_model_names, model_id, title, description, prefix, update, token, original_model_id):
    try:
        # 1. Create the new space
        api = HfApi(token=token)
        repo_url = api.create_repo(
            repo_id=model_id,
            exist_ok=update,
            repo_type="space",
            space_sdk="gradio",
            private=private_space
        )
        api_url = f'https://huggingface.co/api/spaces/{model_id}'
        headers = {"Authorization": f"Bearer {token}"}
        # add HUGGING_FACE_HUB_TOKEN secret to new space
        requests.post(f'{api_url}/secrets', json={"key": "HUGGING_FACE_HUB_TOKEN", "value": token}, headers=headers)
        # set new Space Hardware flavor
        requests.post(f'{api_url}/hardware', json={'flavor': hardware}, headers=headers)

        # 2. Replace the name, title, and description in the template
        with open("template/app_simple.py" if space_type == "Simple" else "template/app_advanced.py", "r") as f:
            app = f.read()
        app = app.replace("$model_id", url_to_model_id(other_model_name) if other_model_name else radio_model_names)
        app = app.replace("$title", title)
        app = app.replace("$description", description)
        app = app.replace("$prefix", prefix)
        app = app.replace("$space_id", whoami(token=token)["name"] + "/" + model_id.split("/")[-1])

        # 3. Save the new app.py file
        with open("app.py", "w") as f:
            f.write(app)

        # 4. Upload the new app.py to the space
        api.upload_file(
            path_or_fileobj="app.py",
            path_in_repo="app.py",
            repo_id=model_id,
            token=token,
            repo_type="space",
        )

        # 5. Upload template/requirements.txt to the space
        if space_type == "Advanced":
            api.upload_file(
                path_or_fileobj="template/requirements.txt",
                path_in_repo="requirements.txt",
                repo_id=model_id,
                token=token,
                repo_type="space",
            )

        # 6. Delete the local app.py file
        os.remove("app.py")

        # 7. Add the Space badge to the model card
        if add_badge:
            add_space_badge_to_model_card(original_model_id, token)

        message = f"""Successfully created space at: <a href="{repo_url}" target="_blank">{repo_url}</a> <br>"""
        if add_badge:
            message += f"""
Opened a PR to add the space badge: <a href="https://huggingface.co/{original_model_id}/discussions" target="_blank">https://huggingface.co/{original_model_id}/discussions</a>"""
        return message

    except Exception as e:
        return error_str(e)
DESCRIPTION = """### Create a gradio space for your Diffusers🧨 model
With this space, you can easily create a gradio demo for your Diffusers model and share it with the community.
"""
# <br>
# 1️⃣ Make sure you have created your hugging face account<br>
# 2️⃣ Generate a token here with write access<br>
# 3️⃣ Choose a stable diffusion base model, there are thousands of them here<br>
# 4️⃣ Choose Space type<br>
# 5️⃣ Choose the new Space Hardware<br>
# It is done.
# """
with gr.Blocks() as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Row():
        with gr.Column(scale=11):
            with gr.Column():
                gr.Markdown("#### 1. Choose a model")
                input_token = gr.Textbox(
                    max_lines=1,
                    type="password",
                    label="Enter your Hugging Face token",
                    placeholder="WRITE permission is required!",
                )
                gr.Markdown("You can get a token [here](https://huggingface.co/settings/tokens)")
                with gr.Group(visible=False) as group_model:
                    radio_model_names = gr.Radio(label="Your models:")
                    other_model_name = gr.Textbox(label="Other model:", placeholder="URL or model id, e.g. username/model_name")
                    btn_load = gr.Button(value="Load model")

        with gr.Column(scale=10):
            with gr.Column(visible=False) as group_create:
                gr.Markdown("#### 2. Enter details and create the space")
                name = gr.Textbox(label="Name", placeholder="e.g. diffusers-demo")
                title = gr.Textbox(label="Title", placeholder="e.g. Diffusers Demo")
                description = gr.Textbox(label="Description", placeholder="e.g. Demo for my awesome Diffusers model", lines=5)
                original_model_id = gr.Textbox(visible=False)
                prefix = gr.Textbox(label="Prefix tokens", placeholder="Tokens that are required to be present in the prompt, e.g. `rick and morty style`")
                gr.Markdown("""#### Choose space type
- **Simple** - Runs on GPU using the Hugging Face Inference API, but you cannot control image generation parameters.
- **Advanced** - Runs on CPU by default, with the option to upgrade to GPU. You can control image generation parameters: guidance, number of steps, image size, etc. Also supports **image-to-image** generation.""")
                space_type = gr.Radio(label="Space type", choices=["Simple", "Advanced"], value="Simple")
                update = gr.Checkbox(label="Update the space if it already exists?")
                private_space = gr.Checkbox(label="Private Space")
                add_badge = gr.Checkbox(label="Add Space badge to the model card (will open a PR)")
                gr.Markdown("Choose the new Space hardware <small>([check the pricing page](https://huggingface.co/pricing#spaces); you need a payment method to upgrade your Space hardware)</small>")
                hardware = gr.Dropdown(["cpu-basic", "cpu-upgrade", "t4-small", "t4-medium", "a10g-small", "a10g-large"], value="cpu-basic", label="Space Hardware")
                btn_create = gr.Button("Create the space")

    error_output = gr.Markdown(label="Output")

    input_token.change(
        fn=on_token_change,
        inputs=input_token,
        outputs=[group_model, radio_model_names, error_output],
        queue=False,
        scroll_to_output=True)

    btn_load.click(
        fn=on_load_model,
        inputs=[radio_model_names, other_model_name, input_token],
        outputs=[group_create, name, title, description, error_output, original_model_id],
        queue=False,
        scroll_to_output=True)

    btn_create.click(
        fn=create_and_push,
        inputs=[space_type, hardware, private_space, add_badge, other_model_name, radio_model_names, name, title, description, prefix, update, input_token, original_model_id],
        outputs=[error_output],
        scroll_to_output=True
    )

    # gr.Markdown("""<img src="https://raw.githubusercontent.com/huggingface/diffusers/main/docs/source/imgs/diffusers_library.jpg" width="150"/>""")

    gr.HTML("""
    <div style="border-top: 1px solid #303030;">
      <br>
      <p>Space by: <a href="https://twitter.com/hahahahohohe"><img src="https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social" alt="Twitter Follow"></a></p><br>
      <a href="https://www.buymeacoffee.com/anzorq" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 45px !important;width: 162px !important;" ></a><br><br>
      <p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.sd-space-creator" alt="visitors"></p>
    </div>
    """)

demo.queue()
demo.launch(debug=True)