anzorq committed
Commit 2f52940
1 Parent(s): e71c87e

Update app.py

Files changed (1)
1. app.py (+55 -16)
app.py CHANGED
@@ -1,9 +1,8 @@
 import os
 import subprocess
-from huggingface_hub import HfApi, upload_folder
+from huggingface_hub import HfApi, upload_folder, whoami, list_models, hf_hub_download, upload_file
 import gradio as gr
 import requests
-from huggingface_hub import whoami, list_models


 def error_str(error, title="Error"):
@@ -69,7 +68,33 @@ def on_load_model(user_model_id, other_model_id, token):
     except Exception as e:
         return None, None, None, None, gr.update(value=error_str(e))

-def create_and_push(space_type, hardware, private_space, other_model_name, radio_model_names, model_id, title, description, prefix, update, token):
+def add_space_badge_to_model_card(model_id, token):
+
+    readme_file = 'README.md'
+    model_card = hf_hub_download(repo_id=model_id, filename=readme_file, token=token)
+
+    with open(model_card, "r") as f:
+        content = f.read()
+
+    content = content.split("---\n")
+    content[2] = "[![Open In Spaces](https://camo.githubusercontent.com/00380c35e60d6b04be65d3d94a58332be5cc93779f630bcdfc18ab9a3a7d3388/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f25463025394625413425393725323048756767696e67253230466163652d5370616365732d626c7565)](https://huggingface.co/spaces/" + model_id + ")\n" + content[2]
+    content = "---\n".join(content)
+
+    with open(readme_file, "w") as f:
+        f.write(content)
+
+    upload_file(
+        path_or_fileobj=readme_file,
+        path_in_repo=readme_file,
+        repo_id=model_id,
+        token=token,
+        create_pr=True,
+        commit_message="Add Space badge to model card",
+    )
+
+    os.remove(readme_file)
+
+def create_and_push(space_type, hardware, private_space, add_badge, other_model_name, radio_model_names, model_id, title, description, prefix, update, token):

     try:

@@ -124,25 +149,34 @@ def create_and_push(space_type, hardware, private_space, other_model_name, radio
         # 5. Delete the app.py file
         os.remove("app.py")

-        return f"""Successfully created space at: <a href="{repo_url}" target="_blank">{repo_url}</a>"""
+        # 6. Add the Space badge to the model card
+        if add_badge:
+            add_space_badge_to_model_card(model_id, token)
+
+        return f"""
+Successfully created space at: <a href="{repo_url}" target="_blank">{repo_url}</a> <br>
+Opened a PR to add the space badge at: <a href="https://huggingface.co/{model_id}" target="_blank">https://huggingface.co/{model_id}</a>
+"""

     except Exception as e:
         return error_str(e)


 DESCRIPTION = """### Create a gradio space for your Diffusers🧨 model
-With this space, you can easily create a gradio demo for your Diffusers model and share it with the community.<br>
-1️⃣ Make sure you have created your hugging face account<br>
-2️⃣ Generate a token here with write access<br>
-3️⃣ Choose a stable diffusion base model, there are thousands of them here<br>
-4️⃣ Choose Space type<br>
-5️⃣ Choose the new Space Hardware<br>
-It is done.
+With this space, you can easily create a gradio demo for your Diffusers model and share it with the community.
 """
+# <br>
+# 1️⃣ Make sure you have created your hugging face account<br>
+# 2️⃣ Generate a token here with write access<br>
+# 3️⃣ Choose a stable diffusion base model, there are thousands of them here<br>
+# 4️⃣ Choose Space type<br>
+# 5️⃣ Choose the new Space Hardware<br>
+# It is done.
+# """

 with gr.Blocks() as demo:

-    # gr.Markdown(DESCRIPTION)
+    gr.Markdown(DESCRIPTION)
     with gr.Row():

         with gr.Column(scale=11):
@@ -167,15 +201,20 @@ with gr.Blocks() as demo:
             title = gr.Textbox(label="Title", placeholder="e.g. Diffusers Demo")
             description = gr.Textbox(label="Description", placeholder="e.g. Demo for my awesome Diffusers model", lines=5)
             prefix = gr.Textbox(label="Prefix tokens", placeholder="Tokens that are required to be present in the prompt, e.g. `rick and morty style`")
+
             gr.Markdown("""#### Choose space type
             - **Simple** - Runs on GPU using Hugging Face inference API, but you cannot control image generation parameters.
             - **Advanced** - Runs on CPU by default, with the option to upgrade to GPU. You can control image generation parameters: guidance, number of steps, image size, etc. Also supports **image-to-image** generation.""")
             space_type =gr.Radio(label="Space type", choices=["Simple", "Advanced"], value="Simple")
+
             update = gr.Checkbox(label="Update the space if it already exists?")
             private_space = gr.Checkbox(label="Private Space")
+            add_badge = gr.Checkbox(label="Add Space badge to the model card (will open a PR)")
+
             gr.Markdown("Choose the new Space Hardware <small>[check pricing page](https://huggingface.co/pricing#spaces), you need payment method to upgrade your Space hardware</small>")
             hardware = gr.Dropdown(["cpu-basic","cpu-upgrade","t4-small","t4-medium","a10g-small","a10g-large"],value = "cpu-basic", label="Space Hardware")
-            brn_create = gr.Button("Create the space")
+
+            btn_create = gr.Button("Create the space")

             error_output = gr.Markdown(label="Output")

@@ -194,9 +233,9 @@ with gr.Blocks() as demo:
         queue=False,
         scroll_to_output=True)

-    brn_create.click(
+    btn_create.click(
         fn=create_and_push,
-        inputs=[space_type, hardware, private_space, other_model_name, radio_model_names, name, title, description, prefix, update, input_token],
+        inputs=[space_type, hardware, private_space, add_badge, other_model_name, radio_model_names, name, title, description, prefix, update, input_token],
         outputs=[error_output],
         scroll_to_output=True
     )
@@ -212,4 +251,4 @@ with gr.Blocks() as demo:
     """)

 demo.queue()
-demo.launch(debug=True)
+demo.launch(debug=True)
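
Note on the new badge step: add_space_badge_to_model_card assumes the model card already starts with YAML front matter delimited by two "---" lines, so content.split("---\n") yields ['', '<front matter>', '<card body>'] and the badge markdown is prepended to the body; a README without front matter would make content[2] raise an IndexError. Below is a minimal standalone sketch of just that split-and-insert step, not the commit's code: the model id, README text, and shields.io badge URL are illustrative placeholders (the commit uses a proxied image URL).

# Standalone sketch of the front-matter split used by add_space_badge_to_model_card.
# The model id, README text, and badge image URL are placeholders for illustration.

model_id = "user/my-model"
badge = (
    "[![Open In Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)]"
    f"(https://huggingface.co/spaces/{model_id})\n"
)

readme = """---
license: creativeml-openrail-m
tags:
- stable-diffusion
---
Model description goes here.
"""

parts = readme.split("---\n")   # ['', '<yaml front matter>', '<card body>']
parts[2] = badge + parts[2]     # prepend the badge to the card body
readme = "---\n".join(parts)    # reassemble; the front matter is unchanged

print(readme)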