Spaces:
Build error
Build error
Add upload functionality
Browse files
- app.py  +24 -0
- uploader.py  +21 -0
app.py
CHANGED
@@ -16,6 +16,7 @@ import torch
|
|
16 |
|
17 |
from inference import InferencePipeline
|
18 |
from trainer import Trainer
|
|
|
19 |
|
20 |
TITLE = '# LoRA + StableDiffusion Training UI'
|
21 |
DESCRIPTION = 'This is an unofficial demo for [https://github.com/cloneofsimo/lora](https://github.com/cloneofsimo/lora).'
|
@@ -205,6 +206,27 @@ def create_inference_demo(pipe: InferencePipeline) -> gr.Blocks:
|
|
205 |
return demo
|
206 |
|
207 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
208 |
pipe = InferencePipeline()
|
209 |
trainer = Trainer()
|
210 |
|
@@ -222,6 +244,8 @@ with gr.Blocks(css='style.css') as demo:
|
|
222 |
create_training_demo(trainer, pipe)
|
223 |
with gr.TabItem('Test'):
|
224 |
create_inference_demo(pipe)
|
|
|
|
|
225 |
|
226 |
with gr.Accordion('Usage', open=False):
|
227 |
gr.Markdown(USAGE_INFO)
|
|
|
16 |
|
17 |
from inference import InferencePipeline
|
18 |
from trainer import Trainer
|
19 |
+
from uploader import upload
|
20 |
|
21 |
TITLE = '# LoRA + StableDiffusion Training UI'
|
22 |
DESCRIPTION = 'This is an unofficial demo for [https://github.com/cloneofsimo/lora](https://github.com/cloneofsimo/lora).'
|
|
|
206 |
return demo
|
207 |
|
208 |
|
209 |
+
def create_upload_demo() -> gr.Blocks:
    """Build the "Upload" tab UI.

    Collects a model name and a Hugging Face write token, and wires the
    Upload button to ``uploader.upload``, which pushes the local training
    results to a private model repo.  The result Markdown is updated with
    a success link or the error text.

    Returns:
        The assembled ``gr.Blocks`` component (embedded as a tab by the
        caller; component creation order here defines the layout).
    """
    with gr.Blocks() as demo:
        model_name = gr.Textbox(label='Model Name')
        # NOTE: the token needs *write* scope so create_repo/upload_folder work.
        hf_token = gr.Textbox(
            label='Hugging Face Token (with write permission)')
        upload_button = gr.Button('Upload')
        with gr.Box():
            gr.Markdown('Message')
            # Placeholder that upload() fills via gr.update(value=..., visible=True).
            result = gr.Markdown()
        gr.Markdown('''
            - You can upload your trained model to your private Model repo (i.e. https://huggingface.co/{your_username}/{model_name}).
            - You can find your Hugging Face token [here](https://huggingface.co/settings/tokens).
            ''')

        # upload(model_name, hf_token) returns a gr.update dict applied to `result`.
        upload_button.click(fn=upload,
                            inputs=[model_name, hf_token],
                            outputs=result)

    return demo
|
228 |
+
|
229 |
+
|
230 |
pipe = InferencePipeline()
|
231 |
trainer = Trainer()
|
232 |
|
|
|
244 |
create_training_demo(trainer, pipe)
|
245 |
with gr.TabItem('Test'):
|
246 |
create_inference_demo(pipe)
|
247 |
+
with gr.TabItem('Upload'):
|
248 |
+
create_upload_demo()
|
249 |
|
250 |
with gr.Accordion('Usage', open=False):
|
251 |
gr.Markdown(USAGE_INFO)
|
uploader.py
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from huggingface_hub import HfApi
|
3 |
+
|
4 |
+
|
5 |
+
def upload(model_name: str, hf_token: str) -> dict:
    """Upload the local ``results`` folder to a private Hugging Face model repo.

    Creates ``{username}/{model_name}`` (private) and uploads the ``results``
    directory into it under ``results/``.

    Args:
        model_name: Repo name to create under the token owner's namespace.
        hf_token: Hugging Face access token with write permission.

    Returns:
        A ``gr.update`` dict for the result Markdown component: a success
        message linking to the new repo, or the error text if any step failed.
    """
    api = HfApi(token=hf_token)
    try:
        # whoami() is the first call that actually validates the token; it
        # must be inside the try so a bad/empty token produces an error
        # message in the UI instead of an unhandled exception in the callback.
        user_name = api.whoami()['name']
        model_id = f'{user_name}/{model_name}'
        api.create_repo(model_id, repo_type='model', private=True)
        api.upload_folder(repo_id=model_id,
                          folder_path='results',
                          path_in_repo='results',
                          repo_type='model')
        url = f'https://huggingface.co/{model_id}'
        message = f'Your model was successfully uploaded to [{url}]({url}).'
    except Exception as e:
        # Best-effort error reporting: show the failure text in the UI
        # rather than raising and breaking the Gradio event handler.
        message = str(e)

    return gr.update(value=message, visible=True)
|