Clean up

Files changed:
- app.py (+2 -2)
- app_training.py (+5 -7)
- app_upload.py (+8 -48)
- constants.py (+1 -0)
- trainer.py (+10 -27)
- uploader.py (+57 -41)
app.py CHANGED
@@ -55,7 +55,7 @@ def show_warning(warning_text: str) -> gr.Blocks:
 
 
 pipe = InferencePipeline(HF_TOKEN)
-trainer = Trainer(HF_TOKEN)
+trainer = Trainer()
 
 with gr.Blocks(css='style.css') as demo:
     if IS_SHARED_UI:
@@ -75,7 +75,7 @@ with gr.Blocks(css='style.css') as demo:
             gr.Markdown('''
            - You can use this tab to upload models later if you choose not to upload models in training time or if upload in training time failed.
            ''')
-            create_upload_demo(HF_TOKEN)
+            create_upload_demo()
 
     if not HF_TOKEN:
         show_warning(HF_TOKEN_NOT_SPECIFIED_WARNING)
app_training.py CHANGED
@@ -20,7 +20,6 @@ def create_training_demo(trainer: Trainer,
             lines = f.readlines()
         return ''.join(lines[-10:])
 
-    hf_token = os.getenv('HF_TOKEN')
     with gr.Blocks() as demo:
         with gr.Row():
             with gr.Column():
@@ -48,9 +47,9 @@ def create_training_demo(trainer: Trainer,
                     label='Resolution',
                     visible=False)
 
-
-
-
+                hf_token = gr.Text(label='Hugging Face Write Token',
+                                   placeholder='',
+                                   visible=os.getenv('HF_TOKEN') is None)
                 with gr.Accordion('Advanced settings', open=False):
                     num_training_steps = gr.Number(
                         label='Number of Training Steps',
@@ -150,13 +149,12 @@ def create_training_demo(trainer: Trainer,
                              delete_existing_repo,
                              upload_to,
                              remove_gpu_after_training,
-
+                             hf_token,
                          ])
     return demo
 
 
 if __name__ == '__main__':
-
-    trainer = Trainer(hf_token)
+    trainer = Trainer()
     demo = create_training_demo(trainer)
     demo.queue(api_open=False, max_size=1).launch()
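For context, a minimal, self-contained sketch (not part of this commit) of the token-handling pattern the diff above introduces: a `gr.Text` field that is only shown when the `HF_TOKEN` environment variable is unset, whose value is appended to the click inputs. The `run_stub` callback is hypothetical and stands in for `Trainer.run`.

```python
import os

import gradio as gr


def run_stub(hf_token: str) -> str:
    # Hypothetical stand-in for Trainer.run: prefer the env var, fall back to the textbox value.
    token = os.getenv('HF_TOKEN') or hf_token
    return 'Token available.' if token else 'No token provided.'


with gr.Blocks() as demo:
    # Hidden when HF_TOKEN is already configured on the Space.
    hf_token = gr.Text(label='Hugging Face Write Token',
                       placeholder='',
                       visible=os.getenv('HF_TOKEN') is None)
    run_button = gr.Button('Run')
    output_message = gr.Textbox(label='Output Message')
    run_button.click(fn=run_stub, inputs=[hf_token], outputs=output_message)

if __name__ == '__main__':
    demo.queue(api_open=False, max_size=1).launch()
```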
app_upload.py CHANGED
@@ -2,56 +2,21 @@
 
 from __future__ import annotations
 
-import
+import os
 
 import gradio as gr
-import slugify
 
 from constants import MODEL_LIBRARY_ORG_NAME, UploadTarget
-from uploader import Uploader
+from uploader import upload
 from utils import find_exp_dirs
 
 
-class ModelUploader(Uploader):
-    def upload_model(
-        self,
-        folder_path: str,
-        repo_name: str,
-        upload_to: str,
-        private: bool,
-        delete_existing_repo: bool,
-        input_hf_token: str | None = None,
-        return_html_link: bool = True,
-    ) -> str:
-        if not folder_path:
-            raise ValueError
-        if not repo_name:
-            repo_name = pathlib.Path(folder_path).name
-        repo_name = slugify.slugify(repo_name)
-
-        if upload_to == UploadTarget.PERSONAL_PROFILE.value:
-            organization = ''
-        elif upload_to == UploadTarget.MODEL_LIBRARY.value:
-            organization = MODEL_LIBRARY_ORG_NAME
-        else:
-            raise ValueError
-
-        return self.upload(folder_path,
-                           repo_name,
-                           organization=organization,
-                           private=private,
-                           delete_existing_repo=delete_existing_repo,
-                           input_hf_token=input_hf_token,
-                           return_html_link=return_html_link)
-
-
 def load_local_model_list() -> dict:
     choices = find_exp_dirs()
     return gr.update(choices=choices, value=choices[0] if choices else None)
 
 
-def create_upload_demo(hf_token: str | None) -> gr.Blocks:
-    uploader = ModelUploader(hf_token)
+def create_upload_demo() -> gr.Blocks:
     model_dirs = find_exp_dirs()
 
     with gr.Blocks() as demo:
@@ -72,9 +37,8 @@ def create_upload_demo(hf_token: str | None) -> gr.Blocks:
                              choices=[_.value for _ in UploadTarget],
                              value=UploadTarget.MODEL_LIBRARY.value)
         model_name = gr.Textbox(label='Model Name')
-
-
-                                  visible=False if hf_token else True)
+        hf_token = gr.Text(label='Hugging Face Write Token',
+                           visible=os.getenv('HF_TOKEN') is None)
         upload_button = gr.Button('Upload')
         gr.Markdown(f'''
        - You can upload your trained model to your personal profile (i.e. https://huggingface.co/{{your_username}}/{{model_name}}) or to the public [Tune-A-Video Library](https://huggingface.co/{MODEL_LIBRARY_ORG_NAME}) (i.e. https://huggingface.co/{MODEL_LIBRARY_ORG_NAME}/{{model_name}}).
@@ -86,23 +50,19 @@ def create_upload_demo(hf_token: str | None) -> gr.Blocks:
         reload_button.click(fn=load_local_model_list,
                             inputs=None,
                             outputs=model_dir)
-        upload_button.click(fn=uploader.upload_model,
+        upload_button.click(fn=upload,
                             inputs=[
                                 model_dir,
                                 model_name,
                                 upload_to,
                                 use_private_repo,
                                 delete_existing_repo,
-
+                                hf_token,
                             ],
                             outputs=output_message)
-
     return demo
 
 
 if __name__ == '__main__':
-
-
-    hf_token = os.getenv('HF_TOKEN')
-    demo = create_upload_demo(hf_token)
+    demo = create_upload_demo()
     demo.queue(api_open=False, max_size=1).launch()
constants.py CHANGED
@@ -8,3 +8,4 @@ class UploadTarget(enum.Enum):
 
 MODEL_LIBRARY_ORG_NAME = 'Tune-A-Video-library'
 SAMPLE_MODEL_REPO = 'Tune-A-Video-library/a-man-is-surfing'
+URL_TO_JOIN_MODEL_LIBRARY_ORG = 'https://huggingface.co/organizations/Tune-A-Video-library/share/YjTcaNJmKyeHFpMBioHhzBcTzCYddVErEk'
trainer.py CHANGED
@@ -14,19 +14,14 @@ import torch
 from huggingface_hub import HfApi
 from omegaconf import OmegaConf
 
-from app_upload import ModelUploader
+from uploader import upload
 from utils import save_model_card
 
 sys.path.append('Tune-A-Video')
 
-URL_TO_JOIN_MODEL_LIBRARY_ORG = 'https://huggingface.co/organizations/Tune-A-Video-library/share/YjTcaNJmKyeHFpMBioHhzBcTzCYddVErEk'
-
 
 class Trainer:
-    def __init__(self, hf_token: str | None):
-        self.hf_token = hf_token
-        self.model_uploader = ModelUploader(hf_token)
-
+    def __init__(self):
         self.checkpoint_dir = pathlib.Path('checkpoints')
         self.checkpoint_dir.mkdir(exist_ok=True)
 
@@ -44,12 +39,6 @@ class Trainer:
                        cwd=org_dir)
         return model_dir.as_posix()
 
-    def join_model_library_org(self, token: str) -> None:
-        subprocess.run(
-            shlex.split(
-                f'curl -X POST -H "Authorization: Bearer {token}" -H "Content-Type: application/json" {URL_TO_JOIN_MODEL_LIBRARY_ORG}'
-            ))
-
     def run(
         self,
         training_video: str,
@@ -72,7 +61,7 @@ class Trainer:
         delete_existing_repo: bool,
         upload_to: str,
         remove_gpu_after_training: bool,
-        input_hf_token: str,
+        hf_token: str,
     ) -> None:
         if not torch.cuda.is_available():
             raise gr.Error('CUDA is not available.')
@@ -96,10 +85,6 @@ class Trainer:
         shutil.rmtree(output_dir, ignore_errors=True)
         output_dir.mkdir(parents=True)
 
-        if upload_to_hub:
-            self.join_model_library_org(
-                self.hf_token if self.hf_token else input_hf_token)
-
         config = OmegaConf.load('Tune-A-Video/configs/man-surfing.yaml')
         config.pretrained_model_path = self.download_base_model(base_model)
         config.output_dir = output_dir.as_posix()
@@ -146,20 +131,18 @@ class Trainer:
             f.write('Training completed!\n')
 
         if upload_to_hub:
-            upload_message = self.model_uploader.upload_model(
-
-
-
-
-
-                input_hf_token=input_hf_token,
-                return_html_link=False)
+            upload_message = upload(local_folder_path=output_dir.as_posix(),
+                                    target_repo_name=output_model_name,
+                                    upload_to=upload_to,
+                                    private=use_private_repo,
+                                    delete_existing_repo=delete_existing_repo,
+                                    hf_token=hf_token)
             with open(self.log_file, 'a') as f:
                 f.write(upload_message)
 
         if remove_gpu_after_training:
             space_id = os.getenv('SPACE_ID')
             if space_id:
-                api = HfApi(token=
+                api = HfApi(token=os.getenv('HF_TOKEN') or hf_token)
                 api.request_space_hardware(repo_id=space_id,
                                            hardware='cpu-basic')
uploader.py CHANGED
@@ -1,47 +1,63 @@
 from __future__ import annotations
 
+import os
+import pathlib
+import shlex
+import subprocess
+
+import slugify
 from huggingface_hub import HfApi
 
+from constants import (MODEL_LIBRARY_ORG_NAME, URL_TO_JOIN_MODEL_LIBRARY_ORG,
+                       UploadTarget)
 
-class Uploader:
-    def __init__(self, hf_token: str | None):
-        self.hf_token = hf_token
-
-    def upload(self,
-               folder_path: str,
-               repo_name: str,
-               organization: str = '',
-               repo_type: str = 'model',
-               private: bool = True,
-               delete_existing_repo: bool = False,
-               input_hf_token: str | None = None,
-               return_html_link: bool = True) -> str:
-
-        api = HfApi(token=self.hf_token or input_hf_token)
-
-        if not folder_path:
-            raise ValueError
-        if not repo_name:
-            raise ValueError
-        if not organization:
-            organization = api.whoami()['name']
-
-        repo_id = f'{organization}/{repo_name}'
-        if delete_existing_repo:
-            try:
-                api.delete_repo(repo_id, repo_type=repo_type)
-            except Exception:
-                pass
-        try:
-            api.
-
-
-
-
-
-
-
-
-
-
-
+
+def join_model_library_org(hf_token: str) -> None:
+    subprocess.run(
+        shlex.split(
+            f'curl -X POST -H "Authorization: Bearer {hf_token}" -H "Content-Type: application/json" {URL_TO_JOIN_MODEL_LIBRARY_ORG}'
+        ))
+
+
+def upload(local_folder_path: str,
+           target_repo_name: str,
+           upload_to: str,
+           private: bool = True,
+           delete_existing_repo: bool = False,
+           hf_token: str = '') -> str:
+    hf_token = os.getenv('HF_TOKEN') or hf_token
+    if not hf_token:
+        raise ValueError
+    api = HfApi(token=hf_token)
+
+    if not local_folder_path:
+        raise ValueError
+    if not target_repo_name:
+        target_repo_name = pathlib.Path(local_folder_path).name
+    target_repo_name = slugify.slugify(target_repo_name)
+
+    if upload_to == UploadTarget.PERSONAL_PROFILE.value:
+        organization = api.whoami()['name']
+    elif upload_to == UploadTarget.MODEL_LIBRARY.value:
+        organization = MODEL_LIBRARY_ORG_NAME
+        join_model_library_org(hf_token)
+    else:
+        raise ValueError
+
+    repo_id = f'{organization}/{target_repo_name}'
+    if delete_existing_repo:
+        try:
+            api.delete_repo(repo_id, repo_type='model')
+        except Exception:
+            pass
+    try:
+        api.create_repo(repo_id, repo_type='model', private=private)
+        api.upload_folder(repo_id=repo_id,
+                          folder_path=local_folder_path,
+                          path_in_repo='.',
+                          repo_type='model')
+        url = f'https://huggingface.co/{repo_id}'
+        message = f'Your model was successfully uploaded to {url}.'
+    except Exception as e:
+        message = str(e)
+    return message
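As a usage note, the new module-level entry point can be called directly. A hedged sketch follows; the folder path and model name are placeholders, and the token may come from either the `HF_TOKEN` environment variable or the `hf_token` argument.

```python
from constants import UploadTarget
from uploader import upload

# Placeholder folder/model names; upload() slugifies the repo name and
# falls back to the folder name when target_repo_name is empty.
message = upload(local_folder_path='experiments/my-model',
                 target_repo_name='my-tuned-model',
                 upload_to=UploadTarget.PERSONAL_PROFILE.value,
                 private=True,
                 delete_existing_repo=False,
                 hf_token='')  # resolved as os.getenv('HF_TOKEN') or hf_token
print(message)
```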