Update
- app.py +13 -3
- app_inference.py +4 -2
- app_training.py +2 -7
- app_upload.py +2 -2
app.py
CHANGED
@@ -9,6 +9,7 @@ import gradio as gr
 import torch
 
 from app_inference import create_inference_demo
+from app_system_monitor import create_monitor_demo
 from app_training import create_training_demo
 from app_upload import create_upload_demo
 from inference import InferencePipeline
@@ -68,14 +69,23 @@ with gr.Blocks(css='style.css') as demo:
     gr.Markdown(TITLE)
     with gr.Tabs():
         with gr.TabItem('Train'):
-            create_training_demo(trainer, pipe)
+            create_training_demo(trainer,
+                                 pipe,
+                                 disable_run_button=IS_SHARED_UI)
         with gr.TabItem('Run'):
-            create_inference_demo(pipe, HF_TOKEN)
+            create_inference_demo(pipe,
+                                  HF_TOKEN,
+                                  disable_run_button=IS_SHARED_UI)
         with gr.TabItem('Upload'):
             gr.Markdown('''
             - You can use this tab to upload models later if you choose not to upload models in training time or if upload in training time failed.
             ''')
-            create_upload_demo()
+            create_upload_demo(disable_run_button=IS_SHARED_UI)
+
+    with gr.Row():
+        if not IS_SHARED_UI and not os.getenv('DISABLE_SYSTEM_MONITOR'):
+            with gr.Accordion(label='System info', open=False):
+                create_monitor_demo()
 
     if not HF_TOKEN:
         show_warning(HF_TOKEN_NOT_SPECIFIED_WARNING)
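app.py now imports create_monitor_demo from app_system_monitor, but that module is not part of this diff. For context, a minimal sketch of what such a monitor could look like; every name and the psutil-based body below are assumptions, not the Space's actual code:

# Hypothetical sketch of app_system_monitor.py; this file is not in the diff,
# so the psutil-based metrics and the layout are assumptions.
import gradio as gr
import psutil


def get_system_info() -> str:
    # Report CPU and RAM usage; a real monitor might also query the GPU.
    vm = psutil.virtual_memory()
    return (f'CPU: {psutil.cpu_percent():.0f}%\n'
            f'RAM: {vm.used / 2**30:.1f} / {vm.total / 2**30:.1f} GiB')


def create_monitor_demo() -> gr.Blocks:
    with gr.Blocks() as demo:
        # every=1 re-runs the value callable about once per second, the same
        # polling pattern app_training.py uses for its log box.
        gr.Textbox(value=get_system_info, label='System info', lines=2, every=1)
    return demo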
app_inference.py
CHANGED
@@ -62,7 +62,8 @@ class InferenceUtil:
 
 
 def create_inference_demo(pipe: InferencePipeline,
-                          hf_token: str | None = None) -> gr.Blocks:
+                          hf_token: str | None = None,
+                          disable_run_button: bool = False) -> gr.Blocks:
     app = InferenceUtil(hf_token)
 
     with gr.Blocks() as demo:
@@ -117,7 +118,8 @@ def create_inference_demo(pipe: InferencePipeline,
                                   step=0.1,
                                   value=7.5)
 
-            run_button = gr.Button('Generate')
+            run_button = gr.Button('Generate',
+                                   interactive=not disable_run_button)
 
             gr.Markdown('''
             - After training, you can press "Reload Model List" button to load your trained model names.
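The new disable_run_button keyword maps directly onto Gradio's interactive flag, so the shared Space shows a greyed-out Generate button while a duplicated Space keeps it clickable. A minimal standalone illustration of the pattern; the IS_SHARED_UI value here is a stand-in for the flag app.py derives outside this diff:

# Illustration only, not the Space's code; IS_SHARED_UI is an assumed stand-in.
import gradio as gr

IS_SHARED_UI = True  # True on the shared demo Space, False on a duplicate

with gr.Blocks() as demo:
    # interactive=False renders the button greyed out and unclickable, which
    # is all that disable_run_button=True does inside create_inference_demo().
    run_button = gr.Button('Generate', interactive=not IS_SHARED_UI)

demo.launch()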
app_training.py
CHANGED
@@ -6,7 +6,6 @@ import os
 
 import gradio as gr
 
-from app_system_monitor import create_monitor_demo
 from constants import UploadTarget
 from inference import InferencePipeline
 from trainer import Trainer
@@ -14,7 +13,7 @@ from trainer import Trainer
 
 def create_training_demo(trainer: Trainer,
                          pipe: InferencePipeline | None = None,
-                         disable_training: bool = False) -> gr.Blocks:
+                         disable_run_button: bool = False) -> gr.Blocks:
     def read_log() -> str:
         with open(trainer.log_file) as f:
             lines = f.readlines()
@@ -112,7 +111,7 @@ def create_training_demo(trainer: Trainer,
                                      interactive=bool(os.getenv('SPACE_ID')),
                                      visible=False)
             run_button = gr.Button('Start Training',
-                                   interactive=not disable_training)
+                                   interactive=not disable_run_button)
 
         with gr.Box():
             gr.Text(label='Log',
@@ -120,10 +119,6 @@ def create_training_demo(trainer: Trainer,
                     lines=10,
                     max_lines=10,
                     every=1)
-        if not disable_training and not os.getenv(
-                'DISABLE_SYSTEM_MONITOR'):
-            with gr.Accordion(label='System info', open=False):
-                create_monitor_demo()
 
     if pipe is not None:
         run_button.click(fn=pipe.clear)
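The last hunk keeps the existing run_button.click(fn=pipe.clear) wiring, so starting a training run first drops whatever pipeline the Run tab has loaded. inference.py is not shown in this diff, so the sketch below of what clear() typically does is an assumption:

# Assumed shape of InferencePipeline.clear(); the field names and cleanup
# steps are guesses, not the repository's actual inference.py.
import gc

import torch


class InferencePipeline:
    def __init__(self) -> None:
        self.pipe = None  # lazily loaded diffusers pipeline

    def clear(self) -> None:
        # Drop the loaded pipeline and release cached GPU memory so the
        # training job started from the Train tab gets the whole device.
        self.pipe = None
        gc.collect()
        torch.cuda.empty_cache()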
app_upload.py
CHANGED
@@ -16,7 +16,7 @@ def load_local_model_list() -> dict:
     return gr.update(choices=choices, value=choices[0] if choices else None)
 
 
-def create_upload_demo() -> gr.Blocks:
+def create_upload_demo(disable_run_button: bool = False) -> gr.Blocks:
     model_dirs = find_exp_dirs()
 
     with gr.Blocks() as demo:
@@ -39,7 +39,7 @@ def create_upload_demo() -> gr.Blocks:
         model_name = gr.Textbox(label='Model Name')
         hf_token = gr.Text(label='Hugging Face Write Token',
                            visible=os.getenv('HF_TOKEN') is None)
-        upload_button = gr.Button('Upload')
+        upload_button = gr.Button('Upload', interactive=not disable_run_button)
         gr.Markdown(f'''
         - You can upload your trained model to your personal profile (i.e. https://huggingface.co/{{your_username}}/{{model_name}}) or to the public [Tune-A-Video Library](https://huggingface.co/{MODEL_LIBRARY_ORG_NAME}) (i.e. https://huggingface.co/{MODEL_LIBRARY_ORG_NAME}/{{model_name}}).
         ''')