Spaces: Build error

rlawjdghek committed
Commit • 983b029
1 Parent(s): 0d73cbc

stableviton 512

Files changed:
- app.py +7 -7
- configs/{VITON512.yaml → VITON.yaml} +0 -0
app.py
CHANGED
@@ -24,8 +24,8 @@ from preprocess.openpose.run_openpose import OpenPose
 
 os.environ['GRADIO_TEMP_DIR'] = './tmp' # TODO: turn off when final upload
 
-IMG_H =
-IMG_W =
+IMG_H = 512
+IMG_W = 384
 
 openpose_model_hd = OpenPose(0)
 parsing_model_hd = Parsing(0)
@@ -38,13 +38,13 @@ category_dict = ['upperbody', 'lowerbody', 'dress']
 category_dict_utils = ['upper_body', 'lower_body', 'dresses']
 
 # #### model init >>>>
-config = OmegaConf.load("./configs/
+config = OmegaConf.load("./configs/VITON.yaml")
 config.model.params.img_H = IMG_H
 config.model.params.img_W = IMG_W
 params = config.model.params
 
 model = create_model(config_path=None, config=config)
-model.load_state_dict(torch.load("./checkpoints/
+model.load_state_dict(torch.load("./checkpoints/VITONHD.ckpt", map_location="cpu")["state_dict"])
 model = model.cuda()
 model.eval()
 sampler = PLMSSampler(model)
@@ -130,7 +130,6 @@ def process_hd(vton_img, garm_img, n_steps):
 batch,
 n_steps
 )
-breakpoint()
 return sample
 
 
@@ -181,12 +180,13 @@ with gr.Blocks(css='style.css') as demo:
 examples_per_page=14,
 examples=example_garment_ps)
 with gr.Column():
-result_gallery = gr.
+result_gallery = gr.Image(label='Output', show_label=False, preview=True, scale=1)
+# result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", preview=True, scale=1)
 with gr.Column():
 run_button = gr.Button(value="Run")
 # TODO: change default values (important!)
 # n_samples = gr.Slider(label="Images", minimum=1, maximum=4, value=1, step=1)
-n_steps = gr.Slider(label="Steps", minimum=20, maximum=
+n_steps = gr.Slider(label="Steps", minimum=20, maximum=70, value=20, step=1)
 # guidance_scale = gr.Slider(label="Guidance scale", minimum=1.0, maximum=5.0, value=2.0, step=0.1)
 # seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, value=-1)
 
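For context, here is a minimal, self-contained sketch of how the swapped-in gr.Image output and the new Steps slider could be wired to the existing process_hd(vton_img, garm_img, n_steps) handler. Only the Image and Slider settings come from the diff above; the input widgets, the stub handler, and the click() wiring are illustrative assumptions, not the repo's actual layout.

import gradio as gr

def process_hd(vton_img, garm_img, n_steps):
    # Stub standing in for the real handler defined earlier in app.py.
    return vton_img

with gr.Blocks() as demo:
    with gr.Row():
        vton_img = gr.Image(label='Person')    # assumed input widget
        garm_img = gr.Image(label='Garment')   # assumed input widget
        with gr.Column():
            result_gallery = gr.Image(label='Output', show_label=False)
        with gr.Column():
            run_button = gr.Button(value="Run")
            n_steps = gr.Slider(label="Steps", minimum=20, maximum=70, value=20, step=1)
    # Assumed wiring: the diff does not show the click() call.
    run_button.click(fn=process_hd, inputs=[vton_img, garm_img, n_steps],
                     outputs=[result_gallery])

if __name__ == "__main__":
    demo.launch()

The commented-out gr.Gallery line suggests the output was changed from a list of images to a single image, which matches process_hd returning a single sample.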
configs/{VITON512.yaml → VITON.yaml}
RENAMED
File without changes
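Since configs/VITON512.yaml is renamed to VITON.yaml with no content changes, the only code that has to follow it is the OmegaConf.load path in app.py. Below is a small, self-contained illustration (not code from the repo) of the override pattern that load relies on; the nested values are placeholders, not the real contents of VITON.yaml.

from omegaconf import OmegaConf

IMG_H = 512
IMG_W = 384

# Placeholder config standing in for configs/VITON.yaml; the real file's
# contents are not shown in this commit.
config = OmegaConf.create({"model": {"params": {"img_H": None, "img_W": None}}})

# OmegaConf configs are attribute-addressable, so the constants defined in
# app.py overwrite whatever the YAML declares before the model is created.
config.model.params.img_H = IMG_H
config.model.params.img_W = IMG_W

print(OmegaConf.to_yaml(config).strip())
# model:
#   params:
#     img_H: 512
#     img_W: 384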