using JSON block for code output
app.py
CHANGED
@@ -9,19 +9,19 @@ def device_change(device, config):
 
     config = set_config(config, 'device', device)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def model_refiner_change(refiner, config):
 
     config = set_config(config, 'refiner', refiner)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def cpu_offload_change(cpu_offload, config):
 
     config = set_config(config, 'cpu_offload', cpu_offload)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def models_change(model, scheduler, config):
 
@@ -55,85 +55,85 @@ def models_change(model, scheduler, config):
     # safety_checker_change(in_safety_checker.value, config)
     # requires_safety_checker_change(in_requires_safety_checker.value, config)
 
-    return model_description, refiner, trigger_token, use_safetensors, scheduler, config,
+    return model_description, refiner, trigger_token, use_safetensors, scheduler, config, config, assemble_code(config)
 
 def data_type_change(data_type, config):
 
     config = set_config(config, 'data_type', data_type)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def tensorfloat32_change(allow_tensorfloat32, config):
 
     config = set_config(config, 'allow_tensorfloat32', allow_tensorfloat32)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def inference_steps_change(inference_steps, config):
 
     config = set_config(config, 'inference_steps', inference_steps)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def manual_seed_change(manual_seed, config):
 
     config = set_config(config, 'manual_seed', manual_seed)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def guidance_scale_change(guidance_scale, config):
 
     config = set_config(config, 'guidance_scale', guidance_scale)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def enable_vae_slicing_change(enable_vae_slicing, config):
 
     config = set_config(config, 'enable_vae_slicing', enable_vae_slicing)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def enable_vae_tiling_change(enable_vae_tiling, config):
 
     config = set_config(config, 'enable_vae_tiling', enable_vae_tiling)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def prompt_change(prompt, config):
 
     config = set_config(config, 'prompt', prompt)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def trigger_token_change(trigger_token, config):
 
     config = set_config(config, 'trigger_token', trigger_token)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def negative_prompt_change(negative_prompt, config):
 
     config = set_config(config, 'negative_prompt', negative_prompt)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def variant_change(variant, config):
 
     config = set_config(config, 'variant', variant)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def safety_checker_change(safety_checker, config):
 
     config = set_config(config, 'safety_checker', safety_checker)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def requires_safety_checker_change(requires_safety_checker, config):
 
     config = set_config(config, 'requires_safety_checker', requires_safety_checker)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def auto_encoders_change(auto_encoder, config):
 
@@ -146,7 +146,7 @@ def auto_encoders_change(auto_encoder, config):
 
     config = set_config(config, 'auto_encoder', auto_encoder)
 
-    return auto_encoder_description, config,
+    return auto_encoder_description, config, config, assemble_code(config)
 
 def schedulers_change(scheduler, config):
 
@@ -159,7 +159,7 @@ def schedulers_change(scheduler, config):
 
     config = set_config(config, 'scheduler', scheduler)
 
-    return scheduler_description, config,
+    return scheduler_description, config, config, assemble_code(config)
 
 def adapters_textual_inversion_change(adapter_textual_inversion, config):
 
@@ -174,13 +174,13 @@ def adapters_textual_inversion_change(adapter_textual_inversion, config):
 
     config = set_config(config, 'adapter_textual_inversion', adapter_textual_inversion)
 
-    return adapter_textual_inversion_description, in_adapters_textual_inversion_token, config,
+    return adapter_textual_inversion_description, in_adapters_textual_inversion_token, config, config, assemble_code(config)
 
 def textual_inversion_token_change(adapter_textual_inversion_token, config):
 
     config = set_config(config, 'adapter_textual_inversion_token', adapter_textual_inversion_token)
 
-    return config,
+    return config, config, assemble_code(config)
 
 def run_inference(config, config_history, progress=gr.Progress(track_tqdm=True)):
 
@@ -382,7 +382,8 @@ with gr.Blocks(analytics_enabled=False) as demo:
         out_image = gr.Image()
         out_code = gr.Code(assemble_code(config.value), label="Code")
     with gr.Row():
-        out_config = gr.Code(value=str(config.value), label="Current config")
+        # out_config = gr.Code(value=str(config.value), label="Current config")
+        out_config = gr.JSON(value=config.value, label="Current config")
    with gr.Row():
         out_config_history = gr.Markdown(dict_list_to_markdown_table(config_history.value))
 
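The pattern behind this commit: every *_change handler now returns three values, (config, config, assemble_code(config)), so a single event can update the gr.State, the new gr.JSON "Current config" view, and the gr.Code block at once. The following is a minimal, self-contained sketch of that wiring; the helper bodies, the dropdown, and the event hookup are assumptions for illustration, not the app's exact code.

import gradio as gr

# Assumption: config is a plain dict held in gr.State
def set_config(config, key, value):
    config[key] = value
    return config

# Assumption: builds a code snippet string from the current config
def assemble_code(config):
    return f"pipeline.to('{config.get('device', 'cpu')}')"

def device_change(device, config):
    config = set_config(config, 'device', device)
    # one value per output component: gr.State, gr.JSON, gr.Code
    return config, config, assemble_code(config)

with gr.Blocks(analytics_enabled=False) as demo:
    config = gr.State({'device': 'cpu'})
    in_device = gr.Dropdown(['cpu', 'cuda'], value='cpu', label="Device")
    out_code = gr.Code(label="Code")
    out_config = gr.JSON(value={'device': 'cpu'}, label="Current config")
    # outputs follow the handler's return order: state, JSON config view, code block
    in_device.change(device_change, [in_device, config], [config, out_config, out_code])

demo.launch()

Rendering the config through gr.JSON instead of gr.Code(str(...)) lets Gradio pretty-print and collapse the dict natively, rather than showing a raw Python repr.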