Alexander Bagus committed on
Commit 548acb6 · 1 Parent(s): 9f51691
Files changed (1)
  1. app.py +31 -17
app.py CHANGED
@@ -79,6 +79,9 @@ pipe.transformer.layers._repeated_blocks = ["ZImageTransformerBlock"]
 spaces.aoti_blocks_load(pipe.transformer.layers,
                         "zerogpu-aoti/Z-Image", variant="fa3")
 
+def prepare(prompt, input_image):
+    polished_prompt = polish_prompt(prompt)
+    return polished_prompt
 
 @spaces.GPU
 def inference(
@@ -161,9 +164,10 @@ with gr.Blocks() as demo:
             run_button = gr.Button("Run", variant="primary")
         with gr.Column():
             output_image = gr.Image(label="Generated image", show_label=False)
-            polished_prompt = gr.Textbox(label="Polished prompt", interactive=False)
-            with gr.Accordion("Control image", open=False):
+
+            with gr.Accordion("Preprocessor output", open=False):
                 control_image = gr.Image(label="Control image", show_label=False)
+                polished_prompt = gr.Textbox(label="Polished prompt", interactive=False)
 
     with gr.Accordion("Advanced Settings", open=False):
         seed = gr.Slider(
@@ -211,23 +215,33 @@ with gr.Blocks() as demo:
     gr.Examples(examples=examples, inputs=[input_image, prompt])
 
     gr.HTML(read_file("static/footer.html"))
-    gr.on(
-        triggers=[run_button.click, prompt.submit],
-        fn=inference,
-        inputs=[
-            prompt,
-            input_image,
-            image_scale,
-            control_context_scale,
-            seed,
-            randomize_seed,
-            guidance_scale,
-            num_inference_steps,
-        ],
-        outputs=[output_image, seed],
+    run_button.click(
+        fn=prepare,
+        inputs=prompt,
+        outputs=[polished_prompt]
+        # outputs=gr.State(), # Pass to the next function, not to UI at this step
     ).then(
-
+        # fn=generate_image,
+        # inputs=None, # This will automatically use the previous result
+        # outputs=output
     )
+    # gr.on(
+    #     triggers=[run_button.click, prompt.submit],
+    #     fn=inference,
+    #     inputs=[
+    #         prompt,
+    #         input_image,
+    #         image_scale,
+    #         control_context_scale,
+    #         seed,
+    #         randomize_seed,
+    #         guidance_scale,
+    #         num_inference_steps,
+    #     ],
+    #     outputs=[output_image, seed],
+    # ).then(
+
+    # )
 
 if __name__ == "__main__":
     demo.launch(mcp_server=True, css=css)
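
Note on the event wiring: this commit leaves the handler chain half-finished. run_button.click now runs prepare (which polishes the prompt), while the body of the chained .then() is still commented out and the old gr.on(...) call that drove inference directly survives only as a comment. Below is a minimal sketch, not part of the commit, of how the chain could be completed inside the existing `with gr.Blocks() as demo:` block. It assumes inference should consume the polished prompt while keeping the inputs and outputs listed in the removed gr.on call, and it passes both prompt and input_image because prepare declares both parameters:

    # Sketch only (not in the commit): one plausible way to finish the prepare -> inference chain,
    # reusing the components already defined earlier in app.py.
    run_button.click(
        fn=prepare,                      # step 1: polish the raw prompt
        inputs=[prompt, input_image],    # prepare() declares both parameters
        outputs=[polished_prompt],       # show the result in the "Preprocessor output" accordion
    ).then(
        fn=inference,                    # step 2: generate once the polished prompt is ready
        inputs=[
            polished_prompt,             # assumption: feed the polished prompt instead of the raw one
            input_image,
            image_scale,
            control_context_scale,
            seed,
            randomize_seed,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[output_image, seed],
    )

If the intermediate value should not appear in the UI, the commit's own "# outputs=gr.State()" comment points at the alternative: return the polished prompt into a gr.State() component and pass that state into the .then() step instead of the polished_prompt textbox.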