Fabrice-TIERCELIN committed
Commit 10b38cc
1 parent: 0539164

Input (*.png, *.webp, *.jpeg, *.gif, *.bmp)

Files changed (1)
  1. gradio_demo.py +2 -2
gradio_demo.py CHANGED
@@ -512,14 +512,14 @@ with gr.Blocks(title="SUPIR") as interface:
     """)
     gr.HTML(title_html)
 
-    input_image = gr.Image(label="Input", show_label=True, type="numpy", height=600, elem_id="image-input")
+    input_image = gr.Image(label="Input (*.png, *.webp, *.jpeg, *.gif, *.bmp)", show_label=True, type="numpy", height=600, elem_id="image-input")
     with gr.Row():
         with gr.Column():
             rotate_anti_90_button = gr.Button(value="⤴ Rotate -90°", elem_id="rotate_anti_90_button", visible=False)
         with gr.Column():
             rotate_90_button = gr.Button(value="⤵ Rotate +90°", elem_id="rotate_90_button", visible=False)
     with gr.Group():
-        prompt = gr.Textbox(label="Image description", info="Help the AI understand what the image represents; describe as much as possible, especially the details we can't see on the original image; I advise you to write in English because other languages may not be handled", value="", placeholder="A 27 years old woman, walking, in Santiago, morning, Summer, photorealistic", lines=3)
+        prompt = gr.Textbox(label="Image description", info="Help the AI understand what the image represents; describe as much as possible, especially the details we can't see on the original image; I advise you to write in English because other languages may not be handled", value="", placeholder="A 33 years old man, walking, in the street, Santiago, morning, Summer, photorealistic", lines=3)
         prompt_hint = gr.HTML("You can use a <a href='"'https://huggingface.co/spaces/MaziyarPanahi/llava-llama-3-8b'"'>LlaVa space</a> to auto-generate the description of your image.")
         upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8]], label="Upscale factor", info="Resolution x1 to x8", value=2, interactive=True)
         allocation = gr.Radio([["1 min", 1], ["2 min", 2], ["3 min", 3], ["4 min", 4], ["5 min", 5], ["6 min", 6], ["7 min", 7], ["8 min", 8], ["9 min", 9], ["10 min", 10]], label="GPU allocation time", info="lower=May abort run, higher=Quota penalty for next runs", value=6, interactive=True)
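
For context only (not part of the commit): a minimal, self-contained sketch of how the relabeled input component and the prompt box from this hunk could be wired into a standalone Blocks app. The restore stub, run_button, and output_image names are illustrative assumptions, not SUPIR's actual code.

# Minimal sketch, assuming the same Gradio components as the hunk above.
# The `restore` callback is a hypothetical stub standing in for SUPIR's
# real processing function.
import gradio as gr

def restore(image, prompt, upscale, allocation):
    # Stub: the real app would run SUPIR restoration here.
    return image

with gr.Blocks(title="SUPIR") as interface:
    # Note: the file-type list is only label text; gr.Image's accepted
    # formats are not restricted by the label.
    input_image = gr.Image(label="Input (*.png, *.webp, *.jpeg, *.gif, *.bmp)",
                           show_label=True, type="numpy", height=600,
                           elem_id="image-input")
    with gr.Group():
        prompt = gr.Textbox(label="Image description", lines=3,
                            placeholder="A 33 years old man, walking, in the street, "
                                        "Santiago, morning, Summer, photorealistic")
        upscale = gr.Radio([["x1", 1], ["x2", 2], ["x4", 4], ["x8", 8]],
                           label="Upscale factor", value=2, interactive=True)
        allocation = gr.Radio([["1 min", 1], ["6 min", 6], ["10 min", 10]],
                              label="GPU allocation time", value=6, interactive=True)
    run_button = gr.Button("Run")
    output_image = gr.Image(label="Output", type="numpy")
    run_button.click(fn=restore,
                     inputs=[input_image, prompt, upscale, allocation],
                     outputs=output_image)

if __name__ == "__main__":
    interface.launch()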