Fabrice-TIERCELIN committed
Commit 8f3e82e
1 Parent(s): 10b38cc

Rotation radio buttons

Files changed (1)
  1. gradio_demo.py +20 -32
gradio_demo.py CHANGED
@@ -70,19 +70,7 @@ if torch.cuda.device_count() > 0:
 def check_upload(input_image):
     if input_image is None:
         raise gr.Error("Please provide an image to restore.")
-    return [gr.update(visible = False)] * 2
-
-def rotate_anti_90(image_array):
-    if image_array is None:
-        raise gr.Error("Please provide an image to rotate.")
-
-    return np.array(list(zip(*image_array))[::-1])
-
-def rotate_90(image_array):
-    if image_array is None:
-        raise gr.Error("Please provide an image to rotate.")
-
-    return np.array(list(zip(*image_array[::-1])))
+    return gr.update(visible = True)
 
 def update_seed(is_randomize_seed, seed):
     if is_randomize_seed:
@@ -136,6 +124,7 @@ def llave_process(input_image, temperature, top_p, qs=None):
 
 def stage2_process(
     noisy_image,
+    rotation,
     denoise_image,
     prompt,
     a_prompt,
@@ -165,6 +154,7 @@ def stage2_process(
     allocation
 ):
     print("noisy_image: " + str(noisy_image))
+    print("rotation: " + str(rotation))
     print("denoise_image: " + str(denoise_image))
     print("prompt: " + str(prompt))
     print("a_prompt: " + str(a_prompt))
@@ -213,11 +203,22 @@ def stage2_process(
     else:
         a_prompt = prompt + a_prompt
     print("Final prompt: " + str(a_prompt))
+
     noisy_image = noisy_image if denoise_image is None else denoise_image
+
+    if rotation == 90:
+        noisy_image = np.array(list(zip(*noisy_image[::-1])))
+    elif rotation == 180:
+        noisy_image = np.array(list(zip(*noisy_image[::-1])))
+        noisy_image = np.array(list(zip(*noisy_image[::-1])))
+    elif rotation == -90:
+        noisy_image = np.array(list(zip(*noisy_image))[::-1])
+
     if 1 < downscale:
         input_height, input_width, input_channel = noisy_image.shape
         noisy_image = np.array(Image.fromarray(noisy_image).resize((input_width // downscale, input_height // downscale), Image.LANCZOS))
 
+    # Allocation
     if allocation == 1:
         return restore_in_1min(
             noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
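For reference, the zip-based expressions in the hunk above are plain-Python 90° rotations of the numpy image array. A minimal sketch (not part of the commit; the dummy `img` array is only illustrative) showing that they match NumPy's np.rot90, which would be an equivalent, faster alternative:

# Sketch only, not from the commit: the zip-based rotations above behave
# like np.rot90 on an H x W x C image array.
import numpy as np

img = np.arange(24, dtype=np.uint8).reshape(2, 4, 3)  # dummy H x W x C image

# rotation == 90 branch: clockwise quarter turn
assert np.array_equal(np.array(list(zip(*img[::-1]))), np.rot90(img, k=-1))

# rotation == -90 branch: counter-clockwise quarter turn
assert np.array_equal(np.array(list(zip(*img))[::-1]), np.rot90(img, k=1))

# rotation == 180 branch: two clockwise quarter turns
flipped = np.array(list(zip(*img[::-1])))
flipped = np.array(list(zip(*flipped[::-1])))
assert np.array_equal(flipped, np.rot90(img, k=2))

For the 180° case, np.rot90(noisy_image, k=2) or noisy_image[::-1, ::-1] would give the same result in a single pass.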
@@ -513,11 +514,7 @@ with gr.Blocks(title="SUPIR") as interface:
     gr.HTML(title_html)
 
     input_image = gr.Image(label="Input (*.png, *.webp, *.jpeg, *.gif, *.bmp)", show_label=True, type="numpy", height=600, elem_id="image-input")
-    with gr.Row():
-        with gr.Column():
-            rotate_anti_90_button = gr.Button(value="⤴ Rotate -90°", elem_id="rotate_anti_90_button", visible=False)
-        with gr.Column():
-            rotate_90_button = gr.Button(value="⤵ Rotate +90°", elem_id="rotate_90_button", visible=False)
+    rotation = gr.Radio([["No rotation", 0], ["⤵ Rotate +90°", 90], ["↩ Return 180°", 180], ["⤴ Rotate -90°", -90]], label="Orientation correction", info="Will apply the following rotation before restoring the image; the AI needs a good orientation to understand the content", value=0, interactive=True, visible=False)
     with gr.Group():
         prompt = gr.Textbox(label="Image description", info="Help the AI understand what the image represents; describe as much as possible, especially the details we can't see on the original image; I advise you to write in English because other languages may not be handled", value="", placeholder="A 33 years old man, walking, in the street, Santiago, morning, Summer, photorealistic", lines=3)
         prompt_hint = gr.HTML("You can use a <a href='"'https://huggingface.co/spaces/MaziyarPanahi/llava-llama-3-8b'"'>LlaVa space</a> to auto-generate the description of your image.")
@@ -610,6 +607,7 @@ with gr.Blocks(title="SUPIR") as interface:
         fn = stage2_process,
         inputs = [
             input_image,
+            rotation,
             denoise_image,
             prompt,
             a_prompt,
@@ -647,6 +645,7 @@ with gr.Blocks(title="SUPIR") as interface:
     examples = [
         [
             "./Examples/Example1.png",
+            0,
             None,
             "Group of people, walking, happy, in the street, photorealistic, 8k, extremely detailled",
             "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
@@ -677,6 +676,7 @@ with gr.Blocks(title="SUPIR") as interface:
         ],
         [
             "./Examples/Example2.jpeg",
+            0,
             None,
             "The head of a tabby cat, in a house, photorealistic, 8k, extremely detailled",
             "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
@@ -715,20 +715,7 @@ with gr.Blocks(title="SUPIR") as interface:
     input_image.upload(fn = check_upload, inputs = [
         input_image
     ], outputs = [
-        rotate_anti_90_button,
-        rotate_90_button
-    ], queue = False, show_progress = False)
-
-    rotate_anti_90_button.click(fn = rotate_anti_90, inputs = [
-        input_image
-    ], outputs = [
-        input_image
-    ], queue = False, show_progress = False)
-
-    rotate_90_button.click(fn = rotate_90, inputs = [
-        input_image
-    ], outputs = [
-        input_image
+        rotation
     ], queue = False, show_progress = False)
 
     denoise_button.click(fn = check, inputs = [
@@ -761,6 +748,7 @@ with gr.Blocks(title="SUPIR") as interface:
         input_image
     ], outputs = [], queue = False, show_progress = False).success(fn=stage2_process, inputs = [
         input_image,
+        rotation,
         denoise_image,
         prompt,
         a_prompt,
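Taken together, the hunks wire the new control as follows: the rotation radio starts hidden, check_upload reveals it once an image is uploaded, and its numeric value is passed to stage2_process, which rotates the array before restoration. A condensed, hypothetical sketch of that wiring (component names as in the diff; output_image, restore_button and the simplified stage2_process signature are illustrative placeholders, and the real interface has many more components and parameters):

# Condensed sketch, not the actual app: shows only the rotation wiring.
import numpy as np
import gradio as gr

def check_upload(input_image):
    if input_image is None:
        raise gr.Error("Please provide an image to restore.")
    return gr.update(visible = True)  # one update per component listed in `outputs`

def stage2_process(noisy_image, rotation):
    # Rotate before restoring, as in the hunk above.
    if rotation == 90:
        noisy_image = np.array(list(zip(*noisy_image[::-1])))
    elif rotation == 180:
        noisy_image = np.array(list(zip(*noisy_image[::-1])))
        noisy_image = np.array(list(zip(*noisy_image[::-1])))
    elif rotation == -90:
        noisy_image = np.array(list(zip(*noisy_image))[::-1])
    return noisy_image

with gr.Blocks() as demo:
    input_image = gr.Image(type="numpy")
    # [label, value] pairs: the handler receives the numeric value (0, 90, 180, -90).
    rotation = gr.Radio([["No rotation", 0], ["⤵ Rotate +90°", 90], ["↩ Return 180°", 180], ["⤴ Rotate -90°", -90]], value=0, visible=False)
    output_image = gr.Image(type="numpy")
    restore_button = gr.Button("Restore")

    # Uploading an image reveals the hidden radio; restoring reads its value.
    input_image.upload(fn=check_upload, inputs=[input_image], outputs=[rotation])
    restore_button.click(fn=stage2_process, inputs=[input_image, rotation], outputs=[output_image])

# demo.launch()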