Fabrice-TIERCELIN committed
Commit a08a8ca
1 Parent(s): 581d0c0

Readd heic format support

Files changed (2)
  1. app.py +8 -4
  2. requirements.txt +42 -41
app.py CHANGED
@@ -16,6 +16,9 @@ from gradio_imageslider import ImageSlider
 from PIL import Image
 from SUPIR.util import HWC3, upscale_image, fix_resize, convert_dtype, create_SUPIR_model, load_QF_ckpt
 from huggingface_hub import hf_hub_download
+from pillow_heif import register_heif_opener
+
+register_heif_opener()
 
 max_64_bit_int = 2**32 - 1
 
@@ -203,8 +206,8 @@ def restore_in_Xmin(
 
     input_format = re.sub(r"^.*\.([^\.]+)$", r"\1", noisy_image)
 
-    if input_format not in ['png', 'webp', 'jpg', 'jpeg', 'gif', 'bmp']:
-        gr.Warning('Invalid image format. Please first convert into *.png, *.webp, *.jpg, *.jpeg, *.gif or *.bmp.')
+    if input_format not in ['png', 'webp', 'jpg', 'jpeg', 'gif', 'bmp', 'heic']:
+        gr.Warning('Invalid image format. Please first convert into *.png, *.webp, *.jpg, *.jpeg, *.gif, *.bmp or *.heic.')
         return None, None, None, None
 
     if output_format == "input":
@@ -508,6 +511,7 @@ The images are not stored but the logs are saved during a month.
 ## **How to get SUPIR**
 You can get SUPIR on HuggingFace by [duplicating this space](https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR?duplicate=true) and set GPU.
 You can also install SUPIR on your computer following [this tutorial](https://huggingface.co/blog/MonsterMMORPG/supir-sota-image-upscale-better-than-magnific-ai).
+You can install _Pinokio_ on your computer and then install _SUPIR_ into it. It should be quite easy if you have an Nvidia GPU.
 ## **Terms of use**
 By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research. Please submit a feedback to us if you get any inappropriate answer! We will collect those to keep improving our models. For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
 ## **License**
@@ -526,14 +530,14 @@ with gr.Blocks() as interface:
     """)
     gr.HTML(title_html)
 
-    input_image = gr.Image(label="Input (*.png, *.webp, *.jpeg, *.jpg, *.gif, *.bmp)", show_label=True, type="filepath", height=600, elem_id="image-input")
+    input_image = gr.Image(label="Input (*.png, *.webp, *.jpeg, *.jpg, *.gif, *.bmp, *.heic)", show_label=True, type="filepath", height=600, elem_id="image-input")
     rotation = gr.Radio([["No rotation", 0], ["⤵ Rotate +90°", 90], ["↩ Return 180°", 180], ["⤴ Rotate -90°", -90]], label="Orientation correction", info="Will apply the following rotation before restoring the image; the AI needs a good orientation to understand the content", value=0, interactive=True, visible=False)
     with gr.Group():
         prompt = gr.Textbox(label="Image description", info="Help the AI understand what the image represents; describe as much as possible, especially the details we can't see on the original image; you can write in any language", value="", placeholder="A 33 years old man, walking, in the street, Santiago, morning, Summer, photorealistic", lines=3)
         prompt_hint = gr.HTML("You can use a <a href='https://huggingface.co/spaces/MaziyarPanahi/llava-llama-3-8b'>LlaVa space</a> to auto-generate the description of your image.")
         upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8], ["x9", 9], ["x10", 10]], label="Upscale factor", info="Resolution x1 to x10", value=2, interactive=True)
         allocation = gr.Radio([["1 min", 1], ["2 min", 2], ["3 min", 3], ["4 min", 4], ["5 min", 5], ["6 min", 6], ["7 min", 7], ["8 min (discouraged)", 8], ["9 min (discouraged)", 9], ["10 min (discouraged)", 10]], label="GPU allocation time", info="lower=May abort run, higher=Quota penalty for next runs", value=7, interactive=True)
-        output_format = gr.Radio([["As input", "input"], ["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extention", value="input", interactive=True)
+        output_format = gr.Radio([["As input", "input"], ["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"], ["*.heic", "heic"]], label="Image format for result", info="File extention", value="input", interactive=True)
 
     with gr.Accordion("Pre-denoising (optional)", open=False):
         gamma_correction = gr.Slider(label="Gamma Correction", info = "lower=lighter, higher=darker", minimum=0.1, maximum=2.0, value=1.0, step=0.1)
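For context on how this change works: `pillow_heif.register_heif_opener()` installs a HEIF/HEIC codec into Pillow's plugin registry at import time, so the app's existing `Image.open` path handles `.heic` files with no other code changes. Below is a minimal sketch of the mechanism, reusing the same extension regex the commit adds `'heic'` to; the `check_and_open` helper and `photo.heic` file name are illustrative, not part of app.py.

```python
import re

from PIL import Image
from pillow_heif import register_heif_opener

register_heif_opener()  # hooks HEIF/HEIC decoding (and encoding) into Pillow

ALLOWED = ['png', 'webp', 'jpg', 'jpeg', 'gif', 'bmp', 'heic']

def check_and_open(path):
    # Same extension extraction as restore_in_Xmin() in app.py
    input_format = re.sub(r"^.*\.([^\.]+)$", r"\1", path)
    if input_format not in ALLOWED:
        raise ValueError(f"Unsupported image format: {input_format}")
    return Image.open(path)  # .heic now opens like any built-in format

# "photo.heic" is a hypothetical local file, used only for illustration
image = check_and_open("photo.heic")
print(image.size, image.mode)
```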
requirements.txt CHANGED
@@ -1,41 +1,42 @@
-fastapi>=0.111.0
-gradio>=4.31.4
-gradio_imageslider>=0.0.20
-gradio_client>=0.16.4
-Markdown>=3.6
-numpy>=1.26.4
-requests>=2.32.2
-sentencepiece>=0.2.0
-tokenizers>=0.11.3
-torchvision>=0.18.0
-uvicorn>=0.29.0
-wandb>=0.17.0
-httpx>=0.27.0
-transformers>=4.41.0
-accelerate>=0.30.1
-scikit-learn>=1.5.0
-sentencepiece>=0.2.0
-einops>=0.8.0
-einops-exts>=0.0.4
-timm>=1.0.3
-openai-clip>=1.0.1
-fsspec>=2024.5.0
-kornia>=0.7.2
-matplotlib>=3.9.0
-ninja>=1.11.1.1
-omegaconf>=2.3.0
-open-clip-torch==2.24.0
-opencv-python>=4.9.0.80
-pandas>=2.2.2
-Pillow>=10.3.0
-pytorch-lightning>=2.2.4
-PyYAML>=6.0.1
-scipy>=1.13.0
-tqdm>=4.66.4
-triton>=2.3.0
-urllib3>=2.2.1
-webdataset>=0.2.86
-xformers>=0.0.26.post1
-facexlib>=0.3.0
-k-diffusion>=0.1.1.post1
-diffusers>=0.27.2
+fastapi>=0.111.0
+gradio>=4.31.4
+gradio_imageslider>=0.0.20
+gradio_client>=0.16.4
+Markdown>=3.6
+numpy>=1.26.4
+requests>=2.32.2
+sentencepiece>=0.2.0
+tokenizers>=0.11.3
+torchvision>=0.18.0
+uvicorn>=0.29.0
+wandb>=0.17.0
+httpx>=0.27.0
+transformers>=4.41.0
+accelerate>=0.30.1
+scikit-learn>=1.5.0
+sentencepiece>=0.2.0
+einops>=0.8.0
+einops-exts>=0.0.4
+timm>=1.0.3
+openai-clip>=1.0.1
+fsspec>=2024.5.0
+kornia>=0.7.2
+matplotlib>=3.9.0
+ninja>=1.11.1.1
+omegaconf>=2.3.0
+open-clip-torch==2.24.0
+opencv-python>=4.9.0.80
+pandas>=2.2.2
+Pillow>=10.3.0
+pytorch-lightning>=2.2.4
+PyYAML>=6.0.1
+scipy>=1.13.0
+tqdm>=4.66.4
+triton>=2.3.0
+urllib3>=2.2.1
+webdataset>=0.2.86
+xformers>=0.0.26.post1
+facexlib>=0.3.0
+k-diffusion>=0.1.1.post1
+diffusers>=0.27.2
+pillow-heif>=0.17.0
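The only functional addition here is the `pillow-heif>=0.17.0` pin; every other requirement is re-listed unchanged. A quick sanity check that the new dependency supports both writing and reading HEIC once its opener is registered; this is a sketch, not part of the commit, and the output file name is illustrative.

```python
# Round-trip check: encode a small image to HEIC and decode it back.
from PIL import Image
from pillow_heif import register_heif_opener

register_heif_opener()

img = Image.new("RGB", (64, 64), color=(200, 120, 40))
img.save("roundtrip.heic")  # encoded by pillow-heif's registered save handler
assert Image.open("roundtrip.heic").size == (64, 64)
print("pillow-heif HEIC round trip OK")
```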