import gradio as gr
import os


os.system("git clone https://github.com/megvii-research/NAFNet")
os.system("mv NAFNet/* ./")
os.system("mv *.pth experiments/pretrained_models/")
os.system("python3 setup.py develop --no_cuda_ext --user")


def inference(image_l, image_r):
    # Save the uploaded stereo pair to a temporary directory.
    os.makedirs("tmp", exist_ok=True)
    image_l.save("tmp/lr_l.png", "PNG")
    image_r.save("tmp/lr_r.png", "PNG")
    # Run NAFSSR-L 4x stereo super-resolution on the saved pair.
    os.system("python basicsr/demo_ssr.py -opt options/test/NAFSSR/NAFSSR-L_4x.yml"
              + " --input_l_path ./tmp/lr_l.png --input_r_path ./tmp/lr_r.png"
              + " --output_l_path ./tmp/image_l.png --output_r_path ./tmp/image_r.png")
    return 'tmp/image_l.png', 'tmp/image_r.png'
   
title = "NAFNet"
description = "Gradio demo for <b>NAFNet: Nonlinear Activation Free Network for Image Restoration</b>. NAFNet achieves state-of-the-art performance on three tasks: image denoising, image debluring and stereo image super-resolution (SR). See the paper and project page for detailed results below. Here, we provide a demo for stereo image super-resolution (SR). To use it, simply upload your left and right view images, or click the examples to load them. Inference needs some time (>100s) since this demo uses CPU."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2204.04676' target='_blank'>Simple Baselines for Image Restoration</a> | <a href='https://arxiv.org/abs/2204.08714' target='_blank'>NAFSSR: Stereo Image Super-Resolution Using NAFNet</a>  | <a href='https://github.com/megvii-research/NAFNet' target='_blank'> Github Repo</a></p>"


examples = [['demo/lr_img_l.png', 'demo/lr_img_r.png']]
            
iface = gr.Interface(
    inference,
    [gr.inputs.Image(type="pil", label="Input (Left View)"),
     gr.inputs.Image(type="pil", label="Input (Right View)")],
    [gr.outputs.Image(type="file", label="Output (Left View)"),
     gr.outputs.Image(type="file", label="Output (Right View)")],
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    examples=examples,
)
iface.launch(debug=True, enable_queue=True)
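
# A minimal sketch (an assumption, not part of the original demo) of running the same
# NAFSSR inference without the Gradio UI, given the repo and pretrained weights are
# set up as above; subprocess.run stands in for os.system and the tmp/ paths mirror
# those used in inference():
#
#     import subprocess
#     subprocess.run(
#         ["python", "basicsr/demo_ssr.py",
#          "-opt", "options/test/NAFSSR/NAFSSR-L_4x.yml",
#          "--input_l_path", "./tmp/lr_l.png",
#          "--input_r_path", "./tmp/lr_r.png",
#          "--output_l_path", "./tmp/image_l.png",
#          "--output_r_path", "./tmp/image_r.png"],
#         check=True,
#     )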