import gradio as gr
import os

# Fetch the NAFNet code base, move the pretrained weights into place, and install
# the package in development mode (CPU-only, no CUDA extensions).
os.system("git clone https://github.com/megvii-research/NAFNet")
os.system("mv NAFNet/* ./")
os.system("mv *.pth experiments/pretrained_models/")
os.system("python3 setup.py develop --no_cuda_ext --user")


def inference(image_l, image_r):
    # Write the uploaded stereo pair to disk, run the NAFSSR demo script,
    # and return the paths of the super-resolved left/right views.
    if not os.path.exists('tmp'):
        os.system('mkdir tmp')
    image_l.save("tmp/lr_l.png", "PNG")
    image_r.save("tmp/lr_r.png", "PNG")
    os.system("python basicsr/demo_ssr.py -opt options/test/NAFSSR/NAFSSR-L_4x.yml"
              + " --input_l_path ./tmp/lr_l.png --input_r_path ./tmp/lr_r.png"
              + " --output_l_path ./tmp/image_l.png --output_r_path ./tmp/image_r.png")
    return 'tmp/image_l.png', 'tmp/image_r.png'


title = "NAFNet"
description = ("Gradio demo for NAFNet: Nonlinear Activation Free Network for Image Restoration. "
               "NAFNet achieves state-of-the-art performance on three tasks: image denoising, "
               "image deblurring, and stereo image super-resolution (SR). See the paper and project "
               "page below for detailed results. Here, we provide a demo for stereo image "
               "super-resolution (SR). To use it, simply upload your left and right view images, or "
               "click the examples to load them. Inference takes some time (>100 s) since this demo "
               "runs on the CPU.")
article = ("<p style='text-align: center'>Simple Baselines for Image Restoration | "
           "NAFSSR: Stereo Image Super-Resolution Using NAFNet | "
           "<a href='https://github.com/megvii-research/NAFNet' target='_blank'>Github Repo</a></p>")

examples = [['demo/lr_img_l.png', 'demo/lr_img_r.png']]

# Note: this demo uses the legacy Gradio input/output components and `enable_queue` flag.
iface = gr.Interface(
    inference,
    [gr.inputs.Image(type="pil", label="Input (Left View)"),
     gr.inputs.Image(type="pil", label="Input (Right View)")],
    [gr.outputs.Image(type="file", label="Output (Left View)"),
     gr.outputs.Image(type="file", label="Output (Right View)")],
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    examples=examples
)
iface.launch(debug=True, enable_queue=True)