import gradio as gr
import torch
import matplotlib.pyplot as plt
from PIL import ImageOps


def pump_matching(img1, img2, trained_with_st=False, scale=300, max_scale=1, max_rot=0, use_gpu=False):
    """Match two images with PUMP and return one annotated matplotlib figure per image."""
    # Apply the EXIF orientation so uploaded photos are matched the way they are displayed.
    img1 = ImageOps.exif_transpose(img1)
    img2 = ImageOps.exif_transpose(img2)

    # Single-scale matching is enough when no scale or rotation invariance is requested.
    use_singlescale = max_scale == 1 and max_rot == 0
    if use_singlescale:
        from test_singlescale import Main, arg_parser
    else:
        from test_multiscale import Main, arg_parser

    # Build the command line expected by the test scripts (the image paths are dummies,
    # since the PIL images are passed directly below).
    parser = arg_parser()
    args_list = ['--img1', 'dummy', '--img2', 'dummy', '--post-filter',
                 '--desc', 'PUMP-stytrf' if trained_with_st else 'PUMP', '--resize', str(scale)]
    if not use_gpu:
        args_list += ['--device', 'cpu']
    if not use_singlescale:
        args_list += ['--max-scale', str(max_scale), '--max-rot', str(max_rot)]
    args = parser.parse_args(args_list)

    corres = Main().run_from_args_with_images(img1, img2, args)

    # One figure per image, with the matches overlaid on top.
    fig1 = plt.figure(1)
    plt.clf()
    ax1 = plt.gca()
    ax1.imshow(img1)
    ax1.axis('off')
    plt.tight_layout(pad=0)

    fig2 = plt.figure(2)
    plt.clf()
    ax2 = plt.gca()
    ax2.imshow(img2)
    ax2.axis('off')
    plt.tight_layout(pad=0)

    from tools.viz import plot_grid
    if corres.shape[-1] > 4:
        corres = corres[corres[:, 4] > 0, :]  # select non-null correspondences
    if corres.shape[0] > 0:
        plot_grid(corres, ax1, ax2, marker='+')

    # Drop the references to the input images before returning the figures.
    img1 = None
    img2 = None
    return fig1, fig2


has_cuda = torch.cuda.is_available() and torch.cuda.device_count() > 0

title = "PUMP local descriptor demo"

description = """This is a visualization demo for the PUMP local descriptors presented in our CVPR 2022 paper
"PUMP: Pyramidal and Uniqueness Matching Priors for Unsupervised Learning of Local Features".

WARNING: due to limited memory and computational resources, this demo runs on CPU with downscaled images,
without multi-scale or multi-rotation testing. Please check out our original GitHub repo for these features.
"""

article = "Original Github Repo"

iface = gr.Interface(
    fn=pump_matching,
    inputs=[
        gr.inputs.Image(shape=None, type="pil", label="First Image"),
        gr.inputs.Image(shape=None, type="pil", label="Second Image"),
        gr.inputs.Checkbox(default=False, label="Use the model trained with style transfer"),
        # Extra controls, disabled in this hosted demo (see the WARNING in the description):
        # gr.inputs.Slider(minimum=300, maximum=600, default=400, step=1, label="Original test scale"),
        # gr.inputs.Slider(minimum=1, maximum=4, default=1, step=0.1, label="Multi Scale Testing - maximum scale (makes it slower)"),
        # gr.inputs.Slider(minimum=0, maximum=180, default=0, step=45, label="Multi Rotation Testing - max rot (makes it slower)"),
        # + ([gr.inputs.Checkbox(default=True, label='Use GPU instead of CPU')] if has_cuda else []),
    ],
    outputs=[
        gr.outputs.Image(type="plot", label="Matches in the first image"),
        gr.outputs.Image(type="plot", label="Matches in the second image"),
    ],
    title=title,
    theme='peach',
    description=description,
    article=article,
    examples=[
        ['datasets/gradio_demo/cat_src.jpg', 'datasets/gradio_demo/cat_tgt.jpg', False],  # ,400,1,0]+([True] if has_cuda else []),
        ['datasets/gradio_demo/food_src.jpg', 'datasets/gradio_demo/food_tgt.jpg', False],  # ,400,1,0]+([True] if has_cuda else []),
        ['datasets/demo_warp/mountains_src.jpg', 'datasets/demo_warp/mountains_tgt.jpg', False],  # ,400,1,0]+([True] if has_cuda else []),
    ],
)

iface.launch(enable_queue=True)