from typing import Optional, Tuple

import gradio as gr
import numpy as np
from matplotlib import pyplot as plt
from perspective2d import PerspectiveFields

from mapper.utils.exif import EXIF
from mapper.utils.io import read_image
from mapper.utils.wrappers import Camera

description = """

MapItAnywhere (MIA)
Empowering Bird’s Eye View Mapping using Large-scale Public Data with Neural Matching

Project Page | Paper | Code

Mapper generates bird's-eye-view maps from first-person-view monocular images. Try our demo by uploading your own images.

""" class ImageCalibrator(PerspectiveFields): def __init__(self, version: str = "Paramnet-360Cities-edina-centered"): super().__init__(version) self.eval() def run( self, image_rgb: np.ndarray, focal_length: Optional[float] = None, exif: Optional[EXIF] = None, ) -> Tuple[Tuple[float, float], Camera]: h, w, *_ = image_rgb.shape if focal_length is None and exif is not None: _, focal_ratio = exif.extract_focal() if focal_ratio != 0: focal_length = focal_ratio * max(h, w) calib = self.inference(img_bgr=image_rgb[..., ::-1]) roll_pitch = (calib["pred_roll"].item(), calib["pred_pitch"].item()) if focal_length is None: vfov = calib["pred_vfov"].item() focal_length = h / 2 / np.tan(np.deg2rad(vfov) / 2) camera = Camera.from_dict( { "model": "SIMPLE_PINHOLE", "width": w, "height": h, "params": [focal_length, w / 2 + 0.5, h / 2 + 0.5], } ) return roll_pitch, camera def run(input_img): calibrator = ImageCalibrator().to("cuda") image_path = input_img.name image = read_image(image_path) image = image.to("cuda") with open(image_path, "rb") as fid: exif = EXIF(fid, lambda: image.shape[:2]) gravity, camera = calibrator.run(image, exif=exif) print(f"Gravity: {gravity}") print(f"Camera: {camera._data}") plt.imshow(image) plt.axis('off') fig1 = plt.gcf() return fig1 demo = gr.Interface( fn=run, inputs=[ gr.File(file_types=["image"], label="Input Image") ], outputs=[ gr.Plot(label="Inputs", format="png") ], description=description,) demo.launch(share=True)