import gradio as gr
from transformers import pipeline
import spaces

# Load the three depth-estimation pipelines to compare.
dpt_beit = pipeline(task="depth-estimation", model="Intel/dpt-beit-base-384")
depth_anything = pipeline(task="depth-estimation", model="nielsr/depth-anything-small")
dpt_large = pipeline(task="depth-estimation", model="Intel/dpt-large")


# spaces.GPU requests ZeroGPU hardware on Hugging Face Spaces for each call;
# it is a no-op when running locally.
@spaces.GPU
def depth_anything_inference(image):
    return depth_anything(image)["depth"]


@spaces.GPU
def dpt_beit_inference(image):
    return dpt_beit(image)["depth"]


@spaces.GPU
def dpt_large_inference(image):
    return dpt_large(image)["depth"]


def infer(image):
    # Run all three models on the same input so the outputs can be compared side by side.
    return dpt_large_inference(image), dpt_beit_inference(image), depth_anything_inference(image)


css = """
#mkd {
  height: 500px;
  overflow: auto;
  border: 1px solid #ccc;
}
"""
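# For reference only (not wired into the UI): a minimal sketch of inspecting a
# pipeline's raw output. The depth-estimation pipeline returns a dict holding a
# "depth" PIL image and a "predicted_depth" tensor; "example.jpg" is a
# hypothetical placeholder path.
def inspect_depth_output(image_path="example.jpg"):
    result = dpt_large(image_path)
    print(type(result["depth"]))            # PIL.Image.Image, ready to display
    print(result["predicted_depth"].shape)  # raw per-pixel depth tensor
    return result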

with gr.Blocks(css=css) as demo:
    gr.HTML("<h1><center>Compare Depth Estimation Models</center></h1>")
    with gr.Row():
        input_img = gr.Image(label="Input Image")
    with gr.Row():
        output_1 = gr.Image(type="pil", label="DPT-Large")
        output_2 = gr.Image(type="pil", label="DPT with BeiT Backbone")
        output_3 = gr.Image(type="pil", label="Depth Anything")
    gr.Examples(
        [["https://huggingface.co/spaces/merve/compare_depth_models/resolve/main/bee.JPG"]],
        inputs=input_img,
        outputs=[output_1, output_2, output_3],
        fn=infer,
        cache_examples=True,
        label="Click on any of the examples below to get depth estimation results quickly 👇",
    )
    # Re-run all three models whenever the input image changes.
    input_img.change(infer, [input_img], [output_1, output_2, output_3])

demo.launch(debug=True)