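"""Gradio Space comparing three monocular depth-estimation models side by side:
DPT-Large, DPT with a BeiT backbone, and Depth Anything (small). All three run
on the same input image and their predicted depth maps are shown together."""
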
import gradio as gr
import spaces  # Hugging Face ZeroGPU helper; provides the @spaces.GPU decorator
from transformers import pipeline

# One depth-estimation pipeline per model; weights are downloaded on first use.
dpt_beit = pipeline(task="depth-estimation", model="Intel/dpt-beit-base-384")
depth_anything = pipeline(task="depth-estimation", model="nielsr/depth-anything-small")
dpt_large = pipeline(task="depth-estimation", model="Intel/dpt-large")

# Each inference function is wrapped in @spaces.GPU so that, on a ZeroGPU
# Space, a GPU is attached for the duration of the call. The pipelines return
# a dict whose "depth" entry is a PIL image of the predicted depth map.
@spaces.GPU
def depth_anything_inference(image):
  return depth_anything(image)["depth"]

@spaces.GPU
def dpt_beit_inference(image):
  return dpt_beit(image)["depth"]

@spaces.GPU
def dpt_large_inference(image):
  return dpt_large(image)["depth"]

def infer(image):
  # Run all three models on the same image; order matches the output widgets.
  return dpt_large_inference(image), dpt_beit_inference(image), depth_anything_inference(image)
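
# Illustrative local check (not executed by the Space): assuming a file
# "example.jpg" exists in the working directory, the three depth maps could be
# produced and saved with something like:
#   from PIL import Image
#   large, beit, anything = infer(Image.open("example.jpg"))
#   anything.save("depth_anything.png")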


css = """
  #mkd {
    height: 500px; 
    overflow: auto; 
    border: 1px solid #ccc; 
  }
"""
with gr.Blocks(css=css) as demo:
  gr.HTML("<h1><center>Compare Depth Estimation Models</center></h1>")

  with gr.Row():
    # type="pil" makes Gradio hand the callbacks a PIL image, which the
    # transformers pipelines accept directly.
    input_img = gr.Image(type="pil", label="Input Image")
    with gr.Row():
      output_1 = gr.Image(type="pil", label="DPT-Large")
      output_2 = gr.Image(type="pil", label="DPT with BeiT Backbone")
      output_3 = gr.Image(type="pil", label="Depth Anything")

  # Re-run all three models whenever the uploaded image changes.
  input_img.change(infer, [input_img], [output_1, output_2, output_3])


# debug=True blocks the main thread and prints errors to the console, which is
# useful while developing in a notebook or on a Space.
demo.launch(debug=True)
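
# Likely dependencies for this Space (inferred from the imports above, not from
# the original file): gradio, transformers, torch, and spaces in requirements.txt.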