import gradio as gr
from transformers import pipeline
import torch
import spaces
from gradio_depth_pred import create_demo as create_depth_pred_demo
from gradio_im_to_3d import create_demo as create_im_to_3d_demo
# Assumption: the "360 Panorama to 3D" tab below needs a matching local
# module; this import was missing from the original file.
from gradio_pano_to_3d import create_demo as create_pano_to_3d_demo
# dpt_beit = pipeline(task="depth-estimation", model="Intel/dpt-beit-base-384", device=0)
dpt_beit = pipeline(task="depth-estimation", model="Intel/dpt-beit-large-512", device=0)
# depth_anything = pipeline(task="depth-estimation", model="nielsr/depth-anything-small", device=0)
depth_anything = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-large-hf", device=0)
dpt_large = pipeline(task="depth-estimation", model="Intel/dpt-large", device=0)
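# Each depth-estimation pipeline returns a dict whose "depth" entry is the
# predicted depth map rendered as a PIL.Image, which Gradio can display
# directly (the raw tensor is available under "predicted_depth" if needed).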

def depth_anything_inference(img):
    return depth_anything(img)["depth"]

def dpt_beit_inference(img):
    return dpt_beit(img)["depth"]

def dpt_large_inference(img):
    return dpt_large(img)["depth"]
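
# On ZeroGPU Spaces, the @spaces.GPU decorator allocates a GPU for the
# duration of each call to the decorated function.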
@spaces.GPU
def infer(img):
    if img is None:
        return None, None, None
    return dpt_large_inference(img), dpt_beit_inference(img), depth_anything_inference(img)
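
# A minimal sketch of calling one pipeline directly, outside Gradio
# (assumes a local "bee.jpg", as used in the examples below):
#
#   from PIL import Image
#   depth_map = dpt_large(Image.open("bee.jpg"))["depth"]
#   depth_map.save("bee_depth.png")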
css = """
#mkd {
height: 500px;
overflow: auto;
border: 1px solid #ccc;
}
"""
css_zoe = """
#img-display-container {
max-height: 50vh;
}
#img-display-input {
max-height: 40vh;
}
#img-display-output {
max-height: 40vh;
}
"""
with gr.Blocks(css=css) as demo:
    gr.HTML("<h1><center>Compare Depth Estimation Models</center></h1>")
    gr.Markdown("In this Space, you can compare different depth estimation models: [DPT-Large](https://huggingface.co/Intel/dpt-large), [DPT with BeiT backbone](https://huggingface.co/Intel/dpt-beit-large-512) and the recent [Depth Anything large checkpoint](https://huggingface.co/LiheYoung/depth-anything-large-hf). 🤩")
    gr.Markdown("You can also see how they compare in terms of speed [here](https://huggingface2.notion.site/DPT-Benchmarks-1e516b0ba193460e865c47b3a5681efb?pvs=4).")
    gr.Markdown("Simply upload an image or try one of the examples to see the outputs.")
    with gr.Column():
        with gr.Row():
            input_img = gr.Image(label="Input Image", type="pil")
        with gr.Row():
            output_1 = gr.Image(type="pil", label="Intel dpt-large")
            output_2 = gr.Image(type="pil", label="DPT with BeiT backbone, dpt-beit-large-512")
            output_3 = gr.Image(type="pil", label="LiheYoung/depth-anything-large-hf")
    gr.Examples(
        [["bee.jpg"], ["cat.png"], ["cats.png"]],
        inputs=input_img,
        outputs=[output_1, output_2, output_3],
        fn=infer,
        cache_examples=True,
        label="Click on any example below to get depth estimation results quickly 👇",
    )
    # Re-run inference whenever the uploaded image changes.
    input_img.change(infer, [input_img], [output_1, output_2, output_3])
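
# The second Blocks app below (carried over from the ZoeDepth Space)
# referenced `title`, `description`, and `model` without defining them.
# The definitions here are a hedged sketch: the text is placeholder copy,
# and the model load follows the isl-org/ZoeDepth README; the original
# Space's code may differ.
title = "# ZoeDepth"
description = "Metric depth estimation with ZoeDepth."
model = torch.hub.load("isl-org/ZoeDepth", "ZoeD_N", pretrained=True)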
with gr.Blocks(css=css + css_zoe) as demo2:
    gr.Markdown(title)
    gr.Markdown(description)
    with gr.Tab("Depth Prediction"):
        create_depth_pred_demo(model)
    with gr.Tab("Image to 3D"):
        create_im_to_3d_demo(model)
    with gr.Tab("360 Panorama to 3D"):
        create_pano_to_3d_demo(model)
    gr.HTML('''<br><br><br><center>You can duplicate this Space to skip the queue: <a href="https://huggingface.co/spaces/shariqfarooq/ZoeDepth?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a><br>
    <p><img src="https://visitor-badge.glitch.me/badge?page_id=shariqfarooq.zoedepth_demo_hf" alt="visitors"></p></center>''')

# Only the comparison demo is launched; demo2 is defined but never served here.
demo.launch(debug=True, share=True)