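"""Gradio Space: interactive 3D visualization of depth maps predicted by Intel/dpt-large."""
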
import gradio as gr
from transformers import DPTFeatureExtractor, DPTForDepthEstimation
import torch
import numpy as np
from PIL import Image
from pathlib import Path

# depth_viewer is a local module shipped with this Space; depthviewer2html renders the
# image/depth pair as an HTML snippet embedding a Three.js viewer.
from depth_viewer import depthviewer2html

# Load the DPT-Large model and its preprocessor once at startup. (DPTFeatureExtractor
# is deprecated in recent transformers releases in favor of DPTImageProcessor.)
feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
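
# Optional: run inference on the GPU when one is available (assumption: a CUDA build of
# torch on GPU hosts; everything below also works on CPU).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)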


def process_image(image_path):
    image_path = Path(image_path)
    image = Image.open(image_path).convert("RGB")  # ensure 3-channel input for DPT
    # prepare image for the model
    encoding = feature_extractor(image, return_tensors="pt")
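    # Assumption: `device` was defined at load time above; BatchFeature.to moves all
    # contained tensors so the inputs stay on the same device as the model.
    encoding = encoding.to(device)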
    # forward pass
    with torch.no_grad():
        outputs = model(**encoding)
        predicted_depth = outputs.predicted_depth
    # interpolate to original size
    prediction = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1),  # (batch, H', W') -> (batch, 1, H', W')
        size=image.size[::-1],  # PIL size is (width, height); interpolate expects (height, width)
        mode="bicubic",
        align_corners=False,
    ).squeeze()
    output = prediction.cpu().numpy()
    # scale to 0-255 and convert to uint8 for display
    depth = (output * 255 / np.max(output)).astype("uint8")
    h = depthviewer2html(image, depth)
    return [h]
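

# Hypothetical fallback sketch (not the Space's actual Three.js viewer): if the local
# depth_viewer module were unavailable, the depth map could still be returned as an
# inline image, e.g.:
#
#   import base64, io
#   def depthviewer2html(image, depth):
#       buf = io.BytesIO()
#       Image.fromarray(depth).save(buf, format="PNG")
#       b64 = base64.b64encode(buf.getvalue()).decode()
#       return f'<img src="data:image/png;base64,{b64}" alt="depth map"/>'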

title = "3D Visualization of Depth Maps Generated Using MiDaS"
description = "Improved 3D interactive depth viewer using Three.js embedded in a Gradio app. For more details, see the <a href='https://colab.research.google.com/drive/1l2l8U7Vhq9RnvV2tHyfhrXKNuHfmb4IP?usp=sharing'>Colab notebook</a>."
examples = [["examples/owl1.jpg"], ["examples/marsattacks.jpg"], ["examples/kitten.jpg"]]

iface = gr.Interface(
    fn=process_image,
    inputs=[gr.Image(type="filepath", label="Input Image")],
    outputs=[gr.HTML(label="Depth Viewer", elem_id="depth-viewer")],
    title=title,
    description=description,
    examples=examples,
    allow_flagging="never",
    cache_examples=False,
    css="#depth-viewer {height: 300px;}",
)
# Note: enable_queue is a Gradio 3.x launch argument; newer releases use iface.queue() instead.
iface.launch(debug=True, enable_queue=False)