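# DepthSense: a Gradio app that estimates a depth map for an uploaded image
# using the Intel/dpt-hybrid-midas model via the transformers pipeline API.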
import numpy as np
import torch
import gradio as gr
from PIL import Image
from transformers import pipeline

# Load the depth-estimation pipeline once at startup so every request reuses it.
depth_estimator = pipeline(task="depth-estimation",
                           model="Intel/dpt-hybrid-midas")
def launch(input_image):
    # Run the model; the pipeline returns a dict with a "predicted_depth" tensor.
    out = depth_estimator(input_image)

    # Resize the raw prediction back to the input resolution.
    # PIL's .size is (width, height), so reverse it to get (height, width).
    prediction = torch.nn.functional.interpolate(
        out["predicted_depth"].unsqueeze(1),
        size=input_image.size[::-1],
        mode="bicubic",
        align_corners=False,
    )

    # Scale the prediction to the 0-255 range and convert it to an
    # 8-bit grayscale PIL image for display.
    output = prediction.squeeze().numpy()
    formatted = (output * 255 / np.max(output)).astype("uint8")
    depth = Image.fromarray(formatted)
    return depth
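# A quick local sanity check, independent of the web UI (assumes a file
# named "example.jpg" exists; the filename is only an illustration):
# depth_map = launch(Image.open("example.jpg"))
# depth_map.save("example_depth.png")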
iface = gr.Interface(fn=launch,
                     inputs=[gr.Image(label="Upload image", type="pil")],
                     outputs=[gr.Image(label="Depth Map", type="pil")],
                     title="DepthSense",
                     description="Dive into the unseen depths of your images! Simply upload an image and let DepthSense reveal a whole new dimension of your visuals, instantly.")
iface.launch()
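# Note: iface.launch() serves the app locally (and on Hugging Face Spaces);
# passing share=True instead would also generate a temporary public link.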