# MonocularDepth/DPT.py
import cv2
import torch
import numpy as np
from PIL import Image
MODEL_DICT = {
    "DPT_Large": "MiDaS v3 - Large (highest accuracy, slowest inference speed)",
    "DPT_Hybrid": "MiDaS v3 - Hybrid (medium accuracy, medium inference speed)",
    "MiDaS_small": "MiDaS v2.1 - Small (lowest accuracy, highest inference speed)",
}

def load_model(model_type='DPT_Large'):
    '''load a MiDaS/DPT model from torch.hub; returns a dict with the model, its device, and its input transform'''
    assert model_type in MODEL_DICT, f'{model_type} is not a valid model_type: {list(MODEL_DICT)}'
    midas = torch.hub.load("intel-isl/MiDaS", model_type, force_reload=True)
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    print(f'---DPT will use device: {device}')
    midas.to(device)
    midas.eval()

    # each model family ships with its own input transform
    midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
    if model_type in ("DPT_Large", "DPT_Hybrid"):
        transform = midas_transforms.dpt_transform
    else:
        transform = midas_transforms.small_transform
    return {'midas': midas, 'device': device, 'transform': transform}
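
# Usage sketch (illustrative, not from the original file): the first call
# downloads weights via torch.hub, so it needs network access; 'DPT_Hybrid'
# is just one valid choice from MODEL_DICT.
#
#   model_obj = load_model('DPT_Hybrid')
#   model_obj.keys()  # dict_keys(['midas', 'device', 'transform'])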

def inference(img_array_rgb, model_obj, as_pil=False):
    '''run the DPT model on an RGB image array (H x W x 3) and return a uint8
    depth map: a PIL Image if as_pil is True, else a numpy array'''
    midas = model_obj['midas']
    transform = model_obj['transform']
    device = model_obj['device']

    input_batch = transform(img_array_rgb).to(device)
    with torch.no_grad():
        prediction = midas(input_batch)
        # upsample the low-resolution prediction back to the input image size
        prediction = torch.nn.functional.interpolate(
            prediction.unsqueeze(1),
            size=img_array_rgb.shape[:2],
            mode="bicubic",
            align_corners=False,
        ).squeeze()

    output = prediction.cpu().numpy()
    # normalize the relative inverse depth to 0-255 for visualization
    formatted = (output * 255 / np.max(output)).astype('uint8')
    return Image.fromarray(formatted) if as_pil else formatted
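
# Usage sketch (illustrative; 'input.jpg' is a placeholder path): OpenCV reads
# images as BGR, so convert to RGB before calling inference().
#
#   img = cv2.cvtColor(cv2.imread('input.jpg'), cv2.COLOR_BGR2RGB)
#   depth_u8 = inference(img, model_obj)                    # uint8 numpy array
#   inference(img, model_obj, as_pil=True).save('depth.png')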

# Original Gradio demo, kept for reference (uses the legacy gr.inputs/gr.outputs API):
# inputs = gr.inputs.Image(type='file', label="Original Image")
# outputs = gr.outputs.Image(type="pil", label="Output Image")
# title = "DPT-Large"
# description = "Gradio demo for DPT-Large: Vision Transformers for Dense Prediction. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
# article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2103.13413' target='_blank'>Vision Transformers for Dense Prediction</a> | <a href='https://github.com/intel-isl/MiDaS' target='_blank'>Github Repo</a></p>"
# examples = [['dog.jpg']]
# gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, analytics_enabled=False, examples=examples, enable_queue=True).launch(debug=True)
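
# Minimal command-line sketch (an assumption, not part of the original demo):
#   python DPT.py input.jpg depth.png
if __name__ == '__main__':
    import sys

    in_path, out_path = sys.argv[1], sys.argv[2]  # hypothetical CLI arguments
    model_obj = load_model('DPT_Large')
    rgb = cv2.cvtColor(cv2.imread(in_path), cv2.COLOR_BGR2RGB)
    inference(rgb, model_obj, as_pil=True).save(out_path)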