import gradio as gr
from transformers import DPTFeatureExtractor, DPTForDepthEstimation
import torch
import numpy as np
from PIL import Image, ImageOps
from pathlib import Path
import glob
from autostereogram.converter import StereogramConverter
from datetime import datetime
import time
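
# Load the pretrained DPT model and feature extractor for depth estimation,
# plus the converter that turns depth maps into autostereograms.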
feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
stereo_converter = StereogramConverter()
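
# End-to-end pipeline: load the image, estimate depth with DPT,
# normalize and pad the depth map, then render it as an autostereogram.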
def process_image(image_path):
    print("\n\n\n")
    print("Processing image:", image_path)
    last_time = time.time()
    image_raw = Image.open(Path(image_path))
    # resize to a fixed width of 1280 px, preserving the aspect ratio
    image = image_raw.resize(
        (1280, int(1280 * image_raw.size[1] / image_raw.size[0])),
        Image.Resampling.LANCZOS,
    )
    # prepare image for the model
    encoding = feature_extractor(image, return_tensors="pt")
    # forward pass
    with torch.no_grad():
        outputs = model(**encoding)
        predicted_depth = outputs.predicted_depth
    # interpolate to original size
    prediction = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1),
        size=image.size[::-1],
        mode="bicubic",
        align_corners=False,
    ).squeeze()
    output = prediction.cpu().numpy()
    # normalize the depth map to 0-255 and pad it to 1280x720
    depth_image = (output * 255 / np.max(output)).astype("uint8")
    depth_image_padded = np.array(
        ImageOps.pad(Image.fromarray(depth_image), (1280, 720))
    )
    # convert the padded depth map into an autostereogram
    stereo_image = stereo_converter.convert_depth_to_stereogram_with_thread_pool(
        depth_image_padded, False
    ).astype(np.uint8)
    stereo_image_pil = Image.fromarray(stereo_image).convert("RGB")
    # save the result with a timestamped name so it can be offered for download
    image_name = f'stereo_image_{datetime.now().strftime("%Y%m%d_%H%M%S")}.jpg'
    stereo_image_pil.save(image_name)
    print(time.time() - last_time)
    print("\n\n\n")
    return [depth_image_padded, stereo_image, image_name]
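
# Example images shipped with the Space (expects .jpg files in an examples/ folder).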
examples_images = [[f] for f in sorted(glob.glob('examples/*.jpg'))]
blocks = gr.Blocks()
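
# Components are created up front so the example loader can reference them
# and so they can be placed in the layout below with .render().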
input_image = gr.Image(type="filepath", label="Input Image")
predicted_depth = gr.Image(label="Predicted Depth", type="pil")
autostereogram = gr.Image(label="Autostereogram", type="pil")
file_download = gr.File(label="Download Image")
def load_example(example_id):
    # Map the clicked Dataset row (an index) back to the example's input values
    processed_examples = [
        component.preprocess_example(sample)
        for component, sample in zip(
            [input_image], examples_images[example_id]
        )
    ]
    if len(processed_examples) == 1:
        return processed_examples[0]
    else:
        return processed_examples
with blocks:
    gr.Markdown('''
## Depth Image to Autostereogram (Magic Eye)
This demo is a variation of the original [DPT Demo](https://huggingface.co/spaces/nielsr/dpt-depth-estimation).
It runs zero-shot depth estimation on an image, then uses [pystereogram](https://github.com/yxiao1996/pystereogram)
to generate an autostereogram (Magic Eye).
<base target="_blank">
''')
    with gr.Row():
        examples_c = gr.components.Dataset(
            components=[input_image],
            samples=examples_images,
            type="index",
        )
        examples_c.click(
            load_example,
            inputs=[examples_c],
            outputs=[input_image],
            _postprocess=False,
            queue=False,
        )
    with gr.Row():
        with gr.Column():
            input_image.render()
            button = gr.Button("Predict")
            button.click(
                fn=process_image,
                inputs=[input_image],
                outputs=[predicted_depth, autostereogram, file_download],
            )
        with gr.Column():
            predicted_depth.render()
    with gr.Row():
        autostereogram.render()
    with gr.Row():
        with gr.Column():
            file_download.render()

if __name__ == "__main__":
    blocks.launch(debug=True)