ClaireOzzz committed
Commit ad93de6
Parent: 5197c6f

added 3rd model

app.py CHANGED
@@ -9,6 +9,7 @@ from huggingface_hub import login, HfFileSystem, snapshot_download, HfApi, creat
 
 from app_train import create_training_demo
 from sdxl.app_inference import create_inference_demo
+from depthgltf.app_visualisation import create_visual_demo
 
 css="""
 #col-container {max-width: 780px; margin-left: auto; margin-right: auto;}
@@ -86,9 +87,7 @@ with gr.Blocks(css=css) as demo:
     with gr.Tab("Generation"):
         create_inference_demo()
     with gr.Tab("Visualisation"):
-        gr.Markdown('''
-        - You can use this tab to upload models later if you choose not to upload models in training time or if upload in training time failed.
-        ''')
+        create_visual_demo()
 
 
 demo.queue(max_size=1).launch(debug=True, share=True)
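The new tab wires in the visualiser through a `create_visual_demo()` factory, mirroring `create_training_demo()` and `create_inference_demo()`. Note that the import targets `depthgltf.app_visualisation`, while the module added below is `depthgltf/app_visualisations.py` (extra "s") and builds a standalone `gr.Interface` rather than defining that factory. A minimal sketch of a shim that would satisfy the import, assuming Gradio 3's `Blocks.render()` embedding pattern; the shim module and its body are assumptions, not part of this commit:

```python
# depthgltf/app_visualisation.py -- hypothetical shim, not part of this commit.
# The name matches the import in app.py; the body assumes Gradio 3.x, where an
# Interface (a Blocks subclass) can be rendered into an open Blocks context.
from depthgltf.app_visualisations import iface  # the Interface defined below


def create_visual_demo():
    # Render the depth-estimation Interface inside whatever gr.Blocks
    # context is currently open (here: the "Visualisation" tab in app.py).
    iface.render()
```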
depthgltf/.gitattributes ADDED
@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
depthgltf/.gitignore ADDED
@@ -0,0 +1,46 @@
+# Python build
+.eggs/
+gradio.egg-info/*
+!gradio.egg-info/requires.txt
+!gradio.egg-info/PKG-INFO
+dist/
+*.pyc
+__pycache__/
+*.py[cod]
+*$py.class
+build/
+
+# JS build
+gradio/templates/frontend
+# Secrets
+.env
+
+# Gradio run artifacts
+*.db
+*.sqlite3
+gradio/launches.json
+flagged/
+gradio_cached_examples/
+
+# Tests
+.coverage
+coverage.xml
+test.txt
+
+# Demos
+demo/tmp.zip
+demo/files/*.avi
+demo/files/*.mp4
+
+# Etc
+.idea/*
+.DS_Store
+*.bak
+workspace.code-workspace
+*.h5
+.vscode/
+
+# log files
+.pnpm-debug.log
+venv/
+*.db-journal
depthgltf/README.md ADDED
@@ -0,0 +1,12 @@
+---
+title: Dpt Depth Estimation + 3D
+emoji: ⚡
+colorFrom: blue
+colorTo: red
+sdk: gradio
+sdk_version: 3.0b8
+app_file: app.py
+pinned: false
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
depthgltf/app_visualisations.py ADDED
@@ -0,0 +1,122 @@
+import gradio as gr
+from transformers import DPTFeatureExtractor, DPTForDepthEstimation
+import torch
+import numpy as np
+from PIL import Image
+import open3d as o3d
+from pathlib import Path
+import os
+
+feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
+model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
+
+
+def process_image(image_path):
+    image_path = Path(image_path)
+    image_raw = Image.open(image_path)
+    # resize to 800 px wide, preserving the aspect ratio
+    image = image_raw.resize(
+        (800, int(800 * image_raw.size[1] / image_raw.size[0])),
+        Image.Resampling.LANCZOS)
+
+    # prepare image for the model
+    encoding = feature_extractor(image, return_tensors="pt")
+
+    # forward pass
+    with torch.no_grad():
+        outputs = model(**encoding)
+        predicted_depth = outputs.predicted_depth
+
+    # interpolate to original size
+    prediction = torch.nn.functional.interpolate(
+        predicted_depth.unsqueeze(1),
+        size=image.size[::-1],
+        mode="bicubic",
+        align_corners=False,
+    ).squeeze()
+    output = prediction.cpu().numpy()
+    depth_image = (output * 255 / np.max(output)).astype('uint8')
+    img = Image.fromarray(depth_image)
+    try:
+        gltf_path = create_3d_obj(np.array(image), depth_image, image_path)
+    except Exception:
+        # retry with a shallower Poisson octree before giving up
+        try:
+            gltf_path = create_3d_obj(
+                np.array(image), depth_image, image_path, depth=8)
+        except Exception as e:
+            raise RuntimeError("Error reconstructing 3D model") from e
+    return [img, gltf_path, gltf_path]
+
+
+def create_3d_obj(rgb_image, depth_image, image_path, depth=10):
+    depth_o3d = o3d.geometry.Image(depth_image)
+    image_o3d = o3d.geometry.Image(rgb_image)
+    rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
+        image_o3d, depth_o3d, convert_rgb_to_intensity=False)
+    w = int(depth_image.shape[1])
+    h = int(depth_image.shape[0])
+
+    camera_intrinsic = o3d.camera.PinholeCameraIntrinsic()
+    camera_intrinsic.set_intrinsics(w, h, 500, 500, w/2, h/2)
+
+    pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
+        rgbd_image, camera_intrinsic)
+
+    print('estimating normals')
+    pcd.normals = o3d.utility.Vector3dVector(
+        np.zeros((1, 3)))  # invalidate existing normals
+    pcd.estimate_normals(
+        search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=30))
+    pcd.orient_normals_towards_camera_location(
+        camera_location=np.array([0., 0., 1000.]))
+    # flip the cloud into glTF's right-handed, y-up convention
+    pcd.transform([[1, 0, 0, 0],
+                   [0, -1, 0, 0],
+                   [0, 0, -1, 0],
+                   [0, 0, 0, 1]])
+    pcd.transform([[-1, 0, 0, 0],
+                   [0, 1, 0, 0],
+                   [0, 0, 1, 0],
+                   [0, 0, 0, 1]])
+
+    print('run Poisson surface reconstruction')
+    with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug):
+        mesh_raw, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
+            pcd, depth=depth, width=0, scale=1.1, linear_fit=True)
+
+    voxel_size = max(mesh_raw.get_max_bound() - mesh_raw.get_min_bound()) / 256
+    print(f'voxel_size = {voxel_size:e}')
+    mesh = mesh_raw.simplify_vertex_clustering(
+        voxel_size=voxel_size,
+        contraction=o3d.geometry.SimplificationContraction.Average)
+
+    # vertices_to_remove = densities < np.quantile(densities, 0.001)
+    # mesh.remove_vertices_by_mask(vertices_to_remove)
+    bbox = pcd.get_axis_aligned_bounding_box()
+    mesh_crop = mesh.crop(bbox)
+    gltf_path = f'./{image_path.stem}.gltf'
+    o3d.io.write_triangle_mesh(
+        gltf_path, mesh_crop, write_triangle_uvs=True)
+    return gltf_path
+
+
+title = "Demo: zero-shot depth estimation with DPT + 3D point cloud"
+description = "This demo is a variation on the original <a href='https://huggingface.co/spaces/nielsr/dpt-depth-estimation' target='_blank'>DPT demo</a>. It uses the DPT model to predict the depth of an image and reconstructs a 3D mesh from the resulting point cloud."
+examples = [["examples/" + img] for img in os.listdir("examples/")]
+
+iface = gr.Interface(fn=process_image,
+                     inputs=[gr.Image(
+                         type="filepath", label="Input Image")],
+                     outputs=[gr.Image(label="predicted depth", type="pil"),
+                              gr.Model3D(label="3D mesh reconstruction", clear_color=[
+                                  1.0, 1.0, 1.0, 1.0]),
+                              gr.File(label="3D glTF")],
+                     title=title,
+                     description=description,
+                     examples=examples,
+                     allow_flagging="never",
+                     cache_examples=False)
+
+if __name__ == "__main__":
+    iface.launch(debug=True, enable_queue=False, share=True)
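For reference, the pipeline can be exercised without the UI. A minimal sketch, assuming it is run from the `depthgltf/` directory so the relative `examples/` path used at module import resolves; the example filename is taken from this commit:

```python
# Smoke-test the depth -> mesh pipeline directly, bypassing Gradio.
from app_visualisations import process_image

depth_img, gltf_path, _ = process_image(
    "examples/joel-muniz-KodMXENNaas-unsplash.jpg")
depth_img.save("depth.png")          # 8-bit depth map as PNG
print("mesh written to", gltf_path)  # ./<image stem>.gltf
```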
depthgltf/examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg ADDED
depthgltf/examples/2-ronan-furuta-cvM7AC22dSI-unsplash.jpg ADDED
depthgltf/examples/3-artem-beliaikin-vyxOD0NuJbs-unsplash.jpg ADDED
depthgltf/examples/alisa-anton-PXgXLgDPv6w-unsplash.jpg ADDED
depthgltf/examples/joel-muniz-KodMXENNaas-unsplash.jpg ADDED
depthgltf/examples/opollo-photography-nxy9wFUiksg-unsplash.jpg ADDED
depthgltf/examples/zeynep-sumer-HE2nWVI62BY-unsplash.jpg ADDED
depthgltf/packages.txt ADDED
@@ -0,0 +1 @@
+libgl1-mesa-glx
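`libgl1-mesa-glx` supplies `libGL.so.1`, which open3d loads at import time; on a headless Debian-based Space image without it, importing open3d typically fails with a missing-shared-object error. A one-line sanity check (the failure mode described is an assumption based on common headless setups, not something this commit documents):

```python
# If libGL.so.1 is missing, this import raises an ImportError on Linux.
import open3d as o3d
print(o3d.__version__)
```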
depthgltf/requirements.txt ADDED
@@ -0,0 +1,7 @@
+torch
+git+https://github.com/nielsrogge/transformers.git@add_dpt_redesign#egg=transformers
+numpy
+Pillow
+gradio
+jinja2
+open3d