Tohru127 committed on
Commit
bea6499
·
verified ·
1 Parent(s): e4b3e88

Upload main.py

Browse files
Files changed (1) hide show
  1. main.py +84 -0
main.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Monocular depth estimation + 3D reconstruction pipeline.

Loads an RGB image, predicts a depth map with the GLPN model fine-tuned on
NYU Depth v2, back-projects the RGB-D pair into a point cloud with Open3D,
cleans outliers, runs Poisson surface reconstruction, and saves the result
to mesh.ply. Intermediate results are shown in interactive windows.
"""
import matplotlib
matplotlib.use('TkAgg')  # TkAgg backend for better interactive-window compatibility
import matplotlib.pyplot as plt
import numpy as np
import open3d as o3d
import torch
from PIL import Image
from transformers import GLPNForDepthEstimation, GLPNImageProcessor


# Pretrained GLPN depth-estimation model (NYU Depth v2 checkpoint).
feature_extractor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-nyu")
model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-nyu")

# NOTE(review): hard-coded absolute path — consider taking this from argv.
image = Image.open(r"/Users/priyadharshinirameskumar/Desktop/venv/ROOM.jpg")

# Resize so both sides are multiples of 32 (required by the GLPN patching),
# capping the height at 480 px while preserving the aspect ratio.
new_height = 480 if image.height > 480 else image.height
new_height -= (new_height % 32)
new_width = int(new_height * image.width / image.height)
diff = new_width % 32
# Round the width to the nearest multiple of 32 (down if remainder < 16).
new_width = new_width - diff if diff < 16 else new_width + (32 - diff)
new_size = (new_width, new_height)
image = image.resize(new_size)

inputs = feature_extractor(images=image, return_tensors="pt")

# Inference only — no gradients needed.
with torch.no_grad():
    outputs = model(**inputs)
    predicted_depth = outputs.predicted_depth

# Scale depth to millimetres and crop a 16-px border where the prediction
# is unreliable; crop the RGB image identically so the two stay aligned.
pad = 16
output = predicted_depth.squeeze().cpu().numpy() * 1000.0
output = output[pad:-pad, pad:-pad]
image = image.crop((pad, pad, image.width - pad, image.height - pad))

# Preview: RGB image and depth map side by side for 5 seconds.
fig, ax = plt.subplots(1, 2)
ax[0].imshow(image)
ax[0].tick_params(left=False, bottom=False, labelleft=False, labelbottom=False)
ax[1].imshow(output, cmap='plasma')
ax[1].tick_params(left=False, bottom=False, labelleft=False, labelbottom=False)
plt.tight_layout()
plt.pause(5)

width, height = image.size

# Normalise depth into an 8-bit image and pair it with the RGB for Open3D.
depth_image = (output * 255 / np.max(output)).astype(np.uint8)
image = np.array(image)

depth_o3d = o3d.geometry.Image(depth_image)
image_o3d = o3d.geometry.Image(image)
rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
    image_o3d, depth_o3d, convert_rgb_to_intensity=False)

# Pinhole camera with an assumed 500-px focal length and a centred principal
# point — TODO confirm against the real camera intrinsics if known.
camera_intrinsic = o3d.camera.PinholeCameraIntrinsic()
camera_intrinsic.set_intrinsics(width, height, 500, 500, width / 2, height / 2)

pcd_raw = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, camera_intrinsic)
o3d.visualization.draw_geometries([pcd_raw])

# Drop statistical outliers, then estimate normals (needed for Poisson).
cl, ind = pcd_raw.remove_statistical_outlier(nb_neighbors=20, std_ratio=20.0)
pcd = pcd_raw.select_by_index(ind)

pcd.estimate_normals()
pcd.orient_normals_to_align_with_direction()

o3d.visualization.draw_geometries([pcd])

# Poisson surface reconstruction; [0] keeps the mesh, discarding densities.
mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
    pcd, depth=10, n_threads=1)[0]

# Rotate 180 deg about x so the mesh appears upright in the viewer.
rotation = mesh.get_rotation_matrix_from_xyz((np.pi, 0, 0))
mesh.rotate(rotation, center=(0, 0, 0))

# BUG FIX: original called draw_geometrics (no such attribute — AttributeError
# at runtime, so mesh.ply was never written); correct API is draw_geometries.
o3d.visualization.draw_geometries([mesh], mesh_show_back_face=True)

# mesh_uniform = mesh.paint_uniform_color([0.9, 0.8, 0.9])
# mesh_uniform.compute_vertex_normals()
# o3d.visualization.draw_geometries([mesh_uniform], mesh_show_back_face=True)

o3d.io.write_triangle_mesh("mesh.ply", mesh)