dylanebert committed
Commit: 5e1c565
Parent: 8f22fd7

add pipeline
.gitignore ADDED
@@ -0,0 +1,163 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# Custom
+venv/
README.md CHANGED
@@ -1,3 +1,8 @@
 ---
 license: mit
+pipeline_tag: image-to-3d
 ---
+
+This custom pipeline encapsulates [LGM](https://huggingface.co/ashawkey/LGM).
+
+Original LGM paper: [LGM: Large Multi-View Gaussian Model for High-Resolution 3D Content Creation](https://huggingface.co/papers/2402.05054).
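For orientation (not part of the committed README), a minimal loading sketch. The repository id and the `trust_remote_code` handling are assumptions about how this custom pipeline is typically consumed and may differ across diffusers versions.

```python
# Hedged loading sketch; "dylanebert/LGM" is an assumed repository id.
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "dylanebert/LGM",                  # assumed repo id for this repository
    custom_pipeline="dylanebert/LGM",  # resolves to the pipeline.py added below
    trust_remote_code=True,
)
```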
lgm/config.json ADDED
@@ -0,0 +1,4 @@
+{
+  "_class_name": "LGM",
+  "_diffusers_version": "0.27.2"
+}
lgm/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79e5160e1fc45559515579a7e41ffc22606cf41c3ed8581b09dae9b4ce437099
+size 830126192
lgm/lgm.py ADDED
@@ -0,0 +1,783 @@
+import os
+import warnings
+from functools import partial
+from typing import Literal, Tuple
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from diff_gaussian_rasterization import (
+    GaussianRasterizationSettings,
+    GaussianRasterizer,
+)
+from diffusers import ConfigMixin, ModelMixin
+from torch import Tensor, nn
+
+
+def look_at(campos):
+    forward_vector = -campos / np.linalg.norm(campos, axis=-1)
+    up_vector = np.array([0, 1, 0], dtype=np.float32)
+    right_vector = np.cross(up_vector, forward_vector)
+    up_vector = np.cross(forward_vector, right_vector)
+    R = np.stack([right_vector, up_vector, forward_vector], axis=-1)
+    return R
+
+
+def orbit_camera(elevation, azimuth, radius=1):
+    elevation = np.deg2rad(elevation)
+    azimuth = np.deg2rad(azimuth)
+    x = radius * np.cos(elevation) * np.sin(azimuth)
+    y = -radius * np.sin(elevation)
+    z = radius * np.cos(elevation) * np.cos(azimuth)
+    campos = np.array([x, y, z])
+    T = np.eye(4, dtype=np.float32)
+    T[:3, :3] = look_at(campos)
+    T[:3, 3] = campos
+    return T
+
+
+def get_rays(pose, h, w, fovy, opengl=True):
+    x, y = torch.meshgrid(
+        torch.arange(w, device=pose.device),
+        torch.arange(h, device=pose.device),
+        indexing="xy",
+    )
+    x = x.flatten()
+    y = y.flatten()
+
+    cx = w * 0.5
+    cy = h * 0.5
+
+    focal = h * 0.5 / np.tan(0.5 * np.deg2rad(fovy))
+
+    camera_dirs = F.pad(
+        torch.stack(
+            [
+                (x - cx + 0.5) / focal,
+                (y - cy + 0.5) / focal * (-1.0 if opengl else 1.0),
+            ],
+            dim=-1,
+        ),
+        (0, 1),
+        value=(-1.0 if opengl else 1.0),
+    )
+
+    rays_d = camera_dirs @ pose[:3, :3].transpose(0, 1)
+    rays_o = pose[:3, 3].unsqueeze(0).expand_as(rays_d)
+
+    rays_o = rays_o.view(h, w, 3)
+    rays_d = F.normalize(rays_d, dim=-1).view(h, w, 3)
+
+    return rays_o, rays_d
+
+
+class GaussianRenderer:
+    def __init__(self, fovy, output_size):
+        self.output_size = output_size
+
+        self.bg_color = torch.tensor([1, 1, 1], dtype=torch.float32, device="cuda")
+
+        zfar = 2.5
+        znear = 0.1
+        self.tan_half_fov = np.tan(0.5 * np.deg2rad(fovy))
+        self.proj_matrix = torch.zeros(4, 4, dtype=torch.float32)
+        self.proj_matrix[0, 0] = 1 / self.tan_half_fov
+        self.proj_matrix[1, 1] = 1 / self.tan_half_fov
+        self.proj_matrix[2, 2] = (zfar + znear) / (zfar - znear)
+        self.proj_matrix[3, 2] = -(zfar * znear) / (zfar - znear)
+        self.proj_matrix[2, 3] = 1
+
+    def render(
+        self,
+        gaussians,
+        cam_view,
+        cam_view_proj,
+        cam_pos,
+        bg_color=None,
+        scale_modifier=1,
+    ):
+        device = gaussians.device
+        B, V = cam_view.shape[:2]
+
+        images = []
+        alphas = []
+        for b in range(B):
+
+            means3D = gaussians[b, :, 0:3].contiguous().float()
+            opacity = gaussians[b, :, 3:4].contiguous().float()
+            scales = gaussians[b, :, 4:7].contiguous().float()
+            rotations = gaussians[b, :, 7:11].contiguous().float()
+            rgbs = gaussians[b, :, 11:].contiguous().float()
+
+            for v in range(V):
+                view_matrix = cam_view[b, v].float()
+                view_proj_matrix = cam_view_proj[b, v].float()
+                campos = cam_pos[b, v].float()
+
+                raster_settings = GaussianRasterizationSettings(
+                    image_height=self.output_size,
+                    image_width=self.output_size,
+                    tanfovx=self.tan_half_fov,
+                    tanfovy=self.tan_half_fov,
+                    bg=self.bg_color if bg_color is None else bg_color,
+                    scale_modifier=scale_modifier,
+                    viewmatrix=view_matrix,
+                    projmatrix=view_proj_matrix,
+                    sh_degree=0,
+                    campos=campos,
+                    prefiltered=False,
+                    debug=False,
+                )
+
+                rasterizer = GaussianRasterizer(raster_settings=raster_settings)
+
+                rendered_image, _, _, rendered_alpha = rasterizer(
+                    means3D=means3D,
+                    means2D=torch.zeros_like(
+                        means3D, dtype=torch.float32, device=device
+                    ),
+                    shs=None,
+                    colors_precomp=rgbs,
+                    opacities=opacity,
+                    scales=scales,
+                    rotations=rotations,
+                    cov3D_precomp=None,
+                )
+
+                rendered_image = rendered_image.clamp(0, 1)
+
+                images.append(rendered_image)
+                alphas.append(rendered_alpha)
+
+        images = torch.stack(images, dim=0).view(
+            B, V, 3, self.output_size, self.output_size
+        )
+        alphas = torch.stack(alphas, dim=0).view(
+            B, V, 1, self.output_size, self.output_size
+        )
+
+        return {"image": images, "alpha": alphas}
+
+    def save_ply(self, gaussians, path):
+        assert gaussians.shape[0] == 1, "only support batch size 1"
+
+        from plyfile import PlyData, PlyElement
+
+        means3D = gaussians[0, :, 0:3].contiguous().float()
+        opacity = gaussians[0, :, 3:4].contiguous().float()
+        scales = gaussians[0, :, 4:7].contiguous().float()
+        rotations = gaussians[0, :, 7:11].contiguous().float()
+        shs = gaussians[0, :, 11:].unsqueeze(1).contiguous().float()
+
+        mask = opacity.squeeze(-1) >= 0.005
+        means3D = means3D[mask]
+        opacity = opacity[mask]
+        scales = scales[mask]
+        rotations = rotations[mask]
+        shs = shs[mask]
+
+        opacity = opacity.clamp(1e-6, 1 - 1e-6)
+        opacity = torch.log(opacity / (1 - opacity))
+        scales = torch.log(scales + 1e-8)
+        shs = (shs - 0.5) / 0.28209479177387814
+
+        xyzs = means3D.detach().cpu().numpy()
+        f_dc = (
+            shs.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
+        )
+        opacities = opacity.detach().cpu().numpy()
+        scales = scales.detach().cpu().numpy()
+        rotations = rotations.detach().cpu().numpy()
+
+        h = ["x", "y", "z"]
+        for i in range(f_dc.shape[1]):
+            h.append("f_dc_{}".format(i))
+        h.append("opacity")
+        for i in range(scales.shape[1]):
+            h.append("scale_{}".format(i))
+        for i in range(rotations.shape[1]):
+            h.append("rot_{}".format(i))
+
+        dtype_full = [(attribute, "f4") for attribute in h]
+
+        elements = np.empty(xyzs.shape[0], dtype=dtype_full)
+        attributes = np.concatenate((xyzs, f_dc, opacities, scales, rotations), axis=1)
+        elements[:] = list(map(tuple, attributes))
+        el = PlyElement.describe(elements, "vertex")
+
+        PlyData([el]).write(path)
+
+
+class LGM(ModelMixin, ConfigMixin):
+    def __init__(self):
+        super().__init__()
+
+        self.input_size = 256
+        self.splat_size = 128
+        self.output_size = 512
+        self.radius = 1.5
+        self.fovy = 49.1
+
+        self.unet = UNet(
+            9,
+            14,
+            down_channels=(64, 128, 256, 512, 1024, 1024),
+            down_attention=(False, False, False, True, True, True),
+            mid_attention=True,
+            up_channels=(1024, 1024, 512, 256, 128),
+            up_attention=(True, True, True, False, False),
+        )
+
+        self.conv = nn.Conv2d(14, 14, kernel_size=1)
+        self.gs = GaussianRenderer(self.fovy, self.output_size)
+
+        self.pos_act = lambda x: x.clamp(-1, 1)
+        self.scale_act = lambda x: 0.1 * F.softplus(x)
+        self.opacity_act = lambda x: torch.sigmoid(x)
+        self.rot_act = F.normalize
+        self.rgb_act = lambda x: 0.5 * torch.tanh(x) + 0.5
+
+    def prepare_default_rays(self, device, elevation=0):
+        cam_poses = np.stack(
+            [
+                orbit_camera(elevation, 0, radius=self.radius),
+                orbit_camera(elevation, 90, radius=self.radius),
+                orbit_camera(elevation, 180, radius=self.radius),
+                orbit_camera(elevation, 270, radius=self.radius),
+            ],
+            axis=0,
+        )
+        cam_poses = torch.from_numpy(cam_poses)
+
+        rays_embeddings = []
+        for i in range(cam_poses.shape[0]):
+            rays_o, rays_d = get_rays(
+                cam_poses[i], self.input_size, self.input_size, self.fovy
+            )
+            rays_plucker = torch.cat(
+                [torch.cross(rays_o, rays_d, dim=-1), rays_d], dim=-1
+            )
+            rays_embeddings.append(rays_plucker)
+
+        rays_embeddings = (
+            torch.stack(rays_embeddings, dim=0)
+            .permute(0, 3, 1, 2)
+            .contiguous()
+            .to(device)
+        )
+
+        return rays_embeddings
+
+    def forward(self, images):
+        B, V, C, H, W = images.shape
+        images = images.view(B * V, C, H, W)
+
+        x = self.unet(images)
+        x = self.conv(x)
+
+        x = x.reshape(B, 4, 14, self.splat_size, self.splat_size)
+
+        x = x.permute(0, 1, 3, 4, 2).reshape(B, -1, 14)
+
+        pos = self.pos_act(x[..., 0:3])
+        opacity = self.opacity_act(x[..., 3:4])
+        scale = self.scale_act(x[..., 4:7])
+        rotation = self.rot_act(x[..., 7:11])
+        rgbs = self.rgb_act(x[..., 11:])
+
+        gaussians = torch.cat([pos, opacity, scale, rotation, rgbs], dim=-1)
+
+        return gaussians
+
+
+# =============================================================================
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the Apache License, Version 2.0
+# found in the LICENSE file in the root directory of this source tree.
+
+# References:
+# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
+# https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py
+# =============================================================================
+XFORMERS_ENABLED = os.environ.get("XFORMERS_DISABLED") is None
+try:
+    if XFORMERS_ENABLED:
+        from xformers.ops import memory_efficient_attention, unbind
+
+        XFORMERS_AVAILABLE = True
+        warnings.warn("xFormers is available (Attention)")
+    else:
+        warnings.warn("xFormers is disabled (Attention)")
+        raise ImportError
+except ImportError:
+    XFORMERS_AVAILABLE = False
+    warnings.warn("xFormers is not available (Attention)")
+
+
+class Attention(nn.Module):
+    def __init__(
+        self,
+        dim: int,
+        num_heads: int = 8,
+        qkv_bias: bool = False,
+        proj_bias: bool = True,
+        attn_drop: float = 0.0,
+        proj_drop: float = 0.0,
+    ) -> None:
+        super().__init__()
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        self.scale = head_dim**-0.5
+
+        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(dim, dim, bias=proj_bias)
+        self.proj_drop = nn.Dropout(proj_drop)
+
+    def forward(self, x: Tensor) -> Tensor:
+        B, N, C = x.shape
+        qkv = (
+            self.qkv(x)
+            .reshape(B, N, 3, self.num_heads, C // self.num_heads)
+            .permute(2, 0, 3, 1, 4)
+        )
+
+        q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]
+        attn = q @ k.transpose(-2, -1)
+
+        attn = attn.softmax(dim=-1)
+        attn = self.attn_drop(attn)
+
+        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
+        x = self.proj(x)
+        x = self.proj_drop(x)
+        return x
+
+
+class MemEffAttention(Attention):
+    def forward(self, x: Tensor, attn_bias=None) -> Tensor:
+        if not XFORMERS_AVAILABLE:
+            if attn_bias is not None:
+                raise AssertionError("xFormers is required for using nested tensors")
+            return super().forward(x)
+
+        B, N, C = x.shape
+        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
+
+        q, k, v = unbind(qkv, 2)
+
+        x = memory_efficient_attention(q, k, v, attn_bias=attn_bias)
+        x = x.reshape([B, N, C])
+
+        x = self.proj(x)
+        x = self.proj_drop(x)
+        return x
+
+
+class CrossAttention(nn.Module):
+    def __init__(
+        self,
+        dim: int,
+        dim_q: int,
+        dim_k: int,
+        dim_v: int,
+        num_heads: int = 8,
+        qkv_bias: bool = False,
+        proj_bias: bool = True,
+        attn_drop: float = 0.0,
+        proj_drop: float = 0.0,
+    ) -> None:
+        super().__init__()
+        self.dim = dim
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        self.scale = head_dim**-0.5
+
+        self.to_q = nn.Linear(dim_q, dim, bias=qkv_bias)
+        self.to_k = nn.Linear(dim_k, dim, bias=qkv_bias)
+        self.to_v = nn.Linear(dim_v, dim, bias=qkv_bias)
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(dim, dim, bias=proj_bias)
+        self.proj_drop = nn.Dropout(proj_drop)
+
+    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
+        B, N, _ = q.shape
+        M = k.shape[1]
+
+        q = self.scale * self.to_q(q).reshape(
+            B, N, self.num_heads, self.dim // self.num_heads
+        ).permute(0, 2, 1, 3)
+        k = (
+            self.to_k(k)
+            .reshape(B, M, self.num_heads, self.dim // self.num_heads)
+            .permute(0, 2, 1, 3)
+        )
+        v = (
+            self.to_v(v)
+            .reshape(B, M, self.num_heads, self.dim // self.num_heads)
+            .permute(0, 2, 1, 3)
+        )
+
+        attn = q @ k.transpose(-2, -1)
+
+        attn = attn.softmax(dim=-1)
+        attn = self.attn_drop(attn)
+
+        x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
+        x = self.proj(x)
+        x = self.proj_drop(x)
+        return x
+
+
+class MemEffCrossAttention(CrossAttention):
+    def forward(self, q: Tensor, k: Tensor, v: Tensor, attn_bias=None) -> Tensor:
+        if not XFORMERS_AVAILABLE:
+            if attn_bias is not None:
+                raise AssertionError("xFormers is required for using nested tensors")
+            return super().forward(q, k, v)
+
+        B, N, _ = q.shape
+        M = k.shape[1]
+
+        q = self.scale * self.to_q(q).reshape(
+            B, N, self.num_heads, self.dim // self.num_heads
+        )
+        k = self.to_k(k).reshape(B, M, self.num_heads, self.dim // self.num_heads)
+        v = self.to_v(v).reshape(B, M, self.num_heads, self.dim // self.num_heads)
+
+        x = memory_efficient_attention(q, k, v, attn_bias=attn_bias)
+        x = x.reshape(B, N, -1)
+
+        x = self.proj(x)
+        x = self.proj_drop(x)
+        return x
+
+
+# =============================================================================
+# End of xFormers
+
+
+class MVAttention(nn.Module):
+    def __init__(
+        self,
+        dim: int,
+        num_heads: int = 8,
+        qkv_bias: bool = False,
+        proj_bias: bool = True,
+        attn_drop: float = 0.0,
+        proj_drop: float = 0.0,
+        groups: int = 32,
+        eps: float = 1e-5,
+        residual: bool = True,
+        skip_scale: float = 1,
+        num_frames: int = 4,
+    ):
+        super().__init__()
+
+        self.residual = residual
+        self.skip_scale = skip_scale
+        self.num_frames = num_frames
+
+        self.norm = nn.GroupNorm(
+            num_groups=groups, num_channels=dim, eps=eps, affine=True
+        )
+        self.attn = MemEffAttention(
+            dim, num_heads, qkv_bias, proj_bias, attn_drop, proj_drop
+        )
+
+    def forward(self, x):
+        BV, C, H, W = x.shape
+        B = BV // self.num_frames
+
+        res = x
+        x = self.norm(x)
+
+        x = (
+            x.reshape(B, self.num_frames, C, H, W)
+            .permute(0, 1, 3, 4, 2)
+            .reshape(B, -1, C)
+        )
+        x = self.attn(x)
+        x = (
+            x.reshape(B, self.num_frames, H, W, C)
+            .permute(0, 1, 4, 2, 3)
+            .reshape(BV, C, H, W)
+        )
+
+        if self.residual:
+            x = (x + res) * self.skip_scale
+        return x
+
+
+class ResnetBlock(nn.Module):
+    def __init__(
+        self,
+        in_channels: int,
+        out_channels: int,
+        resample: Literal["default", "up", "down"] = "default",
+        groups: int = 32,
+        eps: float = 1e-5,
+        skip_scale: float = 1,
+    ):
+        super().__init__()
+
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.skip_scale = skip_scale
+
+        self.norm1 = nn.GroupNorm(
+            num_groups=groups, num_channels=in_channels, eps=eps, affine=True
+        )
+        self.conv1 = nn.Conv2d(
+            in_channels, out_channels, kernel_size=3, stride=1, padding=1
+        )
+
+        self.norm2 = nn.GroupNorm(
+            num_groups=groups, num_channels=out_channels, eps=eps, affine=True
+        )
+        self.conv2 = nn.Conv2d(
+            out_channels, out_channels, kernel_size=3, stride=1, padding=1
+        )
+
+        self.act = F.silu
+
+        self.resample = None
+        if resample == "up":
+            self.resample = partial(F.interpolate, scale_factor=2.0, mode="nearest")
+        elif resample == "down":
+            self.resample = nn.AvgPool2d(kernel_size=2, stride=2)
+
+        self.shortcut = nn.Identity()
+        if self.in_channels != self.out_channels:
+            self.shortcut = nn.Conv2d(
+                in_channels, out_channels, kernel_size=1, bias=True
+            )
+
+    def forward(self, x):
+        res = x
+        x = self.norm1(x)
+        x = self.act(x)
+        if self.resample:
+            res = self.resample(res)
+            x = self.resample(x)
+        x = self.conv1(x)
+        x = self.norm2(x)
+        x = self.act(x)
+        x = self.conv2(x)
+        x = (x + self.shortcut(res)) * self.skip_scale
+        return x
+
+
+class DownBlock(nn.Module):
+    def __init__(
+        self,
+        in_channels: int,
+        out_channels: int,
+        num_layers: int = 1,
+        downsample: bool = True,
+        attention: bool = True,
+        attention_heads: int = 16,
+        skip_scale: float = 1,
+    ):
+        super().__init__()
+
+        nets = []
+        attns = []
+        for i in range(num_layers):
+            in_channels = in_channels if i == 0 else out_channels
+            nets.append(ResnetBlock(in_channels, out_channels, skip_scale=skip_scale))
+            if attention:
+                attns.append(
+                    MVAttention(out_channels, attention_heads, skip_scale=skip_scale)
+                )
+            else:
+                attns.append(None)
+        self.nets = nn.ModuleList(nets)
+        self.attns = nn.ModuleList(attns)
+
+        self.downsample = None
+        if downsample:
+            self.downsample = nn.Conv2d(
+                out_channels, out_channels, kernel_size=3, stride=2, padding=1
+            )
+
+    def forward(self, x):
+        xs = []
+        for attn, net in zip(self.attns, self.nets):
+            x = net(x)
+            if attn:
+                x = attn(x)
+            xs.append(x)
+        if self.downsample:
+            x = self.downsample(x)
+            xs.append(x)
+        return x, xs
+
+
+class MidBlock(nn.Module):
+    def __init__(
+        self,
+        in_channels: int,
+        num_layers: int = 1,
+        attention: bool = True,
+        attention_heads: int = 16,
+        skip_scale: float = 1,
+    ):
+        super().__init__()
+
+        nets = []
+        attns = []
+        nets.append(ResnetBlock(in_channels, in_channels, skip_scale=skip_scale))
+        for _ in range(num_layers):
+            nets.append(ResnetBlock(in_channels, in_channels, skip_scale=skip_scale))
+            if attention:
+                attns.append(
+                    MVAttention(in_channels, attention_heads, skip_scale=skip_scale)
+                )
+            else:
+                attns.append(None)
+        self.nets = nn.ModuleList(nets)
+        self.attns = nn.ModuleList(attns)
+
+    def forward(self, x):
+        x = self.nets[0](x)
+        for attn, net in zip(self.attns, self.nets[1:]):
+            if attn:
+                x = attn(x)
+            x = net(x)
+        return x
+
+
+class UpBlock(nn.Module):
+    def __init__(
+        self,
+        in_channels: int,
+        prev_out_channels: int,
+        out_channels: int,
+        num_layers: int = 1,
+        upsample: bool = True,
+        attention: bool = True,
+        attention_heads: int = 16,
+        skip_scale: float = 1,
+    ):
+        super().__init__()
+
+        nets = []
+        attns = []
+        for i in range(num_layers):
+            cin = in_channels if i == 0 else out_channels
+            cskip = prev_out_channels if (i == num_layers - 1) else out_channels
+
+            nets.append(ResnetBlock(cin + cskip, out_channels, skip_scale=skip_scale))
+            if attention:
+                attns.append(
+                    MVAttention(out_channels, attention_heads, skip_scale=skip_scale)
+                )
+            else:
+                attns.append(None)
+        self.nets = nn.ModuleList(nets)
+        self.attns = nn.ModuleList(attns)
+
+        self.upsample = None
+        if upsample:
+            self.upsample = nn.Conv2d(
+                out_channels, out_channels, kernel_size=3, stride=1, padding=1
+            )
+
+    def forward(self, x, xs):
+        for attn, net in zip(self.attns, self.nets):
+            res_x = xs[-1]
+            xs = xs[:-1]
+            x = torch.cat([x, res_x], dim=1)
+            x = net(x)
+            if attn:
+                x = attn(x)
+        if self.upsample:
+            x = F.interpolate(x, scale_factor=2.0, mode="nearest")
+            x = self.upsample(x)
+        return x
+
+
+class UNet(nn.Module):
+    def __init__(
+        self,
+        in_channels: int = 9,
+        out_channels: int = 14,
+        down_channels: Tuple[int, ...] = (64, 128, 256, 512, 1024, 1024),
+        down_attention: Tuple[bool, ...] = (False, False, False, True, True, True),
+        mid_attention: bool = True,
+        up_channels: Tuple[int, ...] = (1024, 1024, 512, 256, 128),
+        up_attention: Tuple[bool, ...] = (True, True, True, False, False),
+        layers_per_block: int = 2,
+        skip_scale: float = np.sqrt(0.5),
+    ):
+        super().__init__()
+
+        self.conv_in = nn.Conv2d(
+            in_channels, down_channels[0], kernel_size=3, stride=1, padding=1
+        )
+
+        down_blocks = []
+        cout = down_channels[0]
+        for i in range(len(down_channels)):
+            cin = cout
+            cout = down_channels[i]
+
+            down_blocks.append(
+                DownBlock(
+                    cin,
+                    cout,
+                    num_layers=layers_per_block,
+                    downsample=(i != len(down_channels) - 1),
+                    attention=down_attention[i],
+                    skip_scale=skip_scale,
+                )
+            )
+        self.down_blocks = nn.ModuleList(down_blocks)
+
+        self.mid_block = MidBlock(
+            down_channels[-1], attention=mid_attention, skip_scale=skip_scale
+        )
+
+        up_blocks = []
+        cout = up_channels[0]
+        for i in range(len(up_channels)):
+            cin = cout
+            cout = up_channels[i]
+            cskip = down_channels[max(-2 - i, -len(down_channels))]
+
+            up_blocks.append(
+                UpBlock(
+                    cin,
+                    cskip,
+                    cout,
+                    num_layers=layers_per_block + 1,
+                    upsample=(i != len(up_channels) - 1),
+                    attention=up_attention[i],
+                    skip_scale=skip_scale,
+                )
+            )
+        self.up_blocks = nn.ModuleList(up_blocks)
+        self.norm_out = nn.GroupNorm(
+            num_channels=up_channels[-1], num_groups=32, eps=1e-5
+        )
+        self.conv_out = nn.Conv2d(
+            up_channels[-1], out_channels, kernel_size=3, stride=1, padding=1
+        )
+
+    def forward(self, x):
+        x = self.conv_in(x)
+        xss = [x]
+        for block in self.down_blocks:
+            x, xs = block(x)
+            xss.extend(xs)
+        x = self.mid_block(x)
+        for block in self.up_blocks:
+            xs = xss[-len(block.nets) :]
+            xss = xss[: -len(block.nets)]
+            x = block(x, xs)
+        x = self.norm_out(x)
+        x = F.silu(x)
+        x = self.conv_out(x)
+        return x
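To make the shapes concrete, here is a minimal shape-check sketch (not part of the repository) of how the pieces above fit together: the UNet takes 9 channels per view (3 RGB plus the 6 Plücker ray channels from `prepare_default_rays`) and predicts 14 channels per splat pixel (3 position, 1 opacity, 3 scale, 4 rotation quaternion, 3 color). The sketch assumes a CUDA device and the dependencies imported at the top of lgm.py, and uses random weights and inputs.

```python
# Hedged shape-check sketch with random inputs; assumes CUDA and the classes above.
import torch

model = LGM().half().cuda().eval()

rays = model.prepare_default_rays("cuda")               # (4, 6, 256, 256) Plucker embeddings
rgb = torch.rand(4, 3, 256, 256, device="cuda")         # four normalized input views
x = torch.cat([rgb, rays], dim=1).unsqueeze(0).half()   # (1, 4, 9, 256, 256)

with torch.no_grad():
    gaussians = model(x)                                 # (1, 4 * 128 * 128, 14)

# per-Gaussian layout along the last dimension
pos, opacity, scale, rotation, color = gaussians.split([3, 1, 3, 4, 3], dim=-1)
```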
model_index.json ADDED
@@ -0,0 +1,8 @@
+{
+  "_class_name": "LGMPipeline",
+  "_diffusers_version": "0.27.2",
+  "lgm": [
+    "lgm",
+    "LGM"
+  ]
+}
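model_index.json is what lets `DiffusionPipeline.from_pretrained` assemble the pipeline: the `lgm` entry maps the `lgm/` subfolder to the `LGM` class, which is then passed to `LGMPipeline.__init__`. A hedged, by-hand equivalent, assuming the repository files are checked out locally and the repo root is the working directory:

```python
# Manual assembly sketch; the local-import style and paths are assumptions.
from lgm.lgm import LGM           # lgm/lgm.py from this commit
from pipeline import LGMPipeline  # pipeline.py from this commit

lgm = LGM.from_pretrained("lgm")  # reads lgm/config.json and the safetensors weights
pipe = LGMPipeline(lgm=lgm)       # moves the model to half precision on CUDA
```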
pipeline.py ADDED
@@ -0,0 +1,56 @@
+import numpy as np
+import rembg
+import torch
+import torch.nn.functional as F
+import torchvision.transforms.functional as TF
+from diffusers import DiffusionPipeline
+
+
+class LGMPipeline(DiffusionPipeline):
+    def __init__(self, lgm):
+        super().__init__()
+
+        self.bg_remover = rembg.new_session()
+
+        self.imagenet_default_mean = (0.485, 0.456, 0.406)
+        self.imagenet_default_std = (0.229, 0.224, 0.225)
+
+        lgm = lgm.half().cuda()
+        self.register_modules(lgm=lgm)
+
+    def save_ply(self, gaussians, path):
+        self.lgm.gs.save_ply(gaussians, path)
+
+    @torch.no_grad()
+    def __call__(self, images):
+        # remove the background from each of the four input views and
+        # composite onto white
+        unstacked = []
+        for i in range(4):
+            image = rembg.remove(images[i], session=self.bg_remover)
+            image = image.astype(np.float32) / 255.0
+            image = image[..., :3] * image[..., -1:] + (1 - image[..., -1:])
+            unstacked.append(image)
+        # 2x2 preview grid of the processed views (not consumed by the model)
+        grid = np.concatenate(
+            [
+                np.concatenate([unstacked[1], unstacked[2]], axis=1),
+                np.concatenate([unstacked[3], unstacked[0]], axis=1),
+            ],
+            axis=0,
+        )
+        # reorder the views to the order LGM expects, resize and normalize
+        images = np.stack(
+            [unstacked[1], unstacked[2], unstacked[3], unstacked[0]], axis=0
+        )
+        images = torch.from_numpy(images).permute(0, 3, 1, 2).float().cuda()
+        images = F.interpolate(
+            images,
+            size=(256, 256),
+            mode="bilinear",
+            align_corners=False,
+        )
+        images = TF.normalize(
+            images, self.imagenet_default_mean, self.imagenet_default_std
+        )
+
+        # concatenate the Plucker ray embeddings to form the 9-channel input
+        rays_embeddings = self.lgm.prepare_default_rays("cuda", elevation=0)
+        images = torch.cat([images, rays_embeddings], dim=1).unsqueeze(0)
+        images = images.half().cuda()
+
+        result = self.lgm(images)
+        return result
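Putting it together, a hedged usage sketch for `LGMPipeline.__call__`, continuing from the loading sketch after the README above. The file names and the assumption that four 256x256 object-centric views are available on disk are illustrative, not part of this commit.

```python
# End-to-end sketch; view_0.png .. view_3.png are assumed front/right/back/left views.
import numpy as np
from PIL import Image

views = [
    np.array(Image.open(f"view_{i}.png").convert("RGB").resize((256, 256)))
    for i in range(4)
]

gaussians = pipeline(views)                 # (1, N, 14) Gaussian parameters
pipeline.save_ply(gaussians, "object.ply")  # Gaussian-splat PLY for standard splat viewers
```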