# LLFF / infer_upsample.py
# (Hugging Face page header removed — original residue: "SlekLi's picture /
#  Upload 3 files / 98a682c verified" — it was not valid Python.)
"""
infer_upsample.py
=================
ไฝฟ็”จ่ฎญ็ปƒๅฅฝ็š„ Transformer๏ผŒไปŽ็ฒ—ๅฐบๅบฆ๏ผˆLn๏ผ‰่‡ชๅ›žๅฝ’็”Ÿๆˆ็ป†ๅฐบๅบฆ๏ผˆL(n-1)๏ผ‰ใ€‚
ๆต็จ‹๏ผš
1. ่ฏปๅ–็ฒ—ๅฐบๅบฆ้‡ๅŒ–ๆ•ฐๆฎ๏ผˆ.npz๏ผ‰
2. ไธบๆฏไธช็ฒ—่Š‚็‚นๆž„้€ ๅ‰็ผ€ๅบๅˆ—๏ผˆparent + uncles๏ผ‰
3. ่‡ชๅ›žๅฝ’็”Ÿๆˆๅญ่Š‚็‚น๏ผˆ้‡ๅˆฐ role=EOS ๆˆ–่ถ…่ฟ‡ MAX_CHILDREN ๅˆ™ๅœๆญข๏ผ‰
4. ๅฐ†ๅญ่Š‚็‚น้‡ๅŒ–็ดขๅผ•่งฃ็ ไธบ็œŸๅฎžๅฑžๆ€ง๏ผˆๆŸฅ codebook๏ผ‰
5. ๅ†™ๅ‡บๆ–ฐ็š„ .ply ๆ–‡ไปถ
role ็ผ–็ ๏ผˆไธŽ่ฎญ็ปƒไธ€่‡ด๏ผ‰๏ผš
0 = parent 1 = uncle 2 = child 3 = EOS 4 = PAD
"""
import os
import argparse
import pickle
import numpy as np
import torch
import torch.nn.functional as F
from plyfile import PlyData, PlyElement
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
# Constants (must match build_sequences / train_transformer)
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
# Role codes carried by every token in a sequence.
ROLE_PARENT = 0   # prefix: the coarse parent node (coords zeroed)
ROLE_UNCLE = 1    # prefix: neighbouring coarse nodes around the parent
ROLE_CHILD = 2    # generated fine-scale child node
ROLE_EOS = 3      # end-of-children marker
ROLE_PAD = 4      # padding (training only; never generated at inference)
MAX_CHILDREN = 32   # hard cap on children generated per parent
MAX_UNCLES = 4      # max neighbouring coarse nodes included in the prefix
MAX_SEQ_LEN = 1 + MAX_UNCLES + MAX_CHILDREN + 1 # = 38
# Class counts for the sampled heads (codebook sizes / role vocabulary).
N_SCALE = 16384
N_ROT = 16384
N_DC = 4096
N_SH = 4096
N_ROLE = 4
# Structured dtype for one sequence token.
TOKEN_DTYPE = np.dtype([
    ('dx', np.float32),       # position offset from the parent, x
    ('dy', np.float32),       # position offset from the parent, y
    ('dz', np.float32),       # position offset from the parent, z
    ('scale_idx', np.int32),  # index into the scale codebook
    ('rot_idx', np.int32),    # index into the rotation codebook
    ('dc_idx', np.int32),     # index into the DC-color codebook
    ('sh_idx', np.int32),     # index into the SH-rest codebook
    ('opacity', np.float32),  # opacity stored directly (not codebook-indexed)
    ('role', np.uint8),       # role code, see ROLE_* above
])
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
# 1. ๅŠ ่ฝฝๆจกๅž‹
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
def load_model(ckpt_path: str, device: str = 'cpu'):
    """Load a trained SplitTransformer checkpoint and return it in eval mode.

    The checkpoint is expected to hold a 'config' dict (constructor kwargs)
    and a 'model_state' state dict; a bare state dict is also accepted.
    """
    from train_transformer import SplitTransformer
    checkpoint = torch.load(ckpt_path, map_location=device)
    config = checkpoint.get('config', {})
    net = SplitTransformer(**config).to(device)
    # Fall back to treating the whole checkpoint as a state dict.
    net.load_state_dict(checkpoint.get('model_state', checkpoint))
    net.eval()
    print(f"[load] {os.path.basename(ckpt_path)} "
          f"d_model={config.get('d_model')}, "
          f"n_layers={config.get('n_layers')}")
    return net
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
# 2. ๅŠ ่ฝฝ codebook
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
def load_codebooks(codebook_dir: str) -> dict:
    """Load the four attribute codebooks from `codebook_dir`.

    Each file is named `<name>_codebook.npz` and holds a 'codebook' array;
    returns {'scale', 'rotation', 'dc', 'sh'} mapped to float32 arrays.
    """
    books = {}
    for key in ('scale', 'rotation', 'dc', 'sh'):
        npz_file = os.path.join(codebook_dir, f"{key}_codebook.npz")
        books[key] = np.load(npz_file)['codebook'].astype(np.float32)
        print(f"[load] {key}_codebook: {books[key].shape}")
    return books
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
# 3. ๅŠ ่ฝฝ้‡ๅŒ–ๆ•ฐๆฎ
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
def load_quantized(npz_path: str) -> dict:
    """Read the coarse-scale quantized .npz into a plain dict of arrays.

    Opacities are squeezed from (N, 1) to (N,); the four index arrays and
    positions are passed through unchanged.
    """
    data = np.load(npz_path)
    out = {key: data[key] for key in
           ('scale_indices', 'rotation_indices', 'dc_indices',
            'sh_indices', 'positions')}
    out['opacities'] = data['opacities'].squeeze()
    return out
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
# 4. ๆž„้€ ๅ‰็ผ€ batch๏ผˆparent + uncles๏ผ‰
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
def make_prefix_batch(
    p_idx: int,
    quant: dict,
    max_uncles: int = MAX_UNCLES,
    device: str = 'cpu',
) -> tuple:
    """Build the prefix batch (parent + uncles) for coarse node `p_idx`.

    Returns (batch_dict, parent_pos); every tensor in batch_dict has shape
    (1, prefix_len). The parent token's coordinates are zeroed since child
    offsets are expressed relative to the parent.
    """
    n_total = quant['positions'].shape[0]
    parent_pos = quant['positions'][p_idx]
    # Parent token first, with its relative coordinates forced to zero.
    parent_tok = _make_np_token(p_idx, quant, parent_pos, ROLE_PARENT)
    parent_tok['dx'] = parent_tok['dy'] = parent_tok['dz'] = 0.0
    toks = [parent_tok]
    # Uncles: index-adjacent coarse nodes, half before / half after p_idx.
    half = max_uncles // 2
    for offset in (o for o in range(-half, half + 1) if o != 0):
        neighbour = p_idx + offset
        if not (0 <= neighbour < n_total):
            continue
        if len(toks) - 1 >= max_uncles:
            break
        toks.append(_make_np_token(neighbour, quant, parent_pos, ROLE_UNCLE))
    seq = np.array(toks, dtype=TOKEN_DTYPE)
    return _seq_to_batch(seq, device), parent_pos
def _make_np_token(gauss_idx: int, quant: dict,
                   parent_pos: np.ndarray, role: int) -> np.ndarray:
    """Build one structured token record for gaussian `gauss_idx`.

    Coordinates are stored as the offset from `parent_pos`; the record is a
    single np.void scalar of TOKEN_DTYPE.
    """
    offset = quant['positions'][gauss_idx] - parent_pos
    rec = np.zeros(1, dtype=TOKEN_DTYPE)[0]
    for field, value in zip(('dx', 'dy', 'dz'), offset):
        rec[field] = value
    rec['scale_idx'] = quant['scale_indices'][gauss_idx]
    rec['rot_idx'] = quant['rotation_indices'][gauss_idx]
    rec['dc_idx'] = quant['dc_indices'][gauss_idx]
    rec['sh_idx'] = quant['sh_indices'][gauss_idx]
    rec['opacity'] = quant['opacities'][gauss_idx]
    rec['role'] = role
    return rec
def _seq_to_batch(seq: np.ndarray, device: str) -> dict:
"""ๅฐ† numpy ๅบๅˆ—่ฝฌไธบๆจกๅž‹่พ“ๅ…ฅ dict๏ผŒbatch_size=1ใ€‚"""
L = len(seq)
xyz = np.stack([seq['dx'], seq['dy'], seq['dz']], axis=1) # (L, 3)
return {
'xyz': torch.tensor(xyz, device=device).float().unsqueeze(0),
'scale': torch.tensor(seq['scale_idx'].astype(np.int64), device=device).unsqueeze(0),
'rot': torch.tensor(seq['rot_idx'].astype(np.int64), device=device).unsqueeze(0),
'dc': torch.tensor(seq['dc_idx'].astype(np.int64), device=device).unsqueeze(0),
'sh': torch.tensor(seq['sh_idx'].astype(np.int64), device=device).unsqueeze(0),
'opacity': torch.tensor(seq['opacity'].astype(np.float32), device=device).unsqueeze(0),
'role': torch.tensor(seq['role'].astype(np.int64), device=device).unsqueeze(0),
'attn_mask': torch.ones(1, L, dtype=torch.bool, device=device),
# Dataset ้‡Œ็š„ไธคไธช loss_mask ๆŽจๆ–ญๆ—ถไธ้œ€่ฆ๏ผŒไฝ† forward ไธ็”จๅฎƒไปฌ๏ผŒๅฏ็œ็•ฅ
}
def _append_token(batch: dict, token_np: np.ndarray, device: str) -> dict:
"""ๅฐ†ๆ–ฐ้ข„ๆต‹็š„ token ๆ‹ผๆŽฅๅˆฐ batch ๆœซๅฐพ๏ผŒ็”จไบŽไธ‹ไธ€ๆญฅ่‡ชๅ›žๅฝ’ใ€‚"""
new_xyz = torch.tensor(
[[[token_np['dx'], token_np['dy'], token_np['dz']]]],
dtype=torch.float32, device=device
)
def cat(key, val, dtype):
new = torch.tensor([[val]], dtype=dtype, device=device)
return torch.cat([batch[key], new], dim=1)
return {
'xyz': torch.cat([batch['xyz'], new_xyz], dim=1),
'scale': cat('scale', int(token_np['scale_idx']), torch.int64),
'rot': cat('rot', int(token_np['rot_idx']), torch.int64),
'dc': cat('dc', int(token_np['dc_idx']), torch.int64),
'sh': cat('sh', int(token_np['sh_idx']), torch.int64),
'opacity': cat('opacity', float(token_np['opacity']), torch.float32),
'role': cat('role', int(token_np['role']), torch.int64),
'attn_mask': torch.cat([
batch['attn_mask'],
torch.ones(1, 1, dtype=torch.bool, device=device)
], dim=1),
}
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
# 5. ่‡ชๅ›žๅฝ’็”Ÿๆˆๅญ่Š‚็‚น
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
def generate_children(
    model: object,
    prefix_batch: dict,
    parent_pos: np.ndarray,
    max_children: int = MAX_CHILDREN,
    temperature: float = 0.8,
    top_k: int = 50,
    device: str = 'cpu',
) -> list:
    """
    Autoregressively sample child nodes given a prefix batch (parent + uncles).

    Each step first samples the role head:
        role=2 (child) -> sample the remaining feature heads, append to sequence
        role=3 (EOS)   -> terminate early
        anything else  -> model misbehaved (parent/uncle predicted), force stop

    Returns a list of dicts, one per child, with every token field plus
    'world_pos' (parent_pos + predicted offset).

    NOTE(review): assumes the model's outputs are shaped (B, L, C) for the
    classification heads, (B, L, 3) for 'xyz' and (B, L, 1) for 'opacity'
    — confirm against SplitTransformer.forward.
    """
    current_batch = prefix_batch
    children = []
    def _sample_cls(logits: torch.Tensor, n_classes: int) -> int:
        # Temperature-scaled, top-k-filtered categorical sampling.
        logits = logits / temperature
        if top_k > 0:
            k = min(top_k, n_classes)
            topk_vals, _ = torch.topk(logits, k)
            threshold = topk_vals[-1]  # k-th largest logit
            # Everything below the k-th largest logit gets zero probability.
            logits = logits.masked_fill(logits < threshold, float('-inf'))
        probs = F.softmax(logits, dim=-1)
        return int(torch.multinomial(probs, 1).item())
    for _ in range(max_children):
        with torch.no_grad():
            pred = model(current_batch)
        # โ”€โ”€ sample the role first โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
        role_logits = pred['role'][0, -1, :]  # (4,)
        pred_role = _sample_cls(role_logits, N_ROLE)
        if pred_role == ROLE_EOS:
            break  # model produced the end-of-children marker; stop
        if pred_role != ROLE_CHILD:
            # Predicted parent/uncle — model misbehaved; force-terminate.
            break
        # โ”€โ”€ role=child: sample the remaining feature heads โ”€โ”€โ”€โ”€โ”€โ”€
        pred_scale = _sample_cls(pred['scale'][0, -1, :], N_SCALE)
        pred_rot = _sample_cls(pred['rot'][0, -1, :], N_ROT)
        pred_dc = _sample_cls(pred['dc'][0, -1, :], N_DC)
        pred_sh = _sample_cls(pred['sh'][0, -1, :], N_SH)
        pred_xyz = pred['xyz'][0, -1, :].cpu().numpy()  # (3,) offset from parent
        pred_opa = float(pred['opacity'][0, -1, 0].cpu())
        # Record the generated child.
        child = {
            'dx': float(pred_xyz[0]),
            'dy': float(pred_xyz[1]),
            'dz': float(pred_xyz[2]),
            'scale_idx': pred_scale,
            'rot_idx': pred_rot,
            'dc_idx': pred_dc,
            'sh_idx': pred_sh,
            # Clip to a sane logit range to guard against regression outliers.
            'opacity': float(np.clip(pred_opa, -10, 10)),
            'role': ROLE_CHILD,
            'world_pos': parent_pos + pred_xyz,  # world-space position
        }
        children.append(child)
        # Append the new token to the sequence for the next generation step.
        np_token = np.zeros(1, dtype=TOKEN_DTYPE)
        np_token['dx'] = child['dx']
        np_token['dy'] = child['dy']
        np_token['dz'] = child['dz']
        np_token['scale_idx'] = pred_scale
        np_token['rot_idx'] = pred_rot
        np_token['dc_idx'] = pred_dc
        np_token['sh_idx'] = pred_sh
        np_token['opacity'] = child['opacity']
        np_token['role'] = ROLE_CHILD
        current_batch = _append_token(current_batch, np_token[0], device)
    return children
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
# 6. ๅ†™ๅ‡บ .ply
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
def children_to_ply(
    all_children: list,
    codebooks: dict,
    save_path: str,
    n_sh_rest: int = 45,
) -> None:
    """Decode generated children via codebook lookup and write a 3DGS .ply.

    Each child dict supplies 'world_pos', 'opacity' and the four codebook
    indices; scale/rotation/DC/SH-rest values come from table lookups.
    Does nothing (beyond a warning) when `all_children` is empty.
    """
    N = len(all_children)
    if not all_children:
        print("[write_ply] ่ญฆๅ‘Š๏ผšๆฒกๆœ‰ไปปไฝ•ๅญ่Š‚็‚น๏ผŒ่ทณ่ฟ‡ๅ†™ๅ‡บ")
        return
    print(f"[write_ply] ๅ…ฑ {N} ไธชๅญ่Š‚็‚น๏ผŒ่งฃ็ ๅนถๅ†™ๅ‡บ {save_path} ...")

    def gather(key: str, dtype) -> np.ndarray:
        return np.array([c[key] for c in all_children], dtype=dtype)

    positions = gather('world_pos', np.float32)
    # Quantization indices -> real attributes (codebook lookup).
    scales = codebooks['scale'][gather('scale_idx', np.int32)]        # (N, 3)
    rotations = codebooks['rotation'][gather('rot_idx', np.int32)]    # (N, 4)
    dc_vals = codebooks['dc'][gather('dc_idx', np.int32)]             # (N, 3)
    sh_rest = codebooks['sh'][gather('sh_idx', np.int32)]             # (N, 45)
    # Assemble the PLY vertex dtype.
    base_fields = [
        ('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
        ('opacity', 'f4'),
        ('scale_0', 'f4'), ('scale_1', 'f4'), ('scale_2', 'f4'),
        ('rot_0', 'f4'), ('rot_1', 'f4'), ('rot_2', 'f4'), ('rot_3', 'f4'),
        ('f_dc_0', 'f4'), ('f_dc_1', 'f4'), ('f_dc_2', 'f4'),
    ]
    rest_fields = [(f'f_rest_{i}', 'f4') for i in range(n_sh_rest)]
    vd = np.zeros(N, dtype=np.dtype(base_fields + rest_fields))
    for axis, name in enumerate(('x', 'y', 'z')):
        vd[name] = positions[:, axis]
    vd['opacity'] = gather('opacity', np.float32)
    for j in range(3):
        vd[f'scale_{j}'] = scales[:, j]
        vd[f'f_dc_{j}'] = dc_vals[:, j]
    for j in range(4):
        vd[f'rot_{j}'] = rotations[:, j]
    for i in range(n_sh_rest):
        vd[f'f_rest_{i}'] = sh_rest[:, i]
    os.makedirs(os.path.dirname(os.path.abspath(save_path)), exist_ok=True)
    PlyData([PlyElement.describe(vd, 'vertex')]).write(save_path)
    size_mb = os.path.getsize(save_path) / 1024 / 1024
    print(f"[write_ply] ๅฎŒๆˆ {size_mb:.2f} MB")
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
# 7. ไธปๆŽจๆ–ญๆต็จ‹
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
def infer_upsample(
    ckpt_path: str,
    quant_npz: str,
    codebook_dir: str,
    save_path: str,
    max_uncles: int = MAX_UNCLES,
    max_children: int = MAX_CHILDREN,
    temperature: float = 0.8,
    top_k: int = 50,
    device: str = 'auto',
    max_gaussians: int = -1,
) -> None:
    """Full coarse-to-fine inference pipeline.

    Loads model, codebooks and quantized coarse data, autoregressively
    generates children for every coarse node, then decodes and writes the
    fine-scale .ply to `save_path`.
    """
    if device == 'auto':
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f"[infer] device={device}")
    model = load_model(ckpt_path, device)
    codebooks = load_codebooks(codebook_dir)
    quant = load_quantized(quant_npz)
    N = quant['positions'].shape[0]
    if max_gaussians > 0:
        # Debug mode: only process the first `max_gaussians` coarse nodes.
        N = min(N, max_gaussians)
    print(f"[infer] ๅค„็† {N} ไธช็ฒ—่Š‚็‚น๏ผŒๆœ€ๅคš็”Ÿๆˆ {N * max_children} ไธชๅญ่Š‚็‚น")
    all_children = []
    total_generated = 0
    early_stop_count = 0
    for p_idx in range(N):
        if p_idx % 5000 == 0:
            print(f" ่ฟ›ๅบฆ๏ผš{p_idx}/{N} ๅทฒ็”Ÿๆˆๅญ่Š‚็‚น๏ผš{total_generated}")
        prefix_batch, parent_pos = make_prefix_batch(
            p_idx, quant, max_uncles=max_uncles, device=device
        )
        kids = generate_children(
            model, prefix_batch, parent_pos,
            max_children=max_children,
            temperature=temperature,
            top_k=top_k,
            device=device,
        )
        # Fewer than max_children means the model emitted EOS (or misbehaved).
        early_stop_count += int(len(kids) < max_children)
        all_children.extend(kids)
        total_generated += len(kids)
    print(f"\n[infer] ็”ŸๆˆๅฎŒๆˆ")
    print(f" ๆ€ปๅญ่Š‚็‚นๆ•ฐ๏ผš{total_generated}")
    print(f" ๅนณๅ‡ๆฏ็ฒ—่Š‚็‚นๅญ่Š‚็‚นๆ•ฐ๏ผš{total_generated / max(N, 1):.2f}")
    print(f" EOS ๆๅ‰็ปˆๆญขๆฌกๆ•ฐ๏ผš{early_stop_count} / {N} "
          f"({100 * early_stop_count / max(N, 1):.1f}%)")
    children_to_ply(all_children, codebooks, save_path)
    print(f"\n[infer] ๅฎŒๆˆ๏ผ่พ“ๅ‡บ โ†’ {save_path}")
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
# 8. CLI
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
def parse_args():
    """Build and evaluate the CLI argument parser."""
    parser = argparse.ArgumentParser(description="็”จ Transformer ไปŽ็ฒ—ๅฐบๅบฆ็”Ÿๆˆ็ป†ๅฐบๅบฆ 3DGS")
    parser.add_argument('--ckpt', required=True, help='ๆจกๅž‹ checkpoint ่ทฏๅพ„')
    parser.add_argument('--quant_npz', required=True, help='็ฒ—ๅฐบๅบฆ้‡ๅŒ–ๆ•ฐๆฎ .npz')
    parser.add_argument('--codebook_dir', required=True, help='codebook ็›ฎๅฝ•')
    parser.add_argument('--save_path', required=True, help='่พ“ๅ‡บ .ply ่ทฏๅพ„')
    parser.add_argument('--max_uncles', type=int, default=MAX_UNCLES)
    parser.add_argument('--max_children', type=int, default=MAX_CHILDREN)
    parser.add_argument('--temperature', type=float, default=0.8)
    parser.add_argument('--top_k', type=int, default=50)
    parser.add_argument('--device', default='auto')
    parser.add_argument('--max_gaussians', type=int, default=-1,
                        help='่ฐƒ่ฏ•็”จ๏ผšๅชๅค„็†ๅ‰ N ไธช็ฒ—่Š‚็‚น')
    return parser.parse_args()
if __name__ == '__main__':
    # CLI entry point: parse arguments and run the full inference pipeline.
    args = parse_args()
    infer_upsample(
        ckpt_path=args.ckpt,
        quant_npz=args.quant_npz,
        codebook_dir=args.codebook_dir,
        save_path=args.save_path,
        max_uncles=args.max_uncles,
        max_children=args.max_children,
        temperature=args.temperature,
        top_k=args.top_k,
        device=args.device,
        max_gaussians=args.max_gaussians,
    )