# SparseNeuS_demo_v1/confs/one2345_lod0_val_demo.conf
# - the lod1 geometry network uses an adaptive cost for the sparse cost regularization network
# - the lod1 rendering network uses depth-adaptive rendering
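# general: experiment output directory and the source folders to record with each run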
general {
base_exp_dir = exp/lod0 # !!! where you store the results and checkpoints to be used
recording = [
./,
./data,
./ops,
./models,
./loss
]
}
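# dataset: data paths, image scaling, and view selection for training / validation / testing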
dataset {
trainpath = ../
valpath = ../ # !!! where you store the validation data
testpath = ../
imgScale_train = 1.0
imgScale_test = 1.0
nviews = 5
clean_image = True
importance_sample = True
test_ref_views = [23]
# test dataset
test_n_views = 2
test_img_wh = [256, 256]
test_clip_wh = [0, 0]
test_scan_id = scan110
train_img_idx = [49, 50, 52, 53, 54, 56, 58] # alternative: [21, 22, 23, 24, 25]
test_img_idx = [51, 55, 57] # alternative: [32, 33, 34]
test_dir_comment = train
}
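# train: learning-rate schedule, ray batching, validation frequency, annealing, and loss weights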
train {
learning_rate = 2e-4
learning_rate_milestone = [100000, 150000, 200000]
learning_rate_factor = 0.5
end_iter = 200000
save_freq = 5000
val_freq = 1
val_mesh_freq = 1
report_freq = 100
N_rays = 512
validate_resolution_level = 4
anneal_start = 0
anneal_end = 25000
anneal_start_lod1 = 0
anneal_end_lod1 = 15000
use_white_bkgd = True
# Loss
# ! when training the lod1 network, do not apply this regularization for the first 10k steps, then enable it
sdf_igr_weight = 0.1
sdf_sparse_weight = 0.02 # 0.002 for lod1 network; 0.02 for lod0 network
sdf_decay_param = 100 # should not be too large; it determines the TSDF range
fg_bg_weight = 0.01 # 0.01 at first
bg_ratio = 0.3
if_fix_lod0_networks = False
}
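# model: per-lod SDF (geometry) networks, variance networks, rendering networks, and trainer sampling settings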
model {
num_lods = 1
sdf_network_lod0 {
lod = 0,
ch_in = 56, # the channel num of fused pyramid features
voxel_size = 0.02105263, # = 2/95 (previous value 0.02083333)
vol_dims = [96, 96, 96],
hidden_dim = 128,
cost_type = variance_mean,
d_pyramid_feature_compress = 16,
regnet_d_out = 16,
num_sdf_layers = 4,
# position embedding
multires = 6
}
sdf_network_lod1 {
lod = 1,
ch_in = 56, # the channel num of fused pyramid features
voxel_size = 0.0104712, # = 2/191 (previous value 0.01041667)
vol_dims = [192, 192, 192],
hidden_dim = 128,
cost_type = variance_mean,
d_pyramid_feature_compress = 8,
regnet_d_out = 16,
num_sdf_layers = 4,
# position embedding
multires = 6
}
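# variance networks: single learnable parameter controlling the sharpness of the
# SDF-to-density conversion (NeuS-style), one per lod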
variance_network {
init_val = 0.2
}
variance_network_lod1 {
init_val = 0.2
}
rendering_network {
in_geometry_feat_ch = 16
in_rendering_feat_ch = 56
anti_alias_pooling = True
}
rendering_network_lod1 {
in_geometry_feat_ch = 16 # default 8
in_rendering_feat_ch = 56
anti_alias_pooling = True
}
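# trainer: per-ray sampling settings (coarse / importance samples, outside samples, perturbation)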
trainer {
n_samples_lod0 = 64
n_importance_lod0 = 64
n_samples_lod1 = 64
n_importance_lod1 = 64
n_outside = 0 # set to 128 if render_outside_uniform_sampling is enabled
perturb = 1.0
alpha_type = div
}
}
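# Usage sketch (comment only, not part of the config). This is a HOCON document;
# the snippet below shows one way to load it with pyhocon and read a few values.
# The file path and variable names here are illustrative assumptions, not taken
# from the repository code.
#
#   from pyhocon import ConfigFactory
#
#   conf = ConfigFactory.parse_file("confs/one2345_lod0_val_demo.conf")
#   base_exp_dir = conf["general.base_exp_dir"]         # "exp/lod0"
#   n_rays = conf.get_int("train.N_rays")               # 512
#   vol_dims = conf["model.sdf_network_lod0.vol_dims"]  # [96, 96, 96]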