# NeuralBody: configs/nerf/nerf_313.yaml
task: 'if_nerf'
gpus: [0]
train_dataset_module: 'lib.datasets.light_stage.multi_view_dataset'
train_dataset_path: 'lib/datasets/light_stage/multi_view_dataset.py'
test_dataset_module: 'lib.datasets.light_stage.multi_view_dataset'
test_dataset_path: 'lib/datasets/light_stage/multi_view_dataset.py'
network_module: 'lib.networks.nerf'
network_path: 'lib/networks/nerf.py'
renderer_module: 'lib.networks.renderer.volume_renderer'
renderer_path: 'lib/networks/renderer/volume_renderer.py'
trainer_module: 'lib.train.trainers.nerf'
trainer_path: 'lib/train/trainers/nerf.py'
evaluator_module: 'lib.evaluators.if_nerf'
evaluator_path: 'lib/evaluators/if_nerf.py'
visualizer_module: 'lib.visualizers.if_nerf'
visualizer_path: 'lib/visualizers/if_nerf.py'
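# Each component above is declared as a module name plus the source file it lives in;
# the lib.* factory code presumably loads the file at *_path and instantiates the
# component under the *_module name (assumption based on the paired keys).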
human: 313
train_dataset:
    data_root: 'data/zju_mocap/CoreView_313'
    human: 'CoreView_313'
    ann_file: 'data/zju_mocap/CoreView_313/annots.npy'
    split: 'train'
test_dataset:
    data_root: 'data/zju_mocap/CoreView_313'
    human: 'CoreView_313'
    ann_file: 'data/zju_mocap/CoreView_313/annots.npy'
    split: 'test'
train:
    batch_size: 1
    collator: ''
    lr: 5e-4
    weight_decay: 0
    epoch: 400
    scheduler:
        type: 'exponential'
        gamma: 0.1
        decay_epochs: 1000
    num_workers: 16
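# Assumed schedule semantics: the 'exponential' scheduler decays the base lr of 5e-4
# by a factor of gamma over decay_epochs epochs; the exact decay rule is defined by
# the training code, not by this file.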
test:
    sampler: 'FrameSampler'
    batch_size: 1
    collator: ''
ep_iter: 500
save_ep: 1000
eval_ep: 1000
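# Assumed bookkeeping: ep_iter is the number of iterations per epoch (400 epochs of
# 500 iterations gives 200k iterations in total); save_ep and eval_ep are the
# checkpoint and evaluation intervals, measured in epochs.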
# training options
netdepth: 8
netwidth: 256
netdepth_fine: 8
netwidth_fine: 256
netchunk: 65536
chunk: 32768
no_batching: True
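# Coarse and fine MLPs both use 8 layers of width 256 (the standard NeRF defaults);
# netchunk and chunk cap how many points / rays are processed per forward pass to
# bound GPU memory. no_batching presumably follows the original NeRF meaning of
# sampling rays from a single image per iteration.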
# rendering options
use_viewdirs: True
i_embed: 0
xyz_res: 10
view_res: 4
raw_noise_std: 0
lindisp: False
N_samples: 64
N_importance: 128
N_rand: 1024
perturb: 1
white_bkgd: False
num_render_views: 50
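# These follow the original NeRF naming (assumed): xyz_res and view_res are the
# number of positional-encoding frequencies for points and view directions,
# N_samples / N_importance are the coarse / fine samples per ray, N_rand is the
# number of rays per training iteration, and perturb enables stratified jitter of
# sample depths. num_render_views sets how many novel views are rendered for demos.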
# data options
ratio: 0.5
num_train_frame: 1
smpl: 'smpl'
params: 'params'
voxel_size: [0.005, 0.005, 0.005] # dhw
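# Assumed data semantics: ratio downscales the input images to half resolution,
# num_train_frame is the number of video frames used for training, and voxel_size
# is the grid resolution (presumably in meters) ordered depth, height, width.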
# record options
log_interval: 1
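# The *_cfg blocks below are task-specific overrides: for novel-view rendering,
# novel-pose animation, and mesh extraction, the corresponding block is presumably
# merged on top of the base options above, swapping in the matching dataset,
# renderer, and visualizer.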
novel_view_cfg:
    train_dataset_module: 'lib.datasets.light_stage.multi_view_demo_dataset'
    train_dataset_path: 'lib/datasets/light_stage/multi_view_demo_dataset.py'
    test_dataset_module: 'lib.datasets.light_stage.multi_view_demo_dataset'
    test_dataset_path: 'lib/datasets/light_stage/multi_view_demo_dataset.py'
    renderer_module: 'lib.networks.renderer.volume_renderer'
    renderer_path: 'lib/networks/renderer/volume_renderer.py'
    visualizer_module: 'lib.visualizers.if_nerf_demo'
    visualizer_path: 'lib/visualizers/if_nerf_demo.py'
    test:
        sampler: ''
novel_pose_cfg:
    train_dataset_module: 'lib.datasets.light_stage.multi_view_perform_dataset'
    train_dataset_path: 'lib/datasets/light_stage/multi_view_perform_dataset.py'
    test_dataset_module: 'lib.datasets.light_stage.multi_view_perform_dataset'
    test_dataset_path: 'lib/datasets/light_stage/multi_view_perform_dataset.py'
    renderer_module: 'lib.networks.renderer.volume_renderer'
    renderer_path: 'lib/networks/renderer/volume_renderer.py'
    visualizer_module: 'lib.visualizers.if_nerf_perform'
    visualizer_path: 'lib/visualizers/if_nerf_perform.py'
    test:
        sampler: ''
mesh_cfg:
    train_dataset_module: 'lib.datasets.light_stage.multi_view_mesh_dataset'
    train_dataset_path: 'lib/datasets/light_stage/multi_view_mesh_dataset.py'
    test_dataset_module: 'lib.datasets.light_stage.multi_view_mesh_dataset'
    test_dataset_path: 'lib/datasets/light_stage/multi_view_mesh_dataset.py'
    network_module: 'lib.networks.latent_xyzc'
    network_path: 'lib/networks/latent_xyzc.py'
    renderer_module: 'lib.networks.renderer.volume_mesh_renderer'
    renderer_path: 'lib/networks/renderer/volume_mesh_renderer.py'
    visualizer_module: 'lib.visualizers.if_nerf_mesh'
    visualizer_path: 'lib/visualizers/if_nerf_mesh.py'
    mesh_th: 5
    test:
        sampler: 'FrameSampler'
        frame_sampler_interval: 1
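# mesh_th is presumably the density threshold used when extracting the surface mesh
# (e.g. via marching cubes), and frame_sampler_interval: 1 extracts a mesh for every
# frame selected by the FrameSampler.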