# Dataset / rendering
dataset: blender              # NeRF synthetic (Blender) scenes
batching: single_image        # draw each ray batch from a single training image
factor: 0                     # image downsampling factor (0 = full resolution)
num_coarse_samples: 64        # samples per ray for the coarse network
num_fine_samples: 128         # additional samples per ray for the fine network
use_viewdirs: true            # condition color prediction on viewing direction
white_bkgd: true              # composite renders onto a white background
batch_size: 1024              # rays per gradient step
randomized: true              # stratified sampling with random jitter

# Training schedule
max_steps: 500000
print_every: 100              # log training stats every 100 steps
render_every: 5000            # render a validation view every 5000 steps
save_every: 5000              # write a checkpoint every 5000 steps

# Semantic-consistency (CLIP) loss
use_semantic_loss: true
clip_model_name: openai/clip-vit-base-patch32   # Hugging Face CLIP checkpoint
clip_output_dtype: float32
sc_loss_factor: 4             # presumably the downscaling factor for views rendered for the CLIP loss
sc_loss_every: 16             # apply the semantic loss once every 16 steps
sc_loss_mult: 10              # weight of the semantic loss term in the total loss

# Few-shot setting
few_shot: 8                   # train with only 8 input views
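
# ----------------------------------------------------------------------------
# A minimal sketch (not this repo's actual code) of how the semantic-consistency
# settings above are typically wired into a DietNeRF-style training step, kept
# in comments so the file remains valid YAML. The names `rendered_view`,
# `target_view`, `mse_loss`, and `step` are hypothetical placeholders:
#
#   import jax.numpy as jnp
#   from transformers import FlaxCLIPModel
#
#   clip = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
#
#   def semantic_loss(rendered_view, target_view):
#       # Embed both images with CLIP and penalize embedding disagreement.
#       emb = clip.get_image_features(
#           pixel_values=jnp.stack([rendered_view, target_view]))
#       emb = emb / jnp.linalg.norm(emb, axis=-1, keepdims=True)
#       return -jnp.sum(emb[0] * emb[1])  # negative cosine similarity
#
#   if use_semantic_loss and step % sc_loss_every == 0:
#       loss = mse_loss + sc_loss_mult * semantic_loss(rendered_view, target_view)
#   else:
#       loss = mse_loss
#
# Applying the CLIP term only every `sc_loss_every` steps amortizes the cost of
# rendering full views and running the CLIP encoder during training.
# ----------------------------------------------------------------------------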