text stringlengths 0 4.99k |
|---|
# Compute 3D query points. |
# Equation: r(t) = o+td -> Building the \"t\" here. |
t_vals = tf.linspace(near, far, num_samples) |
if rand: |
# Inject uniform noise into sample space to make the sampling |
# continuous. |
shape = list(ray_origins.shape[:-1]) + [num_samples] |
noise = tf.random.uniform(shape=shape) * (far - near) / num_samples |
t_vals = t_vals + noise |
# Equation: r(t) = o + td -> Building the \"r\" here. |
rays = ray_origins[..., None, :] + ( |
ray_directions[..., None, :] * t_vals[..., None] |
) |
rays_flat = tf.reshape(rays, [-1, 3]) |
rays_flat = encode_position(rays_flat) |
return (rays_flat, t_vals) |
def map_fn(pose):
    """Converts a single camera pose into ray samples for training.

    Args:
        pose: The pose matrix of the camera.

    Returns:
        Tuple of flattened rays and sample points corresponding to the
        camera pose.
    """
    # One ray (origin, direction) per pixel of the H x W image plane.
    origins, directions = get_rays(height=H, width=W, focal=focal, pose=pose)
    # Sample points along each ray between the near/far planes and
    # positionally encode them; jitter (rand=True) keeps sampling continuous.
    return render_flat_rays(
        ray_origins=origins,
        ray_directions=directions,
        near=2.0,
        far=6.0,
        num_samples=NUM_SAMPLES,
        rand=True,
    )
# Create the training split.
# assumes images and poses are aligned along the first axis — TODO confirm
split_index = int(num_images * 0.8)  # 80/20 train/validation partition
# Split the images into training and validation.
train_images = images[:split_index]
val_images = images[split_index:]
# Split the poses into training and validation.
train_poses = poses[:split_index]
val_poses = poses[split_index:]
# Make the training pipeline.
train_img_ds = tf.data.Dataset.from_tensor_slices(train_images)
train_pose_ds = tf.data.Dataset.from_tensor_slices(train_poses)
# map_fn turns each pose into (flattened encoded rays, sample points),
# computed in parallel across poses.
train_ray_ds = train_pose_ds.map(map_fn, num_parallel_calls=AUTO)
# Pair each ground-truth image with the rays rendered from its pose.
training_ds = tf.data.Dataset.zip((train_img_ds, train_ray_ds))
train_ds = (
    # NOTE(review): the shuffle buffer equals BATCH_SIZE, so shuffling is
    # only local to each batch-sized window — confirm this is intentional.
    training_ds.shuffle(BATCH_SIZE)
    # drop_remainder guarantees every batch has exactly BATCH_SIZE examples.
    .batch(BATCH_SIZE, drop_remainder=True, num_parallel_calls=AUTO)
    .prefetch(AUTO)
)
# Make the validation pipeline (mirrors the training pipeline above).
val_img_ds = tf.data.Dataset.from_tensor_slices(val_images)
val_pose_ds = tf.data.Dataset.from_tensor_slices(val_poses)
val_ray_ds = val_pose_ds.map(map_fn, num_parallel_calls=AUTO)
validation_ds = tf.data.Dataset.zip((val_img_ds, val_ray_ds))
val_ds = (
    validation_ds.shuffle(BATCH_SIZE)
    .batch(BATCH_SIZE, drop_remainder=True, num_parallel_calls=AUTO)
    .prefetch(AUTO)
)
## NeRF model

The model is a multi-layer perceptron (MLP) with ReLU as its non-linearity.

An excerpt from the paper:

"We encourage the representation to be multiview-consistent by restricting the network to predict the volume density sigma as a function of only the location x, while allowing the RGB color c to be predicted as a function of both location and viewing direction. To accomplish this, the MLP first processes the input 3D ..." [quote truncated in this excerpt]

Here we have gone for a minimal implementation and have used 64 Dense units instead of 256 as mentioned in the paper.
def get_nerf_model(num_layers, num_pos): |
\"\"\"Generates the NeRF neural network. |
Args: |
num_layers: The number of MLP layers. |
num_pos: The number of dimensions of positional encoding. |
Returns: |
The [`tf.keras`](https://www.tensorflow.org/api_docs/python/tf/keras) model. |
\"\"\" |
inputs = keras.Input(shape=(num_pos, 2 * 3 * POS_ENCODE_DIMS + 3)) |
x = inputs |
for i in range(num_layers): |
x = layers.Dense(units=64, activation=\"relu\")(x) |
if i % 4 == 0 and i > 0: |
# Inject residual connection. |
x = layers.concatenate([x, inputs], axis=-1) |
outputs = layers.Dense(units=4)(x) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.