text stringlengths 0 4.99k |
|---|
return keras.Model(inputs=inputs, outputs=outputs) |
def render_rgb_depth(model, rays_flat, t_vals, rand=True, train=True):
    """Render an RGB image and a depth map from NeRF model predictions.

    Args:
        model: MLP trained to predict per-sample (r, g, b, sigma) values
            for the volumetric scene.
        rays_flat: Flattened, encoded rays fed as input to the NeRF model.
        t_vals: Sample positions along each ray.
        rand: Whether the randomised (stratified) sampling strategy was used;
            controls the broadcasting shape of the interval padding.
        train: If True, call the model directly (keeps the call inside the
            gradient tape); otherwise use `model.predict`.

    Returns:
        Tuple of (rgb image, depth map).
    """
    # Query the network and unflatten to (batch, height, width, samples, 4).
    raw = model(rays_flat) if train else model.predict(rays_flat)
    raw = tf.reshape(raw, shape=(BATCH_SIZE, H, W, NUM_SAMPLES, 4))

    # First three channels are colour, the last is volume density.
    rgb = tf.sigmoid(raw[..., :-1])
    sigma_a = tf.nn.relu(raw[..., -1])

    # Distances between adjacent samples, padded with a huge "far" interval
    # so the last sample absorbs all remaining transmittance.
    delta = t_vals[..., 1:] - t_vals[..., :-1]
    if rand:
        # t_vals is per-pixel here, so pad with a (batch, H, W, 1) tail.
        delta = tf.concat(
            [delta, tf.broadcast_to([1e10], shape=(BATCH_SIZE, H, W, 1))], axis=-1
        )
        alpha = 1.0 - tf.exp(-sigma_a * delta)
    else:
        # t_vals is shared across pixels; broadcast it over H and W.
        delta = tf.concat(
            [delta, tf.broadcast_to([1e10], shape=(BATCH_SIZE, 1))], axis=-1
        )
        alpha = 1.0 - tf.exp(-sigma_a * delta[:, None, None, :])

    # Transmittance: exclusive cumulative product of (1 - alpha); the tiny
    # epsilon keeps the product from collapsing to exactly zero.
    transmittance = tf.math.cumprod(1.0 - alpha + 1e-10, axis=-1, exclusive=True)
    weights = alpha * transmittance

    # Composite colours along each ray, and compute the expected depth.
    rgb = tf.reduce_sum(weights[..., None] * rgb, axis=-2)
    if rand:
        depth_map = tf.reduce_sum(weights * t_vals, axis=-1)
    else:
        depth_map = tf.reduce_sum(weights * t_vals[:, None, None], axis=-1)
    return (rgb, depth_map)
Training |
The training step is implemented as part of a custom `keras.Model` subclass so that we can make use of the `model.fit()` functionality.
class NeRF(keras.Model): |
def __init__(self, nerf_model): |
super().__init__() |
self.nerf_model = nerf_model |
def compile(self, optimizer, loss_fn): |
super().compile() |
self.optimizer = optimizer |
self.loss_fn = loss_fn |
self.loss_tracker = keras.metrics.Mean(name=\"loss\") |
self.psnr_metric = keras.metrics.Mean(name=\"psnr\") |
def train_step(self, inputs): |
# Get the images and the rays. |
(images, rays) = inputs |
(rays_flat, t_vals) = rays |
with tf.GradientTape() as tape: |
# Get the predictions from the model. |
rgb, _ = render_rgb_depth( |
model=self.nerf_model, rays_flat=rays_flat, t_vals=t_vals, rand=True |
) |
loss = self.loss_fn(images, rgb) |
# Get the trainable variables. |
trainable_variables = self.nerf_model.trainable_variables |
# Get the gradeints of the trainiable variables with respect to the loss. |
gradients = tape.gradient(loss, trainable_variables) |
# Apply the grads and optimize the model. |
self.optimizer.apply_gradients(zip(gradients, trainable_variables)) |
# Get the PSNR of the reconstructed images and the source images. |
psnr = tf.image.psnr(images, rgb, max_val=1.0) |
# Compute our own metrics |
self.loss_tracker.update_state(loss) |
self.psnr_metric.update_state(psnr) |
return {\"loss\": self.loss_tracker.result(), \"psnr\": self.psnr_metric.result()} |
def test_step(self, inputs): |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.